[r-cran-vgam] 15/63: Import Upstream version 0.7-9
Andreas Tille
tille at debian.org
Tue Jan 24 13:54:23 UTC 2017
This is an automated email from the git hooks/post-receive script.
tille pushed a commit to branch master
in repository r-cran-vgam.
commit c24ec82feafc51ca1c85035028d40b3c203a56f3
Author: Andreas Tille <tille at debian.org>
Date: Tue Jan 24 14:16:48 2017 +0100
Import Upstream version 0.7-9
---
DESCRIPTION | 20 +-
NAMESPACE | 80 +-
NEWS | 61 +-
R/aamethods.q | 300 ++--
R/bAIC.q | 53 +-
R/build.terms.s | 50 -
R/build.terms.vlm.q | 74 +-
R/calibrate.q | 14 +-
R/cao.R | 8 +-
R/cao.fit.q | 228 ++-
R/coef.vlm.q | 158 +-
R/cqo.R | 4 +-
R/cqo.fit.q | 57 +-
R/family.aunivariate.q | 231 +--
R/family.basics.q | 131 +-
R/family.binomial.q | 502 ++++---
R/family.bivariate.q | 246 ++--
R/family.categorical.q | 437 ++++--
R/family.censored.q | 10 +-
R/family.circular.q | 64 +-
R/family.extremes.q | 1348 ++++++++---------
R/family.fishing.q | 4 +-
R/family.functions.q | 7 +-
R/family.genetic.q | 808 ++++++-----
R/family.glmgam.q | 177 +--
R/family.mixture.q | 36 +-
R/family.nonlinear.q | 12 +-
R/family.normal.q | 265 ++--
R/family.positive.q | 68 +-
R/family.qreg.q | 496 ++++++-
R/family.rcqo.q | 10 +-
R/family.rrr.q | 881 +++++-------
R/family.survival.q | 82 +-
R/family.ts.q | 18 +-
R/family.univariate.q | 2961 ++++++++++++++++++++++++--------------
R/family.zeroinf.q | 184 ++-
R/formula.vlm.q | 185 +++
R/links.q | 18 +-
R/model.matrix.vglm.q | 415 +++---
R/plot.vglm.q | 608 +++-----
R/predict.vgam.q | 21 +-
R/predict.vglm.q | 229 ++-
R/predict.vlm.q | 378 +++--
R/print.summary.others.q | 29 -
R/qrrvglm.control.q | 32 +-
R/qtplot.q | 18 +-
R/residuals.vlm.q | 75 +
R/rrvglm.R | 24 +-
R/rrvglm.control.q | 22 +-
R/rrvglm.fit.q | 168 +--
R/s.vam.q | 45 +-
R/summary.others.q | 35 -
R/summary.vgam.q | 6 +-
R/summary.vglm.q | 26 +-
R/summary.vlm.q | 24 +-
R/uqo.R | 24 +-
R/vgam.R | 12 +-
R/vgam.control.q | 19 +-
R/vgam.fit.q | 100 +-
R/vgam.match.q | 2 +-
R/vglm.R | 10 +-
R/vglm.control.q | 28 +-
R/vglm.fit.q | 160 +-
R/vlm.R | 30 +-
R/vlm.wfit.q | 76 +-
R/vsmooth.spline.q | 44 +-
data/olympic.txt | 88 ++
man/AA.Aa.aa.Rd | 15 +-
man/AB.Ab.aB.ab.Rd | 5 +-
man/AB.Ab.aB.ab2.Rd | 7 +-
man/ABO.Rd | 32 +-
man/AICvlm.Rd | 13 +-
man/BratUC.Rd | 2 +-
man/Coef.rrvglm-class.Rd | 1 -
man/Coef.rrvglm.Rd | 1 -
man/CommonVGAMffArguments.Rd | 30 +-
man/DagumUC.Rd | 8 +-
man/DeLury.Rd | 4 +
man/FiskUC.Rd | 18 +-
man/G1G2G3.Rd | 12 +-
man/Inv.gaussian.Rd | 11 +-
man/InvlomaxUC.Rd | 14 +-
man/InvparalogisticUC.Rd | 14 +-
man/Links.Rd | 1 -
man/LomaxUC.Rd | 14 +-
man/Max.Rd | 1 -
man/MaxwellUC.Rd | 8 +-
man/Opt.Rd | 1 -
man/ParalogisticUC.Rd | 14 +-
man/Pareto.Rd | 8 +-
man/ParetoIVUC.Rd | 12 +-
man/RayleighUC.Rd | 8 +-
man/SinmadUC.Rd | 14 +-
man/Tol.Rd | 1 -
man/VGAM-package.Rd | 19 +-
man/acat.Rd | 1 -
man/alaplace3.Rd | 58 +-
man/alaplaceUC.Rd | 61 +-
man/amlbinomial.Rd | 6 +-
man/amlexponential.Rd | 6 +-
man/{alsqreg.Rd => amlnormal.Rd} | 34 +-
man/amlpoisson.Rd | 6 +-
man/auuc.Rd | 3 +-
man/benini.Rd | 6 +-
man/beniniUC.Rd | 8 +-
man/beta.ab.Rd | 2 +-
man/betabin.ab.Rd | 24 +-
man/betabinUC.Rd | 8 +-
man/betabinomial.Rd | 39 +-
man/betaprime.Rd | 2 +-
man/bilogis4UC.Rd | 8 +-
man/bilogistic4.Rd | 4 +-
man/binom2.or.Rd | 12 +-
man/binom2.rho.Rd | 1 -
man/binomialff.Rd | 16 +-
man/bisa.Rd | 2 +-
man/bisaUC.Rd | 2 +-
man/bminz.Rd | 7 +-
man/bortUC.Rd | 13 +-
man/calibrate.Rd | 1 -
man/calibrate.qrrvglm.Rd | 1 -
man/calibrate.qrrvglm.control.Rd | 1 -
man/cao.Rd | 17 +-
man/cao.control.Rd | 1 -
man/cardUC.Rd | 8 +-
man/cauchy.Rd | 18 +-
man/ccoef.Rd | 1 -
man/cdf.lmscreg.Rd | 9 +-
man/cenpoisson.Rd | 2 +-
man/cgumbel.Rd | 3 +-
man/chest.Rd | 3 +-
man/coalminers.Rd | 3 +-
man/constraints.Rd | 11 +-
man/cqo.Rd | 84 +-
man/cratio.Rd | 1 -
man/cumulative.Rd | 134 +-
man/deplot.lmscreg.Rd | 5 +-
man/dexpbinomial.Rd | 5 +-
man/dirichlet.Rd | 11 +-
man/dirmultinomial.Rd | 2 +-
man/enzyme.Rd | 1 -
man/felixUC.Rd | 8 +-
man/fgm.Rd | 2 +-
man/fill.Rd | 195 +--
man/fitted.vlm.Rd | 8 +-
man/frank.Rd | 2 +-
man/frankUC.Rd | 8 +-
man/frechet.Rd | 8 +-
man/frechetUC.Rd | 8 +-
man/freund61.Rd | 2 +-
man/fsqrt.Rd | 6 +-
man/gammahyp.Rd | 2 +-
man/gaussianff.Rd | 2 +-
man/gev.Rd | 31 +-
man/gevUC.Rd | 40 +-
man/gew.Rd | 1 -
man/ggammaUC.Rd | 8 +-
man/golf.Rd | 6 +-
man/gpd.Rd | 22 +-
man/gpdUC.Rd | 40 +-
man/grc.Rd | 29 +-
man/gumbel.Rd | 5 +-
man/gumbelIbiv.Rd | 2 +-
man/gumbelUC.Rd | 10 +-
man/hspider.Rd | 1 -
man/hunua.Rd | 6 +-
man/hyperg.Rd | 2 +-
man/hzetaUC.Rd | 7 +-
man/invbinomial.Rd | 2 +-
man/kumarUC.Rd | 20 +-
man/laplaceUC.Rd | 8 +-
man/leipnik.Rd | 2 +-
man/levy.Rd | 2 +-
man/lgammaUC.Rd | 8 +-
man/lino.Rd | 2 +-
man/linoUC.Rd | 7 +-
man/lirat.Rd | 1 -
man/lms.bcg.Rd | 14 +-
man/lms.bcn.Rd | 11 +-
man/lms.yjn.Rd | 11 +-
man/logff.Rd | 4 +-
man/logistic.Rd | 3 +
man/logit.Rd | 2 +
man/loglapUC.Rd | 106 ++
man/loglaplace.Rd | 254 ++++
man/loglinb2.Rd | 1 -
man/loglinb3.Rd | 3 +-
man/lqnorm.Rd | 2 +-
man/lv.Rd | 1 -
man/lvplot.Rd | 1 -
man/lvplot.rrvglm.Rd | 1 -
man/margeff.Rd | 132 ++
man/mbinomial.Rd | 2 +-
man/mccullagh89.Rd | 2 +-
man/mckaygamma2.Rd | 12 +-
man/micmen.Rd | 1 -
man/model.framevlm.Rd | 20 +-
man/model.matrixvlm.Rd | 18 +-
man/morgenstern.Rd | 2 +-
man/multinomial.Rd | 134 +-
man/nakagami.Rd | 2 +-
man/nakagamiUC.Rd | 7 +-
man/nbolf.Rd | 6 +-
man/negbinomial.Rd | 4 +-
man/notdocumentedyet.Rd | 46 +-
man/nzc.Rd | 3 +-
man/olympic.Rd | 55 +
man/ordpoisson.Rd | 6 +-
man/oxtemp.Rd | 1 -
man/pareto1.Rd | 4 +-
man/persp.qrrvglm.Rd | 1 -
man/plackett.Rd | 2 +-
man/plotdeplot.lmscreg.Rd | 5 +-
man/plotqrrvglm.Rd | 1 -
man/plotqtplot.lmscreg.Rd | 1 -
man/plotvgam.Rd | 65 +-
man/plotvgam.control.Rd | 13 +-
man/pneumo.Rd | 4 +-
man/poissonp.Rd | 2 +-
man/polf.Rd | 6 +-
man/polonoUC.Rd | 2 +-
man/posbinomUC.Rd | 3 +-
man/posbinomial.Rd | 4 +-
man/posnegbinUC.Rd | 1 +
man/posnegbinomial.Rd | 4 +-
man/posnormUC.Rd | 2 +-
man/pospoisUC.Rd | 3 +-
man/predict.qrrvglm.Rd | 7 +-
man/predict.vglm.Rd | 23 +-
man/qrrvglm.control.Rd | 1 -
man/qtplot.gumbel.Rd | 5 +-
man/qtplot.lmscreg.Rd | 1 -
man/quasibinomialff.Rd | 1 -
man/rayleigh.Rd | 4 +-
man/rcqo.Rd | 2 +-
man/recexp1.Rd | 15 +-
man/recnormal1.Rd | 15 +-
man/riceUC.Rd | 8 +-
man/rrar.Rd | 14 +-
man/rrvglm-class.Rd | 1 -
man/rrvglm.Rd | 2 +-
man/rrvglm.control.Rd | 5 +-
man/ruge.Rd | 1 -
man/s.Rd | 1 -
man/seq2binomial.Rd | 2 +-
man/skellam.Rd | 3 +-
man/skewnormal1.Rd | 2 +-
man/snormUC.Rd | 8 +-
man/sratio.Rd | 1 -
man/tikuvUC.Rd | 8 +-
man/toxop.Rd | 1 -
man/triangle.Rd | 5 +-
man/triangleUC.Rd | 8 +-
man/trplot.Rd | 1 -
man/trplot.qrrvglm.Rd | 1 -
man/undocumented-methods.Rd | 39 +
man/uqo.Rd | 1 -
man/usagrain.Rd | 3 +-
man/venice.Rd | 5 +-
man/vgam-class.Rd | 1 -
man/vgam.Rd | 2 -
man/vgam.control.Rd | 28 +-
man/vglm-class.Rd | 1 -
man/vglm.Rd | 164 ++-
man/vglm.control.Rd | 152 +-
man/waitakere.Rd | 3 +-
man/weightsvglm.Rd | 1 -
man/wffc.P2star.Rd | 11 +-
man/wffc.Rd | 59 +-
man/wffc.indiv.Rd | 14 +-
man/wffc.nc.Rd | 20 +-
man/wffc.teams.Rd | 14 +-
man/yip88.Rd | 17 +-
man/zanegbinUC.Rd | 16 +-
man/zanegbinomial.Rd | 4 +-
man/zapoisUC.Rd | 3 +-
man/zapoisson.Rd | 8 +-
man/zero.Rd | 1 -
man/{dzeta.Rd => zetaUC.Rd} | 16 +-
man/zibinomial.Rd | 31 +-
man/zinegbinomial.Rd | 2 +-
man/zipebcom.Rd | 20 +-
man/zipfUC.Rd | 8 +-
src/vgam.f | 106 +-
284 files changed, 9332 insertions(+), 7042 deletions(-)
diff --git a/DESCRIPTION b/DESCRIPTION
index 0f6845b..f735b8e 100755
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,18 +1,20 @@
Package: VGAM
-Version: 0.7-8
-Date: 2009-02-05
+Version: 0.7-9
+Date: 2009-06-15
Title: Vector Generalized Linear and Additive Models
Author: Thomas W. Yee <t.yee at auckland.ac.nz>
-Maintainer: Thomas Yee <t.yee at auckland.ac.nz>
+Maintainer: Thomas Yee <t.yee at auckland.ac.nz>
Depends: R (>= 2.5.0), splines, methods, stats, stats4
Description: Vector generalized linear and additive models, and
- associated models (Reduced-Rank VGLMs, Quadratic RR-VGLMs,
- Reduced-Rank VGAMs). This package fits many models and
- distribution by maximum likelihood estimation (MLE) or
- penalized MLE. Also fits constrained ordination models in
- ecology.
+ associated models (Reduced-Rank VGLMs, Quadratic RR-VGLMs,
+ Reduced-Rank VGAMs). This package fits many models and
+ distribution by maximum likelihood estimation (MLE) or
+ penalized MLE. Also fits constrained ordination models in
+ ecology.
License: GPL-2
URL: http://www.stat.auckland.ac.nz/~yee/VGAM
LazyLoad: yes
LazyData: yes
-Packaged: Thu Feb 5 13:51:42 2009; yee
+Packaged: 2009-06-15 17:14:11 UTC; yee
+Repository: CRAN
+Date/Publication: 2009-06-16 13:09:39
diff --git a/NAMESPACE b/NAMESPACE
index 686ac9f..49853f2 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -12,6 +12,7 @@ useDynLib(VGAM)
export(
+Build.terms.vlm,
procVec,
rss.vgam,
vcontrol.expression,
@@ -19,6 +20,12 @@ vplot, vplot.default, vplot.factor, vplot.list,
vplot.matrix, vplot.numeric, vvplot.factor)
export(
+case.namesvlm,
+variable.namesvlm
+)
+
+
+export(
d2theta.deta2, Deviance.categorical.data.vgam,
lm2qrrvlm.model.matrix,
m2avglm,
@@ -71,6 +78,7 @@ dinv.gaussian, pinv.gaussian, rinv.gaussian, wald, expexp1, expexp)
export(A1A2A3, a2m, AAaa.nohw,
AICvlm, AICvgam, AICrrvglm,
+AICqrrvglm, # AICvglm,
anova.vgam,
anova.vglm,
beta4,
@@ -106,6 +114,7 @@ erlang,
family.vglm,
dfelix, felix,
fitted.values.uqo, fitted.vlm, fittedvsmooth.spline, fsqrt,
+formulavlm, formulaNA.VGAM,
garma, gaussianff,
hypersecant, hypersecant.1,
hyperg,
@@ -118,7 +127,8 @@ lms.bcg, lms.bcn, lms.yjn, lms.yjn2,
lqnorm,
dbilogis4, pbilogis4, rbilogis4, bilogistic4,
logistic1, logistic2,
-logLik.vlm, lv.cao, lv.Coef.cao, lv.Coef.qrrvglm,
+logLik.vlm, lv.cao,
+lv.Coef.qrrvglm,
lvplot.cao, lv.qrrvglm,
Max.Coef.qrrvglm, Max.qrrvglm,
is.bell.vlm, is.bell.rrvglm, is.bell.qrrvglm, is.bell.cao, is.bell,
@@ -157,8 +167,9 @@ simple.exponential, simple.poisson,
mbinomial,
seq2binomial, size.binomial,
stdze1, stdze2,
-summary.cao, summary.grc, summary.lms, summary.qrrvglm,
-summary.rc.exponential, summary.rrvglm, summary.uqo, summaryvgam,
+summary.cao, summary.grc,
+ summary.qrrvglm,
+summary.rrvglm, summary.uqo, summaryvgam,
summaryvglm, summaryvlm, s.vam, terms.vlm,
theta2eta, Tol.Coef.qrrvglm, Tol.Coef.uqo, Tol.qrrvglm, Tol.uqo,
triangle, dtriangle, ptriangle, qtriangle, rtriangle,
@@ -176,12 +187,15 @@ export(lm2vlm.model.matrix)
+
+
importFrom("stats", model.matrix)
importFrom("stats", model.frame)
importFrom("stats", terms)
importFrom("stats", "coef")
+ importFrom("stats", "coefficients")
importFrom("stats", "logLik")
importFrom("graphics", "plot")
importFrom("stats", "vcov")
@@ -192,6 +206,39 @@ export(lm2vlm.model.matrix)
+
+ importFrom("stats", "resid")
+ importFrom("stats", "residuals")
+ importFrom("stats", "fitted")
+ importFrom("stats", "predict")
+ importFrom("stats", "df.residual")
+
+ importFrom("stats", "deviance")
+ importFrom("stats", "fitted.values")
+ importFrom("stats", "effects")
+ importFrom("stats", "weights")
+
+ importFrom("stats", "formula")
+ importFrom("stats", "case.names")
+ importFrom("stats", "variable.names")
+
+
+ importFrom("stats", dchisq, pchisq,
+ pf,
+ dexp, rexp,
+ dpois, ppois, qpois, rpois,
+ dnorm, pnorm, qnorm, rnorm)
+
+ importFrom("graphics", "persp")
+
+
+
+
+
+
+
+
+
export(ddagum, rdagum, qdagum, pdagum, dagum)
export(dfisk, pfisk, qfisk, rfisk, fisk)
export(dlomax, plomax, qlomax, rlomax, lomax)
@@ -206,9 +253,15 @@ export(dpolono, rpolono)
export(dgpd, pgpd, qgpd, rgpd, gpd)
export(dgev, pgev, qgev, rgev, gev, egev)
export(dlaplace, plaplace, qlaplace, rlaplace, laplace)
-export(dalaplace, palaplace, qalaplace, ralaplace,
+export(dalap, palap, qalap, ralap,
alaplace1.control,
alaplace1, alaplace2, alaplace3)
+export(dloglap, ploglap, qloglap, rloglap)
+export(loglaplace1.control, loglaplace1)
+export(dlogitlap, plogitlap, qlogitlap, rlogitlap,
+ logitlaplace1.control, logitlaplace1)
+export(dprobitlap, pprobitlap, qprobitlap, rprobitlap)
+export(dclogloglap, pclogloglap, qclogloglap, rclogloglap)
export(dcard, pcard, qcard, rcard, cardioid)
export(fff, fff.control,
mbesselI0,
@@ -217,7 +270,7 @@ export(fff, fff.control,
export(
AA.Aa.aa, AB.Ab.aB.ab2, AB.Ab.aB.ab, ABO, acat,
-beta.ab, betaff, betaffqn,
+beta.ab, betaff,
dbetageom, pbetageom, rbetageom, betageometric,
betaprime,
betaII,
@@ -230,7 +283,9 @@ calibrate, cao.control,
cao, ccoef, cdf.lmscreg, cgo, chisq, clo,
Coef.qrrvglm, Coef, Coef.rrvglm, Coef.vlm,
predict.qrrvglm,
-cratio, cumulative, scumulative, deplot.lmscreg, dirichlet,
+cratio, cumulative)
+export(
+deplot.lmscreg, dirichlet,
exponential, G1G2G3)
export(
@@ -250,13 +305,14 @@ grc,
dhzeta, phzeta, qhzeta, rhzeta, hzeta,
nidentity, identity,
prentice74,
-alsqreg, amlbinomial, amlexponential, amlpoisson, Wr1, Wr2,
+amlnormal, amlbinomial, amlexponential, amlpoisson, Wr1, Wr2,
dkumar, pkumar, qkumar, rkumar, kumar,
dyules, pyules, ryules, yulesimon,
logff, dlog, plog, rlog,
loglinb2, loglinb3,
loglog, lognormal3, lvplot.qrrvglm,
-lvplot, lvplot.rrvglm, lv, Max, MNSs, multinomial)
+lvplot, lvplot.rrvglm, lv, Max, MNSs,
+dmultinomial, multinomial, margeff)
export(
slash, dslash, pslash, rslash)
@@ -310,7 +366,8 @@ exportClasses("vglmff", "vlm", "vglm", "vgam",
"rrvglm", "qrrvglm", "grc",
"vlmsmall", "uqo", "cao",
"summary.vgam", "summary.vglm","summary.vlm",
-"summary.qrrvglm", "summary.cao", "summary.rrvglm",
+ "summary.qrrvglm",
+"summary.cao", "summary.rrvglm",
"Coef.rrvglm", "Coef.uqo", "Coef.qrrvglm", "Coef.cao",
"vcov.qrrvglm",
"vsmooth.spline.fit", "vsmooth.spline")
@@ -322,8 +379,8 @@ exportClasses("SurvS4")
exportMethods(
"coef", "Coef", "coefficients",
"constraints",
-"effects", "fitted", "fitted.values",
-"predict",
+"effects",
+"predict", "fitted", "fitted.values",
"print",
"resid",
"residuals", "show",
@@ -337,6 +394,7 @@ exportMethods(
"lv", "Max", "Opt", "Tol",
"biplot", "deplot", "lvplot", "qtplot", "rlplot", "meplot",
"plot", "trplot", "vplot",
+"formula", "case.names", "variable.names",
"weights",
"persp")
diff --git a/NEWS b/NEWS
index 7e96c2d..8fa3e6f 100755
--- a/NEWS
+++ b/NEWS
@@ -5,6 +5,63 @@
**************************************************
+ CHANGES IN VGAM VERSION 0.7-9
+
+NEW FEATURES
+
+ o New functions: margeff() for marginal effects of a vglm()
+ "multinomial" or "cumulative" model.
+ o Almost all VGAM family functions now have a "loglikelihood"
+ slot that incorporates any constants in the density function.
+ Hence the fitted likelihood may differ by a constant from
+ previous results.
+ In particular, models such as multinomial(), cumulative()
+ and binom2.or() have this new feature.
+ o vglm() now has a modified 'xij' argument which implements
+ eta-specific covariates. Usage now involves the form2' argument,
+ and the 'xij' argument does not interfere with constraint matrices.
+ Documentation is supplied on the VGAM website, in particular,
+ http://www.stat.auckland.ac.nz/~yee/VGAM/doc/xij.pdf
+ o cases.names() and variable.names() methods functions
+ written for vglm()/vgam() objects.
+ o cumulative() has improved initial values, especially for
+ long data format, i.e., when each row of the data frame is an
+ observation rather than inputting a matrix of counts.
+ o rrvglm() handles a factor response without giving a warning.
+ o New data: olympic.
+ o testf90.f90 has been renamed to testf90.f95. This may decrease
+ the incidences of compiler problems on some platforms (f95 seems
+ more popular than f90).
+ o For cqo() objects: AIC(), resid() have been written.
+ o Improved functions: negbinomial() default initial values are
+ more robust to outliers in the response, betabinomial() and
+ betabin.ab() have better initialization and "loglikelihood"
+ slot matches dbetabin.ab(log=TRUE).
+ o Renamed VGAM family functions: alsqreg() becomes amlnormal().
+ o Renamed arguments: lmu replaces link.mu in zibinomial().
+ o dzeta(p) has changed wrt 'p'.
+ o The functions summary.lms() and summary.rc.exponential() are
+ no longer distributed to avoid a warning wrt S3 vs S4
+ methods dispatch.
+ o The VGAM family functions for genetic models have been improved,
+ e.g., some basic error checking.
+ Also some changes in the names of the parameters, e.g.,
+ "q" to "pB" for ABO(), plus some switching of the order of the
+ arguments.
+
+
+BUG FIXES
+
+ o VGAM interferes much less in regard to generic functions
+ such as predict(), fitted(), resid(), wrt other packages and
+ also including base's lm(), glm(), etc.
+ o AIC() method for rrvglm() objects was wrong (did not account
+ for argument 'Structural.zero').
+ o dzibinom(log=TRUE) was wrong.
+
+
+
+
CHANGES IN VGAM VERSION 0.7-8
NEW FEATURES
@@ -32,7 +89,7 @@ NEW FEATURES
compiler. Actually, removing the .f90 file(s) will not be too
much of a problem as there is very little F90 code in use by
the package at the moment.
- o New functions: dbinom2.rho(), rbinom2.rho(), dposnegbin(),
+ o New functions: dbinom2.rho(), rbinom2.rho(), dposnegbin().
o New data: wffc, wffc.indiv, wffc.teams, wffc.nc.
o Improved functions: binom2.rho(), micmen(), negbinomial(),
poissonff(), posnegbinomial(), zanegbinomial(),
@@ -970,7 +1027,7 @@ NEW FEATURES
NEW FEATURES
- o vglm() now has a xij argument which implments eta-specific covariates.
+ o vglm() now has a xij argument which implements eta-specific covariates.
Documentation is supplied on the VGAM website.
o grc() has been written for Goodman's RC association model for a
contingency table. Documentation is in rrvglm.pdf
diff --git a/R/aamethods.q b/R/aamethods.q
index 9f054e1..f7093a9 100644
--- a/R/aamethods.q
+++ b/R/aamethods.q
@@ -3,8 +3,10 @@
-if(!exists("is.R")) is.R <- function()
- exists("version") && !is.null(version$language) && version$language=="R"
+
+
+
+
is.Numeric <- function(x, allowable.length=Inf, integer.valued=FALSE, positive=FALSE)
if(all(is.numeric(x)) && all(is.finite(x)) &&
@@ -13,35 +15,23 @@ is.Numeric <- function(x, allowable.length=Inf, integer.valued=FALSE, positive=F
(if(positive) all(x>0) else TRUE)) TRUE else FALSE
-if(is.R())
- VGAMenv = new.env()
-
+VGAMenv = new.env()
-
-
-if(is.R()) {
- .onLoad <- function(lib, pkg) require(methods) # 25/1/05
+.onLoad <- function(lib, pkg) require(methods) # 25/1/05
+
+
+if(!any(search()=="package:methods"))
+ library(methods)
- if(!any(search()=="package:methods"))
- library(methods)
-
- if(!any(search()=="package:splines"))
- require(splines)
-
-}
-
-
-
-
-
+if(!any(search()=="package:splines"))
+ require(splines)
-.VGAM.prototype.list = if(is.R())
-list(
+.VGAM.prototype.list = list(
"constraints" = expression({}),
"fini" = expression({}),
"first" = expression({}),
@@ -50,27 +40,9 @@ list(
"middle" = expression({}),
"middle2" = expression({}),
"deriv" = expression({}),
- "weight" = expression({})) else
-list(
- "blurb" = "",
- "constraints" = expression({}),
- "deviance" = function() {},
- "fini" = expression({}),
- "first" = expression({}),
- "initialize" = expression({}),
- "inverse" = function() {},
- "last" = expression({}),
- "link" = function() {},
- "loglikelihood"= function() {},
- "middle" = expression({}),
- "middle2" = expression({}),
- "summary.dispersion" = FALSE,
- "vfamily" = "",
- "deriv" = expression({}),
- "weight" = expression({})) # Splus doesn't use prototypes
+ "weight" = expression({}))
-if(is.R())
setClass("vglmff", representation(
"blurb" = "character",
"constraints" = "expression",
@@ -88,32 +60,14 @@ setClass("vglmff", representation(
"vfamily" = "character",
"deriv" = "expression",
"weight" = "expression"), # "call"
-prototype = .VGAM.prototype.list) else
-setClass("vglmff", representation(
- "blurb" = "character",
- "constraints" = "expression",
- "deviance" = "function",
- "fini" = "expression",
- "first" = "expression",
- "initialize" = "expression",
- "inverse" = "function",
- "last" = "expression",
- "link" = "function",
- "loglikelihood"= "function",
- "middle" = "expression",
- "middle2" = "expression",
- "summary.dispersion" = "logical",
- "vfamily" = "character",
- "deriv" = "expression",
- "weight" = "expression"))
+prototype = .VGAM.prototype.list)
valid.vglmff = function(object) {
-
compulsory = c("initialize", "weight", "deriv", "inverse")
- for(i in compulsory) {
- if(!length(slot(object, i)))
- stop(paste("slot \"", i, "\" is empty"))
+ for(ii in compulsory) {
+ if(!length(slot(object, ii)))
+ stop("slot ", ii, " is empty")
}
if(length(as.list(object at inverse)) != 3)
@@ -121,12 +75,19 @@ valid.vglmff = function(object) {
}
if(FALSE)
-setValidity("vglmff", valid.vglmff)
+ setValidity("vglmff", valid.vglmff)
+
-print.vglmff <- function(x, ...)
-{
+
+if(!isGeneric("print"))
+ setGeneric("print", function(x, ...) standardGeneric("print"),
+ package="VGAM")
+
+
+
+print.vglmff <- function(x, ...) {
f <- x at vfamily
if(is.null(f))
stop("not a VGAM family function")
@@ -139,8 +100,8 @@ print.vglmff <- function(x, ...)
if(length(f)>1) cat("Informal classes:", paste(f, collapse=", "), "\n")
cat("\n")
- for(i in 1:length(nn))
- cat(nn[i])
+ for(ii in 1:length(nn))
+ cat(nn[ii])
cat("\n")
invisible(return(x))
}
@@ -162,141 +123,113 @@ setMethod("show", "vglmff",
-if(is.R())
-setClass("vlm", representation(
- "assign" = "list",
+
+setClass("vlmsmall", representation(
"call" = "call",
- "callXm2" = "call",
- "coefficients" = if(is.R()) "numeric" else "named",
+ "coefficients" = "numeric",
"constraints" = "list",
- "contrasts" = "list",
"control" = "list",
"criterion" = "list",
- "df.residual" = "numeric",
- "df.total" = "numeric",
- "dispersion" = "numeric",
- "effects" = "numeric",
"fitted.values"= "matrix",
"misc" = "list",
"model" = "data.frame",
- "na.action" = "list", # ' if(is.R()) "omit" else '
- "offset" = "matrix",
+ "na.action" = "list",
"post" = "list",
"preplot" = "list",
- "prior.weights"= if(is.R()) "numeric" else "named",
- "qr" = "list",
- "R" = if(is.R()) "matrix" else "upper",
- "rank" = "integer",
+ "prior.weights"= "numeric",
"residuals" = "matrix",
- "rss" = "numeric",
- "smart.prediction" = "list",
- "terms" = "list",
"weights" = "matrix",
- "x" = if(is.R()) "matrix" else "model.matrix",
- "Xm2" = if(is.R()) "matrix" else "model.matrix",
- "Ym2" = if(is.R()) "matrix" else "model.matrix",
- "xlevels" = "list",
- "y" = "matrix")
-) else
+ "x" = "matrix",
+ "y" = "matrix"),
+)
+
+
setClass("vlm", representation(
"assign" = "list",
- "call" = "call",
- "coefficients" = if(is.R()) "numeric" else "named",
- "constraints" = "list",
+ "callXm2" = "call",
"contrasts" = "list",
- "control" = "list",
- "criterion" = "list",
"df.residual" = "numeric",
"df.total" = "numeric",
"dispersion" = "numeric",
"effects" = "numeric",
- "fitted.values"= "matrix",
- "misc" = "list",
- "model" = "data.frame",
- "na.action" = "list", # ' if(is.R()) "omit" else '
"offset" = "matrix",
- "post" = "list",
- "preplot" = "list",
- "prior.weights"= if(is.R()) "numeric" else "named",
- "qr" = "qr",
- "R" = if(is.R()) "matrix" else "upper",
+ "qr" = "list",
+ "R" = "matrix",
"rank" = "integer",
- "residuals" = "matrix",
"rss" = "numeric",
"smart.prediction" = "list",
"terms" = "list",
- "weights" = "matrix",
- "x" = if(is.R()) "matrix" else "model.matrix",
- "xlevels" = "list",
- "y" = "matrix")
+ "Xm2" = "matrix",
+ "Ym2" = "matrix",
+ "xlevels" = "list"
+ ),
+ contains = "vlmsmall"
)
-setClass("vglm", representation("vlm",
+setClass("vglm", representation(
"extra" = "list",
"family" = "vglmff",
- "iter" = if(is.R()) "numeric" else "integer",
- "predictors" = if(is.R()) "matrix" else "matrix"))
+ "iter" = "numeric",
+ "predictors" = "matrix"),
+ contains = "vlm")
-setClass("vgam", representation("vglm",
+setClass("vgam", representation(
"Bspline" = "list", # each [[i]] is a "vsmooth.spline.fit"
- "nl.chisq" = if(is.R()) "numeric" else "named",
- "nl.df" = if(is.R()) "numeric" else "named",
- "spar" = if(is.R()) "numeric" else "named",
- "s.xargument" = if(is.R()) "character" else "named",
- "var" = "matrix"))
+ "nl.chisq" = "numeric",
+ "nl.df" = "numeric",
+ "spar" = "numeric",
+ "s.xargument" = "character",
+ "var" = "matrix"),
+ contains = "vglm")
-if(is.R())
- setClass("summary.vgam",
- representation("vgam",
+setClass("summary.vgam", representation(
anova="data.frame",
cov.unscaled="matrix",
correlation="matrix",
df="numeric",
pearson.resid="matrix",
sigma="numeric"),
-prototype(anova=data.frame())) else
- setClass("summary.vgam",
- representation("vgam",
- anova="data.frame",
- cov.unscaled="matrix",
- correlation="matrix",
- df="numeric",
- pearson.resid="matrix",
- sigma="numeric"))
+ prototype(anova=data.frame()),
+ contains = "vgam")
- setClass("summary.vglm",
- representation("vglm",
+setClass("summary.vglm", representation(
coef3="matrix",
cov.unscaled="matrix",
correlation="matrix",
df="numeric",
pearson.resid="matrix",
- sigma="numeric"))
+ sigma="numeric"),
+ contains = "vglm")
- setClass("summary.vlm",
- representation("vlm",
+
+setClass("summary.vlm", representation(
coef3="matrix",
cov.unscaled="matrix",
correlation="matrix",
df="numeric",
pearson.resid="matrix",
- sigma="numeric"))
+ sigma="numeric"),
+ contains = "vlm")
- setClass( "rrvglm", representation("vglm"))
+ setClass(Class="rrvglm",
+ contains="vglm")
+
+
+if(FALSE)
setClass("qrrvglm", representation(
"assign" = "list",
"call" = "call",
- "coefficients" = if(is.R()) "numeric" else "named",
+ "coefficients" = "numeric",
"constraints" = "list",
"contrasts" = "list",
"control" = "list",
@@ -307,26 +240,37 @@ prototype(anova=data.frame())) else
"extra" = "list",
"family" = "vglmff",
"fitted.values"= "matrix",
- "iter" = if(is.R()) "numeric" else "integer",
+ "iter" = "numeric",
"misc" = "list",
"model" = "data.frame",
- "na.action" = "list", # ' if(is.R()) "omit" else '
+ "na.action" = "list",
"offset" = "matrix",
"post" = "list",
- "predictors" = if(is.R()) "matrix" else "matrix",
+ "predictors" = "matrix",
"preplot" = "list",
- "prior.weights"= if(is.R()) "numeric" else "named",
+ "prior.weights"= "numeric",
"residuals" = "matrix",
"smart.prediction" = "list",
"terms" = "list",
"weights" = "matrix",
- "x" = if(is.R()) "matrix" else "model.matrix",
- "Xm2" = if(is.R()) "matrix" else "model.matrix",
- "Ym2" = if(is.R()) "matrix" else "model.matrix",
+ "x" = "matrix",
+ "Xm2" = "matrix",
+ "Ym2" = "matrix",
"xlevels" = "list",
"y" = "matrix")
)
+
+
+
+ setClass(Class="qrrvglm",
+ contains = "rrvglm")
+
+
+
+
+
+
if(FALSE)
setAs("qrrvglm", "vglm", function(from)
new("vglm", "extra"=from at extra,
@@ -366,7 +310,8 @@ new("vglm", "extra"=from at extra,
- setClass("grc", representation("rrvglm", not.needed="numeric"))
+ setClass("grc", representation(not.needed="numeric"),
+ contains = "rrvglm")
setMethod("summary", "grc",
@@ -374,15 +319,13 @@ setMethod("summary", "grc",
summary.grc(object, ...))
-
if(FALSE) {
-setClass("vfamily",
- representation("list"))
+ setClass("vfamily",
+ representation("list"))
}
-
if(!isGeneric("Coef"))
setGeneric("Coef", function(object, ...) standardGeneric("Coef"),
package="VGAM")
@@ -397,6 +340,7 @@ setGeneric("Coefficients", function(object, ...)
+
if(!isGeneric("logLik"))
setGeneric("logLik", function(object, ...) standardGeneric("logLik"),
package="VGAM")
@@ -416,34 +360,17 @@ if(!isGeneric("vcov"))
-setClass("vlmsmall", representation(
- "call" = "call",
- "coefficients" = if(is.R()) "numeric" else "named",
- "constraints" = "list",
- "control" = "list",
- "criterion" = "list",
- "fitted.values"= "matrix",
- "misc" = "list",
- "model" = "data.frame",
- "na.action" = "list", # ' if(is.R()) "omit" else '
- "post" = "list",
- "preplot" = "list",
- "prior.weights"= if(is.R()) "numeric" else "named",
- "residuals" = "matrix",
- "weights" = "matrix",
- "x" = if(is.R()) "matrix" else "model.matrix",
- "y" = "matrix"),
-)
-
-setClass("uqo", representation("vlmsmall",
+setClass("uqo", representation(
"lv" = "matrix",
"extra" = "list",
"family" = "vglmff",
- "iter" = if(is.R()) "numeric" else "integer",
- "predictors" = "matrix"))
+ "iter" = "numeric",
+ "predictors" = "matrix"),
+ contains = "vlmsmall")
-setClass(Class="cao", repr=representation("vgam", "uqo"))
+setClass(Class="cao",
+ contains="vgam")
if(!isGeneric("lvplot"))
@@ -519,6 +446,25 @@ if(!isGeneric("AIC"))
+ if(!isGeneric("formula"))
+ setGeneric("formula", function(x, ...) standardGeneric("formula"),
+ package="VGAM")
+
+
+ if(!isGeneric("case.names"))
+ setGeneric("case.names", function(object, ...)
+ standardGeneric("case.names"),
+ package="VGAM")
+
+ if(!isGeneric("variable.names"))
+ setGeneric("variable.names", function(object, ...)
+ standardGeneric("variable.names"),
+ package="VGAM")
+
+
+
+
+
diff --git a/R/bAIC.q b/R/bAIC.q
index 152bbf2..1642d22 100644
--- a/R/bAIC.q
+++ b/R/bAIC.q
@@ -27,16 +27,48 @@ AICvgam = function(object, ..., k=2) {
}
AICrrvglm = function(object, ..., k=2) {
+ print("20090316; k in AICrrvglm(); zz")
+ print( k )
estdisp = object at misc$estimated.dispersion
no.dpar = if(length(estdisp) && is.logical(estdisp) && estdisp)
length(object at misc$dispersion) else 0
- elts.tildeA = (object at misc$M - object at control$Rank) * object at control$Rank
- -2 * logLik.vlm(object, ...) + k * (length(coefvlm(object)) +
- no.dpar + elts.tildeA)
+ Structural.zero = object at control$Structural.zero
+ MMM = object at misc$M
+ Rank = object at control$Rank
+ elts.tildeA = (MMM - Rank - length(Structural.zero)) * Rank
+ print("object at control$Structural.zero")
+ print("object at control$Structural.zero")
+ -2 * logLik.vlm(object, ...) +
+ k * (length(coefvlm(object)) + no.dpar + elts.tildeA)
}
-AICqrrgvlm = function(object, ..., k=2) {
- stop("this function not written yet")
+AICqrrvglm = function(object, ..., k=2) {
+ print("20090316; k in AICqrrvglm(); zz")
+ print( k )
+
+ estdisp = object at misc$estimated.dispersion
+ no.dpar = if(length(estdisp) && is.logical(estdisp) && estdisp)
+ length(object at misc$dispersion) else 0
+ Structural.zero = object at control$Structural.zero
+ MMM = object at misc$M
+ Rank = object at control$Rank
+ elts.tildeA = (MMM - Rank - length(Structural.zero)) * Rank
+
+ EqualTolerances = object at control$EqualTolerances
+ ITolerances = object at control$ITolerances
+ if(!(length(EqualTolerances) == 1 && is.logical(EqualTolerances)))
+ stop("could not determine whether the fitted object used an ",
+ "equal-tolerances assumption based on argument 'EqualTolerances'")
+ if(!(length(ITolerances) == 1 && is.logical(ITolerances)))
+ stop("could not determine whether the fitted object used an ",
+ "equal-tolerances assumption based on argument 'ITolerances'")
+ NOS = if(length(object at y)) ncol(object at y) else MMM
+ MSratio = MMM / NOS # First value is g(mean) = quadratic form in l
+ if(round(MSratio) != MSratio) stop("'MSratio' is not an integer")
+ elts.D = ifelse(ITolerances || EqualTolerances, 1, NOS) * Rank*(Rank+1)/2
+
+ deviance(object, ...) +
+ k * (length(coefvlm(object)) + no.dpar + elts.tildeA + elts.D)
}
setMethod("AIC", "vlm",
@@ -61,6 +93,15 @@ setMethod("AIC", "qrrvglm",
}
+
+
+
+
+
+
+
+
+
if(FALSE) {
@@ -83,7 +124,7 @@ AICrrvglm = function(object, ..., k=2) {
sign = 1
}
if(!length(crit) || !is.numeric(crit))
- stop("can't get at the deviance or loglikelihood of the object")
+ stop("cannot get at the deviance or loglikelihood of the object")
sign * crit + 2 * (length(coef(object)) +
object at control$rank * (object at misc$M - object at control$rank))
diff --git a/R/build.terms.s b/R/build.terms.s
deleted file mode 100644
index cfb748b..0000000
--- a/R/build.terms.s
+++ /dev/null
@@ -1,50 +0,0 @@
-Build.terms <- function(x, coefs, cov = NULL, assign, collapse = TRUE)
-{
-
- cov.true <- !is.null(cov)
- if(collapse) {
- fit <- drop(x %*% coefs)
- if(cov.true) {
- var <- ((x %*% cov) * x) %*% rep(1, length(coefs))
- list(fitted.values = fit, se.fit = drop(sqrt(var)))
- }
- else fit
- } else {
- constant <- attr(x, "constant")
- if(!is.null(constant))
- constant <- sum(constant * coefs)
- if(missing(assign))
- assign <- attr(x, "assign")
- if(is.null(assign))
- stop("Need an 'assign' list")
- fit <- array(0, c(nrow(x), length(assign)), list(dimnames(x)[[1
- ]], names(assign)))
- if(cov.true)
- se <- fit
- TL <- sapply(assign, length)
- simple <- TL == 1
- complex <- TL > 1
- if(any(simple)) {
- asss <- unlist(assign[simple])
- ones <- rep(1, nrow(x))
- fit[, simple] <- x[, asss] * outer(ones, coefs[asss])
- if(cov.true)
- se[, simple] <- abs(x[, asss]) * outer(ones,
- sqrt(diag(cov))[asss])
- }
- if(any(complex)) {
- assign <- assign[complex]
- for(term in names(assign)) {
- TT <- assign[[term]]
- xt <- x[, TT]
- fit[, term] <- xt %*% coefs[TT]
- if(cov.true)
- se[, term] <- sqrt(drop(((xt %*% cov[TT, TT]) *
- xt) %*% rep(1, length(TT))))
- }
- }
- attr(fit, "constant") <- constant
- if(is.null(cov)) fit else list(fitted.values = fit, se.fit = se)
- }
-}
-
diff --git a/R/build.terms.vlm.q b/R/build.terms.vlm.q
index 4551040..b45f930 100644
--- a/R/build.terms.vlm.q
+++ b/R/build.terms.vlm.q
@@ -3,11 +3,12 @@
-if(is.R()) {
-
if(!isGeneric("terms"))
setGeneric("terms", function(x, ...) standardGeneric("terms"))
-}
+
+
+
+
terms.vlm = function(x, ...) {
v = x at terms
@@ -20,26 +21,24 @@ terms.vlm = function(x, ...) {
}
-if(!isGeneric("terms"))
- setGeneric("terms", function(x, ...)
- standardGeneric("terms"))
-
setMethod("terms", "vlm", function(x, ...) terms.vlm(x, ...))
-Build.terms.vlm <- function(x, coefs, cov = NULL, assign, collapse = TRUE, M,
- dimname=NULL, coefmat = NULL)
-{
- cov.true <- !is.null(cov)
+
+Build.terms.vlm = function(x, coefs, cov = NULL, assign, collapse = TRUE, M,
+ dimname=NULL, coefmat = NULL) {
+
+
+ cov.true = !is.null(cov)
if(collapse) {
- fit <- matrix(x %*% coefs, ncol=M, byrow=TRUE)
- dimnames(fit) <- dimname
+ fit = matrix(x %*% coefs, ncol=M, byrow=TRUE)
+ dimnames(fit) = dimname
if(M==1)
- fit <- c(fit)
+ fit = c(fit)
if(cov.true) {
- var <- ((x %*% cov) * x) %*% rep(1, length(coefs))
+ var = ((x %*% cov) * x) %*% rep(1, length(coefs))
list(fitted.values = fit, se.fit = if(M==1) c(sqrt(var)) else
matrix(sqrt(var), ncol=M, byrow=TRUE, dimnames=dimname))
} else {
@@ -47,46 +46,45 @@ Build.terms.vlm <- function(x, coefs, cov = NULL, assign, collapse = TRUE, M,
}
} else {
-
-
- constant <- attr(x, "constant")
+ constant = attr(x, "constant")
if(!is.null(constant)) {
- constant <- as.vector( t(coefmat) %*% constant )
+ constant = as.vector( t(coefmat) %*% constant )
}
if(missing(assign))
- assign <- attr(x, "assign")
+ assign = attr(x, "assign")
if(is.null(assign))
stop("Need an 'assign' list")
- fit <- array(0, c(nrow(x), length(assign)),
- list(dimnames(x)[[1]], names(assign)))
+ fit = array(0, c(nrow(x), length(assign)),
+ list(dimnames(x)[[1]], names(assign)))
if(cov.true)
- se <- fit
- TL <- sapply(assign, length)
- simple <- TL == 1
- complex <- TL > 1
+ se = fit
+ TL = sapply(assign, length)
+ simple = TL == 1
+ complex = TL > 1
if(any(simple)) {
- asss <- unlist(assign[simple])
- ones <- rep(1, nrow(x))
- fit[, simple] <- x[, asss] * outer(ones, coefs[asss])
+ asss = unlist(assign[simple])
+ ones = rep(1, nrow(x))
+ fit[, simple] = x[, asss] * outer(ones, coefs[asss])
if(cov.true)
- se[, simple] <- abs(x[, asss]) * outer(ones,
- sqrt(diag(cov))[asss])
+ se[,simple] = abs(x[,asss]) * outer(ones, sqrt(diag(cov))[asss])
}
if(any(complex)) {
- assign <- assign[complex]
+ assign = assign[complex]
for(term in names(assign)) {
- TT <- assign[[term]]
- xt <- x[, TT]
- fit[, term] <- xt %*% coefs[TT]
+ TT = assign[[term]]
+ xt = x[, TT]
+ fit[, term] = xt %*% coefs[TT]
if(cov.true)
- se[, term] <- sqrt(drop(((xt %*% cov[TT, TT]) *
- xt) %*% rep(1, length(TT))))
+ se[, term] = sqrt(drop(((xt %*% cov[TT, TT]) * xt) %*%
+ rep(1, length(TT))))
}
}
- attr(fit, "constant") <- constant
+ attr(fit, "constant") = constant
if(cov.true) list(fitted.values = fit, se.fit = se) else fit
}
}
+
+
diff --git a/R/calibrate.q b/R/calibrate.q
index cff6e66..c6d9601 100644
--- a/R/calibrate.q
+++ b/R/calibrate.q
@@ -16,7 +16,7 @@ calibrate.qrrvglm.control = function(object,
Rank = object at control$Rank
EqualTolerances = object at control$EqualTolerances
if(!is.Numeric(gridSize, positive=TRUE, integer=TRUE, allow=1))
- stop("bad input for \"gridSize\"")
+ stop("bad input for 'gridSize'")
if(gridSize < 2)
stop("gridSize must be >= 2")
list(# maxit=Maxit.optim, # Note the name change
@@ -48,7 +48,7 @@ calibrate.qrrvglm = function(object,
type <- match.arg(type, c("lv","predictors","response","vcov","all3or4"))[1]
if(!Quadratic && type=="vcov")
- stop("cannot have type=\"vcov\" when object is a \"cao\" object")
+ stop("cannot have 'type=\"vcov\"' when object is a \"cao\" object")
if(is.vector(newdata))
newdata = rbind(newdata)
@@ -92,8 +92,7 @@ calibrate.qrrvglm = function(object,
for(ii in 1:nrow(initial.vals)) {
if(optim.control$trace) {
cat("Starting from grid-point", ii, ":")
- if(exists("flush.console"))
- flush.console()
+ flush.console()
}
ans = if(is.R()) {
if(Quadratic)
@@ -129,7 +128,6 @@ calibrate.qrrvglm = function(object,
if(ans$convergence == 0)
cat("Successful convergence\n") else
cat("Unsuccessful convergence\n")
- if(exists("flush.console"))
flush.console()
}
if(ans$convergence == 0) {
@@ -260,6 +258,8 @@ calibrate.qrrvglm = function(object,
if(everything) list(eta=eta, mu=mu, value=value, vcmat=vcmat) else value
}
+
+
.my.calib.objfunction.cao = function(bnu, y, extra=NULL,
objfun, object, Coefs,
misc.list,
@@ -285,3 +285,7 @@ setMethod("calibrate", "qrrvglm", function(object, ...)
calibrate.qrrvglm(object, ...))
+
+
+
+
diff --git a/R/cao.R b/R/cao.R
index d88d900..e66fe80 100644
--- a/R/cao.R
+++ b/R/cao.R
@@ -54,7 +54,7 @@ cao <- function(formula,
y <- model.response(mf, "numeric") # model.extract(mf, "response")
x <- model.matrix(mt, mf, contrasts)
- attr(x, "assign") <- attrassigndefault(x, mt) # So as to make it like Splus
+ attr(x, "assign") = attrassigndefault(x, mt)
offset <- model.offset(mf)
if(is.null(offset))
offset <- 0 # yyy ???
@@ -69,7 +69,7 @@ cao <- function(formula,
if(is.function(family))
family <- family()
if(!inherits(family, "vglmff")) {
- stop(paste("family=", family, "is not a VGAM family function"))
+ stop("'family=", family, "' is not a VGAM family function")
}
eval(vcontrol.expression)
@@ -151,8 +151,8 @@ cao <- function(formula,
slot(answer, "control") = fit$control
slot(answer, "extra") = if(length(fit$extra)) {
if(is.list(fit$extra)) fit$extra else {
- warning(paste("\"extra\" is not a list, therefore",
- "placing \"extra\" into a list"))
+ warning("'extra' is not a list, therefore ",
+ "placing 'extra' into a list")
list(fit$extra)
}
} else list() # R-1.5.0
diff --git a/R/cao.fit.q b/R/cao.fit.q
index cc03a18..cc2802e 100644
--- a/R/cao.fit.q
+++ b/R/cao.fit.q
@@ -19,7 +19,6 @@ cao.fit <- function(x, y, w=rep(1, length(x[, 1])),
check.rank = TRUE #
nonparametric <- TRUE
optim.maxit <- control$optim.maxit
- backchat <- FALSE # control$backchat && !control$Quadratic # rrr;
save.weight <- control$save.weight
trace <- control$trace
minimize.criterion <- control$min.criterion
@@ -27,9 +26,9 @@ cao.fit <- function(x, y, w=rep(1, length(x[, 1])),
n <- dim(x)[1]
- copyxbig <- FALSE # May be overwritten in @initialize
+ copy_X_vlm <- FALSE # May be overwritten in @initialize
- xbig.save <- NULL
+ X_vlm_save <- NULL
intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
y.names <- predictors.names <- NULL # May be overwritten in @initialize
@@ -49,7 +48,7 @@ cao.fit <- function(x, y, w=rep(1, length(x[, 1])),
"binomialff"=1, "quasipoissonff"=0, "quasibinomialff"=0,
"negbinomial"=3,
"gamma2"=5, "gaussianff"=8,
- 0) # stop("can't fit this model using fast algorithm")
+ 0) # stop("cannot fit this model using fast algorithm")
if(!modelno) stop("the family function does not work with cao()")
if(modelno == 1) modelno = get("modelno", envir = VGAMenv)
@@ -152,10 +151,8 @@ cao.fit <- function(x, y, w=rep(1, length(x[, 1])),
for(iter in 1:optim.maxit) {
if(control$trace) {
cat("\nIteration", iter, "\n")
- if(exists("flush.console"))
- flush.console()
+ flush.console()
}
-flush.console()
conjgrad = optim(par=c(Cmat), fn=callcaof,
gr=if(control$GradientFunction) calldcaof else NULL,
@@ -189,7 +186,7 @@ flush.console()
}
Cmat = crow1C(Cmat, control$Crow1positive) # Make sure the signs are right
-flush.console()
+ flush.console()
temp9 =
callcaof(cmatrix=Cmat,
etamat=eta, xmat=x, ymat=y, wvec=w, modelno=modelno,
@@ -323,56 +320,56 @@ cao.control = function(Rank=1,
spar2 = 0, # 0 means df2.nl is used
...)
{
- if(!is.Numeric(iShape, posit=TRUE)) stop("bad input for \"iShape\"")
- if(!is.Numeric(iKvector, posit=TRUE)) stop("bad input for \"iKvector\"")
+ if(!is.Numeric(iShape, posit=TRUE)) stop("bad input for 'iShape'")
+ if(!is.Numeric(iKvector, posit=TRUE)) stop("bad input for 'iKvector'")
if(!is.Numeric(method.init, posit=TRUE, allow=1, integer=TRUE))
- stop("bad input for \"method.init\"")
- if(criterion != "deviance") stop("\"criterion\" must be \"deviance\"")
+ stop("bad input for 'method.init'")
+ if(criterion != "deviance") stop("'criterion' must be \"deviance\"")
if(GradientFunction) stop("14/1/05; GradientFunction=TRUE not working yet")
se.fit = as.logical(FALSE)
if(se.fit) stop("se.fit = FALSE handled only")
if(length(Cinit) && !is.Numeric(Cinit))
- stop("Bad input for \"Cinit\"")
+ stop("Bad input for 'Cinit'")
if(!is.Numeric(Bestof, allow=1, integ=TRUE, posit=TRUE))
- stop("Bad input for \"Bestof\"")
+ stop("Bad input for 'Bestof'")
if(!is.Numeric(maxitl, allow=1, integ=TRUE, posit=TRUE))
- stop("Bad input for \"maxitl\"")
+ stop("Bad input for 'maxitl'")
if(!is.Numeric(bf.epsilon, allow=1, posit=TRUE))
- stop("Bad input for \"bf.epsilon\"")
+ stop("Bad input for 'bf.epsilon'")
if(!is.Numeric(bf.maxit, integ=TRUE, posit=TRUE, allow=1))
- stop("Bad input for \"bf.maxit\"")
+ stop("Bad input for 'bf.maxit'")
if(!is.Numeric(Etamat.colmax, posit=TRUE, allow=1) || Etamat.colmax < Rank)
- stop("bad input for \"Etamat.colmax\"")
+ stop("bad input for 'Etamat.colmax'")
if(!is.Numeric(Maxit.optim, integ=TRUE, posit=TRUE, allow=1))
- stop("Bad input for \"Maxit.optim\"")
+ stop("Bad input for 'Maxit.optim'")
if(!is.Numeric(optim.maxit, allow=1, integ=TRUE, posit=TRUE))
- stop("Bad input for \"optim.maxit\"")
+ stop("Bad input for 'optim.maxit'")
if(!is.Numeric(SD.sitescores, allow=1, posit=TRUE))
- stop("Bad input for \"SD.sitescores\"")
+ stop("Bad input for 'SD.sitescores'")
if(!is.Numeric(SD.Cinit, allow=1, posit=TRUE))
- stop("Bad input for \"SD.Cinit\"")
+ stop("Bad input for 'SD.Cinit'")
if(!is.Numeric(df1.nl) || any(df1.nl < 0))
- stop("Bad input for \"df1.nl\"")
+ stop("Bad input for 'df1.nl'")
if(any(df1.nl >= 0 & df1.nl < 0.05)) {
warning("df1.nl values between 0 and 0.05 converted to 0.05")
df1.nl[df1.nl < 0.05] = 0.05
}
if(!is.Numeric(df2.nl) || any(df2.nl < 0))
- stop("Bad input for \"df2.nl\"")
+ stop("Bad input for 'df2.nl'")
if(any(df2.nl >= 0 & df2.nl < 0.05)) {
warning("df2.nl values between 0 and 0.05 converted to 0.05")
df2.nl[df2.nl < 0.05] = 0.05
}
if(!is.Numeric(spar1) || any(spar1 < 0))
- stop("Bad input for \"spar1\"")
+ stop("Bad input for 'spar1'")
if(!is.Numeric(spar2) || any(spar2 < 0))
- stop("Bad input for \"spar2\"")
+ stop("Bad input for 'spar2'")
if(!is.Numeric(epsilon, posit=TRUE, allow=1))
- stop("Bad input for \"epsilon\"")
+ stop("Bad input for 'epsilon'")
if(!is.Numeric(SmallNo, posit=TRUE, allow=1))
- stop("Bad input for \"SmallNo\"")
+ stop("Bad input for 'SmallNo'")
if((SmallNo < .Machine$double.eps) ||
(SmallNo > .0001)) stop("SmallNo is out of range")
@@ -433,6 +430,8 @@ create.cms <- function(Rank=1, M, MSratio=1, which, p1=1) {
}
+
+
callcaof = function(cmatrix,
etamat, xmat, ymat, wvec, modelno,
Control, Nice21=TRUE,
@@ -440,7 +439,7 @@ callcaof = function(cmatrix,
n, M,
othint, othdbl,
alldump=FALSE) {
-if(exists("flush.console")) flush.console()
+ flush.console()
control = Control
Rank = control$Rank
@@ -488,9 +487,7 @@ if(exists("flush.console")) flush.console()
nstar = if(Nice21) ifelse(modelno==3 || modelno==5,n*2,n) else n*M
lenbeta = pstar. * ifelse(Nice21, NOS, 1) # Holds the linear coeffs
- inited = if(is.R()) {
- if(exists(".VGAM.CAO.etamat", envir=VGAMenv)) 1 else 0
- } else 0
+ inited = if(exists(".VGAM.CAO.etamat", envir=VGAMenv)) 1 else 0
usethiseta = if(inited==1)
getfromVGAMenv("etamat", prefix = ".VGAM.CAO.") else t(etamat)
@@ -518,10 +515,9 @@ if(exists("flush.console")) flush.console()
bf.maxit=control$bf.maxit,
bf.epsilon=control$bf.epsilon,
trace=FALSE, se.fit=control$se.fit,
- xbig.save=bnumat, Blist=Blist. ,
+ X_vlm_save=bnumat, Blist=Blist. ,
ncolBlist=ncolBlist. ,
M= M. , qbig=NULL, U=NULL, # NULL implies not needed
- backchat=FALSE,
all.knots=control$all.knots, nk=NULL,
sf.only=TRUE)
@@ -545,7 +541,6 @@ if(exists("flush.console")) flush.console()
if(!all.equal(as.vector(ncbvec), rep(1, len=queue)))
stop("ncbvec not right---should be a queue-vector of ones")
pbig = pstar. #
- backchat = FALSE
npetc = c(n=nrow(nu1mat), p. =ncol(nu1mat), q=Rank, # q=length(which),
@@ -605,21 +600,16 @@ if(exists("flush.console")) flush.console()
nknots = as.integer(smooth.frame$nknots),
itwk = integer(2 * M. ),
kindex = as.integer(smooth.frame$kindex))
-if(exists("flush.console")) flush.console()
+flush.console()
if(ans1$errcode == 0) {
assign2VGAMenv(c("etamat", "beta"), ans1, prefix=".VGAM.CAO.")
- if(is.R()) {
- assign(".VGAM.CAO.cmatrix", matrix(cmatrix,p2,Rank), envir=VGAMenv)
- } else {
- .VGAM.CAO.cmatrix <<- matrix(cmatrix,p2,Rank) # matrix reqd for R=2
- }
-
+ assign(".VGAM.CAO.cmatrix", matrix(cmatrix,p2,Rank), envir=VGAMenv)
} else {
cat("warning in callcaof: error code =", ans1$errcode, "\n")
cat("warning in callcaof: npetc[14] =", ans1$npetc[14], "\n")
- if(exists("flush.console")) flush.console()
+ flush.console()
rmfromVGAMenv(c("etamat", "beta"), prefix=".VGAM.CAO.")
}
@@ -676,7 +666,7 @@ if(exists("flush.console")) flush.console()
spar2=if(Rank == 2) spar2 else NULL)
} else
ans1$deviance
- if(exists("flush.console")) flush.console()
+ flush.console()
returnans
}
@@ -692,7 +682,7 @@ calldcaof = function(cmatrix,
if(alldump) stop("really used?")
-if(exists("flush.console")) flush.console()
+ flush.console()
if(!Nice21) stop("Nice21 must be TRUE")
control = Control
@@ -725,14 +715,12 @@ if(exists("flush.console")) flush.console()
lenbeta = pstar. * ifelse(Nice21, NOS, 1)
if(TRUE) {
- inited = if(is.R()) {
- if(exists(".VGAM.CAO.etamat", envir = VGAMenv)) 1 else 0
- } else 0
- usethiseta = if(inited==1) {if(is.R()) get(".VGAM.CAO.etamat",
- envir = VGAMenv) else .VGAM.CAO.etamat} else t(etamat)
+ inited = if(exists(".VGAM.CAO.etamat", envir = VGAMenv)) 1 else 0
+ usethiseta = if(inited==1) get(".VGAM.CAO.etamat",
+ envir = VGAMenv) else t(etamat)
}
- usethisbeta = if(inited==2) {if(is.R()) get(".VGAM.CAO.beta",
- envir = VGAMenv) else .VGAM.CAO.beta} else double(lenbeta)
+ usethisbeta = if(inited==2) get(".VGAM.CAO.beta",
+ envir = VGAMenv) else double(lenbeta)
@@ -757,10 +745,9 @@ if(exists("flush.console")) flush.console()
bf.maxit=control$bf.maxit,
bf.epsilon=control$bf.epsilon,
trace=FALSE, se.fit=control$se.fit,
- xbig.save=bnumat, Blist=Blist.,
+ X_vlm_save=bnumat, Blist=Blist.,
ncolBlist=ncolBlist. ,
M= M. , qbig=NULL, U=U, # NULL value implies not needed
- backchat=FALSE,
all.knots=control$all.knots, nk=NULL,
sf.only=TRUE)
@@ -793,7 +780,6 @@ if(exists("flush.console")) flush.console()
df1.nl = procVec(control$df1.nl, yn= yn , Def=control$DF1)
spar1 = procVec(control$spar1, yn= yn , Def= control$SPAR1)
}
- backchat = FALSE
npetc = c(n=n, p=1+Rank, length(which), se.fit=control$se.fit, 0,
@@ -801,7 +787,7 @@ if(exists("flush.console")) flush.console()
pbig=sum( ncolBlist.),
qbig=qbig, dimw= dimw. , dimu= dimu. , ier=0, ldk=ldk)
-if(exists("flush.console")) flush.console()
+ flush.console()
ans1 <-
dotFortran(name = if(Nice21) "vdcaof" else stop("need Nice21"),
@@ -849,22 +835,15 @@ if(exists("flush.console")) flush.console()
nknots = as.integer(smooth.frame$nknots),
itwk = integer(2* M. ),
kindex = as.integer(smooth.frame$kindex))
-if(exists("flush.console")) flush.console()
-
- if(is.R()) {
- assign(".VGAM.CAO.etamat", ans1$etamat, envir = VGAMenv)
- assign(".VGAM.CAO.z",ans1$z,envir=VGAMenv)# z; minus any offset
- assign(".VGAM.CAO.U", ans1$U, envir=VGAMenv) # U
- } else {
- .VGAM.CAO.etamat <<- ans1$etamat
- .VGAM.CAO.z <<- ans1$z
- .VGAM.CAO.U <<- ans1$U
- }
+ flush.console()
+
+ assign(".VGAM.CAO.etamat", ans1$etamat, envir = VGAMenv)
+ assign(".VGAM.CAO.z", ans1$z, envir=VGAMenv) # z; minus any offset
+ assign(".VGAM.CAO.U", ans1$U, envir=VGAMenv) # U
if(ans1$errcode == 0) {
} else {
cat("warning in calldcaof: error code =", ans1$errcode, "\n")
- if(exists("flush.console"))
- flush.console()
+ flush.console()
}
returnans = if(alldump) {
@@ -909,7 +888,7 @@ if(exists("flush.console")) flush.console()
spar2=ans1$spardf[2*NOS+(1:NOS)])
} else
ans1$deriv
- if(exists("flush.console")) flush.console()
+ flush.console()
returnans
}
@@ -952,7 +931,7 @@ Coef.cao = function(object,
if(!is.Numeric(maxgriditer, posit=TRUE, allow=1, int=TRUE) || maxgriditer<3)
stop("bad input for argument 'maxgriditer'")
if(!is.logical(ConstrainedO <- object at control$ConstrainedO))
- stop("can't determine whether the model is constrained or not")
+ stop("cannot determine whether the model is constrained or not")
if(!is.Numeric(smallno, posit=TRUE, allow=1) ||
smallno > 0.5 || smallno < 0.0001)
stop("bad input for argument 'smallno'")
@@ -1014,7 +993,7 @@ Coef.cao = function(object,
match(whichSpecies[sppno], ynames) else whichSpecies[sppno]
if(is.na(indexSpecies))
- stop("mismatch found in \"whichSpecies\"")
+ stop("mismatch found in 'whichSpecies'")
while(griditer == 1 ||
((griditer <= maxgriditer) &&
@@ -1167,11 +1146,10 @@ printCoef.cao = function(object, digits = max(2, options()$digits-2), ...) {
- setMethod("show", "Coef.cao", function(object)
- printCoef.cao(object))
- setMethod("print", "Coef.cao", function(x, ...)
- printCoef.cao(object=x, ...))
-
+setMethod("show", "Coef.cao", function(object)
+ printCoef.cao(object))
+setMethod("print", "Coef.cao", function(x, ...)
+ printCoef.cao(object=x, ...))
setMethod("coef", "cao", function(object, ...) Coef.cao(object, ...))
setMethod("coefficients", "cao", function(object, ...) Coef.cao(object, ...))
@@ -1252,7 +1230,7 @@ lvplot.cao = function(object,
indexSpecies = if(is.character(whichSpecies))
match(whichSpecies[sppno], sppnames) else whichSpecies[sppno]
if(is.na(indexSpecies))
- stop("mismatch found in \"whichSpecies\"")
+ stop("mismatch found in 'whichSpecies'")
xx = lvmat
yy = r.curves[,indexSpecies]
o = sort.list(xx)
@@ -1278,7 +1256,7 @@ lvplot.cao = function(object,
indexSpecies = if(is.character(whichSpecies))
match(whichSpecies[sppno], sppnames) else whichSpecies[sppno]
if(is.na(indexSpecies))
- stop("mismatch found in \"whichSpecies\"")
+ stop("mismatch found in 'whichSpecies'")
points(Coeflist at Optimum[1,indexSpecies],
Coeflist at Optimum[2,indexSpecies],
col=pcol[sppno], cex=pcex[sppno], pch=pch[sppno])
@@ -1326,27 +1304,11 @@ predict.cao <- function (object, newdata=NULL,
}
}
- attrassignlm <- function(object, ...)
- attrassigndefault(model.matrix(object), object at terms)
-
- attrassigndefault <- function(mmat, tt) {
- if (!inherits(tt, "terms"))
- stop("need terms object")
- aa <- attr(mmat, "assign")
- if (is.null(aa))
- stop("argument is not really a model matrix")
- ll <- attr(tt, "term.labels")
- if (attr(tt, "intercept") > 0)
- ll <- c("(Intercept)", ll)
- aaa <- factor(aa, labels = ll)
- split(order(aa), aaa)
- }
-
if(!length(newdata)) {
X <- model.matrixvlm(object, type="lm", ...)
offset <- object at offset
tt <- terms(object)
- if(is.R() && !length(object at x))
+ if(!length(object at x))
attr(X, "assign") <- attrassignlm(X, tt)
} else {
if(is.smart(object) && length(object at smart.prediction)) {
@@ -1358,24 +1320,23 @@ predict.cao <- function (object, newdata=NULL,
if(length(object at contrasts)) object at contrasts else NULL,
xlev = object at xlevels)
- if(is.R() && nice21 && nrow(X)!=nrow(newdata)) {
+ if(nice21 && nrow(X)!=nrow(newdata)) {
as.save = attr(X, "assign")
X = X[rep(1, nrow(newdata)),,drop=FALSE]
dimnames(X) = list(dimnames(newdata)[[1]], "(Intercept)")
attr(X, "assign") = as.save # Restored
}
- offset <- if (!is.null(off.num<-attr(tt,"offset"))) {
- eval(attr(tt,"variables")[[off.num+1]], newdata)
- } else if (!is.null(object at offset))
+ offset = if(!is.null(off.num <- attr(tt, "offset"))) {
+ eval(attr(tt, "variables")[[off.num+1]], newdata)
+ } else if(!is.null(object at offset))
eval(object at call$offset, newdata)
if(is.smart(object) && length(object at smart.prediction)) {
wrapup.smart()
}
- if(is.R())
- attr(X, "assign") <- attrassigndefault(X, tt)
+ attr(X, "assign") <- attrassigndefault(X, tt)
}
cancoefs = ccoef(object)
@@ -1403,7 +1364,7 @@ predict.cao <- function (object, newdata=NULL,
indexSpecies = if(is.character(whichSpecies))
match(whichSpecies[sppno], sppnames) else whichSpecies[sppno]
if(is.na(indexSpecies))
- stop("mismatch found in \"whichSpecies\"")
+ stop("mismatch found in 'whichSpecies'")
temp345 = predictcao(object, grid=lvmat, sppno=thisSpecies,
Rank=Rank, deriv=deriv, MSratio=MSratio,
@@ -1456,7 +1417,7 @@ predictcao <- function(object, grid, sppno, Rank=1, deriv=0, MSratio=1,
if(type != "link" && type != "terms")
stop("'link' must be \"link\" or \"terms\"")
if(ncol(grid <- as.matrix(grid)) != Rank)
- stop(paste("'grid' must have", Rank, "columns"))
+ stop("'grid' must have ", Rank, " columns")
if(!is.Numeric(1+deriv, allow=1, positive=TRUE, integ=TRUE))
stop("'deriv' must be a non-negative integer")
if(type == "terms" && deriv != 0)
@@ -1464,7 +1425,7 @@ predictcao <- function(object, grid, sppno, Rank=1, deriv=0, MSratio=1,
temp.b = object at Bspline[[sppno]]
if(type == "terms") {
- meanlv = apply(grid, 2, mean)
+ meanlv = colMeans(grid)
answer = matrix(0, nrow(grid), Rank)
} else {
nlfunvalues = 0
@@ -1558,7 +1519,7 @@ plot.cao = function(x,
indexSpecies = if(is.character(whichSpecies))
match(whichSpecies[sppno], sppnames) else whichSpecies[sppno]
if(is.na(indexSpecies))
- stop("mismatch found in \"whichSpecies\"")
+ stop("mismatch found in 'whichSpecies'")
terms.mat = predictcao(object=x, grid=lvmat, type="terms",
sppno=indexSpecies, Rank=Rank,
deriv=deriv, MSratio=MSratio)
@@ -1583,7 +1544,7 @@ plot.cao = function(x,
...)
}
if(residuals.arg) {
- stop("can't handle residuals=TRUE yet")
+ stop("cannot handle residuals=TRUE yet")
}
counter = counter + 1
lines(xvals, yvals,
@@ -1599,7 +1560,7 @@ plot.cao = function(x,
setMethod("plot", "cao",
function(x, y, ...) {
- if(!missing(y)) stop("can't process the \"y\" argument")
+ if(!missing(y)) stop("cannot process the 'y' argument")
invisible(plot.cao(x, ...))})
@@ -1721,33 +1682,47 @@ setMethod("persp", "cao", function(x, ...) persp.cao(x=x, ...))
lv.cao = function(object, ...) {
Coef(object, ...)@lv
}
-lv.Coef.cao = function(object, ...) {
- if(length(list(...))) warning("Too late! Ignoring the extra arguments")
- object at lv
-}
+
if(!isGeneric("lv"))
- setGeneric("lv", function(object, ...) standardGeneric("lv"))
-setMethod("lv", "cao", function(object, ...) lv.cao(object, ...))
-setMethod("lv", "Coef.cao", function(object, ...) lv.Coef.cao(object, ...))
+ setGeneric("lv", function(object, ...) standardGeneric("lv"),
+ package="VGAM")
+
+ setMethod("lv", "cao", function(object, ...) lv.cao(object, ...))
+
+setClass(Class="summary.cao",
+ representation("misc" = "list",
+ "call" = "call"),
+ contains="Coef.cao")
+
+
+
-setClass(Class="summary.cao", representation("Coef.cao",
- "misc" = "list",
- "call" = "call"))
summary.cao = function(object, ...) {
answer = Coef(object, ...)
- class(answer) = "summary.cao"
+
+ print("20090417; in summary.cao()")
+
+ answer = as(answer, "summary.cao")
+
+ print('20090417 get warning; need to stop "class(answer) = "summary.cao"" ')
+
answer at misc = object at misc
answer at call = object at call
answer
}
+setMethod("summary", "cao", function(object, ...)
+ summary.cao(object, ...))
+
+
+
printsummary.cao = function(x, ...) {
cat("\nCall:\n")
dput(x at call)
@@ -1765,9 +1740,6 @@ printsummary.cao = function(x, ...) {
invisible(x)
}
-setMethod("summary", "cao", function(object, ...)
- summary.cao(object, ...))
-
setMethod("print", "summary.cao",
function(x, ...)
invisible(printsummary.cao(x, ...)))
@@ -1791,6 +1763,7 @@ ccoef.Coef.cao = function(object, ...) {
if(!isGeneric("ccoef"))
setGeneric("ccoef", function(object, ...) standardGeneric("ccoef"))
+
setMethod("ccoef", "cao", function(object, ...) ccoef.cao(object, ...))
setMethod("ccoef", "Coef.cao", function(object, ...) ccoef.Coef.cao(object, ...))
@@ -1824,3 +1797,14 @@ setMethod("print", "cao", function(x, ...) print.vgam(x, ...))
+
+
+
+
+
+
+
+
+
+
+
diff --git a/R/coef.vlm.q b/R/coef.vlm.q
index ac16497..8f8fff1 100644
--- a/R/coef.vlm.q
+++ b/R/coef.vlm.q
@@ -3,151 +3,60 @@
-coefvlm <- function(object, matrix.out=FALSE, label=TRUE, compress=TRUE)
-{
+
+
+
+coefvlm <- function(object, matrix.out=FALSE, label=TRUE) {
ans <- object at coefficients
if(!label)
names(ans) <- NULL
- if(!matrix.out && compress)
+ if(!matrix.out)
return(ans)
ncolx <- object at misc$p # = length(object at constraints)
M <- object at misc$M
- xij <- object at control$xij
Blist <- object at constraints
- if(!length(xij) && all(trivial.constraints(Blist) == 1)) {
- B <- matrix(ans, nrow=ncolx, ncol=M, byrow=TRUE)
+ if(all(trivial.constraints(Blist) == 1)) {
+ Bmat <- matrix(ans, nrow=ncolx, ncol=M, byrow=TRUE)
} else {
- B <- matrix(as.numeric(NA), nrow=ncolx, ncol=M)
-
- if(length(xij)) {
-
- Xmat = object at x # model.matrix(object)
- atx = attr(Xmat, "assign")
- tmp9=attributes(lm2vlm.model.matrix(Xmat,object at constraints,xij=xij))
- nasgn = tmp9$assign
- vasgn = tmp9$vassign
- if(!length(atx) || !length(nasgn) || !length(vasgn))
- stop("can't get atx, nasgn and/or vasgn")
-
- if(inherits(xij, "formula"))
- xij = list(xij)
-
- for(i in 1:length(xij)) {
- tform = terms(xij[[i]])
- atform = attr(tform, "term.labels")
- for(k in 1:length(atform)) {
- for(s in atx[[(atform[[k]])]]) {
- if(length(Blist[[s]])) {
- Blist[[s]] = ei(k, M) # changed
- }
- }
- }
- }
-
-
- ncolBlist <- unlist(lapply(Blist, ncol)) # Modified
- ans.save = ans # small
- ans = rep(as.numeric(NA), len=sum(ncolBlist)) # big
- ans.copied = rep(FALSE, len=length(ans))
- ans.save.copied = rep(FALSE, len=length(ans.save))
-
- ptr0 = rep(as.numeric(NA), len=length(xij))
- for(i in 1:length(xij)) {
- tform = terms(xij[[i]])
- atform = attr(tform, "term.labels")
- response.name = (dimnames(attr(tform, "factors"))[[1]])[1]
-
- for(s in 1:M)
- if(length(nasgn[[atform[s]]])) {
- ptr0[i] = s
- break
- }
- dest.col.index = nasgn[[atform[ptr0[i]]]]
- rindex = vlabel(response.name,
- length(dest.col.index), M=M, separator="")
- dummy = ans.save*0 + 1:length(ans.save) # names retained
- dest.col.index = dummy[rindex]
-
- for(k in ((1:M))) {
- from.col.index = nasgn[[atform[k]]] # May be NULL
- if(length(from.col.index)) {
- ans[from.col.index] = ans.save[dest.col.index]
- ans.copied[from.col.index] = TRUE
- ans.save.copied[dest.col.index] = TRUE
- }
- }
- }
-
- if(any(!ans.copied)) {
- ans[!ans.copied] = ans.save[!ans.save.copied]
- }
- names(ans) = vlabel(names(ncolBlist), ncolBlist,
- M=M, separator="")
+ Bmat <- matrix(as.numeric(NA), nrow=ncolx, ncol=M)
- }
-
- if(!matrix.out && !compress)
+ if(!matrix.out)
return(ans)
ncolBlist <- unlist(lapply(Blist, ncol))
nasgn <- names(Blist)
temp <- c(0, cumsum(ncolBlist))
- for(i in 1:length(nasgn)) {
- index <- (temp[i]+1):temp[i+1]
- cm <- Blist[[nasgn[i]]]
- B[i,] <- cm %*% ans[index]
+ for(ii in 1:length(nasgn)) {
+ index <- (temp[ii]+1):temp[ii+1]
+ cmat <- Blist[[nasgn[ii]]]
+ Bmat[ii,] <- cmat %*% ans[index]
}
}
if(label) {
d1 <- object at misc$colnames.x
d2 = object at misc$predictors.names # Could be NULL
- dimnames(B) <- list(d1, d2)
- }
-
- if(compress && length(xij)) {
- ci2 = NULL
- for(i in 1:length(xij)) {
- tform = terms(xij[[i]])
- atform = attr(tform, "term.labels")
- response.name = (dimnames(attr(tform, "factors"))[[1]])[1]
-
- dest.col.index = atx[[atform[ptr0[i]]]]
-
-
- for(k in ((1:M)[-ptr0[i]])) {
- from.col.index = atx[[atform[k]]] # May be NULL
- if(length(from.col.index)) {
- B[dest.col.index,] = B[dest.col.index,] + B[from.col.index,]
- tmp5 = dimnames(B)[[1]]
- tmp5[dest.col.index] = vlabel(response.name,
- length(dest.col.index), M=M, separator="")
- dimnames(B) = list(tmp5, dimnames(B)[[2]])
- ci2 = c(ci2, from.col.index)
- }
- }
- }
- B = B[-ci2,,drop=FALSE] # Delete rows not wanted
+ dimnames(Bmat) <- list(d1, d2)
}
- B
+ Bmat
} # end of coefvlm
- setMethod("coefficients", "vlm", function(object, ...)
- coefvlm(object, ...))
- setMethod("coef", "vlm", function(object, ...)
- coefvlm(object, ...))
- setMethod("coefficients", "vglm", function(object, ...)
- coefvlm(object, ...))
- setMethod("coef", "vglm", function(object, ...)
- coefvlm(object, ...))
+setMethod("coefficients", "vlm", function(object, ...)
+ coefvlm(object, ...))
+setMethod("coef", "vlm", function(object, ...)
+ coefvlm(object, ...))
+setMethod("coefficients", "vglm", function(object, ...)
+ coefvlm(object, ...))
+setMethod("coef", "vglm", function(object, ...)
+ coefvlm(object, ...))
@@ -184,28 +93,7 @@ setMethod("Coef", "vlm", function(object, ...)
Coef.vlm(object, ...))
-if(!is.R()) {
-setMethod("Coefficients", "vglm", function(object, ...)
- Coef.vlm(object, ...))
-setMethod("Coef", "vglm", function(object, ...)
- Coef.vlm(object, ...))
-setMethod("Coefficients", "vgam", function(object, ...)
- Coef.vlm(object, ...))
-setMethod("Coef", "vgam", function(object, ...)
- Coef.vlm(object, ...))
-setMethod("Coefficients", "rrvglm", function(object, ...)
- Coef.vlm(object, ...))
-setMethod("Coef", "rrvglm", function(object, ...)
- Coef.vlm(object, ...))
-}
-
-if(FALSE)
-coef.rrvglm = function(object,
- type = c("all", "vlm"),
- matrix.out=FALSE, label=TRUE) {
-
-}
diff --git a/R/cqo.R b/R/cqo.R
index aef3edc..3b8c8de 100644
--- a/R/cqo.R
+++ b/R/cqo.R
@@ -49,7 +49,7 @@ cqo <- function(formula,
y <- model.response(mf, "numeric") # model.extract(mf, "response")
x <- model.matrix(mt, mf, contrasts)
- attr(x, "assign") <- attrassigndefault(x, mt) # So as to make it like Splus
+ attr(x, "assign") = attrassigndefault(x, mt)
offset <- model.offset(mf)
if(is.null(offset))
offset <- 0 # yyy ???
@@ -64,7 +64,7 @@ cqo <- function(formula,
if(is.function(family))
family <- family()
if(!inherits(family, "vglmff")) {
- stop(paste("family=", family, "is not a VGAM family function"))
+ stop("'family=", family, "' is not a VGAM family function")
}
control$criterion = "coefficients" # Specifically for vcontrol.expression
diff --git a/R/cqo.fit.q b/R/cqo.fit.q
index 7531f3b..22e454f 100644
--- a/R/cqo.fit.q
+++ b/R/cqo.fit.q
@@ -5,7 +5,7 @@
callcqof = function(cmatrix, etamat, xmat, ymat, wvec,
- xbig.save1, modelno, Control,
+ X_vlm_1save, modelno, Control,
n, M, p1star, p2star, nice31, allofit=FALSE) {
ocmatrix = cmatrix
control = Control
@@ -37,7 +37,7 @@ callcqof = function(cmatrix, etamat, xmat, ymat, wvec,
if(control$trace) {
cat(paste("Taking evasive action for latent variable ",
lookat, ".\n", sep=""))
- if(exists("flush.console")) flush.console()
+ flush.console()
}
rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance",
"cmatrix", "ocmatrix"), prefix=".VGAM.CQO.")
@@ -67,7 +67,7 @@ callcqof = function(cmatrix, etamat, xmat, ymat, wvec,
p1star=p1star, p2star=p2star, nice31=nice31, lenbeta,
itol=itol, control$trace, p1, p2, control$method.init)
bnumat = if(nice31) matrix(0,nstar,pstar) else
- cbind(matrix(0,nstar,p2star), xbig.save1)
+ cbind(matrix(0,nstar,p2star), X_vlm_1save)
if(TRUE) {
}
@@ -107,14 +107,14 @@ if(TRUE) {
rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance",
"cmatrix", "ocmatrix"), prefix=".VGAM.CQO.")
}
- if(control$trace && exists("flush.console"))
+ if(control$trace)
flush.console()
if(allofit) list(deviance=ans1$deviance, coefficients=ans1$beta) else
ans1$deviance
}
calldcqof = function(cmatrix, etamat, xmat, ymat, wvec,
- xbig.save1, modelno, Control,
+ X_vlm_1save, modelno, Control,
n, M, p1star, p2star, nice31, allofit=FALSE) {
control = Control
Rank = control$Rank
@@ -147,7 +147,7 @@ calldcqof = function(cmatrix, etamat, xmat, ymat, wvec,
if(control$trace) {
cat(paste("Taking evasive action for latent variable ",
lookat, ".\n", sep=""))
- if(exists("flush.console")) flush.console()
+ flush.console()
}
rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance",
"cmatrix", "ocmatrix"), prefix=".VGAM.CQO.")
@@ -178,9 +178,8 @@ calldcqof = function(cmatrix, etamat, xmat, ymat, wvec,
itol=itol, control$trace,
p1, p2, control$method.init) # other ints
bnumat = if(nice31) matrix(0,nstar,pstar) else
- cbind(matrix(0,nstar,p2star), xbig.save1)
- if(exists("flush.console"))
- flush.console()
+ cbind(matrix(0,nstar,p2star), X_vlm_1save)
+ flush.console()
ans1 <-
dotFortran(name="dcqof", numat=as.double(numat), as.double(ymat),
@@ -208,8 +207,7 @@ calldcqof = function(cmatrix, etamat, xmat, ymat, wvec,
warning(paste("error code in calldcqof =", ans1$errcode))
}
- if(exists("flush.console"))
- flush.console()
+ flush.console()
ans1$deriv
}
@@ -290,7 +288,6 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
Rank <- control$Rank
rrcontrol <- control #
- backchat = FALSE # Specifically for rrr.init.expression
if(length(family at initialize))
eval(family at initialize) # Initialize mu and M (and optionally w)
@@ -314,8 +311,8 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
if(any(is.na(index)))
stop("Dzero argument didn't fully match y-names")
if(length(index) == M)
- stop(paste("all linear predictors are linear in the",
- "latent variable(s); so set Quadratic=FALSE"))
+ stop("all linear predictors are linear in the",
+ " latent variable(s); so set 'Quadratic=FALSE'")
rrcontrol$Dzero = control$Dzero = index
}
@@ -399,7 +396,7 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
ncolBlist <- unlist(lapply(Blist, ncol))
dimB <- sum(ncolBlist)
- xbig.save <- if(nice31) {
+ X_vlm_save <- if(nice31) {
NULL
} else {
tmp500=lm2qrrvlm.model.matrix(x=x,Blist=Blist,C=Cmat,control=control)
@@ -412,9 +409,9 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
lm2vlm.model.matrix(xsmall.qrr, B.list, xij=control$xij)
}
- if(length(coefstart) && length(xbig.save)) {
- eta <- if(ncol(xbig.save) > 1) xbig.save %*% coefstart +
- offset else xbig.save * coefstart + offset
+ if(length(coefstart) && length(X_vlm_save)) {
+ eta <- if(ncol(X_vlm_save) > 1) X_vlm_save %*% coefstart +
+ offset else X_vlm_save * coefstart + offset
eta <- if(M > 1) matrix(eta, ncol=M, byrow=TRUE) else c(eta)
mu <- family at inverse(eta, extra)
}
@@ -535,8 +532,7 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
if(Rank==1) "lv" else paste("lv", 1:Rank, sep=""))
if(p2>5) print(ans, dig=3) else print(t(ans), dig=3)
}
- if(exists("flush.console"))
- flush.console()
+ flush.console()
})
sd.scale.X2.expression = expression({
if(length(isdlv)) {
@@ -604,8 +600,8 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
} else {
xij = NULL # temporary measure
U = t(sqrt(wts))
- tmp = vlm.wfit(x=X1, z=etamat, Blist=NULL, U=U, matrix.out=TRUE,
- XBIG=FALSE, rss=TRUE, qr=FALSE, xij=xij)
+ tmp = vlm.wfit(xmat=X1, zmat=etamat, Blist=NULL, U=U, matrix.out=TRUE,
+ is.vlmX=FALSE, rss=TRUE, qr=FALSE, xij=xij)
ans = crow1C(as.matrix(tmp$resid), rep(Crow1positive, len=effrank))
if(effrank < Rank) {
ans = cbind(ans, ans.save[,-(1:effrank)]) # ans is better
@@ -643,8 +639,7 @@ cqo.init.derivative.expression <- expression({
} else "Quasi-Newton"
if(trace && control$OptimizeWrtC) {
cat("\nUsing", which.optimizer, "algorithm\n")
- if(exists("flush.console"))
- flush.console()
+ flush.console()
}
@@ -661,14 +656,14 @@ if(FALSE) {
get("CQO.FastAlgorithm", envir = VGAMenv)) else
(exists("CQO.FastAlgorithm",inherits=TRUE) && CQO.FastAlgorithm)
if(!canfitok)
- stop("can't fit this model using fast algorithm")
+ stop("cannot fit this model using fast algorithm")
p2star = if(nice31)
ifelse(control$IToleran, Rank, Rank+0.5*Rank*(Rank+1)) else
(NOS*Rank + Rank*(Rank+1)/2 * ifelse(control$EqualTol,1,NOS))
p1star = if(nice31) ifelse(modelno==3 || modelno==5,1+p1,p1) else
- (ncol(xbig.save)-p2star)
- xbig.save1 = if(p1star > 0) xbig.save[,-(1:p2star)] else NULL
+ (ncol(X_vlm_save)-p2star)
+ X_vlm_1save = if(p1star > 0) X_vlm_save[,-(1:p2star)] else NULL
})
@@ -686,7 +681,7 @@ if(is.R()) {
parscale=rep(control$Parscale, len=length(Cmat)),
maxit=control$Maxit.optim),
etamat=eta, xmat=x, ymat=y, wvec=w,
- xbig.save1 = xbig.save1,
+ X_vlm_1save = X_vlm_1save,
modelno=modelno, Control=control,
n=n, M=M, p1star=p1star, p2star=p2star, nice31=nice31)
@@ -720,8 +715,7 @@ tmp.fitted = alt$fitted # contains \bI_{Rank} \bnu if Corner
if(trace && control$OptimizeWrtC) {
cat("\n")
cat(which.optimizer, "using", if(is.R()) "optim():" else "nlminb():", "\n")
- cat("Objective =", if(is.R())
- quasi.newton$value else format(quasi.newton$objective), "\n")
+ cat("Objective =", quasi.newton$value, "\n")
cat("Parameters (= c(C)) = ", if(length(quasi.newton$par) < 5) "" else "\n")
cat(if(is.R()) alt$Cmat else format(alt$Cmat), fill=TRUE)
cat("\n")
@@ -734,8 +728,7 @@ if(trace && control$OptimizeWrtC) {
if(length(quasi.newton$message))
cat("Message =", quasi.newton$message, "\n")
cat("\n")
- if(exists("flush.console"))
- flush.console()
+ flush.console()
}
Amat = alt$Amat #
diff --git a/R/family.aunivariate.q b/R/family.aunivariate.q
index 934c648..dfb85dc 100644
--- a/R/family.aunivariate.q
+++ b/R/family.aunivariate.q
@@ -7,42 +7,52 @@
-dkumar = function(x, shape1, shape2) {
- ans = shape1 * shape2 * x^(shape1-1) * (1 - x^shape1)^(shape2-1)
- ans[(x <= 0) | (x >= 1)] = 0
- ans[(shape1 <= 0) | (shape2 <= 0)] = NA
- ans
+
+
+
+
+dkumar = function(x, shape1, shape2, log = FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ N = max(length(x), length(shape1), length(shape2))
+ x = rep(x, len=N); shape1 = rep(shape1, len=N); shape2 = rep(shape2, len=N)
+
+ logdensity = rep(log(0), len=N)
+ xok = (0 <= x & x <= 1)
+ logdensity[xok] = log(shape1[xok]) + log(shape2[xok]) +
+ (shape1[xok]-1) * log(x[xok]) +
+ (shape2[xok]-1) * log1p(-x[xok]^shape1[xok])
+ logdensity[shape1 <= 0] = NaN
+ logdensity[shape2 <= 0] = NaN
+ if(log.arg) logdensity else exp(logdensity)
}
rkumar = function(n, shape1, shape2) {
- if(!is.Numeric(n, integ=TRUE,allow=1)) stop("bad input for argument \"n\"")
ans = (1 - (1 - runif(n))^(1/shape2))^(1/shape1)
- ans[(shape1 <= 0) | (shape2 <= 0)] = NA
+ ans[(shape1 <= 0) | (shape2 <= 0)] = NaN
ans
}
qkumar = function(p, shape1, shape2) {
- if(!is.Numeric(p)) stop("bad input for argument \"p\"")
- if(!is.Numeric(shape1, posi=TRUE)) stop("bad input for argument \"shape1\"")
- if(!is.Numeric(shape2, posi=TRUE)) stop("bad input for argument \"shape2\"")
- bad = (p < 0) | (p > 1)
- if(any(bad))
- stop("bad input for 'p'")
ans = (1 - (1 - p)^(1/shape2))^(1/shape1)
- ans[(shape1 <= 0) | (shape2 <= 0)] = NA
+ ans[(shape1 <= 0) | (shape2 <= 0)] = NaN
+ ans[p < 0] = NaN
+ ans[p > 1] = NaN
ans
}
pkumar = function(q, shape1, shape2) {
ans = 1 - (1 - q^shape1)^shape2
- ans[q>=1] = 1
- ans[q<=0] = 0
- ans[(shape1 <= 0) | (shape2 <= 0)] = NA
+ ans[q <= 0] = 0
+ ans[q >= 1] = 1
+ ans[(shape1 <= 0) | (shape2 <= 0)] = NaN
ans
}
@@ -54,19 +64,19 @@ kumar.control <- function(save.weight=TRUE, ...)
}
-kumar = function(lshape1="loge", lshape2="loge",
- eshape1=list(), eshape2=list(),
- ishape1=NULL, ishape2=NULL,
- nsimEIM=500, zero=NULL)
+ kumar = function(lshape1="loge", lshape2="loge",
+ eshape1=list(), eshape2=list(),
+ ishape1=NULL, ishape2=NULL,
+ nsimEIM=500, zero=NULL)
{
if(mode(lshape1) != "character" && mode(lshape1) != "name")
lshape1 = as.character(substitute(lshape1))
if(mode(lshape2) != "character" && mode(lshape2) != "name")
lshape2 = as.character(substitute(lshape2))
if(length(ishape1) && (!is.Numeric(ishape1, allow=1, positive=TRUE)))
- stop("bad input for argument \"ishape1\"")
+ stop("bad input for argument 'ishape1'")
if(length(ishape2) && !is.Numeric(ishape2))
- stop("bad input for argument \"ishape2\"")
+ stop("bad input for argument 'ishape2'")
if(!is.list(eshape1)) eshape1 = list()
if(!is.list(eshape2)) eshape2 = list()
if(!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 50)
@@ -130,9 +140,9 @@ kumar = function(lshape1="loge", lshape2="loge",
function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
shape1 = eta2theta(eta[,1], link= .lshape1, earg= .eshape1)
shape2 = eta2theta(eta[,2], link= .lshape2, earg= .eshape2)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (log(shape1) + log(shape2) + (shape1-1)*log(y) +
- (shape2-1)*log1p(-y^shape1)))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dkumar(x=y, shape1=shape1, shape2=shape2, log=TRUE))
+ }
}, list( .lshape1=lshape1, .lshape2=lshape2,
.eshape1=eshape1, .eshape2=eshape2 ))),
vfamily=c("kumar"),
@@ -167,7 +177,7 @@ kumar = function(lshape1="loge", lshape2="loge",
run.mean = ((ii-1) * run.mean + temp3) / ii
}
wz = if(intercept.only)
- matrix(apply(run.mean,2,mean), n, dimm(M), byrow=TRUE) else run.mean
+ matrix(colMeans(run.mean), n, dimm(M), byrow=TRUE) else run.mean
dtheta.detas = cbind(dshape1.deta, dshape2.deta)
index0 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
@@ -180,17 +190,29 @@ kumar = function(lshape1="loge", lshape2="loge",
-drice = function(x, vee, sigma) {
- ans = (x / sigma^2) * exp(-(x^2+vee^2)/(2*sigma^2)) *
- besselI(abs(x*vee/sigma^2), nu=0)
- ans[(x <= 0)] = 0
- ans[!is.finite(vee) | !is.finite(sigma) | (vee < 0) | (sigma <= 0)] = NA
- ans
+drice = function(x, vee, sigma, log = FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+
+ N = max(length(x), length(vee), length(sigma))
+ x = rep(x, len=N); vee = rep(vee, len=N); sigma = rep(sigma, len=N)
+
+ logdensity = rep(log(0), len=N)
+ xok = (x > 0)
+ x.abs = abs(x[xok]*vee[xok]/sigma[xok]^2)
+ logdensity[xok] = log(x[xok]) - 2 * log(sigma[xok]) +
+ (-(x[xok]^2+vee[xok]^2)/(2*sigma[xok]^2)) +
+ log(besselI(x.abs, nu=0, expon.scaled = TRUE)) + x.abs
+ logdensity[sigma <= 0] = NaN
+ logdensity[vee < 0] = NaN
+ if(log.arg) logdensity else exp(logdensity)
}
rrice = function(n, vee, sigma) {
- if(!is.Numeric(n, integ=TRUE,allow=1)) stop("bad input for argument \"n\"")
+ if(!is.Numeric(n, integ=TRUE,allow=1)) stop("bad input for argument 'n'")
theta = 1 # any number
X = rnorm(n, mean=vee * cos(theta), sd=sigma)
Y = rnorm(n, mean=vee * sin(theta), sd=sigma)
@@ -205,7 +227,7 @@ riceff.control <- function(save.weight=TRUE, ...)
}
-riceff = function(lvee="loge", lsigma="loge",
+ riceff = function(lvee="loge", lsigma="loge",
evee=list(), esigma=list(),
ivee=NULL, isigma=NULL,
nsimEIM=100, zero=NULL)
@@ -215,9 +237,9 @@ riceff = function(lvee="loge", lsigma="loge",
if(mode(lsigma) != "character" && mode(lsigma) != "name")
lsigma = as.character(substitute(lsigma))
if(length(ivee) && !is.Numeric(ivee, positive=TRUE))
- stop("bad input for argument \"ivee\"")
+ stop("bad input for argument 'ivee'")
if(length(isigma) && !is.Numeric(isigma, positive=TRUE))
- stop("bad input for argument \"isigma\"")
+ stop("bad input for argument 'isigma'")
if(!is.list(evee)) evee = list()
if(!is.list(esigma)) esigma = list()
if(!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 50)
@@ -284,10 +306,9 @@ riceff = function(lvee="loge", lsigma="loge",
function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
vee = eta2theta(eta[,1], link= .lvee, earg= .evee)
sigma = eta2theta(eta[,2], link= .lsigma, earg= .esigma)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (log(y) - 2*log(sigma) +
- log(besselI(y*vee/sigma^2, nu=0)) -
- (y^2 + vee^2)/(2*sigma^2)))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * drice(x=y, vee=vee, sigma=sigma, log = TRUE))
+ }
}, list( .lvee=lvee, .lsigma=lsigma,
.evee=evee, .esigma=esigma ))),
vfamily=c("riceff"),
@@ -321,7 +342,7 @@ riceff = function(lvee="loge", lsigma="loge",
run.cov = ((ii-1) * run.cov + temp3[,1] * temp3[,2]) / ii
}
wz = if(intercept.only)
- matrix(apply(cbind(run.var, run.cov), 2, mean),
+ matrix(colMeans(cbind(run.var, run.cov)),
n, dimm(M), byrow=TRUE) else cbind(run.var, run.cov)
dtheta.detas = cbind(dvee.deta, dsigma.deta)
@@ -383,19 +404,19 @@ skellam.control <- function(save.weight=TRUE, ...)
}
-skellam = function(lmu1="loge", lmu2="loge",
- emu1=list(), emu2=list(),
- imu1=NULL, imu2=NULL,
- nsimEIM=100, parallel=FALSE, zero=NULL)
+ skellam = function(lmu1="loge", lmu2="loge",
+ emu1=list(), emu2=list(),
+ imu1=NULL, imu2=NULL,
+ nsimEIM=100, parallel=FALSE, zero=NULL)
{
if(mode(lmu1) != "character" && mode(lmu1) != "name")
lmu1 = as.character(substitute(lmu1))
if(mode(lmu2) != "character" && mode(lmu2) != "name")
lmu2 = as.character(substitute(lmu2))
if(length(imu1) && !is.Numeric(imu1, positive=TRUE))
- stop("bad input for argument \"imu1\"")
+ stop("bad input for argument 'imu1'")
if(length(imu2) && !is.Numeric(imu2, positive=TRUE))
- stop("bad input for argument \"imu2\"")
+ stop("bad input for argument 'imu2'")
if(!is.list(emu1)) emu1 = list()
if(!is.list(emu2)) emu2 = list()
if(!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 50)
@@ -422,8 +443,7 @@ skellam = function(lmu1="loge", lmu2="loge",
namesof("mu1", .lmu1, earg= .emu1, tag=FALSE),
namesof("mu2", .lmu2, earg= .emu2, tag=FALSE))
if(!length(etastart)) {
- junk = if(is.R()) lm.wfit(x=x, y=y, w=w) else
- lm.wfit(x=x, y=y, w=w, method="qr")
+ junk = lm.wfit(x=x, y=y, w=w)
var.y.est = sum(w * junk$resid^2) / junk$df.residual
mean.init = weighted.mean(y, w)
mu1.init = max((var.y.est + mean.init)/2, 0.01)
@@ -454,6 +474,10 @@ skellam = function(lmu1="loge", lmu2="loge",
mu1 = eta2theta(eta[,1], link= .lmu1, earg= .emu1)
mu2 = eta2theta(eta[,2], link= .lmu2, earg= .emu2)
if(residuals) stop("loglikelihood residuals not implemented yet") else {
+
+
+
+
if( is.logical( .parallel ) && length( .parallel )==1 &&
.parallel )
sum(w * log(besselI(2*mu1, nu=y, expon=TRUE))) else
@@ -499,7 +523,7 @@ skellam = function(lmu1="loge", lmu2="loge",
run.cov = ((ii-1) * run.cov + temp3[,1] * temp3[,2]) / ii
}
wz = if(intercept.only)
- matrix(apply(cbind(run.var, run.cov), 2, mean),
+ matrix(colMeans(cbind(run.var, run.cov)),
n, dimm(M), byrow=TRUE) else cbind(run.var, run.cov)
dtheta.detas = cbind(dmu1.deta, dmu2.deta)
@@ -531,7 +555,7 @@ dyules = function(x, rho, log=FALSE) {
ryules = function(n, rho) {
- if(!is.Numeric(n, integ=TRUE,allow=1)) stop("bad input for argument \"n\"")
+ if(!is.Numeric(n, integ=TRUE,allow=1)) stop("bad input for argument 'n'")
rgeom(n, prob=exp(-rexp(n, rate=rho))) + 1
}
@@ -553,10 +577,10 @@ yulesimon.control <- function(save.weight=TRUE, ...)
}
-yulesimon = function(link="loge", earg=list(), irho=NULL, nsimEIM=200)
+ yulesimon = function(link="loge", earg=list(), irho=NULL, nsimEIM=200)
{
if(length(irho) && !is.Numeric(irho, positi=TRUE))
- stop("argument \"irho\" must be > 0")
+ stop("argument 'irho' must be > 0")
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
@@ -601,8 +625,9 @@ yulesimon = function(link="loge", earg=list(), irho=NULL, nsimEIM=200)
loglikelihood=eval(substitute(
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
rho = eta2theta(eta, .link, earg=.earg)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (log(rho) + lbeta(y, rho+1)))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dyules(x=y, rho=rho, log=TRUE))
+ }
}, list( .link=link, .earg=earg ))),
vfamily=c("yulesimon"),
deriv=eval(substitute(expression({
@@ -621,7 +646,7 @@ yulesimon = function(link="loge", earg=list(), irho=NULL, nsimEIM=200)
run.var = ((ii-1) * run.var + temp3^2) / ii
}
wz = if(intercept.only)
- matrix(apply(cbind(run.var), 2, mean),
+ matrix(colMeans(cbind(run.var)),
n, dimm(M), byrow=TRUE) else cbind(run.var)
wz = wz * drho.deta^2
@@ -669,9 +694,9 @@ pslash <- function(q, mu=0, sigma=1){
rslash <- function (n, mu=0, sigma=1){
if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
- stop("bad input for argument \"n\"")
+ stop("bad input for argument 'n'")
if (any(sigma <= 0))
- stop("argument \"sigma\" must be positive")
+ stop("argument 'sigma' must be positive")
rnorm(n=n, mean=mu, sd=sigma) / runif(n=n)
}
@@ -680,11 +705,11 @@ slash.control <- function(save.weight=TRUE, ...)
list(save.weight=save.weight)
}
-slash = function(lmu="identity", lsigma="loge", emu=list(), esigma=list(),
- imu=NULL, isigma=NULL,
- iprobs = c(0.1, 0.9),
- nsimEIM=250, zero=NULL,
- smallno = .Machine$double.eps*1000)
+ slash = function(lmu="identity", lsigma="loge", emu=list(), esigma=list(),
+ imu=NULL, isigma=NULL,
+ iprobs = c(0.1, 0.9),
+ nsimEIM=250, zero=NULL,
+ smallno = .Machine$double.eps*1000)
{
if(mode(lmu) != "character" && mode(lmu) != "name")
lmu = as.character(substitute(lmu))
@@ -693,16 +718,16 @@ slash = function(lmu="identity", lsigma="loge", emu=list(), esigma=list(),
if(length(isigma) && !is.Numeric(isigma, posit=TRUE))
stop("'isigma' must be > 0")
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(emu)) emu = list()
if(!is.list(esigma)) esigma = list()
if(!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 50)
stop("'nsimEIM' should be an integer greater than 50")
if(!is.Numeric(iprobs, posit=TRUE) || max(iprobs) >= 1 ||
length(iprobs)!=2)
- stop("bad input for argument \"iprobs\"")
+ stop("bad input for argument 'iprobs'")
if(!is.Numeric(smallno, posit=TRUE) || smallno > 0.1)
- stop("bad input for argument \"smallno\"")
+ stop("bad input for argument 'smallno'")
new("vglmff",
blurb=c("Slash distribution\n\n",
@@ -763,9 +788,10 @@ slash = function(lmu="identity", lsigma="loge", emu=list(), esigma=list(),
mu = eta2theta(eta[,1], link= .lmu, earg= .emu)
sigma = eta2theta(eta[,2], link= .lsigma, earg= .esigma)
zedd = (y-mu)/sigma
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * ifelse(abs(zedd)<.smallno, -log(2*sigma*sqrt(2*pi)),
- log1p(-exp(-zedd^2/2)) - log(sqrt(2*pi)*sigma*zedd^2)))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dslash(x=y, mu=mu, sigma=sigma, log=TRUE,
+ smallno= .smallno))
+ }
}, list( .lmu=lmu, .lsigma=lsigma,
.emu=emu, .esigma=esigma, .smallno=smallno ))),
vfamily=c("slash"),
@@ -810,7 +836,7 @@ slash = function(lmu="identity", lsigma="loge", emu=list(), esigma=list(),
temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
}
wz = if(intercept.only)
- matrix(apply(run.varcov, 2, mean, na.rm=FALSE),
+ matrix(colMeans(run.varcov, na.rm=FALSE),
n, ncol(run.varcov), byrow=TRUE) else run.varcov
dthetas.detas = cbind(dmu.deta, dsigma.deta)
wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
@@ -823,18 +849,26 @@ slash = function(lmu="identity", lsigma="loge", emu=list(), esigma=list(),
-dnefghs = function(x, tau) {
- ans = sin(pi*tau) * exp((1-tau)*x) / (pi*(1+exp(x)))
- ans[(tau < 0) | (tau > 1)] = NA
- ans
+dnefghs = function(x, tau, log=FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ N = max(length(x), length(tau))
+ x = rep(x, len=N); tau = rep(tau, len=N);
+
+ logdensity = log(sin(pi*tau)) + (1-tau)*x - log(pi) - log1p(exp(x))
+ logdensity[tau < 0] = NaN
+ logdensity[tau > 1] = NaN
+ if(log.arg) logdensity else exp(logdensity)
}
-nefghs = function(link="logit", earg=list(), itau=NULL, method.init=1)
+ nefghs = function(link="logit", earg=list(), itau=NULL, method.init=1)
{
if(length(itau) && !is.Numeric(itau, positi=TRUE) || any(itau >= 1))
- stop("argument \"itau\" must be in (0,1)")
+ stop("argument 'itau' must be in (0,1)")
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
@@ -878,8 +912,9 @@ nefghs = function(link="logit", earg=list(), itau=NULL, method.init=1)
loglikelihood=eval(substitute(
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
tau = eta2theta(eta, .link, earg=.earg)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (log(sin(pi*tau)) - log(pi) + (1-tau)*y - log1p(exp(y))) )
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dnefghs(x=y, tau=tau, log=TRUE))
+ }
}, list( .link=link, .earg=earg ))),
vfamily=c("nefghs"),
deriv=eval(substitute(expression({
@@ -898,16 +933,29 @@ nefghs = function(link="logit", earg=list(), itau=NULL, method.init=1)
-logF = function(lshape1="loge", lshape2="loge",
- eshape1=list(), eshape2=list(),
- ishape1=NULL, ishape2=1,
- method.init=1)
+dlogF = function(x, shape1, shape2, log=FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ logdensity = -shape2*x - lbeta(shape1, shape2) -
+ (shape1 + shape2) * log1p(exp(-x))
+ if(log.arg) logdensity else exp(logdensity)
+}
+
+
+
+
+ logF = function(lshape1="loge", lshape2="loge",
+ eshape1=list(), eshape2=list(),
+ ishape1=NULL, ishape2=1,
+ method.init=1)
{
if(length(ishape1) && !is.Numeric(ishape1, positi=TRUE))
- stop("argument \"ishape1\" must be positive")
+ stop("argument 'ishape1' must be positive")
if( # length(ishape2) &&
!is.Numeric(ishape2, positi=TRUE))
- stop("argument \"ishape2\" must be positive")
+ stop("argument 'ishape2' must be positive")
if(mode(lshape1) != "character" && mode(lshape1) != "name")
lshape1 = as.character(substitute(lshape1))
if(mode(lshape2) != "character" && mode(lshape2) != "name")
@@ -973,9 +1021,9 @@ logF = function(lshape1="loge", lshape2="loge",
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
shape1 = eta2theta(eta[,1], .lshape1, earg=.eshape1)
shape2 = eta2theta(eta[,2], .lshape2, earg=.eshape2)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (-shape2*y - lbeta(shape1, shape2) -
- (shape1 + shape2) * log1p(exp(-y))) )
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dlogF(x=y, shape1=shape1, shape2=shape2, log=TRUE))
+ }
}, list( .lshape1=lshape1, .lshape2=lshape2,
.eshape1=eshape1, .eshape2=eshape2 ))),
vfamily=c("logF"),
@@ -1005,6 +1053,9 @@ logF = function(lshape1="loge", lshape2="loge",
}
+
+
+
dbenf = function(x, ndigits=1, log=FALSE) {
if(!is.Numeric(ndigits, allow=1, posit=TRUE, integ=TRUE) || ndigits > 2)
stop("argument 'ndigits' must be 1 or 2")
@@ -1027,8 +1078,8 @@ rbenf = function(n, ndigits=1) {
lowerlimit = ifelse(ndigits==1, 1, 10)
upperlimit = ifelse(ndigits==1, 9, 99)
use.n = if((length.n <- length(n)) > 1) length.n else
- if(!is.Numeric(n, integ=TRUE, allow=1))
- stop("bad input for argument \"n\"") else n
+ if(!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ stop("bad input for argument 'n'") else n
myrunif = runif(use.n)
ans = rep(lowerlimit, length = use.n)
for(ii in (lowerlimit+1):upperlimit) {
diff --git a/R/family.basics.q b/R/family.basics.q
index afaa560..44830a6 100644
--- a/R/family.basics.q
+++ b/R/family.basics.q
@@ -14,25 +14,25 @@ getind <- function(constraints, M, ncolx) {
if(!length(constraints)) {
constraints = vector("list", ncolx)
- for(i in 1:ncolx)
- constraints[[i]] <- diag(M)
+ for(ii in 1:ncolx)
+ constraints[[ii]] <- diag(M)
}
ans <- vector("list", M+1)
- names(ans) <- c(paste("eta", 1:M, sep=""), "ncolxbig")
+ names(ans) <- c(paste("eta", 1:M, sep=""), "ncolX_vlm")
temp2 <- matrix(unlist(constraints), nrow=M)
- for(k in 1:M) {
+ for(kk in 1:M) {
ansx <- NULL
- for(i in 1:length(constraints)) {
- temp <- constraints[[i]]
- isfox <- any(temp[k,] != 0)
+ for(ii in 1:length(constraints)) {
+ temp <- constraints[[ii]]
+ isfox <- any(temp[kk,] != 0)
if(isfox) {
- ansx <- c(ansx, i)
+ ansx <- c(ansx, ii)
}
}
- ans[[k]] <- list(xindex=ansx,
- xbigindex=(1:ncol(temp2))[temp2[k,] != 0])
+ ans[[kk]] <- list(xindex=ansx,
+ X_vlmindex=(1:ncol(temp2))[temp2[kk,] != 0])
}
ans[[M+1]] <- ncol(temp2)
@@ -54,19 +54,19 @@ cm.vgam <- function(cm, x, bool, constraints,
if(!length(constraints)) {
constraints <- vector("list", length(nasgn))
- for(i in 1:length(nasgn)) {
- constraints[[i]] <- diag(M)
+ for(ii in 1:length(nasgn)) {
+ constraints[[ii]] <- diag(M)
}
names(constraints) <- nasgn
}
if(!is.list(constraints))
- stop("\"constraints\" must be a list")
+ stop("'constraints' must be a list")
if(length(constraints) != length(nasgn) ||
any(sort(names(constraints)) != sort(nasgn))) {
cat("names(constraints)\n")
cat("The above don't match;\n")
- stop("\"constraints\" is half-pie")
+ stop("'constraints' is half-pie")
}
if(is.logical(bool)) {
@@ -74,8 +74,8 @@ cm.vgam <- function(cm, x, bool, constraints,
if(intercept.apply && any(nasgn=="(Intercept)"))
constraints[["(Intercept)"]] <- cm
if(length(ninasgn))
- for(i in ninasgn)
- constraints[[i]] <- cm
+ for(ii in ninasgn)
+ constraints[[ii]] <- cm
} else {
return(constraints)
}
@@ -100,11 +100,11 @@ cm.vgam <- function(cm, x, bool, constraints,
if(attr(tbool, "intercept"))
tl <- c("(Intercept)", tl)
- for(i in nasgn) {
- if(default && any(tl==i))
- constraints[[i]] <- cm
- if(!default && !any(tl==i))
- constraints[[i]] <- cm
+ for(ii in nasgn) {
+ if(default && any(tl == ii))
+ constraints[[ii]] <- cm
+ if(!default && !any(tl == ii))
+ constraints[[ii]] <- cm
}
}
@@ -123,23 +123,22 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M)
names(constraints) <- nasgn
}
if(!is.list(constraints))
- stop("\"constraints\" must be a list")
- for(i in 1:length(asgn))
- constraints[[nasgn[i]]] <-
- if(is.null(constraints[[nasgn[i]]])) diag(M) else
- eval(constraints[[nasgn[i]]])
+ stop("'constraints' must be a list")
+ for(ii in 1:length(asgn))
+ constraints[[nasgn[ii]]] <- if(is.null(constraints[[nasgn[ii]]]))
+ diag(M) else eval(constraints[[nasgn[ii]]])
if(is.null(nointercept))
return(constraints)
if(!is.numeric(nointercept))
- stop("\"nointercept\" must be numeric")
+ stop("'nointercept' must be numeric")
nointercept <- unique(sort(nointercept))
if(length(nointercept) == 0 || length(nointercept) >= M)
stop("too few or too many values")
if(any(nointercept < 1 | nointercept > M))
- stop("\"nointercept\" out of range")
+ stop("'nointercept' out of range")
if(nasgn[1] != "(Intercept)" || M == 1)
stop("Need an (Intercept) constraint matrix with M>1")
if(!all.equal(constraints[["(Intercept)"]], diag(M)))
@@ -162,32 +161,31 @@ cm.zero.vgam <- function(constraints, x, zero, M)
constraints <- vector("list", length(nasgn)) # list()
names(constraints) <- nasgn
}
- if(!is.list(constraints)) stop("\"constraints\" must be a list")
- for(i in 1:length(asgn))
- constraints[[nasgn[i]]] <-
- if(is.null(constraints[[nasgn[i]]])) diag(M) else
- eval(constraints[[nasgn[i]]])
+ if(!is.list(constraints)) stop("'constraints' must be a list")
+ for(ii in 1:length(asgn))
+ constraints[[nasgn[ii]]] <- if(is.null(constraints[[nasgn[ii]]]))
+ diag(M) else eval(constraints[[nasgn[ii]]])
if(is.null(zero))
return(constraints)
- if(!is.numeric(zero)) stop("\"zero\" must be numeric")
+ if(!is.numeric(zero)) stop("'zero' must be numeric")
if(any(zero < 1 | zero > M))
- stop("\"zero\" out of range")
+ stop("'zero' out of range")
if(nasgn[1] != "(Intercept)")
- stop("can't fit an intercept to a no-intercept model")
+ stop("cannot fit an intercept to a no-intercept model")
if(2 <= length(constraints))
- for(i in 2:length(constraints)) {
- temp <- constraints[[nasgn[i]]]
+ for(ii in 2:length(constraints)) {
+ temp <- constraints[[nasgn[ii]]]
temp[zero,] <- 0
index <- NULL
- for(j in 1:ncol(temp))
- if(all(temp[,j]==0)) index <- c(index,j)
- if(length(index)==ncol(temp))
+ for(kk in 1:ncol(temp))
+ if(all(temp[,kk] == 0)) index <- c(index,kk)
+ if(length(index) == ncol(temp))
stop("constraint matrix has no columns!")
if(!is.null(index))
temp <- temp[,-index,drop=FALSE]
- constraints[[nasgn[i]]] <- temp
+ constraints[[nasgn[ii]]] <- temp
}
constraints
}
@@ -204,8 +202,8 @@ process.constraints <- function(constraints, x, M, by.col=TRUE, specialCM=NULL)
if(is.null(constraints)) {
constraints <- vector("list", length(nasgn))
- for(i in 1:length(nasgn))
- constraints[[i]] <- diag(M)
+ for(ii in 1:length(nasgn))
+ constraints[[ii]] <- diag(M)
names(constraints) <- nasgn
}
@@ -213,14 +211,14 @@ process.constraints <- function(constraints, x, M, by.col=TRUE, specialCM=NULL)
constraints <- list(constraints)
if(!is.list(constraints))
- stop("\"constraints\" must be a list")
+ stop("'constraints' must be a list")
lenconstraints <- length(constraints)
if(lenconstraints > 0)
for(i in 1:lenconstraints) {
constraints[[i]] <- eval(constraints[[i]])
if(!is.null(constraints[[i]]) && !is.matrix(constraints[[i]]))
- stop("\"constraints\"[[",i,"]] is not a matrix")
+ stop("'constraints[[",i,"]]' is not a matrix")
}
if(is.null(names(constraints)))
@@ -284,16 +282,16 @@ trivial.constraints <- function(Blist, target=diag(M))
trivc <- rep(1, length(Blist))
names(trivc) <- names(Blist)
- for(i in 1:length(Blist)) {
- d <- dim(Blist[[i]])
- if(d[1] != dimtar[1]) trivc[i] <- 0
- if(d[2] != dimtar[2]) trivc[i] <- 0
- if(d[1] != M) trivc[i] <- 0
- if(length(Blist[[i]]) != length(target)) trivc[i] <- 0
- if(trivc[i] == 0) next
- if(!all(c(Blist[[i]]) == c(target)))
- trivc[i] <- 0
- if(trivc[i] == 0) next
+ for(ii in 1:length(Blist)) {
+ d <- dim(Blist[[ii]])
+ if(d[1] != dimtar[1]) trivc[ii] <- 0
+ if(d[2] != dimtar[2]) trivc[ii] <- 0
+ if(d[1] != M) trivc[ii] <- 0
+ if(length(Blist[[ii]]) != length(target)) trivc[ii] <- 0
+ if(trivc[ii] == 0) next
+ if(!all(c(Blist[[ii]]) == c(target)))
+ trivc[ii] <- 0
+ if(trivc[ii] == 0) next
}
trivc
}
@@ -327,7 +325,8 @@ add.constraints <- function(constraints, new.constraints,
if(check &&
(!(all(dim(constraints[[i]])==dim(new.constraints[[i]])) &&
all(constraints[[i]]==new.constraints[[i]]))))
- stop("apparent contradiction in the specification of the constraints")
+ stop("apparent contradiction in the specification ",
+ "of the constraints")
if(overwrite)
constraints[[i]] <- new.constraints[[i]]
} else
@@ -353,7 +352,7 @@ iam <- function(j, k, M, hbw=M, both=FALSE, diagonal=TRUE)
{
if(M==1)
- if(!diagonal) stop("can't handle this")
+ if(!diagonal) stop("cannot handle this")
if(M==1)
if(both) return(list(row.index=1, col.index=1)) else return(1)
@@ -404,7 +403,7 @@ m2avglm <- function(object, upper=FALSE, allow.vector=FALSE) {
m2adefault <- function(m, M, upper=FALSE, allow.vector=FALSE)
{
if(!is.numeric(m))
- stop("argument m is not numeric")
+ stop("argument 'm' is not numeric")
if(!is.matrix(m))
m <- cbind(m)
@@ -639,7 +638,7 @@ weightsvlm = function(object, type = c("prior", "working"),
matrix.arg=matrix.arg, deriv.arg=deriv.arg,
ignore.slot=ignore.slot, ...)
} else {
- if(deriv.arg) stop("can't set deriv=TRUE when type=\"prior\"")
+ if(deriv.arg) stop("cannot set 'deriv=TRUE' when 'type=\"prior\"'")
ans = pweights(object)
if(matrix.arg) as.matrix(ans) else c(ans)
}
@@ -704,7 +703,7 @@ qnupdate = function(w, wzold, dderiv, deta, M, keeppd=TRUE,
} else {
warning("no BFGS quasi-Newton update made at all")
cat("no BFGS quasi-Newton update made at all\n")
- if(exists("flush.console")) flush.console()
+ flush.console()
}
wznew
}
@@ -741,12 +740,12 @@ VGAM.matrix.norm = function(A, power=2, suppressWarning=FALSE) {
if(power=="F") {
sqrt(sum(A^2))
} else if(power==1) {
- max(apply(abs(A), 2, sum))
+ max(colSums(abs(A)))
} else if(power==2) {
sqrt(max(eigen(t(A) %*% A)$value))
} else if(!is.finite(power)) {
- max(apply(abs(A), 1, sum))
- } else stop("argument \"power\" not recognised")
+ max(colSums(abs(A)))
+ } else stop("argument 'power' not recognised")
}
@@ -819,9 +818,9 @@ lerch <- function(x, s, v, tolerance=1.0e-10, iter=100) {
if(is.complex(c(x,s,v)))
stop("complex arguments not allowed in x, s and v")
if(!is.Numeric(tolerance, allow=1, posi=TRUE) || tolerance > 0.01)
- stop("bad input for argument \"tolerance\"")
+ stop("bad input for argument 'tolerance'")
if(!is.Numeric(iter, allow=1, integ=TRUE, posi=TRUE))
- stop("bad input for argument \"iter\"")
+ stop("bad input for argument 'iter'")
L = max(length(x), length(s), length(v))
x = rep(x, length=L); s = rep(s, length=L); v = rep(v, length=L);
xok = abs(x) < 1 & !(v <= 0 & v==round(v))
diff --git a/R/family.binomial.q b/R/family.binomial.q
index 18d3e9e..823e3da 100644
--- a/R/family.binomial.q
+++ b/R/family.binomial.q
@@ -3,6 +3,13 @@
+
+
+
+
+
+
+
process.binomial2.data.vgam <- expression({
@@ -48,13 +55,17 @@ process.binomial2.data.vgam <- expression({
+betabinomial.control <- function(save.weight=TRUE, ...)
+{
+ list(save.weight=save.weight)
+}
-
-betabinomial <- function(lmu="logit", lrho="logit",
+ betabinomial = function(lmu="logit", lrho="logit",
emu=list(), erho=list(),
- irho=NULL, method.init=1, zero=2)
+ irho=NULL, method.init=1,
+ shrinkage.init=0.95, nsimEIM=NULL, zero=2)
{
if(mode(lmu) != "character" && mode(lmu) != "name")
lmu = as.character(substitute(lmu))
@@ -63,18 +74,31 @@ betabinomial <- function(lmu="logit", lrho="logit",
if(!is.list(emu )) emu = list()
if(!is.list(erho)) erho = list()
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 2) stop("argument \"method.init\" must be 1 or 2")
+ method.init > 4) stop("argument 'method.init' must be 1, 2, 3 or 4")
+ if(!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
+ shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
+ if(!is.null(nsimEIM)) {
+ if(!is.Numeric(nsimEIM, allow=1, integ=TRUE))
+ stop("bad input for argument 'nsimEIM'")
+ if(nsimEIM <= 10)
+ warning("'nsimEIM' should be an integer greater than 10, say")
+ }
new("vglmff",
blurb=c("Beta-binomial model\n",
"Links: ",
namesof("mu", lmu, earg= emu), ", ",
namesof("rho", lrho, earg= erho), "\n",
+ "Mean: mu", "\n",
"Variance: mu*(1-mu)*(1+(w-1)*rho)/w"),
constraints=eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero=zero ))),
initialize=eval(substitute(expression({
+ if(is.null( .nsimEIM)) {
+ save.weight <- control$save.weight <- FALSE
+ }
+
eval(binomialff()@initialize) # Note: n,w,y,mustart is changed
ycounts = y * w # Convert proportions to counts
if(max(abs(ycounts-round(ycounts))) > 1.0e-6)
@@ -82,43 +106,40 @@ betabinomial <- function(lmu="logit", lrho="logit",
predictors.names = c(namesof("mu", .lmu, earg= .emu, tag=FALSE),
namesof("rho", .lrho, earg= .erho, tag=FALSE))
if(!length(etastart)) {
- if(is.Numeric( .irho )) {
- init.rho = rep( .irho, length=n)
+ betabinomial.Loglikfun = function(rhoval, y, x, w, extraargs) {
+ shape1 = extraargs$mustart*(1-rhoval)/rhoval
+ shape2 = (1-extraargs$mustart)*(1-rhoval)/rhoval
+ ycounts = extraargs$ycounts
+ nvec = extraargs$nvec
+ sum(dbetabin.ab(x=ycounts, size=nvec, shape1=shape1,
+ shape2=shape2, log=TRUE))
+ }
+ rho.grid = rvar = seq(0.05, 0.95, len=21) #
+ mustart.use = if( .method.init == 1) {
+ rep(weighted.mean(y, w), len=n)
+ } else if( .method.init == 2) {
+ .sinit * weighted.mean(y, w) + (1- .sinit) * y
+ } else if( .method.init == 3) {
+ y.matrix = cbind(y)
+ mat.temp = matrix(colMeans(y.matrix), nrow(y.matrix),
+ ncol(y.matrix), byrow=TRUE)
+ 0.5 * mustart + 0.5 * mat.temp
} else {
- betabinomial.Loglikfun = function(rhoval, y, x, w, extraargs) {
- shape1 = extraargs$mustart*(1-rhoval)/rhoval
- shape2 = (1-extraargs$mustart)*(1-rhoval)/rhoval
- ycounts = extraargs$ycounts
- nvec = extraargs$nvec
- if(is.R()) sum(lbeta(shape1+ycounts, shape2+nvec-ycounts) -
- lbeta(shape1, shape2)) else
- sum(lgamma(shape1+ycounts) + lgamma(shape2+nvec-ycounts) -
- lgamma(shape1+shape2+nvec) -
- (lgamma(shape1) + lgamma(shape2) -
- lgamma(shape1+shape2)))
- }
- rho.grid = rvar = seq(0.05, 0.95, len=21) #
- mustart.use = if( .method.init == 2) {
- mustart
- } else {
- y.matrix = cbind(y)
- mat.temp = matrix(apply(y.matrix, 2, mean), nrow(y.matrix),
- ncol(y.matrix), byrow=TRUE)
- 0.5 * mustart + 0.5 * mat.temp
- }
- try.this = getMaxMin(rho.grid, objfun=betabinomial.Loglikfun,
- y=y, x=x, w=w, extraargs=list(
- ycounts=ycounts, nvec=w,
- mustart=mustart.use))
- init.rho = rep(try.this, len=n)
+ mustart
}
- etastart = cbind(theta2eta(mustart.use, .lmu, earg= .emu),
- theta2eta(init.rho, .lrho, earg= .erho))
- }
+ try.this = getMaxMin(rho.grid, objfun=betabinomial.Loglikfun,
+ y=y, x=x, w=w, extraargs=list(
+ ycounts=ycounts, nvec=w,
+ mustart=mustart.use))
+ init.rho = if(is.Numeric( .irho )) rep( .irho, length=n) else
+ rep(try.this, len=n)
+ }
+ etastart = cbind(theta2eta(mustart.use, .lmu, earg= .emu),
+ theta2eta(init.rho, .lrho, earg= .erho))
}), list( .lmu=lmu, .lrho=lrho,
.emu=emu, .erho=erho,
- .method.init=method.init,
- .irho=irho ))),
+ .method.init=method.init, .sinit=shrinkage.init,
+ .nsimEIM=nsimEIM, .irho=irho ))),
inverse=eval(substitute(function(eta, extra=NULL)
eta2theta(eta[,1], .lmu, earg= .emu),
list( .lmu=lmu, .emu=emu ))),
@@ -127,9 +148,10 @@ betabinomial <- function(lmu="logit", lrho="logit",
misc$earg <- list(mu = .emu, rho = .erho)
misc$zero <- .zero
misc$expected <- TRUE
+ misc$nsimEIM = .nsimEIM
}), list( .lmu=lmu, .lrho=lrho,
.emu=emu, .erho=erho,
- .zero=zero ))),
+ .nsimEIM=nsimEIM, .zero=zero ))),
loglikelihood=eval(substitute(
function(mu,y,w,residuals=FALSE, eta, extra=NULL) {
ycounts = y * w # Convert proportions to counts
@@ -142,11 +164,8 @@ betabinomial <- function(lmu="logit", lrho="logit",
shape2 = (1-mymu) * (1 - rho) / rho
nvec = w
if(residuals) stop("loglikelihood residuals not implemented yet") else {
- if(is.R()) sum(lbeta(shape1+ycounts, shape2+nvec-ycounts) -
- lbeta(shape1, shape2)) else
- sum(lgamma(shape1+ycounts) + lgamma(shape2+nvec-ycounts) -
- lgamma(shape1+shape2+nvec) -
- (lgamma(shape1) + lgamma(shape2) - lgamma(shape1+shape2)))
+ sum(dbetabin.ab(x=ycounts, size=nvec, shape1=shape1,
+ shape2=shape2, log=TRUE))
}
}, list( .lmu=lmu,
.emu=emu, .erho=erho,
@@ -169,40 +188,67 @@ betabinomial <- function(lmu="logit", lrho="logit",
dmu.deta = dtheta.deta(mymu, .lmu, earg= .emu)
drho.deta = dtheta.deta(rho, .lrho, earg= .erho)
dl.dmu = dshape1.dmu * (digamma(shape1+ycounts) -
- digamma(shape2+nvec-ycounts) -
- digamma(shape1) + digamma(shape2))
+ digamma(shape2+nvec-ycounts) -
+ digamma(shape1) + digamma(shape2))
dl.drho = (-1/rho^2) * (mymu * digamma(shape1+ycounts) +
- (1-mymu) * digamma(shape2+nvec-ycounts) -
- digamma(shape1+shape2+nvec) -
- mymu * digamma(shape1) -
- (1-mymu)*digamma(shape2) + digamma(shape1+shape2))
- temp5 = cbind(dl.dmu * dmu.deta, dl.drho * drho.deta)
- temp5
+ (1-mymu) * digamma(shape2+nvec-ycounts) -
+ digamma(shape1+shape2+nvec) -
+ mymu * digamma(shape1) -
+ (1-mymu)*digamma(shape2) + digamma(shape1+shape2))
+ cbind(dl.dmu * dmu.deta, dl.drho * drho.deta)
}), list( .lmu=lmu,
.emu=emu, .erho=erho,
.lrho=lrho ))),
weight=eval(substitute(expression({
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(2)
- wz11 = -(expected.betabin.ab(nvec, shape1, shape2, TRUE) -
- trigamma(shape1+shape2+nvec) -
- trigamma(shape1) + trigamma(shape1+shape2))
- wz22 = -(expected.betabin.ab(nvec, shape1, shape2, FALSE) -
- trigamma(shape1+shape2+nvec) -
- trigamma(shape2) + trigamma(shape1+shape2))
- wz21 = -(trigamma(shape1+shape2) - trigamma(shape1+shape2+nvec))
- wz[,iam(1,1,M)] = dmu.deta^2 * (wz11 * dshape1.dmu^2 +
- wz22 * dshape2.dmu^2 +
- 2 * wz21 * dshape1.dmu * dshape2.dmu)
- wz[,iam(2,2,M)] = drho.deta^2 * (wz11 * dshape1.drho^2 +
- wz22 * dshape2.drho^2 +
- 2 * wz21 * dshape1.drho * dshape2.drho)
- wz[,iam(2,1,M)] = dmu.deta * drho.deta *
- (dshape1.dmu*(wz11*dshape1.drho + wz21*dshape2.drho) +
- dshape2.dmu*(wz21*dshape1.drho + wz22*dshape2.drho))
- wz
- }), list( .lmu=lmu,
- .emu=emu, .erho=erho,
- .lrho=lrho ))))
+ if(is.null( .nsimEIM)) {
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(2)
+ wz11 = -(expected.betabin.ab(nvec, shape1, shape2, TRUE) -
+ trigamma(shape1+shape2+nvec) -
+ trigamma(shape1) + trigamma(shape1+shape2))
+ wz22 = -(expected.betabin.ab(nvec, shape1, shape2, FALSE) -
+ trigamma(shape1+shape2+nvec) -
+ trigamma(shape2) + trigamma(shape1+shape2))
+ wz21 = -(trigamma(shape1+shape2) - trigamma(shape1+shape2+nvec))
+ wz[,iam(1,1,M)] = dmu.deta^2 * (wz11 * dshape1.dmu^2 +
+ wz22 * dshape2.dmu^2 +
+ 2 * wz21 * dshape1.dmu * dshape2.dmu)
+ wz[,iam(2,2,M)] = drho.deta^2 * (wz11 * dshape1.drho^2 +
+ wz22 * dshape2.drho^2 +
+ 2 * wz21 * dshape1.drho * dshape2.drho)
+ wz[,iam(2,1,M)] = dmu.deta * drho.deta *
+ (dshape1.dmu*(wz11*dshape1.drho + wz21*dshape2.drho) +
+ dshape2.dmu*(wz21*dshape1.drho + wz22*dshape2.drho))
+ wz
+ } else {
+ run.varcov = 0
+ ind1 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
+ dthetas.detas = cbind(dmu.deta, drho.deta)
+
+ for(ii in 1:( .nsimEIM )) {
+ ysim = rbetabin.ab(n=n, size=nvec, shape1=shape1, shape2=shape2)
+ dl.dmu = dshape1.dmu * (digamma(shape1+ysim) -
+ digamma(shape2+nvec-ysim) -
+ digamma(shape1) + digamma(shape2))
+ dl.drho = (-1/rho^2) * (mymu * digamma(shape1+ysim) +
+ (1-mymu) * digamma(shape2+nvec-ysim) -
+ digamma(shape1+shape2+nvec) -
+ mymu * digamma(shape1) -
+ (1-mymu)*digamma(shape2) + digamma(shape1+shape2))
+
+ rm(ysim)
+ temp3 = cbind(dl.dmu, dl.drho) # n x M matrix
+ run.varcov = ((ii-1) * run.varcov +
+ temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
+ }
+ wz = if(intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow=TRUE) else run.varcov
+
+ wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
+ wz
+ }
+ }), list( .lmu=lmu, .emu=emu, .erho=erho, .lrho=lrho,
+ .nsimEIM = nsimEIM ))))
}
@@ -226,7 +272,7 @@ dbinom2.or = function(mu1,
if(!is.Numeric(oratio, positive=TRUE))
stop("bad input for argument 'oratio'")
if(!is.Numeric(tol, positive=TRUE, allow=1) || tol > 0.1)
- stop("bad input for argument \"tol\"")
+ stop("bad input for argument 'tol'")
if(exchangeable && max(abs(mu1 - mu2)) > 0.00001)
stop("argument 'exchangeable' is TRUE but 'mu1' and 'mu2' differ")
}
@@ -268,7 +314,7 @@ rbinom2.or = function(n, mu1,
if(!is.Numeric(oratio, positive=TRUE))
stop("bad input for argument 'oratio'")
if(!is.Numeric(tol, positive=TRUE, allow=1) || tol > 0.1)
- stop("bad input for argument \"tol\"")
+ stop("bad input for argument 'tol'")
if(exchangeable && max(abs(mu1 - mu2)) > 0.00001)
stop("argument 'exchangeable' is TRUE but 'mu1' and 'mu2' differ")
}
@@ -296,10 +342,10 @@ rbinom2.or = function(n, mu1,
-binom2.or = function(lmu="logit", lmu1=lmu, lmu2=lmu, loratio="loge",
- emu=list(), emu1=emu, emu2=emu, eoratio=list(),
- imu1=NULL, imu2=NULL, ioratio = NULL,
- zero=3, exchangeable=FALSE, tol=0.001, morerobust=FALSE)
+ binom2.or = function(lmu="logit", lmu1=lmu, lmu2=lmu, loratio="loge",
+ emu=list(), emu1=emu, emu2=emu, eoratio=list(),
+ imu1=NULL, imu2=NULL, ioratio = NULL,
+ zero=3, exchangeable=FALSE, tol=0.001, morerobust=FALSE)
{
if(mode(lmu) != "character" && mode(lmu) != "name")
lmu = as.character(substitute(lmu))
@@ -313,7 +359,7 @@ binom2.or = function(lmu="logit", lmu1=lmu, lmu2=lmu, loratio="loge",
!all.equal(emu1, emu2)))
stop("exchangeable=TRUE but marginal links are not equal")
if(!is.Numeric(tol, positive=TRUE, allow=1) || tol > 0.1)
- stop("bad input for argument \"tol\"")
+ stop("bad input for argument 'tol'")
if(!is.list(emu1)) emu1 = list()
if(!is.list(emu2)) emu2 = list()
if(!is.list(eoratio)) eoratio = list()
@@ -384,22 +430,17 @@ binom2.or = function(lmu="logit", lmu1=lmu, lmu2=lmu, loratio="loge",
.loratio=loratio ))),
loglikelihood=eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- if(residuals) stop("loglikelihood residuals not implemented yet") else {
- if( .morerobust) {
- vsmallno = 1.0e4 * .Machine$double.xmin
- mu.use = mu
- mu.use[mu.use < vsmallno] = vsmallno
- sum(w * y * log(mu.use))
- } else
- sum(w * y * log(mu))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ if( .morerobust) {
+ vsmallno = 1.0e4 * .Machine$double.xmin
+ mu[mu < vsmallno] = vsmallno
+ }
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
}
- }, list( .morerobust=morerobust ))),
+ }, list( .morerobust=morerobust ))),
vfamily=c("binom2.or", "binom2"),
deriv=eval(substitute(expression({
smallno = 1.0e4 * .Machine$double.eps
-iii = c(46,55,63)
-iii = c(39)
-iii = 1:n
mu.use = mu
mu.use[mu.use < smallno] = smallno
mu.use[mu.use > 1-smallno] = 1-smallno
@@ -539,10 +580,10 @@ binom2.rho.control <- function(save.weight=TRUE, ...)
-binom2.rho = function(lrho="rhobit", erho=list(),
- imu1=NULL, imu2=NULL,
- init.rho=NULL,
- zero=3, exchangeable=FALSE, nsimEIM=NULL)
+ binom2.rho = function(lrho="rhobit", erho=list(),
+ imu1=NULL, imu2=NULL,
+ init.rho=NULL,
+ zero=3, exchangeable=FALSE, nsimEIM=NULL)
{
if(mode(lrho) != "character" && mode(lrho) != "name")
@@ -618,6 +659,12 @@ binom2.rho = function(lrho="rhobit", erho=list(),
misc$expected = TRUE
}), list( .lmu12=lmu12, .emu12=emu12, .lrho=lrho, .erho=erho,
.nsimEIM=nsimEIM ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ }
+ }, list( .erho=erho ))),
vfamily=c("binom2.rho", "binom2"),
deriv=eval(substitute(expression({
pmargin = cbind(eta2theta(eta[,1], .lmu12, earg= .emu12),
@@ -688,7 +735,7 @@ binom2.rho = function(lrho="rhobit", erho=list(),
temp3[,ind1$row.index] * temp3[,ind1$col.index]) / ii
}
wz = if(intercept.only)
- matrix(apply(run.varcov, 2, mean),
+ matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow=TRUE) else run.varcov
wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
@@ -766,8 +813,11 @@ size.binomial <- function(prob=0.5, link="loge", earg=list())
loglikelihood=eval(substitute(
function(mu, y, w, res=FALSE,eta, extra=NULL) {
nvec <- mu/extra$temp2
- sum(w * (lgamma(nvec+1) - lgamma(y+1) - lgamma(nvec-y+1) +
- y * log(.prob / (1- .prob)) + nvec * log1p(- .prob)))
+ if(res) stop("loglikelihood residuals not implemented yet") else {
+
+ sum(w * (lgamma(nvec+1) - lgamma(y+1) - lgamma(nvec-y+1) +
+ y * log(.prob / (1- .prob)) + nvec * log1p(- .prob)))
+ }
}, list( .prob=prob ))),
vfamily=c("size.binomial"),
deriv=eval(substitute(expression({
@@ -791,11 +841,11 @@ size.binomial <- function(prob=0.5, link="loge", earg=list())
dbetabin.ab = function(x, size, shape1, shape2, log = FALSE) {
log.arg = log
rm(log)
- if(!is.Numeric(x)) stop("bad input for argument \"x\"")
+ if(!is.Numeric(x)) stop("bad input for argument 'x'")
if(!is.Numeric(size, posit=TRUE, integer=TRUE))
- stop("bad input for argument \"size\"")
- if(!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument \"shape1\"")
- if(!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument \"shape2\"")
+ stop("bad input for argument 'size'")
+ if(!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument 'shape1'")
+ if(!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument 'shape2'")
L = max(length(x), length(size), length(shape1), length(shape2))
x = rep(x, len=L); size = rep(size, len=L);
shape1 = rep(shape1, len=L); shape2 = rep(shape2, len=L);
@@ -814,11 +864,11 @@ dbetabin.ab = function(x, size, shape1, shape2, log = FALSE) {
pbetabin.ab = function(q, size, shape1, shape2, log.p=FALSE) {
- if(!is.Numeric(q)) stop("bad input for argument \"q\"")
+ if(!is.Numeric(q)) stop("bad input for argument 'q'")
if(!is.Numeric(size, posit=TRUE, integer=TRUE))
- stop("bad input for argument \"size\"")
- if(!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument \"shape1\"")
- if(!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument \"shape2\"")
+ stop("bad input for argument 'size'")
+ if(!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument 'shape1'")
+ if(!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument 'shape2'")
N = max(length(q), length(size), length(shape1), length(shape2))
q = rep(q, len=N); shape1 = rep(shape1, len=N); shape2 = rep(shape2, len=N)
size = rep(size, len=N);
@@ -830,25 +880,25 @@ pbetabin.ab = function(q, size, shape1, shape2, log.p=FALSE) {
temp = if(max(qstar) >= 0) dbetabin.ab(0:max(qstar),
size=size[1], shape1=shape1[1], shape2=shape2[1]) else 0*qstar
unq = unique(qstar)
- for(i in unq) {
- index = qstar == i
- ans[index] = if(i >= 0) sum(temp[1:(1+i)]) else 0
+ for(ii in unq) {
+ index = qstar == ii
+ ans[index] = if(ii >= 0) sum(temp[1:(1+ii)]) else 0
}
} else
- for(i in 1:N) {
- qstar = floor(q[i])
- ans[i] = if(qstar >= 0) sum(dbetabin.ab(x=0:qstar, size=size[i],
- shape1=shape1[i], shape2=shape2[i])) else 0
+ for(ii in 1:N) {
+ qstar = floor(q[ii])
+ ans[ii] = if(qstar >= 0) sum(dbetabin.ab(x=0:qstar, size=size[ii],
+ shape1=shape1[ii], shape2=shape2[ii])) else 0
}
if(log.p) log(ans) else ans
}
rbetabin.ab = function(n, size, shape1, shape2) {
- if(!is.Numeric(n,integ=TRUE, allow=1)) stop("bad input for argument \"n\"")
+ if(!is.Numeric(n,integ=TRUE, allow=1)) stop("bad input for argument 'n'")
if(!is.Numeric(size, posit=TRUE, integer=TRUE))
- stop("bad input for argument \"size\"")
- if(!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument \"shape1\"")
- if(!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument \"shape2\"")
+ stop("bad input for argument 'size'")
+ if(!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument 'shape1'")
+ if(!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument 'shape2'")
size = rep(size, len=n);
shape1 = rep(shape1, len=n); shape2 = rep(shape2, len=n);
rbinom(n=n, size=size, prob=rbeta(n=n, shape1=shape1, shape2=shape2))
@@ -871,29 +921,25 @@ rbetabin = function(n, size, prob, rho) {
}
-expected.betabin.ab = function(nvec, shape1, shape2, first) {
+ expected.betabin.ab = function(nvec, shape1, shape2, first) {
- n = length(nvec)
- ans = rep(0.0, len=n)
- if(!is.R()) {
- lchoose = function(a,b) log(choose(a,b))
- lbeta = function(a,b) lgamma(a) + lgamma(b) - lgamma(a+b)
- }
+ N = length(nvec)
+ ans = rep(0.0, len=N)
if(first) {
- for(i in 1:n) {
- temp639 = lbeta(shape1[i], shape2[i])
- for(y in 0:nvec[i])
- ans[i] = ans[i] + trigamma(shape1[i]+y) *
- exp(lchoose(nvec[i], y) +
- lbeta(shape1[i]+y, shape2[i]+nvec[i]-y) - temp639)
+ for(ii in 1:N) {
+ temp639 = lbeta(shape1[ii], shape2[ii])
+ yy = 0:nvec[ii]
+ ans[ii] = ans[ii] + sum(trigamma(shape1[ii]+yy) *
+ exp(lchoose(nvec[ii], yy) +
+ lbeta(shape1[ii]+yy, shape2[ii]+nvec[ii]-yy) - temp639))
}
} else {
- for(i in 1:n) {
- temp639 = lbeta(shape1[i], shape2[i])
- for(y in 0:nvec[i])
- ans[i] = ans[i] + trigamma(nvec[i]+shape2[i]-y) *
- exp(lchoose(nvec[i], y) +
- lbeta(shape1[i]+y, shape2[i]+nvec[i]-y) - temp639)
+ for(ii in 1:N) {
+ temp639 = lbeta(shape1[ii], shape2[ii])
+ yy = 0:nvec[ii]
+ ans[ii] = ans[ii] + sum(trigamma(nvec[ii]+shape2[ii]-yy) *
+ exp(lchoose(nvec[ii], yy) +
+ lbeta(shape1[ii]+yy, shape2[ii]+nvec[ii]-yy) - temp639))
}
}
ans
@@ -901,119 +947,171 @@ expected.betabin.ab = function(nvec, shape1, shape2, first) {
-
-betabin.ab = function(link.shape12="loge", earg = list(),
- i1=1, i2=NULL, zero=NULL)
+betabin.ab.control <- function(save.weight=TRUE, ...)
{
- if(mode(link.shape12) != "character" && mode(link.shape12) != "name")
- link.shape12 = as.character(substitute(link.shape12))
- if(!is.Numeric(i1, positive=TRUE)) stop("bad input for argument \"i1\"")
+ list(save.weight=save.weight)
+}
+
+
+ betabin.ab = function(lshape12="loge", earg = list(),
+ i1=1, i2=NULL, method.init=1,
+ shrinkage.init=0.95, nsimEIM=NULL, zero=NULL) {
+ if(mode(lshape12) != "character" && mode(lshape12) != "name")
+ lshape12 = as.character(substitute(lshape12))
+ if(!is.Numeric(i1, positive=TRUE)) stop("bad input for argument 'i1'")
if(length(i2) && !is.Numeric(i2, pos=TRUE))
- stop("bad input for argument \"i2\"")
+ stop("bad input for argument 'i2'")
if(!is.list(earg)) earg = list()
+ if(!is.null(nsimEIM)) {
+ if(!is.Numeric(nsimEIM, allow=1, integ=TRUE))
+ stop("bad input for argument 'nsimEIM'")
+ if(nsimEIM <= 10)
+ warning("'nsimEIM' should be an integer greater than 10, say")
+ }
+ if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ method.init > 3) stop("argument 'method.init' must be 1, 2 or 3")
new("vglmff",
blurb=c("Beta-binomial model\n",
"Links: ",
- namesof("shape1", link.shape12, earg= earg), ", ",
- namesof("shape2", link.shape12, earg= earg), "\n",
- "Variance: mu*(1-mu)[1+(w-1)*rho]/w where mu=alpha/(alpha+beta)"),
+ namesof("shape1", lshape12, earg= earg), ", ",
+ namesof("shape2", lshape12, earg= earg), "\n",
+ "Mean: mu = shape1/(shape1+shape2)", "\n",
+ "Variance: mu*(1-mu)(1+(w-1)*rho)/w, where rho = 1/(shape1+shape2+1)"),
constraints=eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero=zero ))),
initialize=eval(substitute(expression({
+ if(is.null( .nsimEIM)) {
+ save.weight <- control$save.weight <- FALSE
+ }
+
# Compute initial values for mustart -------
eval(binomialff()@initialize) # Note: n,w,y,mustart is changed
- predictors.names = c(namesof("shape1", .link.shape12, earg= .earg, tag=FALSE),
- namesof("shape2", .link.shape12, earg= .earg, short=FALSE))
+ predictors.names=c(namesof("shape1", .lshape12, earg= .earg, tag=FALSE),
+ namesof("shape2", .lshape12, earg= .earg, tag=FALSE))
if(!length(etastart)) {
shape1 = rep( .i1, len=n)
- shape2 = if(length( .i2)) rep( .i2,len=n) else shape1*(1/mustart-1)
+ shape2 = if(length( .i2)) rep( .i2,len=n) else {
+ if( .method.init == 1) {
+ shape1 * (1 / weighted.mean(y, w) - 1)
+ } else if( .method.init == 2) {
+ temp777 = .sinit * weighted.mean(y, w) + (1- .sinit) * y
+ shape1 * (1 / temp777 - 1)
+ } else {
+ shape1 * (1 / mustart - 1)
+ }
+ }
ycounts = y * w # Convert proportions to counts
if(max(abs(ycounts-round(ycounts))) > 1.0e-6)
stop("ycounts not integer")
ycounts = round(ycounts) # Make sure it is an integer
- etastart = cbind(theta2eta(shape1, .link.shape12, earg= .earg),
- theta2eta(shape2, .link.shape12, earg= .earg))
+ etastart = cbind(theta2eta(shape1, .lshape12, earg= .earg),
+ theta2eta(shape2, .lshape12, earg= .earg))
}
- }), list( .link.shape12=link.shape12, .earg=earg, .i1=i1 , .i2=i2 ))),
+ }), list( .lshape12=lshape12, .earg=earg, .i1=i1, .i2=i2, .nsimEIM=nsimEIM,
+ .method.init=method.init, .sinit=shrinkage.init ))),
inverse=eval(substitute(function(eta, extra=NULL) {
- shape1 = eta2theta(eta[,1], .link.shape12, earg= .earg)
- shape2 = eta2theta(eta[,2], .link.shape12, earg= .earg)
+ shape1 = eta2theta(eta[,1], .lshape12, earg= .earg)
+ shape2 = eta2theta(eta[,2], .lshape12, earg= .earg)
shape1 / (shape1 + shape2)
- }, list( .link.shape12=link.shape12, .earg=earg ))),
+ }, list( .lshape12=lshape12, .earg=earg ))),
last=eval(substitute(expression({
- misc$link = c("shape1" = .link.shape12, "shape2" = .link.shape12)
+ misc$link = c("shape1" = .lshape12, "shape2" = .lshape12)
misc$earg <- list(shape1 = .earg, shape2 = .earg)
- shape1 = eta2theta(eta[,1], .link.shape12, earg= .earg)
- shape2 = eta2theta(eta[,2], .link.shape12, earg= .earg)
+ shape1 = eta2theta(eta[,1], .lshape12, earg= .earg)
+ shape2 = eta2theta(eta[,2], .lshape12, earg= .earg)
misc$rho = 1 / (shape1 + shape2 + 1)
misc$expected = TRUE
- }), list( .link.shape12=link.shape12, .earg=earg ))),
+ misc$nsimEIM = .nsimEIM
+ }), list( .lshape12=lshape12, .earg=earg, .nsimEIM=nsimEIM ))),
loglikelihood=eval(substitute(
function(mu,y,w,residuals=FALSE,eta, extra=NULL) {
ycounts = y * w # Convert proportions to counts
- shape1 = eta2theta(eta[,1], .link.shape12, earg= .earg)
- shape2 = eta2theta(eta[,2], .link.shape12, earg= .earg)
+ shape1 = eta2theta(eta[,1], .lshape12, earg= .earg)
+ shape2 = eta2theta(eta[,2], .lshape12, earg= .earg)
nvec = w
if(residuals) stop("loglikelihood residuals not implemented yet") else {
- if(is.R()) sum(lbeta(shape1+ycounts, shape2+nvec-ycounts) -
- lbeta(shape1, shape2)) else
- sum(lgamma(shape1+ycounts) + lgamma(shape2+nvec-ycounts) -
- lgamma(shape1+shape2+nvec) -
- (lgamma(shape1) + lgamma(shape2) - lgamma(shape1+shape2)))
+ sum(dbetabin.ab(x=ycounts, size=nvec, shape1=shape1,
+ shape2=shape2, log=TRUE))
}
- }, list( .link.shape12=link.shape12, .earg=earg ))),
+ }, list( .lshape12=lshape12, .earg=earg ))),
vfamily=c("betabin.ab"),
deriv=eval(substitute(expression({
nvec = w # extra$nvec # for summary()
ycounts = y * w # Convert proportions to counts
- shape1 = eta2theta(eta[,1], .link.shape12, earg= .earg)
- shape2 = eta2theta(eta[,2], .link.shape12, earg= .earg)
- dshape1.deta = dtheta.deta(shape1, .link.shape12, earg= .earg)
- dshape2.deta = dtheta.deta(shape2, .link.shape12, earg= .earg)
+ shape1 = eta2theta(eta[,1], .lshape12, earg= .earg)
+ shape2 = eta2theta(eta[,2], .lshape12, earg= .earg)
+ dshape1.deta = dtheta.deta(shape1, .lshape12, earg= .earg)
+ dshape2.deta = dtheta.deta(shape2, .lshape12, earg= .earg)
dl.dshape1 = digamma(shape1+ycounts) - digamma(shape1+shape2+nvec) -
digamma(shape1) + digamma(shape1+shape2)
dl.dshape2 = digamma(nvec+shape2-ycounts) -
digamma(shape1+shape2+nvec) -
digamma(shape2) + digamma(shape1+shape2)
cbind(dl.dshape1 * dshape1.deta, dl.dshape2 * dshape2.deta)
- }), list( .link.shape12=link.shape12, .earg=earg ))),
+ }), list( .lshape12=lshape12, .earg=earg ))),
weight=eval(substitute(expression({
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(2)
- wz[,iam(1,1,M)] = -(expected.betabin.ab(nvec, shape1, shape2, TRUE) -
- trigamma(shape1+shape2+nvec) -
- trigamma(shape1) + trigamma(shape1+shape2)) *
- dshape1.deta^2
- wz[,iam(2,2,M)] = -(expected.betabin.ab(nvec, shape1, shape2, FALSE) -
- trigamma(shape1+shape2+nvec) -
- trigamma(shape2) + trigamma(shape1+shape2)) *
- dshape2.deta^2
- wz[,iam(2,1,M)] = -(trigamma(shape1+shape2) -
- trigamma(shape1+shape2+nvec)) *
- dshape1.deta * dshape2.deta
- wz
- }), list( .link.shape12=link.shape12, .earg=earg ))))
+ if(is.null( .nsimEIM)) {
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(2)
+ wz[,iam(1,1,M)] = -(expected.betabin.ab(nvec,shape1,shape2, TRUE) -
+ trigamma(shape1+shape2+nvec) -
+ trigamma(shape1) + trigamma(shape1+shape2)) *
+ dshape1.deta^2
+ wz[,iam(2,2,M)] = -(expected.betabin.ab(nvec,shape1,shape2, FALSE)-
+ trigamma(shape1+shape2+nvec) -
+ trigamma(shape2) + trigamma(shape1+shape2)) *
+ dshape2.deta^2
+ wz[,iam(2,1,M)] = -(trigamma(shape1+shape2) -
+ trigamma(shape1+shape2+nvec)) *
+ dshape1.deta * dshape2.deta
+ wz
+ } else {
+ run.varcov = 0
+ ind1 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
+ dthetas.detas = cbind(dshape1.deta, dshape2.deta)
+
+ for(ii in 1:( .nsimEIM )) {
+ ysim = rbetabin.ab(n=n, size=nvec, shape1=shape1, shape2=shape2)
+ dl.dshape1 = digamma(shape1+ysim) -
+ digamma(shape1+shape2+nvec) -
+ digamma(shape1) + digamma(shape1+shape2)
+ dl.dshape2 = digamma(nvec+shape2-ysim) -
+ digamma(shape1+shape2+nvec) -
+ digamma(shape2) + digamma(shape1+shape2)
+ rm(ysim)
+ temp3 = cbind(dl.dshape1, dl.dshape2) # n x M matrix
+ run.varcov = ((ii-1) * run.varcov +
+ temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
+ }
+ wz = if(intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow=TRUE) else run.varcov
+
+ wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
+ wz
+ }
+ }), list( .lshape12=lshape12, .earg=earg, .nsimEIM=nsimEIM ))))
}
-betageometric = function(lprob="logit", lshape="loge",
- eprob=list(), eshape=list(),
- iprob = NULL, ishape = 0.1,
- moreSummation=c(2,100), tolerance=1.0e-10, zero=NULL)
+ betageometric = function(lprob="logit", lshape="loge",
+ eprob=list(), eshape=list(),
+ iprob = NULL, ishape = 0.1,
+ moreSummation=c(2,100), tolerance=1.0e-10, zero=NULL)
{
if(mode(lprob) != "character" && mode(lprob) != "name")
lprob = as.character(substitute(lprob))
if(mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
if(!is.Numeric(ishape, positive=TRUE))
- stop("bad input for argument \"ishape\"")
+ stop("bad input for argument 'ishape'")
if(!is.Numeric(moreSummation, positive=TRUE, allow=2, integ=TRUE))
- stop("bad input for argument \"moreSummation\"")
+ stop("bad input for argument 'moreSummation'")
if(!is.Numeric(tolerance, positive=TRUE, allow=1) || 1.0-tolerance >= 1.0)
- stop("bad input for argument \"tolerance\"")
+ stop("bad input for argument 'tolerance'")
if(!is.list(eprob)) eprob = list()
if(!is.list(eshape)) eshape = list()
@@ -1072,6 +1170,11 @@ betageometric = function(lprob="logit", lshape="loge",
log1p((ii-1)*shape[index])
}
ans = ans - log1p((y+1-1)*shape)
+
+
+
+
+
sum(w * ans)
}
}, list( .lprob=lprob, .lshape=lshape,
@@ -1142,10 +1245,10 @@ seq2binomial = function(lprob1="logit", lprob2="logit",
lprob2 = as.character(substitute(lprob2))
if(length(iprob1) &&
(!is.Numeric(iprob1, positive=TRUE) || max(iprob1) >= 1))
- stop("bad input for argument \"iprob1\"")
+ stop("bad input for argument 'iprob1'")
if(length(iprob2) &&
(!is.Numeric(iprob2, positive=TRUE) || max(iprob2) >= 1))
- stop("bad input for argument \"iprob2\"")
+ stop("bad input for argument 'iprob2'")
if(!is.list(eprob1)) eprob1 = list()
if(!is.list(eprob2)) eprob2 = list()
@@ -1256,9 +1359,9 @@ zipebcom = function(lmu12="cloglog", lphi12="logit", loratio="loge",
if(mode(loratio) != "character" && mode(loratio) != "name")
loratio = as.character(substitute(loratio))
if(!is.Numeric(tol, positive=TRUE, allow=1) || tol > 0.1)
- stop("bad input for argument \"tol\"")
+ stop("bad input for argument 'tol'")
if(!is.Numeric(addRidge, allow=1, posit=TRUE) || addRidge > 0.5)
- stop("bad input for argument \"addRidge\"")
+ stop("bad input for argument 'addRidge'")
if(!is.list(emu12)) emu12 = list()
if(!is.list(ephi12)) ephi12 = list()
if(!is.list(eoratio)) eoratio = list()
@@ -1331,8 +1434,9 @@ zipebcom = function(lmu12="cloglog", lphi12="logit", loratio="loge",
.lmu12=lmu12, .lphi12=lphi12, .loratio=loratio,
.emu12=emu12, .ephi12=ephi12, .eoratio=eoratio ))),
loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * y * log(mu)),
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ },
vfamily=c("zipebcom"),
deriv=eval(substitute(expression({
A1vec = eta2theta(eta[,1], .lmu12, earg= .emu12)
diff --git a/R/family.bivariate.q b/R/family.bivariate.q
index a7db3be..897a873 100644
--- a/R/family.bivariate.q
+++ b/R/family.bivariate.q
@@ -7,16 +7,20 @@
+
+
+
+
bilogistic4.control <- function(save.weight=TRUE, ...)
{
list(save.weight=save.weight)
}
-bilogistic4 = function(llocation="identity",
- lscale="loge",
- iloc1=NULL, iscale1=NULL,
- iloc2=NULL, iscale2=NULL,
- method.init=1, zero=NULL) {
+ bilogistic4 = function(llocation="identity",
+ lscale="loge",
+ iloc1=NULL, iscale1=NULL,
+ iloc2=NULL, iscale2=NULL,
+ method.init=1, zero=NULL) {
if(mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
if(mode(lscale) != "character" && mode(lscale) != "name")
@@ -142,9 +146,9 @@ bilogistic4 = function(llocation="identity",
rbilogis4 = function(n, loc1=0, scale1=1, loc2=0, scale2=1) {
- if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE)) stop("bad input for n")
- if(!is.Numeric(scale1, posit=TRUE)) stop("bad input for \"scale1\"")
- if(!is.Numeric(scale2, posit=TRUE)) stop("bad input for \"scale2\"")
+ if(!is.Numeric(n, posit=TRUE,allow=1,integ=TRUE)) stop("bad input for 'n'")
+ if(!is.Numeric(scale1, posit=TRUE)) stop("bad input for 'scale1'")
+ if(!is.Numeric(scale2, posit=TRUE)) stop("bad input for 'scale2'")
y1 = rlogis(n, loc=loc1, scale=scale1)
ezedd1 = exp(-(y1-loc1)/scale1)
y2 = loc2 - scale2 * log(1/sqrt(runif(n) / (1 + ezedd1)^2) - 1 - ezedd1)
@@ -152,35 +156,43 @@ rbilogis4 = function(n, loc1=0, scale1=1, loc2=0, scale2=1) {
}
pbilogis4 = function(q1, q2, loc1=0, scale1=1, loc2=0, scale2=1) {
- if(!is.Numeric(q1)) stop("bad input for \"q1\"")
- if(!is.Numeric(q2)) stop("bad input for \"q2\"")
- if(!is.Numeric(scale1, posit=TRUE)) stop("bad input for \"scale1\"")
- if(!is.Numeric(scale2, posit=TRUE)) stop("bad input for \"scale2\"")
+ if(!is.Numeric(q1)) stop("bad input for 'q1'")
+ if(!is.Numeric(q2)) stop("bad input for 'q2'")
+ if(!is.Numeric(scale1, posit=TRUE)) stop("bad input for 'scale1'")
+ if(!is.Numeric(scale2, posit=TRUE)) stop("bad input for 'scale2'")
1 / (1 + exp(-(q1-loc1)/scale1) + exp(-(q2-loc2)/scale2))
}
-dbilogis4 = function(x1, x2, loc1=0, scale1=1, loc2=0, scale2=1) {
- if(!is.Numeric(x1)) stop("bad input for \"x1\"")
- if(!is.Numeric(x2)) stop("bad input for \"x2\"")
- if(!is.Numeric(scale1, posit=TRUE)) stop("bad input for \"scale1\"")
- if(!is.Numeric(scale2, posit=TRUE)) stop("bad input for \"scale2\"")
- ezedd1 = exp(-(x1-loc1)/scale1)
- ezedd2 = exp(-(x2-loc2)/scale2)
- 2 * ezedd1 * ezedd2 / (scale1 * scale2 * (1 + ezedd1 + ezedd2)^3)
+dbilogis4 = function(x1, x2, loc1=0, scale1=1, loc2=0, scale2=1, log=FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+
+ L = max(length(x1), length(x2), length(loc1), length(loc2),
+ length(scale1), length(scale2))
+ x1 = rep(x1, len=L); x2 = rep(x2, len=L);
+ loc1 = rep(loc1, len=L); loc2 = rep(loc2, len=L);
+ scale1 = rep(scale1, len=L); scale2 = rep(scale2, len=L);
+ zedd1 = (-(x1-loc1)/scale1)
+ zedd2 = (-(x2-loc2)/scale2)
+ logdensity = log(2) + log(zedd1) + log(zedd2) - log(scale1) -
+ log(scale1) - 3 * log1p(exp(zedd1) + exp(zedd2))
+ if(log.arg) logdensity else exp(logdensity)
}
-freund61 = function(la="loge",
- lap="loge",
- lb="loge",
- lbp="loge",
- ia=NULL, iap=NULL, ib=NULL, ibp=NULL,
- independent=FALSE,
- zero=NULL) {
+ freund61 = function(la="loge",
+ lap="loge",
+ lb="loge",
+ lbp="loge",
+ ia=NULL, iap=NULL, ib=NULL, ibp=NULL,
+ independent=FALSE,
+ zero=NULL) {
if(mode(la) != "character" && mode(la) != "name")
la = as.character(substitute(la))
if(mode(lap) != "character" && mode(lap) != "name")
@@ -297,13 +309,13 @@ freund61 = function(la="loge",
-mckaygamma2 = function(la="loge",
- lp="loge",
- lq="loge",
- ia=NULL,
- ip=1,
- iq=1,
- zero=NULL) {
+ mckaygamma2 = function(la="loge",
+ lp="loge",
+ lq="loge",
+ ia=NULL,
+ ip=1,
+ iq=1,
+ zero=NULL) {
if(mode(la) != "character" && mode(la) != "name")
la = as.character(substitute(la))
if(mode(lp) != "character" && mode(lp) != "name")
@@ -400,8 +412,8 @@ mckaygamma2 = function(la="loge",
rfrank = function(n, alpha) {
- if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE)) stop("bad input for n")
- if(!is.Numeric(alpha, posit=TRUE)) stop("bad input for \"alpha\"")
+ if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE)) stop("bad input for 'n'")
+ if(!is.Numeric(alpha, posit=TRUE)) stop("bad input for 'alpha'")
alpha = rep(alpha, len=n)
U = runif(n)
V = runif(n)
@@ -421,9 +433,9 @@ rfrank = function(n, alpha) {
}
pfrank = function(q1, q2, alpha) {
- if(!is.Numeric(q1)) stop("bad input for \"q1\"")
- if(!is.Numeric(q2)) stop("bad input for \"q2\"")
- if(!is.Numeric(alpha, posit=TRUE)) stop("bad input for \"alpha\"")
+ if(!is.Numeric(q1)) stop("bad input for 'q1'")
+ if(!is.Numeric(q2)) stop("bad input for 'q2'")
+ if(!is.Numeric(alpha, posit=TRUE)) stop("bad input for 'alpha'")
L = max(length(q1), length(q2), length(alpha))
alpha = rep(alpha, len=L)
@@ -447,25 +459,35 @@ pfrank = function(q1, q2, alpha) {
ans
}
-dfrank = function(x1, x2, alpha) {
- if(!is.Numeric(x1)) stop("bad input for \"x1\"")
- if(!is.Numeric(x2)) stop("bad input for \"x2\"")
- if(!is.Numeric(alpha, posit=TRUE)) stop("bad input for \"alpha\"")
+dfrank = function(x1, x2, alpha, log = FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ if(!is.Numeric(x1)) stop("bad input for 'x1'")
+ if(!is.Numeric(x2)) stop("bad input for 'x2'")
+ if(!is.Numeric(alpha, posit=TRUE)) stop("bad input for 'alpha'")
L = max(length(x1), length(x2), length(alpha))
alpha = rep(alpha, len=L)
x1 = rep(x1, len=L)
x2 = rep(x2, len=L)
- temp = (alpha-1) + (alpha^x1 - 1) * (alpha^x2 - 1)
- index = (abs(alpha-1) < .Machine$double.eps)
- ans = x1
- if(any(!index))
- ans[!index] = (alpha[!index]-1) * log(alpha[!index]) *
- (alpha[!index])^(x1[!index]+x2[!index]) / (temp[!index])^2
- ans[x1<=0 | x2<=0 | x1>=1 | x2>=1] = 0
- ans[index] = 1
- ans
+ if(log.arg) {
+ denom = alpha-1 + (alpha^x1 -1) * (alpha^x2 -1)
+ denom = abs(denom) # Needed; Genest (1987) uses this too, eqn (4.1)
+ log((alpha-1) * log(alpha)) + (x1+x2)*log(alpha) - 2 * log(denom)
+ } else {
+ temp = (alpha-1) + (alpha^x1 - 1) * (alpha^x2 - 1)
+ index = (abs(alpha-1) < .Machine$double.eps)
+ ans = x1
+ if(any(!index))
+ ans[!index] = (alpha[!index]-1) * log(alpha[!index]) *
+ (alpha[!index])^(x1[!index]+x2[!index]) / (temp[!index])^2
+ ans[x1<=0 | x2<=0 | x1>=1 | x2>=1] = 0
+ ans[index] = 1
+ ans
+ }
}
@@ -478,11 +500,11 @@ frank.control <- function(save.weight=TRUE, ...)
-frank = function(lapar="loge", eapar=list(), iapar=2, nsimEIM=250) {
+ frank = function(lapar="loge", eapar=list(), iapar=2, nsimEIM=250) {
if(mode(lapar) != "character" && mode(lapar) != "name")
lapar = as.character(substitute(lapar))
if(!is.Numeric(iapar, positive = TRUE))
- stop("\"iapar\" must be positive")
+ stop("'iapar' must be positive")
if(!is.list(eapar)) eapar = list()
if(length(nsimEIM) &&
(!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 50))
@@ -523,10 +545,7 @@ frank = function(lapar="loge", eapar=list(), iapar=2, nsimEIM=250) {
function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
apar = eta2theta(eta, .lapar, earg= .eapar )
if(residuals) stop("loglikelihood residuals not implemented yet") else {
- denom = apar-1 + (apar^y[,1] -1) * (apar^y[,2] -1)
- denom = abs(denom) # Needed; Genest (1987) uses this too, eqn (4.1)
- sum(w * (log((apar-1) * log(apar)) + (y[,1]+y[,2])*log(apar) -
- 2 * log(denom)))
+ sum(w * dfrank(x1=y[,1], x2=y[,2], alpha=apar, log = TRUE))
}
}, list(.lapar=lapar, .eapar=eapar ))),
vfamily=c("frank"),
@@ -593,11 +612,11 @@ frank = function(lapar="loge", eapar=list(), iapar=2, nsimEIM=250) {
-gammahyp = function(ltheta="loge", itheta=NULL, expected=FALSE) {
+ gammahyp = function(ltheta="loge", itheta=NULL, expected=FALSE) {
if(mode(ltheta) != "character" && mode(ltheta) != "name")
ltheta = as.character(substitute(ltheta))
if(!is.logical(expected) || length(expected)!=1)
- stop("\"expected\" must be a single logical")
+ stop("'expected' must be a single logical")
new("vglmff",
blurb=c("Gamma Hyperbola Bivariate Distribution\n",
@@ -654,8 +673,8 @@ gammahyp = function(ltheta="loge", itheta=NULL, expected=FALSE) {
-morgenstern = function(lapar="rhobit", earg=list(), iapar=NULL, tola0=0.01,
- method.init=1) {
+ morgenstern = function(lapar="rhobit", earg=list(), iapar=NULL, tola0=0.01,
+ method.init=1) {
if(mode(lapar) != "character" && mode(lapar) != "name")
lapar = as.character(substitute(lapar))
if(!is.list(earg)) earg = list()
@@ -667,7 +686,7 @@ morgenstern = function(lapar="rhobit", earg=list(), iapar=NULL, tola0=0.01,
stop("'iapar' must not be between -tola0 and tola0")
if(!is.Numeric(method.init, allow=1, integ=TRUE, positi=TRUE) ||
method.init > 2.5)
- stop("argument \"method.init\" must be 1 or 2")
+ stop("argument 'method.init' must be 1 or 2")
new("vglmff",
blurb=c("Morgenstern's Bivariate Exponential Distribution\n",
@@ -749,9 +768,9 @@ morgenstern = function(lapar="rhobit", earg=list(), iapar=NULL, tola0=0.01,
rfgm = function(n, alpha) {
- if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE)) stop("bad input for n")
- if(!is.Numeric(alpha)) stop("bad input for \"alpha\"")
- if(any(abs(alpha) > 1)) stop("\"alpha\" values out of range")
+ if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE)) stop("bad input for 'n'")
+ if(!is.Numeric(alpha)) stop("bad input for 'alpha'")
+ if(any(abs(alpha) > 1)) stop("'alpha' values out of range")
y1 = V1 = runif(n)
V2 = runif(n)
@@ -767,8 +786,8 @@ rfgm = function(n, alpha) {
dfgm = function(x1, x2, alpha, log=FALSE) {
log.arg = log
rm(log)
- if(!is.Numeric(alpha)) stop("bad input for \"alpha\"")
- if(any(abs(alpha) > 1)) stop("\"alpha\" values out of range")
+ if(!is.Numeric(alpha)) stop("bad input for 'alpha'")
+ if(any(abs(alpha) > 1)) stop("'alpha' values out of range")
if( !is.logical( log.arg ) || length( log.arg )!=1 )
stop("bad input for 'log'")
@@ -792,10 +811,10 @@ dfgm = function(x1, x2, alpha, log=FALSE) {
pfgm = function(q1, q2, alpha) {
- if(!is.Numeric(q1)) stop("bad input for \"q1\"")
- if(!is.Numeric(q2)) stop("bad input for \"q2\"")
- if(!is.Numeric(alpha)) stop("bad input for \"alpha\"")
- if(any(abs(alpha) > 1)) stop("\"alpha\" values out of range")
+ if(!is.Numeric(q1)) stop("bad input for 'q1'")
+ if(!is.Numeric(q2)) stop("bad input for 'q2'")
+ if(!is.Numeric(alpha)) stop("bad input for 'alpha'")
+ if(any(abs(alpha) > 1)) stop("'alpha' values out of range")
L = max(length(q1), length(q2), length(alpha))
if(length(q1) != L) q1 = rep(q1, len=L)
@@ -825,14 +844,14 @@ fgm.control <- function(save.weight=TRUE, ...)
-fgm = function(lapar="rhobit", earg=list(), iapar=NULL,
- method.init=1, nsimEIM=200) {
+ fgm = function(lapar="rhobit", earg=list(), iapar=NULL,
+ method.init=1, nsimEIM=200) {
if(mode(lapar) != "character" && mode(lapar) != "name")
lapar = as.character(substitute(lapar))
if(!is.list(earg)) earg = list()
if(!is.Numeric(method.init, allow=1, integ=TRUE, positi=TRUE) ||
method.init > 2.5)
- stop("argument \"method.init\" must be 1 or 2")
+ stop("argument 'method.init' must be 1 or 2")
if(!length(nsimEIM) ||
(!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 50))
stop("'nsimEIM' should be an integer greater than 50")
@@ -885,16 +904,7 @@ fgm = function(lapar="rhobit", earg=list(), iapar=NULL,
function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
alpha = eta2theta(eta, .lapar, earg= .earg )
if(residuals) stop("loglikelihood residuals not implemented yet") else {
- denm1 = alpha * (1 - 2 * y[,1]) * (1 - 2 * y[,2])
- denom = 1 + denm1
- mytolerance = 0.0 # .Machine$double.eps
- bad <- (denom <= mytolerance) # Range violation
- if(any(bad)) {
- cat("There are some range violations in @loglikelihood\n")
- if(exists("flush.console")) flush.console()
- }
- sum(w[bad]) * (-1.0e10) +
- sum(w[!bad] * log1p(denm1[!bad]))
+ sum(w * dfgm(x1=y[,1], x2=y[,2], alpha=alpha, log=TRUE))
}
}, list( .lapar=lapar, .earg=earg ))),
vfamily=c("fgm"),
@@ -907,7 +917,7 @@ fgm = function(lapar="rhobit", earg=list(), iapar=NULL,
bad <- (denom <= mytolerance) # Range violation
if(any(bad)) {
cat("There are some range violations in @deriv\n")
- if(exists("flush.console")) flush.console()
+ flush.console()
denom[bad] = 2 * mytolerance
}
dl.dalpha = numerator / denom
@@ -925,7 +935,7 @@ fgm = function(lapar="rhobit", earg=list(), iapar=NULL,
run.var = ((ii-1) * run.var + temp3^2) / ii
}
wz = if(intercept.only)
- matrix(apply(cbind(run.var), 2, mean),
+ matrix(colMeans(cbind(run.var)),
n, dimm(M), byrow=TRUE) else cbind(run.var)
wz = wz * dalpha.deta^2
@@ -935,7 +945,8 @@ fgm = function(lapar="rhobit", earg=list(), iapar=NULL,
-gumbelIbiv = function(lapar="identity", earg=list(), iapar=NULL, method.init=1) {
+ gumbelIbiv = function(lapar="identity", earg=list(),
+ iapar=NULL, method.init=1) {
if(mode(lapar) != "character" && mode(lapar) != "name")
lapar = as.character(substitute(lapar))
if(!is.list(earg)) earg = list()
@@ -943,7 +954,7 @@ gumbelIbiv = function(lapar="identity", earg=list(), iapar=NULL, method.init=1)
stop("'iapar' must be a single number")
if(!is.Numeric(method.init, allow=1, integ=TRUE, positi=TRUE) ||
method.init > 2.5)
- stop("argument \"method.init\" must be 1 or 2")
+ stop("argument 'method.init' must be 1 or 2")
new("vglmff",
blurb=c("Gumbel's Type I Bivariate Distribution\n",
@@ -986,7 +997,7 @@ gumbelIbiv = function(lapar="identity", earg=list(), iapar=NULL, method.init=1)
bad <- (denom <= mytolerance) # Range violation
if(any(bad)) {
cat("There are some range violations in @deriv\n")
- if(exists("flush.console")) flush.console()
+ flush.console()
}
sum(bad) * (-1.0e10) +
sum(w[!bad] * (-y[!bad,1] - y[!bad,2] +
@@ -1028,9 +1039,9 @@ gumbelIbiv = function(lapar="identity", earg=list(), iapar=NULL, method.init=1)
pplack = function(q1, q2, oratio) {
- if(!is.Numeric(q1)) stop("bad input for \"q1\"")
- if(!is.Numeric(q2)) stop("bad input for \"q2\"")
- if(!is.Numeric(oratio, posit=TRUE)) stop("bad input for \"oratio\"")
+ if(!is.Numeric(q1)) stop("bad input for 'q1'")
+ if(!is.Numeric(q2)) stop("bad input for 'q2'")
+ if(!is.Numeric(oratio, posit=TRUE)) stop("bad input for 'oratio'")
L = max(length(q1), length(q2), length(oratio))
if(length(q1) != L) q1 = rep(q1, len=L)
@@ -1060,8 +1071,8 @@ pplack = function(q1, q2, oratio) {
rplack = function(n, oratio) {
- if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE)) stop("bad input for n")
- if(!is.Numeric(oratio, posit=TRUE)) stop("bad input for \"oratio\"")
+ if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE)) stop("bad input for 'n'")
+ if(!is.Numeric(oratio, posit=TRUE)) stop("bad input for 'oratio'")
if(length(oratio) != n) oratio = rep(oratio, len=n)
y1 = U = runif(n)
@@ -1079,7 +1090,7 @@ rplack = function(n, oratio) {
dplack = function(x1, x2, oratio, log=FALSE) {
log.arg = log
rm(log)
- if(!is.Numeric(oratio, posit=TRUE)) stop("bad input for \"oratio\"")
+ if(!is.Numeric(oratio, posit=TRUE)) stop("bad input for 'oratio'")
L = max(length(x1), length(x2), length(oratio))
if(length(x1) != L) x1 = rep(x1, len=L)
if(length(x2) != L) x2 = rep(x2, len=L)
@@ -1109,8 +1120,8 @@ plackett.control <- function(save.weight=TRUE, ...)
-plackett = function(link="loge", earg=list(),
- ioratio=NULL, method.init=1, nsimEIM=200) {
+ plackett = function(link="loge", earg=list(),
+ ioratio=NULL, method.init=1, nsimEIM=200) {
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
@@ -1119,7 +1130,6 @@ plackett = function(link="loge", earg=list(),
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
method.init > 2) stop("method.init must be 1 or 2")
-
new("vglmff",
blurb=c("Plackett Distribution\n",
"Links: ",
@@ -1171,10 +1181,7 @@ plackett = function(link="loge", earg=list(),
function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
oratio = eta2theta(eta, .link, earg= .earg )
if(residuals) stop("loglikelihood residuals not implemented yet") else {
- y1 = y[,1]
- y2 = y[,2]
- sum(w * (log(oratio) + log1p((oratio-1) * (y1+y2-2*y1*y2)) - 1.5 *
- log((1 + (y1+y2)*(oratio-1))^2 - 4*oratio*(oratio-1)*y1*y2)))
+ sum(w * dplack(x1= y[,1], x2= y[,2], oratio=oratio, log=TRUE))
}
}, list( .link=link, .earg=earg ))),
vfamily=c("plackett"),
@@ -1209,7 +1216,7 @@ plackett = function(link="loge", earg=list(),
run.var = ((ii-1) * run.var + temp3^2) / ii
}
wz = if(intercept.only)
- matrix(apply(cbind(run.var), 2, mean),
+ matrix(colMeans(cbind(run.var)),
n, dimm(M), byrow=TRUE) else cbind(run.var)
wz = wz * doratio.deta^2
@@ -1223,10 +1230,10 @@ plackett = function(link="loge", earg=list(),
damh = function(x1, x2, alpha, log=FALSE) {
log.arg = log
rm(log)
- if(!is.Numeric(x1)) stop("bad input for \"x1\"")
- if(!is.Numeric(x2)) stop("bad input for \"x2\"")
- if(!is.Numeric(alpha)) stop("bad input for \"alpha\"")
- if(any(abs(alpha) > 1)) stop("\"alpha\" values out of range")
+ if(!is.Numeric(x1)) stop("bad input for 'x1'")
+ if(!is.Numeric(x2)) stop("bad input for 'x2'")
+ if(!is.Numeric(alpha)) stop("bad input for 'alpha'")
+ if(any(abs(alpha) > 1)) stop("'alpha' values out of range")
L = max(length(x1), length(x2), length(alpha))
alpha = rep(alpha, len=L)
x1 = rep(x1, len=L)
@@ -1243,10 +1250,10 @@ damh = function(x1, x2, alpha, log=FALSE) {
}
pamh = function(q1, q2, alpha) {
- if(!is.Numeric(q1)) stop("bad input for \"q1\"")
- if(!is.Numeric(q2)) stop("bad input for \"q2\"")
- if(!is.Numeric(alpha)) stop("bad input for \"alpha\"")
- if(any(abs(alpha) > 1)) stop("\"alpha\" values out of range")
+ if(!is.Numeric(q1)) stop("bad input for 'q1'")
+ if(!is.Numeric(q2)) stop("bad input for 'q2'")
+ if(!is.Numeric(alpha)) stop("bad input for 'alpha'")
+ if(any(abs(alpha) > 1)) stop("'alpha' values out of range")
L = max(length(q1), length(q2), length(alpha))
if(length(q1) != L) q1 = rep(q1, len=L)
@@ -1268,9 +1275,9 @@ pamh = function(q1, q2, alpha) {
}
ramh = function(n, alpha) {
- if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE)) stop("bad input for n")
- if(!is.Numeric(alpha)) stop("bad input for \"alpha\"")
- if(any(abs(alpha) > 1)) stop("\"alpha\" values out of range")
+ if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE)) stop("bad input for 'n'")
+ if(!is.Numeric(alpha)) stop("bad input for 'alpha'")
+ if(any(abs(alpha) > 1)) stop("'alpha' values out of range")
U1 = V1 = runif(n)
V2 = runif(n)
@@ -1286,8 +1293,8 @@ amh.control <- function(save.weight=TRUE, ...)
list(save.weight=save.weight)
}
-amh = function(lalpha="rhobit", ealpha=list(), ialpha=NULL,
- method.init=1, nsimEIM=250)
+ amh = function(lalpha="rhobit", ealpha=list(), ialpha=NULL,
+ method.init=1, nsimEIM=250)
{
if(mode(lalpha) != "character" && mode(lalpha) != "name")
lalpha = as.character(substitute(lalpha))
@@ -1343,8 +1350,7 @@ amh = function(lalpha="rhobit", ealpha=list(), ialpha=NULL,
function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
alpha = eta2theta(eta, .lalpha, earg= .ealpha )
if(residuals) stop("loglikelihood residuals not implemented yet") else {
- denom = 1 - alpha*(1-y[,1])*(1-y[,2])
- sum(w * (log1p(-alpha+2*alpha*y[,1]*y[,2]/denom) - 2*log(denom)))
+ sum(w * damh(x1=y[,1], x2=y[,2], alpha=alpha, log=TRUE))
}
}, list( .lalpha=lalpha, .earg=ealpha ))),
vfamily=c("amh"),
@@ -1377,7 +1383,7 @@ amh = function(lalpha="rhobit", ealpha=list(), ialpha=NULL,
run.var = ((ii-1) * run.var + temp3^2) / ii
}
wz = if(intercept.only)
- matrix(apply(cbind(run.var), 2, mean),
+ matrix(colMeans(cbind(run.var)),
n, dimm(M), byrow=TRUE) else cbind(run.var)
wz = wz * dalpha.deta^2
w * wz
diff --git a/R/family.categorical.q b/R/family.categorical.q
index 1945542..cd0ee39 100644
--- a/R/family.categorical.q
+++ b/R/family.categorical.q
@@ -4,8 +4,54 @@
+
+
+
+dmultinomial = function(x, size = NULL, prob, log = FALSE,
+ dochecking=TRUE, smallno = 1.0e-7) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ x = as.matrix(x)
+ prob = as.matrix(prob)
+ if(((K <- ncol(x)) <= 1) || ncol(prob) != K)
+ stop("'x' and 'prob' must be matrices with two or more columns")
+ if(dochecking) {
+ if(min(prob) < 0)
+ stop("'prob' contains some negative values")
+ if(any(abs((rsprob <- rowSums(prob)) - 1) > smallno))
+ stop("some rows of 'prob' do not add to unity")
+ if(any(abs(x - round(x)) > smallno))
+ stop("'x' should be integer valued")
+ if(length(size)) {
+ if(any(abs(size - rowSums(x)) > smallno))
+ stop("rowSums(x) does not agree with 'size'")
+ } else {
+ size = round(rowSums(x))
+ }
+ } else {
+ if(!length(size))
+ size = round(rowSums(prob))
+ }
+ logdensity = lgamma(size + 1) + rowSums(x * log(prob) - lgamma(x + 1))
+ if(log.arg) logdensity else exp(logdensity)
+}
+
+
+
+
+
+
+
+
+
+
process.categorical.data.vgam = expression({
+
+
+
if(!is.matrix(y)) {
yf = as.factor(y)
@@ -49,14 +95,15 @@ process.categorical.data.vgam = expression({
y = y / nvec # Convert to proportions
if(!length(mustart)) {
- mustart = (nvec * y + 1/ncol(y)) / (nvec+1) # This may be better
- mustart = y + (1/ncol(y) - y)/nvec # This may be wrong
+ mustart = (nvec * y + 1/ncol(y)) / (nvec+1)
+ mustart = y + (1/ncol(y) - y)/nvec
}
})
+
Deviance.categorical.data.vgam <-
function(mu, y, w, residuals = FALSE, eta, extra=NULL)
{
@@ -94,11 +141,8 @@ Deviance.categorical.data.vgam <-
-
-
-
-sratio = function(link="logit", earg=list(),
- parallel=FALSE, reverse=FALSE, zero=NULL)
+ sratio = function(link="logit", earg=list(),
+ parallel=FALSE, reverse=FALSE, zero=NULL)
{
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -172,8 +216,9 @@ sratio = function(link="logit", earg=list(),
}
}, list( .earg=earg, .link=link, .reverse=reverse) )),
loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * y * log(mu)),
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ },
vfamily=c("sratio", "vcategorical"),
deriv=eval(substitute(expression({
if(!length(extra$mymat)) {
@@ -209,8 +254,8 @@ sratio = function(link="logit", earg=list(),
-cratio = function(link="logit", earg=list(),
- parallel=FALSE, reverse=FALSE, zero=NULL)
+ cratio = function(link="logit", earg=list(),
+ parallel=FALSE, reverse=FALSE, zero=NULL)
{
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -282,8 +327,9 @@ cratio = function(link="logit", earg=list(),
}
}, list( .earg=earg, .link=link, .reverse=reverse) )),
loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * y * log(mu)),
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * y * log(mu))
+ },
vfamily=c("cratio", "vcategorical"),
deriv=eval(substitute(expression({
if(!length(extra$mymat)) {
@@ -331,7 +377,6 @@ vglm.multinomial.deviance.control = function(maxit=21, panic=FALSE, ...)
vglm.multinomial.control = function(maxit=21, panic=FALSE,
criterion=c("aic1", "aic2", names( .min.criterion.VGAM )), ...)
{
-
if(mode(criterion) != "character" && mode(criterion) != "name")
criterion = as.character(substitute(criterion))
criterion = match.arg(criterion,
@@ -358,8 +403,8 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic=TRUE, ...)
-multinomial = function(zero=NULL, parallel=FALSE, nointercept=NULL,
- refLevel="last")
+ multinomial = function(zero=NULL, parallel=FALSE, nointercept=NULL,
+ refLevel="last")
{
if(length(refLevel) != 1) stop("the length of 'refLevel' must be one")
if(is.character(refLevel)) {
@@ -372,7 +417,7 @@ multinomial = function(zero=NULL, parallel=FALSE, nointercept=NULL,
if(!is.Numeric(refLevel, allow=1, integer=TRUE, posit=TRUE))
stop("could not coerce 'refLevel' into a single positive integer")
} else if(!is.Numeric(refLevel, allow=1, integer=TRUE, posit=TRUE))
- stop("\"refLevel\" must be a single positive integer")
+ stop("'refLevel' must be a single positive integer")
new("vglmff",
blurb=c("Multinomial logit model\n\n",
@@ -449,8 +494,9 @@ multinomial = function(zero=NULL, parallel=FALSE, nointercept=NULL,
}
}), list( .refLevel = refLevel )),
loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * y * log(mu)),
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ },
vfamily=c("multinomial", "vcategorical"),
deriv=eval(substitute(expression({
if( .refLevel < 0) {
@@ -490,14 +536,14 @@ multinomial = function(zero=NULL, parallel=FALSE, nointercept=NULL,
-cumulative = function(link="logit", earg = list(),
- parallel=FALSE, reverse=FALSE,
- mv=FALSE,
- intercept.apply = FALSE)
+ cumulative = function(link="logit", earg = list(),
+ parallel=FALSE, reverse=FALSE,
+ mv=FALSE,
+ intercept.apply = FALSE)
{
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
- if(!is.logical(mv) || length(mv)!=1) stop("\"mv\" must be a single logical")
+ if(!is.logical(mv) || length(mv)!=1) stop("'mv' must be a single logical")
if(!is.list(earg)) earg = list()
new("vglmff",
@@ -588,6 +634,10 @@ cumulative = function(link="logit", earg = list(),
paste("P[Y<=",1:M,"]", sep="")
predictors.names = namesof(mynames, .link, short=TRUE, earg= .earg)
y.names = paste("mu", 1:(M+1), sep="")
+ if(ncol(cbind(w)) == 1) {
+ for(iii in 1:ncol(y))
+ mustart[,iii] = weighted.mean(y[,iii], w)
+ }
if(length(dimnames(y)))
extra$dimnamesy2 = dimnames(y)[[2]]
@@ -672,8 +722,9 @@ cumulative = function(link="logit", earg = list(),
answer
}, list( .link=link, .reverse=reverse, .earg=earg, .mv=mv ))),
loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * y * log(mu)),
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ },
vfamily=c("cumulative", "vcategorical"),
deriv=eval(substitute(expression({
mu.use = pmax(mu, .Machine$double.eps * 1.0e-0)
@@ -681,17 +732,17 @@ cumulative = function(link="logit", earg = list(),
if( .mv ) {
NOS = extra$NOS
Llevels = extra$Llevels
- dcump.deta = answer.matrix = matrix(0, n, NOS * (Llevels-1))
+ dcump.deta = resmat = matrix(0, n, NOS * (Llevels-1))
for(iii in 1:NOS) {
cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
aindex = (iii-1)*(Llevels) + 1:(Llevels-1)
cump = eta2theta(eta[,cindex,drop=FALSE], .link, earg= .earg)
dcump.deta[,cindex] = dtheta.deta(cump, .link, earg= .earg)
- answer.matrix[,cindex] =
+ resmat[,cindex] =
(y[,aindex,drop=FALSE]/mu.use[,aindex,drop=FALSE] -
- y[,1+aindex,drop=FALSE]/mu.use[,1+aindex,drop=FALSE])
+ y[,1+aindex,drop=FALSE]/mu.use[,1+aindex,drop=FALSE])
}
- (if( .reverse) -w else w) * dcump.deta * answer.matrix
+ (if( .reverse) -w else w) * dcump.deta * resmat
} else {
cump = eta2theta(eta, .link, earg= .earg)
dcump.deta = dtheta.deta(cump, .link, earg= .earg)
@@ -744,8 +795,8 @@ cumulative = function(link="logit", earg = list(),
-acat = function(link="loge", earg = list(),
- parallel=FALSE, reverse=FALSE, zero=NULL)
+ acat = function(link="loge", earg = list(),
+ parallel=FALSE, reverse=FALSE, zero=NULL)
{
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -811,8 +862,9 @@ acat = function(link="loge", earg = list(),
.link, earg= .earg )
}, list( .earg=earg, .link=link, .reverse=reverse) )),
loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * y * log(mu)),
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ },
vfamily=c("acat", "vcategorical"),
deriv=eval(substitute(expression({
zeta = eta2theta(eta, .link, earg= .earg ) # May be zetar
@@ -833,10 +885,10 @@ acat = function(link="loge", earg = list(),
hess = attr(d1, "hessian") / d1
if(M>1)
- for(j in 1:(M-1))
- for(k in (j+1):M)
- wz[,iam(j,k,M)] = (hess[,j,k] - score[,j] * score[,k]) *
- dzeta.deta[,j] * dzeta.deta[,k]
+ for(jay in 1:(M-1))
+ for(kay in (jay+1):M)
+ wz[,iam(jay,kay,M)] = (hess[,jay,kay] - score[,jay] *
+ score[,kay]) * dzeta.deta[,jay] * dzeta.deta[,kay]
if( .reverse ) {
cump = tapplymat1(mu, "cumsum")
wz[,1:M] = (cump[,1:M]/zeta^2 - score^2) * dzeta.deta^2
@@ -852,8 +904,8 @@ acat.deriv = function(zeta, reverse, M, n)
{
alltxt = NULL
- for(i in 1:M) {
- index = if(reverse) i:M else 1:i
+ for(ii in 1:M) {
+ index = if(reverse) ii:M else 1:ii
vars = paste("zeta", index, sep="")
txt = paste(vars, collapse="*")
alltxt = c(alltxt, txt)
@@ -866,8 +918,8 @@ acat.deriv = function(zeta, reverse, M, n)
d1 = deriv3(txt, allvars, hessian=TRUE) # deriv3() computes the Hessian
zeta = as.matrix(zeta)
- for(i in 1:M)
- assign(paste("zeta", i, sep=""), zeta[,i])
+ for(ii in 1:M)
+ assign(paste("zeta", ii, sep=""), zeta[,ii])
ans = eval(d1)
ans
@@ -876,17 +928,17 @@ acat.deriv = function(zeta, reverse, M, n)
-brat = function(refgp="last",
+ brat = function(refgp="last",
refvalue = 1,
init.alpha = 1)
{
if(!is.Numeric(init.alpha, posit=TRUE))
- stop("\"init.alpha\" must contain positive values only")
+ stop("'init.alpha' must contain positive values only")
if(!is.Numeric(refvalue, allow=1, posit=TRUE))
- stop("\"refvalue\" must be a single positive value")
+ stop("'refvalue' must be a single positive value")
if(!is.character(refgp) &&
!is.Numeric(refgp, allow=1, integer=TRUE, posit=TRUE))
- stop("\"refgp\" must be a single positive integer")
+ stop("'refgp' must be a single positive integer")
new("vglmff",
blurb=c(paste("Bradley-Terry model (without ties)\n\n"),
@@ -899,7 +951,7 @@ brat = function(refgp="last",
try.index = 1:400
M = (1:length(try.index))[(try.index+1)*(try.index) == ncol(y)]
- if(!is.finite(M)) stop("can't determine M")
+ if(!is.finite(M)) stop("cannot determine 'M'")
init.alpha = matrix( rep( .init.alpha, len=M), n, M, byrow=TRUE)
etastart = matrix(theta2eta(init.alpha, "loge", earg=list()), n, M, byrow=TRUE)
refgp = .refgp
@@ -930,8 +982,9 @@ brat = function(refgp="last",
misc$refvalue = .refvalue
}), list( .refgp=refgp, .refvalue=refvalue ))),
loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * y * log(mu)),
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ },
vfamily=c("brat"),
deriv=eval(substitute(expression({
ans = NULL
@@ -986,14 +1039,14 @@ bratt = function(refgp="last",
i0 = 0.01)
{
if(!is.Numeric(i0, allow=1, positi=TRUE))
- stop("\"i0\" must be a single positive value")
+ stop("'i0' must be a single positive value")
if(!is.Numeric(init.alpha, positi=TRUE))
- stop("\"init.alpha\" must contain positive values only")
+ stop("'init.alpha' must contain positive values only")
if(!is.Numeric(refvalue, allow=1, positi=TRUE))
- stop("\"refvalue\" must be a single positive value")
+ stop("'refvalue' must be a single positive value")
if(!is.character(refgp) &&
!is.Numeric(refgp, allow=1, integer=TRUE, positi=TRUE))
- stop("\"refgp\" must be a single positive integer")
+ stop("'refgp' must be a single positive integer")
new("vglmff",
blurb=c(paste("Bradley-Terry model (with ties)\n\n"),
"Links: ",
@@ -1001,7 +1054,7 @@ bratt = function(refgp="last",
initialize=eval(substitute(expression({
try.index = 1:400
M = (1:length(try.index))[(try.index*(try.index-1)) == ncol(y)]
- if(!is.Numeric(M, allow=1, integ=TRUE)) stop("can't determine M")
+ if(!is.Numeric(M, allow=1, integ=TRUE)) stop("cannot determine 'M'")
NCo = M # number of contestants
are.ties = attr(y, "are.ties") # If Brat() was used
@@ -1038,7 +1091,7 @@ bratt = function(refgp="last",
probs = qprobs = NULL
M = ncol(eta)
for(ii in 1:nrow(eta)) {
- alpha = .brat.alpha(eta2theta(eta[ii,-M], "loge"), .refvalue, .refgp)
+ alpha = .brat.alpha(eta2theta(eta[ii,-M],"loge"), .refvalue, .refgp)
alpha0 = eta2theta(eta[ii,M], "loge")
alpha1 = alpha[extra$ybrat.indices[,"rindex"]]
alpha2 = alpha[extra$ybrat.indices[,"cindex"]]
@@ -1059,8 +1112,9 @@ bratt = function(refgp="last",
misc$alpha0 = alpha0
}), list( .refgp=refgp, .refvalue=refvalue ))),
loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (y * log(mu) + 0.5 * extra$ties * log(attr(mu, "probtie")))),
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * (y * log(mu) + 0.5 * extra$ties * log(attr(mu, "probtie"))))
+ },
vfamily=c("bratt"),
deriv=eval(substitute(expression({
ans = NULL
@@ -1069,7 +1123,7 @@ bratt = function(refgp="last",
uindex = if( .refgp =="last") 1:(M-1) else (1:(M))[-( .refgp )]
eta = as.matrix(eta)
for(ii in 1:nrow(eta)) {
- alpha = .brat.alpha(eta2theta(eta[ii,-M], "loge"), .refvalue, .refgp)
+ alpha = .brat.alpha(eta2theta(eta[ii,-M],"loge"), .refvalue, .refgp)
alpha0 = eta2theta(eta[ii,M], "loge") # M == ncol(eta)
ymat = InverseBrat(y[ii,], NCo=M, diag=0)
tmat = InverseBrat(ties[ii,], NCo=M, diag=0)
@@ -1098,7 +1152,7 @@ bratt = function(refgp="last",
weight= eval(substitute(expression({
wz = matrix(0, n, dimm(M)) # includes diagonal
for(ii in 1:nrow(eta)) {
- alpha = .brat.alpha(eta2theta(eta[ii,-M], "loge"), .refvalue, .refgp)
+ alpha = .brat.alpha(eta2theta(eta[ii,-M],"loge"), .refvalue, .refgp)
alpha0 = eta2theta(eta[ii,M], "loge") # M == ncol(eta)
ymat = InverseBrat(y[ii,], NCo=M, diag=0)
tmat = InverseBrat(ties[ii,], NCo=M, diag=0)
@@ -1121,17 +1175,16 @@ bratt = function(refgp="last",
ind5 = iam(1,1, M=NCo, both=TRUE, diag=FALSE)
alphajunk = c(alpha, junk=NA)
mat4 = cbind(uindex[ind5$row],uindex[ind5$col])
- wz[ii,(M+1):ncol(wz)] =
- -(ymat[mat4] + ymat[mat4[,2:1]] + tmat[mat4]) *
- alphajunk[uindex[ind5$col]] * alphajunk[uindex[ind5$row]] /
- (alpha0 + alphajunk[uindex[ind5$row]] +
- alphajunk[uindex[ind5$col]])^2
+ wz[ii,(M+1):ncol(wz)] = -(ymat[mat4] + ymat[mat4[,2:1]] +
+ tmat[mat4]) * alphajunk[uindex[ind5$col]] *
+ alphajunk[uindex[ind5$row]] / (alpha0 +
+ alphajunk[uindex[ind5$row]] + alphajunk[uindex[ind5$col]])^2
}
- for(j in 1:length(uindex)) {
- jay = uindex[j]
+ for(sss in 1:length(uindex)) {
+ jay = uindex[sss]
naj = ymat[,jay] + ymat[jay,] + tmat[,jay]
Daj = alpha[jay] + alpha + alpha0
- wz[ii,iam(j, NCo, M=NCo, diag=TRUE)] =
+ wz[ii,iam(sss, NCo, M=NCo, diag=TRUE)] =
-alpha[jay] * alpha0 * sum(naj / Daj^2)
}
}
@@ -1151,7 +1204,7 @@ bratt = function(refgp="last",
.brat.indices = function(NCo, are.ties=FALSE) {
if(!is.Numeric(NCo, allow=1, integ=TRUE) || NCo < 2)
- stop("bad input for NCo")
+ stop("bad input for 'NCo'")
m = diag(NCo)
if(are.ties) {
cbind(rindex=row(m)[col(m) < row(m)], cindex=col(m)[col(m) < row(m)])
@@ -1165,8 +1218,8 @@ Brat = function(mat, ties=0*mat, string=c(" > "," == ")) {
callit = if(length(names(allargs))) names(allargs) else
as.character(1:length(allargs))
ans = ans.ties = NULL
- for(i in 1:length(allargs)) {
- m = allargs[[i]]
+ for(ii in 1:length(allargs)) {
+ m = allargs[[ii]]
if(!is.matrix(m) || dim(m)[1] != dim(m)[2])
stop("m must be a square matrix")
@@ -1207,19 +1260,20 @@ InverseBrat = function(yvec, NCo=
yvec = c(yvec)
ptr = 1
for(mul in 1:multiplicity)
- for(i1 in 1:(NCo))
- for(i2 in 1:(NCo))
- if(i1 != i2) {
- ans[i2,i1,mul] = yvec[ptr]
- ptr = ptr + 1
- }
+ for(i1 in 1:(NCo))
+ for(i2 in 1:(NCo))
+ if(i1 != i2) {
+ ans[i2,i1,mul] = yvec[ptr]
+ ptr = ptr + 1
+ }
ans = if(multiplicity>1) ans else matrix(ans, NCo, NCo)
if(is.array(yvec.orig) || is.matrix(yvec.orig)) {
names.yvec = dimnames(yvec.orig)[[2]]
- i = strsplit(names.yvec, string[1])
+ ii = strsplit(names.yvec, string[1])
cal = NULL
- for(k in c(NCo, 1:(NCo-1))) cal = c(cal, (i[[k]])[1])
+ for(kk in c(NCo, 1:(NCo-1)))
+ cal = c(cal, (ii[[kk]])[1])
if(multiplicity>1) {
dimnames(ans) = list(cal, cal, dimnames(yvec.orig)[[1]])
} else
@@ -1231,58 +1285,55 @@ InverseBrat = function(yvec, NCo=
-tapplymat1 <- function(mat, function.arg=c("cumsum", "diff", "cumprod"))
+tapplymat1 = function(mat, function.arg=c("cumsum", "diff", "cumprod"))
{
if(!missing(function.arg))
- function.arg <- as.character(substitute(function.arg))
- function.arg <- match.arg(function.arg, c("cumsum", "diff", "cumprod"))[1]
+ function.arg = as.character(substitute(function.arg))
+ function.arg = match.arg(function.arg, c("cumsum", "diff", "cumprod"))[1]
- type <- switch(function.arg,
- cumsum=1,
- diff=2,
- cumprod=3,
- stop("function.arg not matched"))
+ type = switch(function.arg, cumsum=1, diff=2, cumprod=3,
+ stop("function.arg not matched"))
if(!is.matrix(mat))
- mat <- as.matrix(mat)
- nr <- nrow(mat)
- nc <- ncol(mat)
- fred <- dotC(name="tapplymat1", mat=as.double(mat),
- as.integer(nr), as.integer(nc), as.integer(type))
-
- dim(fred$mat) <- c(nr, nc)
- dimnames(fred$mat) <- dimnames(mat)
+ mat = as.matrix(mat)
+ nr = nrow(mat)
+ nc = ncol(mat)
+ fred = dotC(name="tapplymat1", mat=as.double(mat),
+ as.integer(nr), as.integer(nc), as.integer(type))
+
+ dim(fred$mat) = c(nr, nc)
+ dimnames(fred$mat) = dimnames(mat)
switch(function.arg,
- cumsum=fred$mat,
- diff=fred$mat[,-1,drop=FALSE],
- cumprod=fred$mat)
+ cumsum =fred$mat,
+ diff =fred$mat[,-1,drop=FALSE],
+ cumprod=fred$mat)
}
-ordpoisson = function(cutpoints,
- countdata=FALSE, NOS=NULL, Levels=NULL,
- init.mu = NULL, parallel=FALSE, zero=NULL,
- link="loge", earg = list()) {
+ ordpoisson = function(cutpoints,
+ countdata=FALSE, NOS=NULL, Levels=NULL,
+ init.mu = NULL, parallel=FALSE, zero=NULL,
+ link="loge", earg = list()) {
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
fcutpoints = cutpoints[is.finite(cutpoints)]
if(!is.Numeric(fcutpoints, integ=TRUE) || any(fcutpoints < 0))
- stop("\"cutpoints\" must have non-negative integer or Inf values only")
+ stop("'cutpoints' must have non-negative integer or Inf values only")
if(is.finite(cutpoints[length(cutpoints)]))
cutpoints = c(cutpoints, Inf)
if(!is.logical(countdata) || length(countdata)!=1)
- stop("\"countdata\" must be a single logical")
+ stop("'countdata' must be a single logical")
if(countdata) {
if(!is.Numeric(NOS, integ=TRUE, posit=TRUE))
- stop("\"NOS\" must have integer values only")
+ stop("'NOS' must have integer values only")
if(!is.Numeric(Levels, integ=TRUE, posit=TRUE) || any(Levels < 2))
- stop("\"Levels\" must have integer values (>= 2) only")
+ stop("'Levels' must have integer values (>= 2) only")
Levels = rep(Levels, length=NOS)
}
@@ -1367,16 +1418,18 @@ ordpoisson = function(cutpoints,
}
misc$parameters = mynames
misc$countdata = .countdata
- misc$true.mu <- FALSE # $fitted is not a true mu
+ misc$true.mu = FALSE # $fitted is not a true mu
}), list( .link=link, .countdata = countdata, .earg= earg ))),
loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
if(residuals) stop("loglikelihood residuals not implemented yet") else {
- probs = ordpoissonProbs(extra, mu)
- index0 <- y == 0
- probs[index0] = 1
- pindex0 <- probs == 0
- probs[pindex0] = 1
- sum(pindex0) * (-1.0e+10) + sum(w * y * log(probs))}},
+ probs = ordpoissonProbs(extra, mu)
+ index0 = y == 0
+ probs[index0] = 1
+ pindex0 = probs == 0
+ probs[pindex0] = 1
+ sum(pindex0) * (-1.0e+10) + sum(w * y * log(probs))
+ }
+ },
vfamily=c("ordpoisson", "vcategorical"),
deriv=eval(substitute(expression({
probs = ordpoissonProbs(extra, mu)
@@ -1385,20 +1438,19 @@ ordpoisson = function(cutpoints,
cp.vector = extra$cutpoints
NOS = extra$NOS
Levels = extra$Levels
- answer.matrix = matrix(0, n, M)
+ resmat = matrix(0, n, M)
dl.dprob = y / probs.use
dmu.deta = dtheta.deta(mu, .link, earg=.earg)
dprob.dmu = ordpoissonProbs(extra, mu, deriv=1)
cptr = 1
for(iii in 1:NOS) {
for(kkk in 1:Levels[iii]) {
- answer.matrix[,iii] = answer.matrix[,iii] +
- dl.dprob[,cptr] * dprob.dmu[,cptr]
- cptr = cptr + 1
+ resmat[,iii] = resmat[,iii] + dl.dprob[,cptr] * dprob.dmu[,cptr]
+ cptr = cptr + 1
}
}
- answer.matrix = w * answer.matrix * dmu.deta
- answer.matrix
+ resmat = w * resmat * dmu.deta
+ resmat
}), list( .link=link, .earg= earg, .countdata=countdata ))),
weight= eval(substitute(expression({
d2l.dmu2 = matrix(0, n, M) # Diagonal matrix
@@ -1459,10 +1511,11 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
-scumulative = function(link="logit", earg = list(),
- lscale="loge", escale = list(),
- parallel=FALSE, sparallel=TRUE, reverse=FALSE,
- iscale = 1)
+ if(FALSE)
+ scumulative = function(link="logit", earg = list(),
+ lscale="loge", escale = list(),
+ parallel=FALSE, sparallel=TRUE, reverse=FALSE,
+ iscale = 1)
{
stop("sorry, not working yet")
if(mode(link) != "character" && mode(link) != "name")
@@ -1472,7 +1525,7 @@ scumulative = function(link="logit", earg = list(),
lscale = as.character(substitute(lscale))
if(!is.list(escale)) escale = list()
if(!is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument \"iscale\"")
+ stop("bad input for argument 'iscale'")
new("vglmff",
@@ -1580,8 +1633,9 @@ scumulative = function(link="logit", earg = list(),
}, list( .link=link, .lscale=lscale, .reverse=reverse,
.iscale=iscale, .earg=earg, .escale=escale ))),
loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * y * log(mu)),
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ },
vfamily=c("scumulative", "vcategorical"),
deriv=eval(substitute(expression({
ooz = iter %% 2
@@ -1654,3 +1708,136 @@ scumulative = function(link="logit", earg = list(),
+margeff = function(object, subset=NULL) {
+
+ ii = ii.save = subset
+ if(!is(object, "vglm"))
+ stop("'object' is not a vglm() object")
+ if(!any(temp.logical <- is.element(c("multinomial","cumulative"),
+ object at family@vfamily)))
+ stop("'object' is not a 'multinomial' or 'cumulative' VGLM!")
+ model.multinomial = temp.logical[1]
+ if(is(object, "vgam"))
+ stop("'object' is a vgam() object")
+ if(length(object at control$xij))
+ stop("'object' contains 'xij' terms")
+ if(length(object at misc$form2))
+ stop("'object' contains 'form2' terms")
+
+ oassign = object at misc$orig.assign
+ if(any(unlist(lapply(oassign, length)) > 1))
+ warning("some terms in 'object' create more than one column of ",
+ "the LM design matrix")
+
+ nnn = object at misc$n
+ M = object at misc$M # ncol(B) # length(pvec) - 1
+
+
+ if(model.multinomial) {
+ rlev = object at misc$refLevel
+ cfit = coefvlm(object, matrix=TRUE)
+ B = if(!length(rlev)) {
+ cbind(cfit, 0)
+ } else {
+ if(rlev == M+1) { # Default
+ cbind(cfit, 0)
+ } else if(rlev == 1) {
+ cbind(0, cfit)
+ } else {
+ cbind(cfit[,1:(rlev-1)], 0, cfit[,rlev:M])
+ }
+ }
+ ppp = nrow(B)
+ pvec1 = fitted(object)[ 1,]
+ colnames(B) = if(length(names(pvec1))) names(pvec1) else
+ paste("mu", 1:(M+1), sep="")
+
+ if(is.null(ii)) {
+ BB = array(B, c(ppp, M+1, nnn))
+ pvec = c(t(fitted(object)))
+ pvec = rep(pvec, each=ppp)
+ temp1 = array(BB * pvec, c(ppp, M+1, nnn))
+ temp2 = aperm(temp1, c(2,1,3)) # (M+1) x ppp x nnn
+ temp2 = colSums(temp2) # ppp x nnn
+ temp2 = array(rep(temp2, each=M+1), c(M+1, ppp, nnn))
+ temp2 = aperm(temp2, c(2, 1, 3)) # ppp x (M+1) x nnn
+ temp3 = pvec
+ ans = array((BB - temp2) * temp3, c(ppp, M+1, nnn),
+ dimnames = list(dimnames(B)[[1]],
+ dimnames(B)[[2]], dimnames(fitted(object))[[1]]))
+ ans
+ } else
+ if(is.numeric(ii) && (length(ii) == 1)) {
+ pvec = fitted(object)[ii,]
+ temp1 = B * matrix(pvec, ppp, M+1, byrow=TRUE)
+ temp2 = matrix(rowSums(temp1), ppp, M+1)
+ temp3 = matrix(pvec, nrow(B), M+1, byrow=TRUE)
+ (B - temp2) * temp3
+ } else {
+ if(is.logical(ii))
+ ii = (1:nnn)[ii]
+
+ ans = array(0, c(ppp, M+1, length(ii)), dimnames=list(dimnames(B)[[1]],
+ dimnames(B)[[2]], dimnames(fitted(object)[ii,])[[1]]))
+ for(ilocal in 1:length(ii)) {
+ pvec = fitted(object)[ii[ilocal],]
+ temp1 = B * matrix(pvec, ppp, M+1, byrow=TRUE)
+ temp2 = matrix(rowSums(temp1), ppp, M+1)
+ temp3 = matrix(pvec, nrow(B), M+1, byrow=TRUE)
+ ans[,,ilocal] = (B - temp2) * temp3
+ }
+ ans
+ }
+ } else {
+
+ if(is.logical(is.multivariateY <- object at misc$mv) && is.multivariateY)
+ stop("cannot handle cumulative(mv=TRUE)")
+ reverse = object at misc$reverse
+ linkfunctions = object at misc$link
+ all.eargs = object at misc$earg
+ B = cfit = coefvlm(object, matrix=TRUE)
+ ppp = nrow(B)
+
+ hdot = lpmat = kronecker(predict(object), matrix(1, ppp, 1))
+ resmat = cbind(hdot, 1)
+ for(jlocal in 1:M) {
+ Cump = eta2theta(lpmat[,jlocal], link = linkfunctions[jlocal],
+ earg = all.eargs[[jlocal]])
+ hdot[,jlocal] = dtheta.deta(Cump, link = linkfunctions[jlocal],
+ earg = all.eargs[[jlocal]])
+ }
+
+ resmat[,1] = ifelse(reverse, -1, 1) * hdot[,1] * cfit[,1]
+
+ if(M > 1) {
+ resmat[,2:M] = ifelse(reverse, -1, 1) *
+ (hdot[,(2:M) ] * cfit[,(2:M) ] -
+ hdot[,(2:M)-1] * cfit[,(2:M)-1])
+ }
+
+ resmat[,M+1] = ifelse(reverse, 1, -1) * hdot[,M] * cfit[,M]
+
+ temp1 = array(resmat, c(ppp, nnn, M+1),
+ dimnames=list(dimnames(B)[[1]], dimnames(fitted(object))[[1]],
+ dimnames(fitted(object))[[2]]))
+ temp1 = aperm(temp1, c(1,3,2)) # ppp x (M+1) x nnn
+
+ if(is.null(ii)) {
+ return(temp1)
+ } else
+ if(is.numeric(ii) && (length(ii) == 1)) {
+ return(temp1[,,ii])
+ } else {
+ return(temp1[,,ii])
+ }
+ }
+}
+
+
+
+
+
+
+
+
+
diff --git a/R/family.censored.q b/R/family.censored.q
index ac1f6d3..2597d76 100644
--- a/R/family.censored.q
+++ b/R/family.censored.q
@@ -163,7 +163,7 @@ cexpon =
ecexpon = function(link="loge", location=0)
{
if(!is.Numeric(location, allow=1))
- stop("bad input for \"location\"")
+ stop("bad input for 'location'")
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -178,7 +178,7 @@ ecexpon = function(link="loge", location=0)
initialize=eval(substitute(expression({
extra$location = .location # This is passed into, e.g., link, deriv etc.
if(any(y[,1] <= extra$location))
- stop(paste("all responses must be greater than", extra$location))
+ stop("all responses must be greater than ", extra$location)
predictors.names = namesof("rate", .link, tag= FALSE)
type <- attr(y, "type")
if (type=="right" || type=="left"){
@@ -440,7 +440,7 @@ crayleigh = function(link="loge", earg = list(), expected=FALSE) {
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if(!is.logical(expected) || length(expected) != 1)
- stop("bad input for argument \"expected\"")
+ stop("bad input for argument 'expected'")
if(!is.list(earg)) earg = list()
new("vglmff",
@@ -517,9 +517,9 @@ weibull.sev = function(lshape="loge", lscale="loge",
if(mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.Numeric(imethod, allow=1, integer=TRUE, positi=TRUE) || imethod > 2)
- stop("argument \"imethod\" must be 1 or 2")
+ stop("argument 'imethod' must be 1 or 2")
if(!is.list(eshape)) eshape = list()
if(!is.list(escale)) escale = list()
if(!is.Numeric(nrfs, allow=1) || nrfs<0 || nrfs > 1)
diff --git a/R/family.circular.q b/R/family.circular.q
index 81fbd63..02813ff 100644
--- a/R/family.circular.q
+++ b/R/family.circular.q
@@ -5,17 +5,25 @@
-dcard = function(x, mu, rho) {
- if(!is.Numeric(mu) || any(mu < 0) || any(mu > 2*pi))
- stop("'mu' must be between 0 and 2*pi inclusive")
- if(!is.Numeric(rho) || max(abs(rho) > 0.5))
- stop("'rho' must be between -0.5 and 0.5 inclusive")
+
+
+
+
+dcard = function(x, mu, rho, log = FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
L = max(length(x), length(mu), length(rho))
x = rep(x, len=L); mu = rep(mu, len=L); rho = rep(rho, len=L);
- ans = (1 + 2 * rho * cos(x-mu)) / (2*pi)
- ans[x > (2*pi)] = 0
- ans[x < 0] = 0
- ans
+ logdensity = rep(log(0), len=L)
+ xok = (x > 0) & (x < (2*pi))
+ logdensity[xok] = -log(2*pi) + log1p(2 * rho[xok] * cos(x[xok]-mu[xok]))
+ logdensity[mu <= 0] = NaN
+ logdensity[mu >= 2*pi] = NaN
+ logdensity[rho <= -0.5] = NaN
+ logdensity[rho >= 0.5] = NaN
+ if(log.arg) logdensity else exp(logdensity)
}
pcard = function(q, mu, rho) {
@@ -78,20 +86,20 @@ cardioid.control <- function(save.weight=TRUE, ...)
}
-cardioid = function(lmu="elogit", lrho="elogit",
- emu=if(lmu=="elogit") list(min=0, max=2*pi) else list(),
- erho=if(lmu=="elogit") list(min=-0.5, max=0.5) else list(),
- imu=NULL, irho=0.3,
- nsimEIM=100, zero=NULL)
+ cardioid = function(lmu="elogit", lrho="elogit",
+ emu=if(lmu=="elogit") list(min=0, max=2*pi) else list(),
+ erho=if(lmu=="elogit") list(min=-0.5, max=0.5) else list(),
+ imu=NULL, irho=0.3,
+ nsimEIM=100, zero=NULL)
{
if(mode(lmu) != "character" && mode(lmu) != "name")
lmu = as.character(substitute(lmu))
if(mode(lrho) != "character" && mode(lrho) != "name")
lrho = as.character(substitute(lrho))
if(length(imu) && (!is.Numeric(imu, positive=TRUE) || any(imu > 2*pi)))
- stop("bad input for argument \"imu\"")
+ stop("bad input for argument 'imu'")
if(!is.Numeric(irho) || max(abs(irho)) > 0.5)
- stop("bad input for argument \"irho\"")
+ stop("bad input for argument 'irho'")
if(!is.list(emu)) emu = list()
if(!is.list(erho)) erho = list()
if(!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 50)
@@ -152,8 +160,9 @@ cardioid = function(lmu="elogit", lrho="elogit",
function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
mu = eta2theta(eta[,1], link= .lmu, earg= .emu)
rho = eta2theta(eta[,2], link= .lrho, earg= .erho)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (-log(2*pi) + log1p(2*rho*cos(y-mu))))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dcard(x=y, mu=mu, rho=rho, log=TRUE))
+ }
}, list( .lmu=lmu, .lrho=lrho,
.emu=emu, .erho=erho ))),
vfamily=c("cardioid"),
@@ -182,7 +191,7 @@ cardioid = function(lmu="elogit", lrho="elogit",
temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
}
wz = if(intercept.only)
- matrix(apply(run.varcov, 2, mean),
+ matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow=TRUE) else run.varcov
dtheta.detas = cbind(dmu.deta, drho.deta)
@@ -194,20 +203,21 @@ cardioid = function(lmu="elogit", lrho="elogit",
-vonmises = function(llocation="elogit",
- lscale="loge",
- elocation=if(llocation=="elogit") list(min=0, max=2*pi) else list(),
- escale=list(),
- ilocation=NULL, iscale=NULL,
- method.init=1, zero=NULL) {
+ vonmises = function(llocation="elogit",
+ lscale="loge",
+ elocation=if(llocation=="elogit") list(min=0, max=2*pi)
+ else list(),
+ escale=list(),
+ ilocation=NULL, iscale=NULL,
+ method.init=1, zero=NULL) {
if(mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
if(mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 2) stop("argument \"method.init\" must be 1 or 2")
+ method.init > 2) stop("argument 'method.init' must be 1 or 2")
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(escale)) escale = list()
new("vglmff",
diff --git a/R/family.extremes.q b/R/family.extremes.q
index 538468f..c1de526 100644
--- a/R/family.extremes.q
+++ b/R/family.extremes.q
@@ -5,7 +5,130 @@
-gev <- function(llocation="identity",
+
+
+
+
+
+
+rgev = function(n, location=0, scale=1, shape=0) {
+ use.n = if((length.n <- length(n)) > 1) length.n else
+ if(!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ stop("bad input for argument 'n'") else n
+
+ if(!is.Numeric(location))
+ stop("bad input for argument argument 'location'")
+ if(!is.Numeric(shape)) stop("bad input for argument argument 'shape'")
+
+ ans = numeric(use.n)
+ shape = rep(shape, len=use.n); location = rep(location, len=use.n);
+ scale = rep(scale, len=use.n)
+ scase = abs(shape) < sqrt(.Machine$double.eps)
+ nscase = sum(scase)
+ if(use.n - nscase)
+ ans[!scase] = location[!scase] + scale[!scase] *
+ ((-log(runif(use.n-nscase)))^(-shape[!scase]) -1) / shape[!scase]
+ if(nscase)
+ ans[scase] = rgumbel(nscase, location[scase], scale[scase])
+ ans[scale <= 0] = NaN
+ ans
+}
+
+
+
+dgev = function(x, location=0, scale=1, shape=0, log = FALSE,
+ tolshape0 = sqrt(.Machine$double.eps),
+ oobounds.log = -Inf, giveWarning=FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+ if(oobounds.log > 0)
+ stop("bad input for argument 'oobounds.log'")
+
+ if(!is.Numeric(tolshape0, allow=1, posit=TRUE))
+ stop("bad input for argument 'tolshape0'")
+ use.n = max(length(x), length(location), length(scale), length(shape))
+ shape = rep(shape, len=use.n); location = rep(location, len=use.n);
+ scale = rep(scale, len=use.n);
+ x = rep(x, len=use.n)
+
+ logdensity = rep(log(0), len=use.n)
+ scase = abs(shape) < tolshape0
+ nscase = sum(scase)
+ if(use.n - nscase) {
+ zedd = 1+shape*(x-location)/scale # pmax(0, (1+shape*xc/scale))
+ xok = (!scase) & (zedd > 0)
+ logdensity[xok] = -log(scale[xok]) - zedd[xok]^(-1/shape[xok]) -
+ (1 + 1/shape[xok]) * log(zedd[xok])
+ outofbounds = (!scase) & (zedd <= 0)
+ if(any(outofbounds)) {
+ logdensity[outofbounds] = oobounds.log
+ no.oob = sum(outofbounds)
+ if(giveWarning)
+ warning(no.oob, " observation",
+ ifelse(no.oob > 1, "s are", " is"), " out of bounds")
+ }
+ }
+ if(nscase) {
+ logdensity[scase] = dgumbel(x[scase], loc=location[scase],
+ sc=scale[scase], log=TRUE)
+ }
+
+ logdensity[scale <= 0] = NaN
+ if(log.arg) logdensity else exp(logdensity)
+}
+
+
+
+pgev = function(q, location=0, scale=1, shape=0) {
+ if(!is.Numeric(q)) stop("bad input for argument 'q'")
+ if(!is.Numeric(location)) stop("bad input for argument 'location'")
+ if(!is.Numeric(shape)) stop("bad input for argument 'shape'")
+
+ use.n = max(length(q), length(location), length(scale), length(shape))
+ ans = numeric(use.n)
+ shape = rep(shape, len=use.n); location = rep(location, len=use.n);
+ scale = rep(scale, len=use.n); q = rep(q-location, len=use.n)
+ scase = abs(shape) < sqrt(.Machine$double.eps)
+ nscase = sum(scase)
+ if(use.n - nscase) {
+ zedd = pmax(0,(1+shape*q/scale))
+ ans[!scase] = exp(-zedd[!scase]^(-1/shape[!scase]))
+ }
+ if(nscase)
+ ans[scase] = pgumbel(q[scase], location[scase], scale[scase])
+ ans[scale <= 0] = NaN
+ ans
+}
+
+
+
+qgev = function(p, location=0, scale=1, shape=0) {
+ if(!is.Numeric(p, posit=TRUE) || any(p >= 1)) stop("0 < p < 1 is required")
+ if(!is.Numeric(location)) stop("bad input for argument 'location'")
+ if(!is.Numeric(shape)) stop("bad input for argument 'shape'")
+
+ use.n = max(length(p), length(location), length(scale), length(shape))
+ ans = numeric(use.n)
+ shape = rep(shape, len=use.n); location = rep(location, len=use.n);
+ scale = rep(scale, len=use.n); p = rep(p, len=use.n)
+ scase = abs(shape) < sqrt(.Machine$double.eps)
+ nscase = sum(scase)
+ if(use.n - nscase) {
+ ans[!scase] = location[!scase] + scale[!scase] *
+ ((-log(p[!scase]))^(-shape[!scase]) -1) / shape[!scase]
+ }
+ if(nscase)
+ ans[scase] = qgumbel(p[scase], location[scase], scale[scase])
+ ans[scale <= 0] = NaN
+ ans
+}
+
+
+
+
+
+ gev = function(llocation="identity",
lscale="loge",
lshape="logoff",
elocation = list(),
@@ -14,14 +137,17 @@ gev <- function(llocation="identity",
if(lshape=="elogit") list(min=-0.5, max=0.5) else list(),
percentiles=c(95,99),
iscale=NULL, ishape=NULL,
- method.init=1, gshape=c(-0.45, 0.45), tshape0=0.001,
+ method.init=1, gshape=c(-0.45, 0.45),
+ tolshape0=0.001, giveWarning=TRUE,
zero=3)
{
+ if(!is.logical(giveWarning) || length(giveWarning) != 1)
+ stop("bad input for argument 'giveWarning'")
mean = FALSE
if(length(iscale) && !is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument \"iscale\"")
+ stop("bad input for argument 'iscale'")
if(mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if(mode(llocation) != "character" && mode(llocation) != "name")
@@ -30,18 +156,18 @@ gev <- function(llocation="identity",
lshape = as.character(substitute(lshape))
if(!mean && length(percentiles) &&
(!is.Numeric(percentiles, posit=TRUE) || max(percentiles) >= 100))
- stop("bad input for argument \"percentiles\"")
+ stop("bad input for argument 'percentiles'")
if(!is.Numeric(method.init, allow=1, posit=TRUE, integer=TRUE) ||
method.init > 2.5)
- stop("argument \"method.init\" must be 1 or 2")
+ stop("argument 'method.init' must be 1 or 2")
if(length(ishape) && !is.Numeric(ishape))
- stop("bad input for argument \"ishape\"")
- if(!is.Numeric(tshape0, allow=1, posit=TRUE) || tshape0 > 0.1)
- stop("bad input for argument \"tshape0\"")
+ stop("bad input for argument 'ishape'")
+ if(!is.Numeric(tolshape0, allow=1, posit=TRUE) || tolshape0 > 0.1)
+ stop("bad input for argument 'tolshape0'")
if(!is.Numeric(gshape, allow=2) || gshape[1] >= gshape[2])
- stop("bad input for argument \"gshape\"")
+ stop("bad input for argument 'gshape'")
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(elocation)) elocation = list()
if(!is.list(escale)) escale = list()
if(!is.list(eshape)) eshape = list()
@@ -64,86 +190,81 @@ gev <- function(llocation="identity",
if(ncol(y) > 1)
y = -t(apply(-y, 1, sort, na.last=TRUE))
- sum1 = function(mat) {
- if(!is.matrix(mat)) mat = as.matrix(mat)
- apply(mat, 1, sum, na.rm=TRUE)
- }
- r.vec = sum1(!is.na(y))
+ r.vec = rowSums(cbind(!is.na(y)))
+
+
if(any(r.vec == 0))
stop("A row contains all missing values")
extra$percentiles = .percentiles
if(!length(etastart)) {
- siginit= if(length( .iscale)) rep( .iscale, len=nrow(y)) else NULL
- xiinit = if(length( .ishape)) rep( .ishape, len=nrow(y)) else NULL
+ init.sig= if(length( .iscale)) rep( .iscale, len=nrow(y)) else NULL
+ init.xi = if(length( .ishape)) rep( .ishape, len=nrow(y)) else NULL
eshape = .eshape
- if( .lshape=="elogit" && length(xiinit) &&
- (any(xiinit <= eshape$min | xiinit >= eshape$max)))
- stop("bad input for argument \"eshape\"")
+ if( .lshape=="elogit" && length(init.xi) &&
+ (any(init.xi <= eshape$min | init.xi >= eshape$max)))
+ stop("bad input for argument 'eshape'")
if( .method.init == 1) {
nvector = 4:10 # Arbitrary; could be made an argument
ynvector = quantile(y[,1], probs = 1-1/nvector)
objecFunction = -Inf # Actually the log-likelihood
- est.sigma = !length(siginit)
+ est.sigma = !length(init.sig)
gshape = .gshape
- temp234 = if(length(xiinit)) xiinit[1] else
+ temp234 = if(length(init.xi)) init.xi[1] else
seq(gshape[1], gshape[2], len=12)
for(xi.try in temp234) {
- xvec = if(abs(xi.try) < .tshape0) log(nvector) else
+ xvec = if(abs(xi.try) < .tolshape0) log(nvector) else
(nvector^xi.try - 1) / xi.try
fit0 = lsfit(x=xvec, y=ynvector, intercept=TRUE)
- if(est.sigma) {
- sigmaTry = rep(fit0$coef["X"], len=nrow(y))
- } else {
- sigmaTry = siginit
- }
+ sigmaTry = if(est.sigma)
+ rep(fit0$coef["X"], len=nrow(y)) else init.sig
muTry = rep(fit0$coef["Intercept"], len=nrow(y))
- llTry = egev()@loglikelihood(mu=NULL,y=y[,1],w=w,
- residuals=FALSE, extra=list(giveWarning=FALSE),
- eta=cbind(theta2eta(muTry, .llocation, earg= .elocation),
- theta2eta(sigmaTry, .lscale, earg= .escale),
- theta2eta(xi.try, link= .lshape,
- earg= .eshape)))
+ llTry = egev(giveWarning=
+ FALSE)@loglikelihood(mu=NULL, y=y[,1], w=w,
+ residuals=FALSE,
+ eta=cbind(theta2eta(muTry, .llocation,earg= .elocation),
+ theta2eta(sigmaTry, .lscale,earg= .escale),
+ theta2eta(xi.try, link= .lshape, earg= .eshape)))
if(llTry >= objecFunction) {
if(est.sigma)
- siginit = sigmaTry
- muinit = rep(muTry, len=nrow(y))
+ init.sig = sigmaTry
+ init.mu = rep(muTry, len=nrow(y))
objecFunction = llTry
bestxi = xi.try
}
}
- if(!length(xiinit))
- xiinit = rep(bestxi, len=nrow(y))
+ if(!length(init.xi))
+ init.xi = rep(bestxi, len=nrow(y))
} else {
- xiinit = rep(0.05, len=nrow(y))
- if(!length(siginit))
- siginit = rep(sqrt(6 * var(y[,1]))/pi, len=nrow(y))
+ init.xi = rep(0.05, len=nrow(y))
+ if(!length(init.sig))
+ init.sig = rep(sqrt(6 * var(y[,1]))/pi, len=nrow(y))
EulerM = -digamma(1)
- muinit = rep(median(y[,1]) - EulerM*siginit, len=nrow(y))
+ init.mu = rep(median(y[,1]) - EulerM*init.sig, len=nrow(y))
}
- bad = ((1 + xiinit*(y-muinit)/siginit) <= 0)
- if(fred <- sum(sum1(bad))) {
+ bad = ((1 + init.xi*(y-init.mu)/init.sig) <= 0)
+ if(fred <- sum(bad)) {
warning(paste(fred, "observations violating boundary",
"constraints while initializing. Taking corrective action."))
- xiinit[bad] = ifelse(y[bad] > muinit[bad], 0.1, -0.1)
+ init.xi[bad] = ifelse(y[bad] > init.mu[bad], 0.1, -0.1)
}
- etastart = cbind(theta2eta(muinit, .llocation, earg= .elocation),
- theta2eta(siginit, .lscale, earg= .escale),
- theta2eta(xiinit, .lshape, earg= .eshape))
+ etastart = cbind(theta2eta(init.mu, .llocation, earg= .elocation),
+ theta2eta(init.sig, .lscale, earg= .escale),
+ theta2eta(init.xi, .lshape, earg= .eshape))
}
}), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
.percentiles=percentiles,
.elocation = elocation, .escale = escale,
- .eshape= eshape, .tshape0=tshape0,
- .method.init=method.init,
+ .eshape= eshape, .tolshape0=tolshape0,
+ .method.init=method.init, .giveWarning= giveWarning,
.iscale=iscale, .ishape=ishape, .gshape=gshape ))),
inverse=eval(substitute(function(eta, extra=NULL) {
loc = eta2theta(eta[,1], .llocation, earg= .elocation)
sigma = eta2theta(eta[,2], .lscale, earg= .escale)
xi = eta2theta(eta[,3], .lshape, earg= .eshape)
- iszero = (abs(xi) < .tshape0)
+ iszero = (abs(xi) < .tolshape0)
cent = extra$percentiles
lp = length(cent)
fv = matrix(as.numeric(NA), nrow(eta), lp)
@@ -165,65 +286,62 @@ gev <- function(llocation="identity",
}
fv
}, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .eshape= eshape, .tshape0=tshape0 ))),
+ .eshape= eshape, .tolshape0=tolshape0 ))),
last=eval(substitute(expression({
misc$links = c(location = .llocation, scale = .lscale, shape =.lshape)
misc$true.mu = !length( .percentiles) # @fitted is not a true mu
misc$percentiles = .percentiles
misc$earg= list(location= .elocation, scale= .escale, shape= .eshape)
misc$expected = TRUE
- misc$tshape0 = .tshape0
+ misc$tolshape0 = .tolshape0
if(ncol(y)==1)
y = as.vector(y)
if(any(xi < -0.5))
warning("some values of the shape parameter are less than -0.5")
}), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
.elocation = elocation, .escale = escale, .eshape= eshape,
- .tshape0=tshape0, .percentiles=percentiles ))),
+ .tolshape0=tolshape0, .percentiles=percentiles ))),
loglikelihood=eval(substitute(
- function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
- mmu = eta2theta(eta[,1], .llocation, earg= .elocation)
- sigma = eta2theta(eta[,2], .lscale, earg= .escale)
- xi = eta2theta(eta[,3], .lshape, earg= .eshape)
- iszero = (abs(xi) < .tshape0)
- sum1 = function(mat) {
- if(!is.matrix(mat)) mat = as.matrix(mat)
- apply(mat, 1, sum, na.rm=TRUE)
- }
- zedd = (y-mmu) / sigma
- r.vec = sum1(!is.na(y))
- A = 1 + xi * (y-mmu)/sigma
- ii = 1:nrow(eta)
- A1 = A[cbind(ii, r.vec)]
- mytolerance = 0 # .Machine$double.eps
- if(any(bad <- (A1 <= mytolerance), na.rm=TRUE)) {
- cat("There are",sum(bad),"range violations in @loglikelihood\n")
- if(exists("flush.console")) flush.console()
- }
- igev = !iszero & !bad
- igum = iszero & !bad
- pow = 1 + 1/xi[igev]
- if(residuals) stop("loglikelihood residuals not implemented yet") else
+ function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
+ mmu = eta2theta(eta[,1], .llocation, earg= .elocation)
+ sigma = eta2theta(eta[,2], .lscale, earg= .escale)
+ xi = eta2theta(eta[,3], .lshape, earg= .eshape)
+ iszero = (abs(xi) < .tolshape0)
+ zedd = (y-mmu) / sigma
+ r.vec = rowSums(cbind(!is.na(y)))
+ A = 1 + xi * (y-mmu)/sigma
+ ii = 1:nrow(eta)
+ A1 = A[cbind(ii, r.vec)]
+ mytolerance = 0 # .Machine$double.eps
+ if(any(bad <- (A1 <= mytolerance), na.rm=TRUE)) {
+ cat("There are",sum(bad),"range violations in @loglikelihood\n")
+ flush.console()
+ }
+ igev = !iszero & !bad
+ igum = iszero & !bad
+ pow = 1 + 1/xi[igev]
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+
+ old.answer =
sum(bad) * (-1.0e10) +
sum(w[igum] * (-r.vec[igum]*log(sigma[igum]) -
- exp(-zedd[igum,r.vec]) - sum1(zedd))) +
+ exp(-zedd[igum,r.vec]) -
+ rowSums(cbind(zedd, na.rm=TRUE)))) +
sum(w[igev] * (-r.vec[igev]*log(sigma[igev]) -
- pow*sum1(log(A[igev])) -
+ pow*rowSums(cbind(log(A[igev])), na.rm=TRUE) -
A1[igev]^(-1/xi[igev])))
- }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation = elocation, .escale = escale, .eshape= eshape,
- .tshape0=tshape0 ))),
+ old.answer
+ }
+ }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
+ .elocation = elocation, .escale = escale, .eshape= eshape,
+ .giveWarning=giveWarning, .tolshape0=tolshape0 ))),
vfamily=c("gev", "vextremes"),
deriv=eval(substitute(expression({
- sum1 = function(mat) {
- if(!is.matrix(mat)) mat = as.matrix(mat)
- apply(mat, 1, sum, na.rm=TRUE)
- }
- r.vec = sum1(!is.na(y))
+ r.vec = rowSums(cbind(!is.na(y)))
mmu = eta2theta(eta[,1], .llocation, earg= .elocation)
sigma = eta2theta(eta[,2], .lscale, earg= .escale)
xi = eta2theta(eta[,3], .lshape, earg= .eshape)
- iszero = (abs(xi) < .tshape0)
+ iszero = (abs(xi) < .tolshape0)
ii = 1:nrow(eta)
zedd = (y-mmu) / sigma
A = 1 + xi * zedd
@@ -233,14 +351,16 @@ gev <- function(llocation="identity",
pow = 1 + 1/xi
A1 = A[cbind(ii, r.vec)]
- AAr1 = dA.dmu/(xi * A1^pow) - pow * sum1(dA.dmu/A)
+ AAr1 = dA.dmu/(xi * A1^pow) - pow * rowSums(cbind(dA.dmu/A), na.rm=TRUE)
AAr2 = dA.dsigma[cbind(ii,r.vec)] / (xi * A1^pow) -
- pow * sum1(dA.dsigma/A)
- AAr3 = 1/(xi * A1^pow) - pow * sum1(dA.dsigma/A)
+ pow * rowSums(cbind(dA.dsigma/A), na.rm=TRUE)
+ AAr3 = 1/(xi * A1^pow) - pow * rowSums(cbind(dA.dsigma/A), na.rm=TRUE)
dl.dmu = AAr1
dl.dsi = AAr2 - r.vec/sigma
- dl.dxi = sum1(log(A))/xi^2 - pow * sum1(dA.dxi/A) - (log(A1) /
- xi^2 - dA.dxi[cbind(ii,r.vec)] /(xi*A1)) * A1^(-1/xi)
+ dl.dxi = rowSums(cbind(log(A)), na.rm=TRUE)/xi^2 -
+ pow * rowSums(cbind(dA.dxi/A), na.rm=TRUE) -
+ (log(A1) / xi^2 -
+ dA.dxi[cbind(ii,r.vec)] / (xi*A1)) * A1^(-1/xi)
if(any(iszero)) {
zorro = c(zedd[cbind(1:n,r.vec)])
@@ -256,7 +376,7 @@ gev <- function(llocation="identity",
w * cbind(dl.dmu * dmu.deta, dl.dsi * dsi.deta, dl.dxi * dxi.deta)
}), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
.elocation = elocation, .escale = escale, .eshape= eshape,
- .tshape0=tshape0 ))),
+ .tolshape0=tolshape0 ))),
weight=eval(substitute(expression({
kay = -xi
dd = digamma(r.vec-kay+1)
@@ -313,7 +433,6 @@ gev <- function(llocation="identity",
-
dgammadx = function(x, deriv.arg=1) {
if(deriv.arg==0) {
gamma(x)
@@ -334,21 +453,24 @@ dgammadx = function(x, deriv.arg=1) {
-egev <- function(llocation="identity",
+
+ egev = function(llocation="identity",
lscale="loge",
lshape="logoff",
- elocation = list(),
- escale = list(),
- eshape = if(lshape=="logoff") list(offset=0.5) else
- if(lshape=="elogit") list(min=-0.5, max=0.5) else list(),
+ elocation = list(),
+ escale = list(),
+ eshape = if(lshape=="logoff") list(offset=0.5) else
+ if(lshape=="elogit") list(min=-0.5, max=0.5) else list(),
percentiles=c(95,99),
iscale=NULL, ishape=NULL,
method.init=1, gshape=c(-0.45, 0.45),
- tshape0=0.001,
+ tolshape0=0.001, giveWarning=TRUE,
zero=3)
{
+ if(!is.logical(giveWarning) || length(giveWarning) != 1)
+ stop("bad input for argument 'giveWarning'")
if(length(iscale) && !is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument \"iscale\"")
+ stop("bad input for argument 'iscale'")
if(mode(lscale) != "character" && mode(lscale) != "name")
lscale <- as.character(substitute(lscale))
if(mode(llocation) != "character" && mode(llocation) != "name")
@@ -356,19 +478,19 @@ egev <- function(llocation="identity",
if(mode(lshape) != "character" && mode(lshape) != "name")
lshape <- as.character(substitute(lshape))
if(!is.Numeric(gshape, allow=2) || gshape[1] >= gshape[2])
- stop("bad input for argument \"gshape\"")
+ stop("bad input for argument 'gshape'")
if(length(percentiles) &&
(!is.Numeric(percentiles, posit=TRUE) || max(percentiles) >= 100))
- stop("bad input for argument \"percentiles\"")
+ stop("bad input for argument 'percentiles'")
if(!is.Numeric(method.init, allow=1, posit=TRUE, integer=TRUE) ||
method.init > 2.5)
- stop("argument \"method.init\" must be 1 or 2")
+ stop("argument 'method.init' must be 1 or 2")
if(length(ishape) && !is.Numeric(ishape))
- stop("bad input for argument \"ishape\"")
- if(!is.Numeric(tshape0, allow=1, posit=TRUE) || tshape0 > 0.1)
- stop("bad input for argument \"tshape0\"")
+ stop("bad input for argument 'ishape'")
+ if(!is.Numeric(tolshape0, allow=1, posit=TRUE) || tolshape0 > 0.1)
+ stop("bad input for argument 'tolshape0'")
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(elocation)) elocation = list()
if(!is.list(escale)) escale = list()
if(!is.list(eshape)) eshape = list()
@@ -390,76 +512,79 @@ egev <- function(llocation="identity",
if(ncol(as.matrix(y)) != 1)
stop("response must be a vector or one-column matrix")
if(!length(etastart)) {
- siginit= if(length( .iscale)) rep( .iscale, len=length(y)) else NULL
- xiinit = if(length( .ishape)) rep( .ishape, len=length(y)) else NULL
+ init.sig= if(length( .iscale)) rep( .iscale, len=length(y)) else NULL
+ init.xi = if(length( .ishape)) rep( .ishape, len=length(y)) else NULL
eshape = .eshape
- if( .lshape == "elogit" && length(xiinit) &&
- (any(xiinit <= eshape$min | xiinit >= eshape$max)))
- stop("bad input for argument \"eshape\"")
+ if( .lshape == "elogit" && length(init.xi) &&
+ (any(init.xi <= eshape$min | init.xi >= eshape$max)))
+ stop("bad input for argument 'eshape'")
if( .method.init == 1) {
nvector = 4:10 # Arbitrary; could be made an argument
ynvector = quantile(y, probs = 1-1/nvector)
objecFunction = -Inf # Actually the log-likelihood
- est.sigma = !length(siginit)
+ est.sigma = !length(init.sig)
gshape = .gshape
- temp234 = if(length(xiinit)) xiinit[1] else
+ temp234 = if(length(init.xi)) init.xi[1] else
seq(gshape[1], gshape[2], len=12)
for(xi.try in temp234) {
- xvec = if(abs(xi.try) < .tshape0) log(nvector) else
+ xvec = if(abs(xi.try) < .tolshape0) log(nvector) else
(nvector^xi.try - 1) / xi.try
fit0 = lsfit(x=xvec, y=ynvector, intercept=TRUE)
if(est.sigma) {
sigmaTry = rep(fit0$coef["X"], len=length(y))
} else {
- sigmaTry = siginit
+ sigmaTry = init.sig
}
muTry = rep(fit0$coef["Intercept"], len=length(y))
- llTry = egev()@loglikelihood(mu=NULL,y=y,w=w,
- residuals=FALSE, extra=list(giveWarning=FALSE),
- eta=cbind(theta2eta(muTry, .llocation,earg= .elocation),
- theta2eta(sigmaTry, .lscale, earg= .escale),
- theta2eta(xi.try, .lshape, earg= .eshape)))
+ llTry = egev(giveWarning=
+ FALSE)@loglikelihood(mu=NULL, y=y, w=w,
+ residuals=FALSE,
+ eta=cbind(theta2eta(muTry, .llocation, earg= .elocation),
+ theta2eta(sigmaTry, .lscale, earg= .escale),
+ theta2eta(xi.try, .lshape, earg= .eshape)))
if(llTry >= objecFunction) {
if(est.sigma)
- siginit = sigmaTry
- muinit = rep(muTry, len=length(y))
+ init.sig = sigmaTry
+ init.mu = rep(muTry, len=length(y))
objecFunction = llTry
bestxi = xi.try
}
}
- if(!length(xiinit))
- xiinit = rep(bestxi, len=length(y))
+ if(!length(init.xi))
+ init.xi = rep(bestxi, len=length(y))
} else {
- xiinit = rep(if(length(xiinit)) xiinit else 0.05, len=length(y))
- if(!length(siginit))
- siginit = rep(sqrt(6*var(y))/pi, len=length(y))
+ init.xi = rep(if(length(init.xi)) init.xi else 0.05,
+ len=length(y))
+ if(!length(init.sig))
+ init.sig = rep(sqrt(6*var(y))/pi, len=length(y))
EulerM = -digamma(1)
- muinit = rep(median(y) - EulerM * siginit, len=length(y))
+ init.mu = rep(median(y) - EulerM * init.sig, len=length(y))
}
- bad <- (1 + xiinit*(y-muinit)/siginit <= 0)
+ bad <- (1 + init.xi*(y-init.mu)/init.sig <= 0)
if(fred <- sum(bad, na.rm=TRUE)) {
warning(paste(fred, "observations violating boundary",
"constraints while initializing. Taking corrective action."))
- xiinit[bad] = ifelse(y[bad] > muinit[bad], 0.01, -0.01)
+ init.xi[bad] = ifelse(y[bad] > init.mu[bad], 0.01, -0.01)
}
extra$percentiles = .percentiles
- etastart = cbind(theta2eta(muinit, .llocation, earg= .elocation),
- theta2eta(siginit, .lscale, earg= .escale),
- theta2eta(xiinit, .lshape, earg= .eshape))
+ etastart = cbind(theta2eta(init.mu, .llocation, earg= .elocation),
+ theta2eta(init.sig, .lscale, earg= .escale),
+ theta2eta(init.xi, .lshape, earg= .eshape))
}
}), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .percentiles=percentiles, .tshape0=tshape0,
+ .percentiles=percentiles, .tolshape0=tolshape0,
.elocation = elocation, .escale = escale, .eshape= eshape,
.method.init=method.init,
+ .giveWarning= giveWarning,
.iscale=iscale, .ishape=ishape, .gshape=gshape ))),
inverse=eval(substitute(function(eta, extra=NULL) {
loc <- eta2theta(eta[,1], .llocation, earg= .elocation)
sigma <- eta2theta(eta[,2], .lscale, earg= .escale)
xi <- eta2theta(eta[,3], .lshape, earg= .eshape)
- iszero <- (abs(xi) < .tshape0)
+ iszero <- (abs(xi) < .tolshape0)
cent = extra$percentiles
lp <- length(cent)
fv <- matrix(as.numeric(NA), nrow(eta), lp)
@@ -482,60 +607,48 @@ egev <- function(llocation="identity",
fv
}, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
.elocation = elocation, .escale = escale, .eshape= eshape,
- .tshape0=tshape0 ))),
+ .tolshape0=tolshape0 ))),
last=eval(substitute(expression({
misc$links <- c(location = .llocation, scale = .lscale, shape = .lshape)
misc$true.mu = !length( .percentiles) # @fitted is not a true mu
misc$percentiles <- .percentiles
misc$earg= list(location= .elocation, scale= .escale, shape= .eshape)
- misc$tshape0 = .tshape0
+ misc$tolshape0 = .tolshape0
misc$expected = TRUE
if(any(xi < -0.5))
warning("some values of the shape parameter are less than -0.5")
}), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
.elocation = elocation, .escale = escale, .eshape= eshape,
- .tshape0=tshape0, .percentiles=percentiles ))),
+ .tolshape0=tolshape0, .percentiles=percentiles ))),
loglikelihood=eval(substitute(
- function(mu,y,w, residuals=FALSE, eta, extra=NULL) {
- giveWarning = if(length(extra$giveWarning)) extra$giveWarning else
- FALSE
- mmu <- eta2theta(eta[,1], .llocation, earg= .elocation )
- sigma <- eta2theta(eta[,2], .lscale, earg= .escale )
- xi <- eta2theta(eta[,3], .lshape, earg= .eshape )
- zedd <- (y-mmu) / sigma
- A <- 1 + xi * zedd
- if(any(iszero <- (abs(xi) < .tshape0))) {
- }
- mytolerance = 0 # .Machine$double.eps
- bad <- (A<=mytolerance) # Range violation
- if(any(bad) && giveWarning) {
- cat("There are",sum(bad),"range violations in @loglikelihood\n")
- if(exists("flush.console")) flush.console()
- }
- igev = !iszero & !bad
- igum = iszero & !bad
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(bad) * (-1.0e10) +
- sum(w[igum] * (-log(sigma[igum]) - zedd[igum] - exp(-zedd[igum]))) +
- sum(w[igev] * (-log(sigma[igev]) - (1 + 1/xi[igev])*log(A[igev]) -
- A[igev]^(-1/xi[igev])))
- }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation = elocation, .escale = escale, .eshape= eshape,
- .tshape0=tshape0 ))),
+ function(mu,y,w, residuals=FALSE, eta, extra=NULL) {
+ mmu <- eta2theta(eta[,1], .llocation, earg= .elocation )
+ sigma <- eta2theta(eta[,2], .lscale, earg= .escale )
+ xi <- eta2theta(eta[,3], .lshape, earg= .eshape )
+
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dgev(x=y, location=mmu, scale=sigma, shape=xi,
+ tolshape0 = .tolshape0,
+ log=TRUE, oobounds.log = -1.0e04,
+ giveWarning= .giveWarning))
+ }
+ }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
+ .elocation = elocation, .escale = escale, .eshape= eshape,
+ .giveWarning= giveWarning, .tolshape0=tolshape0 ))),
vfamily=c("egev", "vextremes"),
deriv=eval(substitute(expression({
mmu = eta2theta(eta[,1], .llocation, earg= .elocation)
sigma = eta2theta(eta[,2], .lscale, earg= .escale )
xi = eta2theta(eta[,3], .lshape, earg= .eshape)
- iszero <- (abs(xi) < .tshape0)
+ iszero <- (abs(xi) < .tolshape0)
zedd = (y-mmu) / sigma
A = 1 + xi * zedd
dA.dxi = zedd
dA.dmu = -xi / sigma
dA.dsigma = -xi * zedd / sigma
pow = 1 + 1/xi
- if(any(bad <- A<=0, na.rm=TRUE)) stop(paste(sum(bad, na.rm=TRUE),
- "observations violating boundary constraints in @deriv"))
+ if(any(bad <- A<=0, na.rm=TRUE)) stop(sum(bad, na.rm=TRUE),
+ " observations violating boundary constraints in '@deriv'")
AA = 1/(xi*A^pow)- pow/A
dl.dmu = dA.dmu * AA
dl.dsi = dA.dsigma * AA - 1/sigma
@@ -545,7 +658,7 @@ egev <- function(llocation="identity",
ezedd = exp(-zedd[iszero])
dl.dmu[iszero] = (1-ezedd) / sigma[iszero]
dl.dsi[iszero] = (zedd[iszero] * (1-ezedd) - 1) / sigma[iszero]
- dl.dxi[iszero] = zedd[iszero] * ((1 - ezedd) * zedd[iszero] / 2 - 1)
+ dl.dxi[iszero] = zedd[iszero] * ((1 - ezedd) * zedd[iszero] / 2 -1)
}
dmu.deta = dtheta.deta(mmu, .llocation, earg= .elocation)
dsi.deta = dtheta.deta(sigma, .lscale, earg= .escale )
@@ -553,15 +666,15 @@ egev <- function(llocation="identity",
w * cbind(dl.dmu * dmu.deta, dl.dsi * dsi.deta, dl.dxi*dxi.deta)
}), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
.elocation = elocation, .escale = escale, .eshape= eshape,
- .tshape0=tshape0 ))),
+ .tolshape0=tolshape0 ))),
weight=eval(substitute(expression({
bad <- A <= 0
- if(any(bad, na.rm = TRUE)) stop(paste(sum(bad, na.rm = TRUE),
- "observations violating boundary constraints in @weight"))
+ if(any(bad, na.rm = TRUE)) stop(sum(bad, na.rm=TRUE),
+ " observations violating boundary constraints in '@weight'")
kay = -xi # for the formulae
- kay[abs(kay-0.5) < .tshape0] = 0.501
+ kay[abs(kay-0.5) < .tolshape0] = 0.501
temp100 = gamma(2-kay)
- pp = (1-kay)^2 * gamma(1-2*kay) # gamma(0) is undefined so kay != 0.5
+ pp = (1-kay)^2 * gamma(1-2*kay) # gamma(0) is undefined so kay != 0.5
qq = temp100 * (digamma(1-kay) - (1-kay)/kay)
wz = matrix(as.numeric(NA), n, 6)
wz[,iam(1,1,M)] = pp / sigma^2
@@ -580,7 +693,7 @@ egev <- function(llocation="identity",
wz[iszero,iam(1,3,M)]= -(trigamma(1)/2 + digamma(1)*
(digamma(1)/2+1))/sigma
wz[iszero,iam(2,3,M)] = (-dgammadx(2,3)/6 + dgammadx(1,1) +
- 2*dgammadx(1,2) + 2*dgammadx(1,3)/3) / sigma
+ 2*dgammadx(1,2) + 2*dgammadx(1,3)/3)/sigma
}
wz[,iam(1,1,M)] = wz[,iam(1,1,M)] * dmu.deta^2
wz[,iam(2,2,M)] = wz[,iam(2,2,M)] * dsi.deta^2
@@ -589,40 +702,51 @@ egev <- function(llocation="identity",
wz[,iam(1,3,M)] = wz[,iam(1,3,M)] * dmu.deta * (-dxi.deta)
wz[,iam(2,3,M)] = wz[,iam(2,3,M)] * dsi.deta * (-dxi.deta)
w * wz
- }), list( .eshape= eshape, .tshape0=tshape0 ))))
+ }), list( .eshape= eshape, .tolshape0=tolshape0 ))))
}
-
rgumbel = function(n, location=0, scale=1) {
- if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE))
- stop("bad input for argument \"n\"")
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
- location - scale * log(-log(runif(n)))
+ use.n = if((length.n <- length(n)) > 1) length.n else
+ if(!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ stop("bad input for argument 'n'") else n
+
+ answer = location - scale * log(-log(runif(use.n)))
+ answer[scale <= 0] = NaN
+ answer
}
-dgumbel = function(x, location=0, scale=1) {
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
- temp = exp(-(x-location) / scale)
- temp * exp(-temp) / scale
+dgumbel = function(x, location=0, scale=1, log = FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ zedd = (x - location) / scale
+ logdensity = -zedd - exp(-zedd) - log(scale)
+ if(log.arg) logdensity else exp(logdensity)
}
qgumbel = function(p, location=0, scale=1) {
- if(!is.Numeric(p, posit=TRUE) || any(p >= 1)) stop("0 < p < 1 is required")
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
- location - scale * log(-log(p))
+ answer = location - scale * log(-log(p))
+ answer[scale <= 0] = NaN
+ answer[p < 0] = NaN
+ answer[p > 1] = NaN
+ answer[p == 0] = -Inf
+ answer[p == 1] = Inf
+ answer
}
pgumbel = function(q, location=0, scale=1) {
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
- exp(-exp(-(q-location) / scale))
+ answer = exp(-exp(-(q-location) / scale))
+ answer[scale <= 0] = NaN
+ answer
}
-gumbel <- function(llocation="identity",
+ gumbel = function(llocation="identity",
lscale="loge",
elocation = list(),
escale = list(),
@@ -635,14 +759,14 @@ gumbel <- function(llocation="identity",
if(mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if(!is.logical(mpv) || length(mpv) != 1)
- stop("bad input for argument \"mpv\"")
+ stop("bad input for argument 'mpv'")
if(length(percentiles) &&
(!is.Numeric(percentiles, posit=TRUE) || max(percentiles) >= 100))
- stop("bad input for argument \"percentiles\"")
+ stop("bad input for argument 'percentiles'")
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(length(iscale) && !is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument \"iscale\"")
+ stop("bad input for argument 'iscale'")
if(!is.list(elocation)) elocation = list()
if(!is.list(escale)) escale = list()
@@ -660,19 +784,14 @@ gumbel <- function(llocation="identity",
namesof("scale", .lscale, earg= .escale , short=TRUE))
y = as.matrix(y)
if(ncol(y) > 1)
- y = if(is.R()) -t(apply(-y,1,sort, na.last = TRUE)) else {
- y[is.na(y)] = -Inf
- y = -t(apply(-y,1,sort, na.last = TRUE))
- y[y == -Inf] = NA
- as.matrix(y)
- }
- r.vec = as.vector((!is.na(y)) %*% rep(1,ncol(y))) # r_i vector
- if(any(r.vec==0))
+ y = -t(apply(-y, 1, sort, na.last=TRUE))
+ r.vec = rowSums(cbind(!is.na(y)))
+ if(any(r.vec == 0))
stop("There is at least one row of the response containing all NAs")
if(ncol(y) > 1) {
yiri = y[cbind(1:nrow(y), r.vec)]
sc.init = if(is.Numeric( .iscale, posit=TRUE))
- .iscale else {3*(apply(y, 1, mean, na.rm=TRUE) - yiri)}
+ .iscale else {3 * (rowMeans(y, na.rm=TRUE) - yiri)}
sc.init = rep(sc.init, length=nrow(y))
sc.init[sc.init <= 0.0001] = 1 # Used to be .iscale
loc.init = yiri + sc.init * log(r.vec)
@@ -734,10 +853,10 @@ gumbel <- function(llocation="identity",
.mpv=mpv, .R=R ))),
vfamily=c("gumbel", "vextremes"),
loglikelihood=eval(substitute(
- function(mu,y,w, residuals=FALSE, eta, extra=NULL) {
+ function(mu,y,w, residuals=FALSE, eta, extra=NULL) {
loc = eta2theta(eta[,1], .llocation, earg= .elocation)
sigma = eta2theta(eta[,2], .lscale, earg= .escale )
- r.vec = as.vector((!is.na(y)) %*% rep(1,ncol(y))) # r_i vector
+ r.vec = rowSums(cbind(!is.na(y)))
yiri = y[cbind(1:nrow(y),r.vec)]
ans = -r.vec * log(sigma) - exp( -(yiri-loc)/sigma )
max.r.vec = max(r.vec)
@@ -745,22 +864,25 @@ gumbel <- function(llocation="identity",
index = (jay <= r.vec)
ans[index] = ans[index] - (y[index,jay]-loc[index]) / sigma[index]
}
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * ans)
- }, list( .llocation=llocation, .lscale=lscale,
- .elocation = elocation, .escale = escale ))),
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+
+
+ sum(w * ans)
+ }
+ }, list( .llocation=llocation, .lscale=lscale,
+ .elocation = elocation, .escale = escale ))),
deriv=eval(substitute(expression({
loc = eta2theta(eta[,1], .llocation, earg= .elocation)
sigma = eta2theta(eta[,2], .lscale, earg= .escale )
- r.vec = as.vector((!is.na(y)) %*% rep(1,ncol(y))) # r_i vector
+ r.vec = rowSums(cbind(!is.na(y)))
yiri = y[cbind(1:nrow(y),r.vec)]
- yi.bar = apply(y, 1, mean, na.rm=TRUE)
+ yi.bar = rowMeans(y, na.rm=TRUE)
temp2 = (yiri - loc) / sigma
term2 = exp(-temp2)
dloc.deta = dtheta.deta(loc, .llocation, earg= .elocation)
dsigma.deta = dtheta.deta(sigma, .lscale, earg= .escale )
dl.dloc = (r.vec - term2) / sigma
- dl.dsigma = (apply((y - loc) / sigma, 1, sum, na.rm=TRUE) - r.vec -
+ dl.dsigma = (rowSums((y - loc) / sigma, na.rm=TRUE) - r.vec -
temp2 * term2) / sigma
w * cbind(dl.dloc * dloc.deta, dl.dsigma * dsigma.deta)
}), list( .llocation=llocation, .lscale=lscale,
@@ -785,70 +907,96 @@ gumbel <- function(llocation="identity",
-
-
-
rgpd = function(n, location=0, scale=1, shape=0) {
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
- if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE))
- stop("bad input for argument \"n\"")
- if(!is.Numeric(location)) stop("bad input for argument \"location\"")
- if(!is.Numeric(shape)) stop("bad input for argument \"shape\"")
- ans = numeric(n)
- shape = rep(shape, len=n); location = rep(location, len=n);
- scale = rep(scale, len=n)
+ use.n = if((length.n <- length(n)) > 1) length.n else
+ if(!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ stop("bad input for argument 'n'") else n
+
+ if(!is.Numeric(location)) stop("bad input for argument 'location'")
+ if(!is.Numeric(shape)) stop("bad input for argument 'shape'")
+ ans = numeric(use.n)
+ shape = rep(shape, len=use.n); location = rep(location, len=use.n);
+ scale = rep(scale, len=use.n)
scase = abs(shape) < sqrt(.Machine$double.eps)
nscase = sum(scase)
- if(n - nscase)
- ans[!scase] = location[!scase] +
- scale[!scase]*((runif(n-nscase))^(-shape[!scase])-1) / shape[!scase]
+ if(use.n - nscase)
+ ans[!scase] = location[!scase] + scale[!scase] *
+ ((runif(use.n-nscase))^(-shape[!scase])-1) / shape[!scase]
if(nscase)
ans[scase] = location[scase] - scale[scase] * log(runif(nscase))
+ ans[scale <= 0] = NaN
ans
}
-dgpd = function(x, location=0, scale=1, shape=0) {
- if(!is.Numeric(x)) stop("bad input for argument \"x\"")
- if(!is.Numeric(location)) stop("bad input for argument \"location\"")
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
- if(!is.Numeric(shape)) stop("bad input for argument \"shape\"")
- n = max(length(x), length(location), length(scale), length(shape))
- ans = rep(as.numeric(NA), len=n)
- shape = rep(shape, len=n); location = rep(location, len=n);
- scale = rep(scale, len=n); x = rep(x-location, len=n)
- scase = abs(shape) < sqrt(.Machine$double.eps)
+
+
+dgpd = function(x, location=0, scale=1, shape=0, log=FALSE,
+ tolshape0 = sqrt(.Machine$double.eps),
+ oobounds.log = -Inf, giveWarning=FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+ if(oobounds.log > 0)
+ stop("bad input for argument 'oobounds.log'")
+
+ if(!is.Numeric(tolshape0, allow=1, posit=TRUE))
+ stop("bad input for argument 'tolshape0'")
+ L = max(length(x), length(location), length(scale), length(shape))
+ shape = rep(shape, len=L); location = rep(location, len=L);
+ scale = rep(scale, len=L);
+ x = rep(x, len=L)
+
+ logdensity = rep(log(0), len=L)
+ scase = abs(shape) < tolshape0
nscase = sum(scase)
- if(n - nscase) {
- pos = (1+shape*x/scale > 0 & x > 0)
- ind8 = pos & !scase
- ans[ind8] = (1+shape[ind8]*x[ind8]/scale[ind8])^(-1/shape[ind8]-1) /
- scale[ind8]
- ind8 = !pos & !scase
- ans[ind8] = 0
+ if(L - nscase) {
+ zedd = (x-location) / scale
+ xok = (!scase) & (zedd > 0) & (1 + shape*zedd > 0)
+ logdensity[xok] = -(1 + 1/shape[xok])*log1p(shape[xok]*zedd[xok]) -
+ log(scale[xok])
+ outofbounds = (!scase) & ((zedd <= 0) | (1 + shape*zedd <= 0))
+ if(any(outofbounds)) {
+ logdensity[outofbounds] = oobounds.log
+ no.oob = sum(outofbounds)
+ if(giveWarning)
+ warning(no.oob, " observation",
+ ifelse(no.oob > 1, "s are", " is"), " out of bounds")
+ }
}
if(nscase) {
- pos = x>0
- ind5 = pos & scase
- ans[ind5] = exp(-x[ind5]/scale[ind5]) / scale[ind5]
- ans[!pos & scase] = 0
+ xok = scase & (x > location)
+ logdensity[xok] = -(x[xok]-location[xok])/scale[xok] - log(scale[xok])
+ outofbounds = scase & (x <= location)
+ if(any(outofbounds)) {
+ logdensity[outofbounds] = oobounds.log
+ no.oob = sum(outofbounds)
+ if(giveWarning)
+ warning(no.oob, " observation",
+ ifelse(no.oob > 1, "s are", " is"), " out of bounds")
+ }
}
- ans
+
+ logdensity[scale <= 0] = NaN
+ if(log.arg) logdensity else exp(logdensity)
}
+
+
pgpd = function(q, location=0, scale=1, shape=0) {
- if(!is.Numeric(q)) stop("bad input for argument \"q\"")
- if(!is.Numeric(location)) stop("bad input for argument \"location\"")
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
- if(!is.Numeric(shape)) stop("bad input for argument \"shape\"")
- n = max(length(q), length(location), length(scale), length(shape))
- ans = numeric(n)
- shape = rep(shape, len=n); location = rep(location, len=n);
- scale = rep(scale, len=n); q = rep(q-location, len=n)
+ if(!is.Numeric(q)) stop("bad input for argument 'q'")
+ if(!is.Numeric(location)) stop("bad input for argument 'location'")
+ if(!is.Numeric(shape)) stop("bad input for argument 'shape'")
+
+ use.n = max(length(q), length(location), length(scale), length(shape))
+ ans = numeric(use.n)
+ shape = rep(shape, len=use.n); location = rep(location, len=use.n);
+ scale = rep(scale, len=use.n);
+ q = rep(q-location, len=use.n) # Note the centering, careful with dgumbel()!
scase = abs(shape) < sqrt(.Machine$double.eps)
nscase = sum(scase)
- if(n - nscase) {
+ if(use.n - nscase) {
q[q<0] = 0
- ans = 1 - pmax(0,(1+shape*q/scale))^(-1/shape)
+ ans = 1 - pmax(0, (1+shape*q/scale))^(-1/shape)
}
if(nscase) {
pos = q>=0
@@ -857,27 +1005,34 @@ pgpd = function(q, location=0, scale=1, shape=0) {
ind9 = !pos & scase
ans[ind9] = 0
}
+ ans[scale <= 0] = NaN
ans
}
qgpd = function(p, location=0, scale=1, shape=0) {
- if(!is.Numeric(p, posit=TRUE) || any(p >= 1)) stop("0 < p < 1 is required")
- if(!is.Numeric(location)) stop("bad input for argument \"location\"")
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
- if(!is.Numeric(shape)) stop("bad input for argument \"shape\"")
- n = max(length(p), length(location), length(scale), length(shape))
- ans = numeric(n)
- shape = rep(shape, len=n); location = rep(location, len=n);
- scale = rep(scale, len=n); p = rep(p, len=n)
+
+ use.n = max(length(p), length(location), length(scale), length(shape))
+ ans = numeric(use.n)
+ shape = rep(shape, len=use.n); location = rep(location, len=use.n);
+ scale = rep(scale, len=use.n); p = rep(p, len=use.n)
scase = abs(shape) < sqrt(.Machine$double.eps)
nscase = sum(scase)
- if(n - nscase) {
+ if(use.n - nscase) {
ans[!scase] = location[!scase] + scale[!scase] *
((1-p[!scase])^(-shape[!scase]) - 1) / shape[!scase]
}
if(nscase) {
ans[scase] = location[scase] - scale[scase] * log1p(-p[scase])
}
+
+ ans[p < 0] = NaN
+ ans[p > 1] = NaN
+ ans[(p == 0)] = location[p == 0]
+ ans[(p == 1) & (shape >= 0)] = Inf
+ ind5 = (p == 1) & (shape < 0)
+ ans[ind5] = location[ind5] - scale[ind5] / shape[ind5]
+
+ ans[scale <= 0] = NaN
ans
}
@@ -885,34 +1040,36 @@ qgpd = function(p, location=0, scale=1, shape=0) {
-gpd = function(threshold=0,
- lscale="loge",
- lshape="logoff",
- escale = list(),
- eshape = if(lshape=="logoff") list(offset=0.5) else
- if(lshape=="elogit") list(min=-0.5, max=0.5) else NULL,
- percentiles=c(90,95),
- iscale=NULL,
- ishape=NULL,
- tshape0=0.001,
- method.init=1,
- zero=2) {
+ gpd = function(threshold=0,
+ lscale="loge",
+ lshape="logoff",
+ escale = list(),
+ eshape = if(lshape=="logoff") list(offset=0.5) else
+ if(lshape=="elogit") list(min=-0.5, max=0.5) else NULL,
+ percentiles=c(90,95),
+ iscale=NULL,
+ ishape=NULL,
+ tolshape0=0.001, giveWarning=TRUE,
+ method.init=1,
+ zero=2) {
+ if(!is.logical(giveWarning) || length(giveWarning) != 1)
+ stop("bad input for argument 'giveWarning'")
if(!is.Numeric(threshold))
- stop("bad input for argument \"threshold\"")
+ stop("bad input for argument 'threshold'")
if(!is.Numeric(method.init, allow=1, posit=TRUE, integer=TRUE) ||
method.init > 2.5)
- stop("argument \"method.init\" must be 1 or 2")
+ stop("argument 'method.init' must be 1 or 2")
if(mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if(mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
if(length(percentiles) &&
(!is.Numeric(percentiles, posit=TRUE) || max(percentiles) >= 100))
- stop("bad input for argument \"percentiles\"")
- if(!is.Numeric(tshape0, allow=1, posit=TRUE) || tshape0 > 0.1)
- stop("bad input for argument \"tshape0\"")
+ stop("bad input for argument 'percentiles'")
+ if(!is.Numeric(tolshape0, allow=1, posit=TRUE) || tolshape0 > 0.1)
+ stop("bad input for argument 'tolshape0'")
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(escale)) escale = list()
if(!is.list(eshape)) eshape = list()
@@ -939,23 +1096,23 @@ gpd = function(threshold=0,
if(!length(etastart)) {
meany = mean(ystar)
vary = var(ystar)
- xiinit = if(length( .ishape)) .ishape else {
+ init.xi = if(length( .ishape)) .ishape else {
if( .method.init == 1) -0.5*(meany^2/vary - 1) else
0.5 * (1 - median(ystar)^2 / vary)
}
- siginit = if(length( .iscale)) .iscale else {
+ init.sig = if(length( .iscale)) .iscale else {
if(.method.init==1) 0.5*meany*(meany^2/vary + 1) else
- abs(1-xiinit) * median(ystar)
+ abs(1-init.xi) * median(ystar)
}
- siginit[siginit <= 0] = 0.01 # sigma > 0
- xiinit[xiinit <= -0.5] = -0.40 # Fisher scoring works if xi > -0.5
- xiinit[xiinit >= 1.0] = 0.90 # Mean/var exists if xi < 1 / 0.5
- if( .lshape == "loge") xiinit[xiinit <= 0.0] = 0.05
- siginit = rep(siginit, leng=length(y))
- xiinit = rep(xiinit, leng=length(y))
-
- etastart = cbind(theta2eta(siginit, .lscale, earg= .escale ),
- theta2eta(xiinit, .lshape, earg= .eshape ))
+ init.sig[init.sig <= 0] = 0.01 # sigma > 0
+ init.xi[init.xi <= -0.5] = -0.40 # Fisher scoring works if xi > -0.5
+ init.xi[init.xi >= 1.0] = 0.90 # Mean/var exists if xi < 1 / 0.5
+ if( .lshape == "loge") init.xi[init.xi <= 0.0] = 0.05
+ init.sig = rep(init.sig, leng=length(y))
+ init.xi = rep(init.xi, leng=length(y))
+
+ etastart = cbind(theta2eta(init.sig, .lscale, earg= .escale ),
+ theta2eta(init.xi, .lshape, earg= .eshape ))
}
}), list( .lscale=lscale, .lshape=lshape, .threshold=threshold,
.iscale=iscale, .ishape=ishape,
@@ -969,12 +1126,12 @@ gpd = function(threshold=0,
Threshold = if(is.Numeric( .threshold)) .threshold else 0
if(lp) {
fv = matrix(as.numeric(NA), nrow(eta), lp)
- iszero = (abs(xi) < .tshape0)
- for(i in 1:lp) {
- temp = 1-cent[i]/100
- fv[!iszero,i] = Threshold + (temp^(-xi[!iszero]) -1) *
- sigma[!iszero] / xi[!iszero]
- fv[iszero,i] = Threshold - sigma[iszero] * log(temp)
+ iszero = (abs(xi) < .tolshape0)
+ for(ii in 1:lp) {
+ temp = 1-cent[ii]/100
+ fv[!iszero,ii] = Threshold + (temp^(-xi[!iszero]) -1) *
+ sigma[!iszero] / xi[!iszero]
+ fv[ iszero,ii] = Threshold - sigma[iszero] * log(temp)
}
dimnames(fv) = list(dimnames(eta)[[1]],
paste(as.character(.percentiles), "%", sep=""))
@@ -985,7 +1142,7 @@ gpd = function(threshold=0,
fv
}, list( .lscale=lscale, .lshape=lshape, .threshold=threshold,
.escale=escale, .eshape=eshape,
- .tshape0=tshape0, .percentiles=percentiles ))),
+ .tolshape0=tolshape0, .percentiles=percentiles ))),
last=eval(substitute(expression({
misc$links = c(scale = .lscale, shape = .lshape)
misc$true.mu = FALSE # @fitted is not a true mu
@@ -993,36 +1150,25 @@ gpd = function(threshold=0,
misc$percentiles = .percentiles
misc$threshold = if(is.Numeric( .threshold)) .threshold else 0
misc$expected = TRUE
- misc$tshape0 = .tshape0
+ misc$tolshape0 = .tolshape0
if(any(xi < -0.5))
warning("some values of the shape parameter are less than -0.5")
}), list( .lscale=lscale, .lshape=lshape, .threshold=threshold,
.escale=escale, .eshape=eshape,
- .tshape0=tshape0, .percentiles=percentiles ))),
+ .tolshape0=tolshape0, .percentiles=percentiles ))),
loglikelihood=eval(substitute(
function(mu,y,w, residuals=FALSE, eta, extra=NULL) {
- sigma = eta2theta(eta[,1], .lscale, earg= .escale )
- xi = eta2theta(eta[,2], .lshape, earg= .eshape )
- if(any(iszero <- (abs(xi) < .tshape0))) {
- }
- Threshold = extra$threshold
- ystar = y - Threshold # Operate on ystar
- A = 1 + xi*ystar/sigma
- mytolerance = .Machine$double.eps
- bad <- (A<=mytolerance) # Range violation
- if(any(bad) && any(w[bad] != 0)) {
- cat("There are some range violations\n")
- if(exists("flush.console")) flush.console()
- }
- igpd = !iszero & !bad
- iexp = iszero & !bad
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w[bad]) * (-1.0e10) +
- sum(w[igpd] * (-log(sigma[igpd]) - (1+1/xi[igpd])*log(A[igpd]))) +
- sum(w[iexp] * (-log(sigma[iexp]) - ystar[iexp]/sigma[iexp]))
- }, list( .tshape0=tshape0, .lscale=lscale,
- .escale=escale, .eshape=eshape,
- .lshape=lshape ))),
+ sigma = eta2theta(eta[,1], .lscale, earg= .escale )
+ xi = eta2theta(eta[,2], .lshape, earg= .eshape )
+ Threshold = extra$threshold
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dgpd(x=y, location=Threshold, scale=sigma, shape=xi,
+ tolshape0 = .tolshape0, giveWarning= .giveWarning,
+ log=TRUE, oobounds.log = -1.0e04))
+ }
+ }, list( .tolshape0=tolshape0, .giveWarning= giveWarning,
+ .escale=escale, .eshape=eshape,
+ .lscale=lscale, .lshape=lshape ))),
vfamily=c("gpd", "vextremes"),
deriv=eval(substitute(expression({
sigma = eta2theta(eta[,1], .lscale, earg= .escale )
@@ -1035,9 +1181,9 @@ gpd = function(threshold=0,
if(any(bad) && any(w[bad] != 0)) {
cat(sum(w[bad],na.rm=TRUE), # "; ignoring them"
"observations violating boundary constraints\n")
- if(exists("flush.console")) flush.console()
+ flush.console()
}
- if(any(iszero <- (abs(xi) < .tshape0))) {
+ if(any(iszero <- (abs(xi) < .tolshape0))) {
}
igpd = !iszero & !bad
iexp = iszero & !bad
@@ -1051,7 +1197,7 @@ gpd = function(threshold=0,
dsigma.deta = dtheta.deta(sigma, .lscale, earg= .escale )
dxi.deta = dtheta.deta(xi, .lshape, earg= .eshape )
w * cbind(dl.dsigma * dsigma.deta, dl.dxi * dxi.deta)
- }), list( .tshape0=tshape0, .lscale=lscale,
+ }), list( .tolshape0=tolshape0, .lscale=lscale,
.escale=escale, .eshape=eshape,
.lshape=lshape ))),
weight=eval(substitute(expression({
@@ -1074,7 +1220,7 @@ gpd = function(threshold=0,
meplot.default = function(y, main="Mean Excess Plot",
xlab="Threshold", ylab="Mean Excess", lty=c(2,1:2),
conf=0.95, col=c("blue","black","blue"), type="l", ...) {
- if(!is.Numeric(y)) stop("bad input for argument \"y\"")
+ if(!is.Numeric(y)) stop("bad input for argument 'y'")
n = length(y)
sy = sort(y)
dsy = rev(sy) # decreasing sequence
@@ -1110,7 +1256,7 @@ setMethod("meplot", "vlm",
guplot.default = function(y, main="Gumbel Plot",
xlab="Reduced data", ylab="Observed data", type="p", ...) {
- if(!is.Numeric(y)) stop("bad input for argument \"y\"")
+ if(!is.Numeric(y)) stop("bad input for argument 'y'")
n = length(y)
sy = sort(y)
x = -log(-log(((1:n) - 0.5) / n))
@@ -1137,213 +1283,30 @@ setMethod("guplot", "vlm",
-if(FALSE)
-bvevd.log.qn <- function(lscale="loge",
- ldep="logit",
- exchangeable=FALSE, zero=NULL)
-{
- if(mode(ldep) != "character" && mode(ldep) != "name")
- ldep <- as.character(substitute(ldep))
- if(mode(lscale) != "character" && mode(lscale) != "name")
- lscale <- as.character(substitute(lscale))
-
- new("vglmff",
- blurb=c("Bivariate logistic extreme value distribution\n\n",
- "Link: ", "loc1, ", namesof("scale1", lscale), ", shape1, ",
- "loc2, ", namesof("scale2", lscale), ", shape2, ",
- namesof("dep", ldep), "\n"),
- constraints=eval(substitute(expression({
- constraints <- cm.vgam(cbind(rbind(diag(3),diag(3),matrix(0,1,3)),
- ei(7,7)), x, .exchangeable, constraints)
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list( .exchangeable=exchangeable, .zero=zero ))),
- deviance= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- nz <- y > 0
- devi <- - (y - mu)
- devi[nz] <- devi[nz] + y[nz] * log(y[nz]/mu[nz])
- if(residuals) sign(y - mu) * sqrt(2 * abs(devi) * w) else
- 2 * sum(w * devi)
- },
- initialize=eval(substitute(expression({
- M = if(is.matrix(y)) ncol(y) else 1
- if(M != 1) stop("response must be a vector or a one-column matrix")
- predictors.names =
- c("loc1",
- namesof("scale1", lscale), "shape1",
- "loc2",
- namesof("scale2", lscale), "shape2",
- namesof("dep", ldep))
- if(!length(etastart)) {
- etastart <- theta2eta(mu, link=.link)
- }
- }), list( .link=link, .estimated.dispersion=estimated.dispersion) )),
- inverse=eval(substitute(function(eta, extra=NULL) {
- mu = eta2theta(eta, link=.link)
- mu
- }, list( .link=link) )),
- last=eval(substitute(expression({
- misc$link = c(loc1="identity", scale1= .lscale, shape1="identity",
- loc2="identity", scale2= .lscale, shape2="identity",
- dep= .ldep)
- misc$expected = FALSE
- misc$BFGS = TRUE
- }), list( .lscale=lscale, .ldep=ldep) )),
- link=eval(substitute(function(mu, extra=NULL) {
- theta2eta(mu, link=.link)
- }, list( .link=link) )),
- loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- pdf = deriv3( ~ w * ifelse(1+xi1*(y1-mu1)/sigma1>0, y1, 0),
- name=c("y1"), hessian= TRUE)
- pdf = deriv3( ~ w * exp(-((pmax(0,1+xi1*(y1-mu1)/sigma1)^(-1/xi1))^(1/dep) +
- (pmax(0,1+xi2*(y2-mu2)/sigma2)^(-1/xi2))^(1/dep))^(dep)),
- name=c("y1","y2"), hessian= TRUE)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- pdf
-
- },
- vfamily="bvevd.log.qn",
- deriv=eval(substitute(expression({
- if(iter == 1) {
- etanew = eta
- } else {
- derivold = derivnew
- etaold = etanew
- etanew = eta
- }
-
- derivnew =
- if(.link == "loge" && (any(mu < .Machine$double.eps))) {
- w * (y - mu)
- } else {
- lambda <- mu
- dl.dlambda <- (y-lambda) / lambda
- dlambda.deta <- dtheta.deta(theta=lambda, link=.link)
- w * dl.dlambda * dlambda.deta
- }
- derivnew
- }), list( .link=link) )),
- weight=eval(substitute(expression({
- if( .wwts == "qn") {
- if(iter == 1) {
- wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
- } else {
- wzold = wznew
- wznew = qnupdate(w=w, wzold=wzold, dderiv=(derivold-derivnew),
- deta=etanew-etaold, M=M,
- trace=trace) # weights incorporated in args
- }
- }
- wznew
- }), list( .lscale=lscale, .ldep=ldep) )))
-}
-
-
-rgev = function(n, location=0, scale=1, shape=0) {
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
- if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE))
- stop("bad input for argument \"n\"")
- if(!is.Numeric(location))
- stop("bad input for argument argument \"location\"")
- if(!is.Numeric(shape)) stop("bad input for argument argument \"shape\"")
- ans = numeric(n)
- shape = rep(shape, len=n); location = rep(location, len=n);
- scale = rep(scale, len=n)
- scase = abs(shape) < sqrt(.Machine$double.eps)
- nscase = sum(scase)
- if(n - nscase)
- ans[!scase] = location[!scase] + scale[!scase] *
- ((-log(runif(n-nscase)))^(-shape[!scase]) -1) / shape[!scase]
- if(nscase)
- ans[scase] = rgumbel(nscase, location[scase], scale[scase])
- ans
-}
-dgev = function(x, location=0, scale=1, shape=0) {
- if(!is.Numeric(x)) stop("bad input for argument \"x\"")
- if(!is.Numeric(location)) stop("bad input for argument \"location\"")
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
- if(!is.Numeric(shape)) stop("bad input for argument \"shape\"")
- n = max(length(x), length(location), length(scale), length(shape))
- ans = rep(as.numeric(NA), len=n)
- shape = rep(shape, len=n); location = rep(location, len=n);
- scale = rep(scale, len=n); x = rep(x-location, len=n)
- scase = abs(shape) < sqrt(.Machine$double.eps)
- nscase = sum(scase)
- if(n - nscase) {
- zedd = pmax(0,(1+shape*x/scale))
- ans[!scase] = exp(-zedd[!scase]^(-1/shape[!scase])) *
- zedd[!scase]^(-1/shape[!scase] -1) / scale[!scase]
- }
- if(nscase)
- ans[scase] = dgumbel(x[scase], location[scase], scale[scase])
- ans
-}
-
-pgev = function(q, location=0, scale=1, shape=0) {
- if(!is.Numeric(q)) stop("bad input for argument \"q\"")
- if(!is.Numeric(location)) stop("bad input for argument \"location\"")
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
- if(!is.Numeric(shape)) stop("bad input for argument \"shape\"")
- n = max(length(q), length(location), length(scale), length(shape))
- ans = numeric(n)
- shape = rep(shape, len=n); location = rep(location, len=n);
- scale = rep(scale, len=n); q = rep(q-location, len=n)
- scase = abs(shape) < sqrt(.Machine$double.eps)
- nscase = sum(scase)
- if(n - nscase) {
- zedd = pmax(0,(1+shape*q/scale))
- ans[!scase] = exp(-zedd[!scase]^(-1/shape[!scase]))
- }
- if(nscase)
- ans[scase] = pgumbel(q[scase], location[scase], scale[scase])
- ans
-}
-
-qgev = function(p, location=0, scale=1, shape=0) {
- if(!is.Numeric(p, posit=TRUE) || any(p >= 1)) stop("0 < p < 1 is required")
- if(!is.Numeric(location)) stop("bad input for argument \"location\"")
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
- if(!is.Numeric(shape)) stop("bad input for argument \"shape\"")
- n = max(length(p), length(location), length(scale), length(shape))
- ans = numeric(n)
- shape = rep(shape, len=n); location = rep(location, len=n);
- scale = rep(scale, len=n); p = rep(p, len=n)
- scase = abs(shape) < sqrt(.Machine$double.eps)
- nscase = sum(scase)
- if(n - nscase) {
- ans[!scase] = location[!scase] + scale[!scase] *
- ((-log(p[!scase]))^(-shape[!scase]) -1) / shape[!scase]
- }
- if(nscase)
- ans[scase] = qgumbel(p[scase], location[scase], scale[scase])
- ans
-}
-
-
-
-egumbel = function(llocation="identity",
- lscale="loge",
- elocation = list(),
- escale = list(),
- iscale=NULL,
- R=NA, percentiles=c(95,99),
- mpv=FALSE, zero=NULL)
+ egumbel = function(llocation="identity",
+ lscale="loge",
+ elocation = list(),
+ escale = list(),
+ iscale=NULL,
+ R=NA, percentiles=c(95,99),
+ mpv=FALSE, zero=NULL)
{
if(mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
if(mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if(!is.logical(mpv) || length(mpv) != 1)
- stop("bad input for argument \"mpv\"")
+ stop("bad input for argument 'mpv'")
if(length(percentiles) &&
(!is.Numeric(percentiles, posit=TRUE) || max(percentiles) >= 100))
- stop("bad input for argument \"percentiles\"")
+ stop("bad input for argument 'percentiles'")
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(length(iscale) && !is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument \"iscale\"")
+ stop("bad input for argument 'iscale'")
if(!is.list(elocation)) elocation = list()
if(!is.list(escale)) escale = list()
@@ -1394,10 +1357,10 @@ egumbel = function(llocation="identity",
mu = matrix(as.numeric(NA), nrow(eta), lp + mpv)
Rvec = extra$R
if(1 <= lp)
- for(i in 1:lp) {
- ci = if(is.Numeric(Rvec)) Rvec * (1 - Percentiles[i] / 100) else
- -log( Percentiles[i] / 100)
- mu[,i] = loc - sigma * log(ci)
+ for(ii in 1:lp) {
+ ci = if(is.Numeric(Rvec)) Rvec * (1 - Percentiles[ii] / 100) else
+ -log( Percentiles[ii] / 100)
+ mu[,ii] = loc - sigma * log(ci)
}
if(mpv)
mu[,ncol(mu)] = loc - sigma * log(log(2))
@@ -1422,9 +1385,9 @@ egumbel = function(llocation="identity",
function(mu,y,w,residuals= FALSE,eta,extra=NULL) {
loc = eta2theta(eta[,1], .llocation, earg= .elocation)
sc = eta2theta(eta[,2], .lscale, earg= .escale )
- zedd = (y-loc) / sc
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (-log(sc) - zedd - exp(-zedd)))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dgumbel(x=y, location=loc, scale=sc, log=TRUE))
+ }
}, list( .llocation=llocation, .lscale=lscale,
.elocation=elocation, .escale=escale ))),
vfamily="egumbel",
@@ -1456,11 +1419,11 @@ egumbel = function(llocation="identity",
-cgumbel = function(llocation="identity",
- lscale="loge",
- elocation = list(),
- escale = list(), iscale=NULL,
- mean=TRUE, percentiles=NULL, zero=2)
+ cgumbel = function(llocation="identity",
+ lscale="loge",
+ elocation = list(),
+ escale = list(), iscale=NULL,
+ mean=TRUE, percentiles=NULL, zero=2)
{
if(mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
@@ -1472,7 +1435,7 @@ cgumbel = function(llocation="identity",
any(percentiles>=100)))
stop("valid percentiles values must be given when mean=FALSE")
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(elocation)) elocation = list()
if(!is.list(escale)) escale = list()
@@ -1522,9 +1485,9 @@ cgumbel = function(llocation="identity",
if(.mean) loc + sc * EulerM else {
lp = length(.percentiles) # 0 if NULL
mu = matrix(as.numeric(NA), nrow(eta), lp)
- for(i in 1:lp) {
- ci = -log( .percentiles[i] / 100)
- mu[,i] = loc - sc * log(ci)
+ for(ii in 1:lp) {
+ ci = -log( .percentiles[ii] / 100)
+ mu[,ii] = loc - sc * log(ci)
}
dmn2 = paste(as.character(.percentiles), "%", sep="")
dimnames(mu) <- list(dimnames(eta)[[1]], dmn2)
@@ -1557,7 +1520,7 @@ cgumbel = function(llocation="identity",
ell2 = log(Fy[cenL])
ell3 = log1p(-Fy[cenU])
if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w[cen0] * ell1) + sum(w[cenL] * ell2) + sum(w[cenU] * ell3)
+ sum(w[cen0] * ell1) + sum(w[cenL] * ell2) + sum(w[cenU] * ell3)
}, list( .lscale=lscale,
.llocation = llocation,
.elocation = elocation, .escale = escale ))),
@@ -1625,13 +1588,21 @@ cgumbel = function(llocation="identity",
-dfrechet = function(x, location=0, scale=1, shape) {
- if(!is.Numeric(scale, posit=TRUE)) stop("scale must be positive")
- if(!is.Numeric(shape, posit=TRUE)) stop("shape must be positive")
+dfrechet = function(x, location=0, scale=1, shape, log=FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ L = max(length(x), length(scale), length(shape))
+ x = rep(x, len=L); scale = rep(scale, len=L); shape = rep(shape, len=L);
+ logdensity = rep(log(0), len=L)
+ xok = (x > location)
rzedd = scale / (x - location)
- ans = rzedd^2 * shape * exp(-(rzedd^shape)) * rzedd^(shape-1) / scale
- ans[x <= location] = 0
- ans
+ logdensity[xok] = log(shape[xok]) - (rzedd[xok]^shape[xok]) +
+ (shape[xok]+1) * log(rzedd[xok]) - log(scale[xok])
+ logdensity[shape <= 0] = NaN
+ logdensity[scale <= 0] = NaN
+ if(log.arg) logdensity else exp(logdensity)
}
pfrechet = function(q, location=0, scale=1, shape) {
@@ -1652,7 +1623,7 @@ qfrechet = function(p, location=0, scale=1, shape) {
rfrechet = function(n, location=0, scale=1, shape) {
if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE))
- stop("bad input for argument \"n\"")
+ stop("bad input for argument 'n'")
if(!is.Numeric(scale, posit=TRUE)) stop("scale must be positive")
if(!is.Numeric(shape, posit=TRUE)) stop("shape must be positive")
location + scale * (-log(runif(n)))^(-1/shape)
@@ -1663,16 +1634,16 @@ frechet2.control <- function(save.weight=TRUE, ...)
list(save.weight=save.weight)
}
-frechet2 = function(location=0,
- lscale="loge",
- lshape="loglog",
- escale = list(),
- eshape = list(),
- iscale=NULL, ishape=3,
- zero=NULL)
+ frechet2 = function(location=0,
+ lscale="loge",
+ lshape="loglog",
+ escale = list(),
+ eshape = list(),
+ iscale=NULL, ishape=3,
+ zero=NULL)
{
if(!is.Numeric(location))
- stop("bad input for argument \"location\"")
+ stop("bad input for argument 'location'")
if(mode(lscale) != "character" && mode(lscale) != "name")
lscale <- as.character(substitute(lscale))
if(mode(lshape) != "character" && mode(lshape) != "name")
@@ -1735,8 +1706,8 @@ frechet2 = function(location=0,
shape = eta2theta(eta[,2], .lshape, earg= .eshape )
rzedd = Scale / (y-loc)
if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (log(shape) + log(Scale) - 2 * log(y-loc) -
- rzedd^shape + (shape-1) * log(rzedd)))
+ sum(w * dfrechet(x=y, location=loc, scale=Scale, shape=shape,
+ log=TRUE))
}, list( .lscale=lscale, .lshape=lshape,
.escale=escale, .eshape=eshape ))),
vfamily=c("frechet2", "vextremes"),
@@ -1782,15 +1753,15 @@ frechet3.control <- function(save.weight=TRUE, ...)
}
-frechet3= function(anchor=NULL,
- ldifference="loge",
- lscale="loge",
- lshape="loglog",
- edifference = list(),
- escale = list(),
- eshape = list(),
- ilocation=NULL, iscale=NULL, ishape=3, zero=NULL,
- effpos = .Machine$double.eps^0.75)
+ frechet3 = function(anchor=NULL,
+ ldifference="loge",
+ lscale="loge",
+ lshape="loglog",
+ edifference = list(),
+ escale = list(),
+ eshape = list(),
+ ilocation=NULL, iscale=NULL, ishape=3, zero=NULL,
+ effpos = .Machine$double.eps^0.75)
{
if(mode(ldifference) != "character" && mode(ldifference) != "name")
ldifference <- as.character(substitute(ldifference))
@@ -1798,8 +1769,8 @@ frechet3= function(anchor=NULL,
lscale <- as.character(substitute(lscale))
if(mode(lshape) != "character" && mode(lshape) != "name")
lshape <- as.character(substitute(lshape))
- if(!is.Numeric(ishape, allo=1, posi=TRUE)) stop("bad input for argument \"ishape\"")
- if(!is.Numeric(effpos, allo=1)|| effpos<0) stop("bad input for argument \"effpos\"")
+ if(!is.Numeric(ishape, allo=1, posi=TRUE)) stop("bad input for argument 'ishape'")
+ if(!is.Numeric(effpos, allo=1)|| effpos<0) stop("bad input for argument 'effpos'")
if(!is.list(edifference)) edifference = list()
if(!is.list(escale)) escale = list()
if(!is.list(eshape)) eshape = list()
@@ -1815,14 +1786,13 @@ frechet3= function(anchor=NULL,
}), list( .zero=zero ))),
initialize=eval(substitute(expression({
predictors.names =
- c(namesof("difference", .ldifference, earg= .edifference, short=TRUE),
+ c(namesof("difference", .ldifference, earg= .edifference, short=TRUE),
namesof("scale", .lscale, earg= .escale, short=TRUE),
namesof("shape", .lshape, earg= .eshape, short=TRUE))
anchorpt = if(is.Numeric( .anchor, allow=1)) .anchor else min(y)
if(min(y) < anchorpt) stop("anchor point is too large")
extra$LHSanchor = anchorpt
if(!length(etastart)) {
- # Initial values for limiting case as xi --> 0, r_i==1
locinit = if(length( .ilocation)) rep( .ilocation, len=n) else
rep(anchorpt - 0.01*diff(range(y)), len=n)
if(any(y <= locinit))
@@ -1864,20 +1834,21 @@ frechet3= function(anchor=NULL,
.edifference=edifference, .escale=escale, .eshape=eshape ))),
loglikelihood=eval(substitute(
function(mu,y,w, residuals=FALSE, eta, extra=NULL) {
- loc = extra$LHSanchor - eta2theta(eta[,1], .ldifference, earg= .edifference)
+ loc = extra$LHSanchor -
+ eta2theta(eta[,1], .ldifference, earg= .edifference)
Scale = eta2theta(eta[,2], .lscale, earg= .escale )
shape = eta2theta(eta[,3], .lshape, earg= .eshape )
- rzedd = Scale / (y-loc)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (log(shape) + log(Scale) - 2 * log(y-loc) -
- rzedd^shape + (shape-1) * log(rzedd)))
- }, list( .ldifference=ldifference, .lscale=lscale, .lshape=lshape,
- .edifference=edifference, .escale=escale, .eshape=eshape ))),
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dfrechet(x=y, location=loc, scale=Scale, shape=shape,
+ log=TRUE))
+ }
+ }, list( .ldifference=ldifference, .lscale=lscale, .lshape=lshape,
+ .edifference=edifference, .escale=escale, .eshape=eshape ))),
vfamily=c("frechet3", "vextremes"),
deriv=eval(substitute(expression({
difference = eta2theta(eta[,1], .ldifference, earg= .edifference )
- Scale = eta2theta(eta[,2], .lscale, earg= .escale )
- shape = eta2theta(eta[,3], .lshape, earg= .eshape )
+ Scale = eta2theta(eta[,2], .lscale, earg= .escale )
+ shape = eta2theta(eta[,3], .lshape, earg= .eshape )
loc = extra$LHSanchor - difference
extra$location = loc # Store the location parameter estimate here
rzedd = Scale / (y-loc) # reciprocial of zedd
@@ -1893,7 +1864,8 @@ frechet3= function(anchor=NULL,
etanew = eta
}
derivnew = w *
- cbind(dl.ddiff * dtheta.deta(difference, .ldifference, earg= .edifference ),
+ cbind(dl.ddiff * dtheta.deta(difference, .ldifference,
+ earg= .edifference ),
dl.dScale * dtheta.deta(Scale, .lscale, earg= .escale ),
dl.dshape * dtheta.deta(shape, .lshape, earg= .eshape ))
derivnew
@@ -1918,8 +1890,8 @@ recnormal1.control <- function(save.weight=TRUE, ...)
list(save.weight=save.weight)
}
-recnormal1 = function(lmean="identity", lsd="loge",
- imean=NULL, isd=NULL, method.init=1, zero=NULL)
+ recnormal1 = function(lmean="identity", lsd="loge",
+ imean=NULL, isd=NULL, method.init=1, zero=NULL)
{
if(mode(lmean) != "character" && mode(lmean) != "name")
@@ -1928,7 +1900,7 @@ recnormal1 = function(lmean="identity", lsd="loge",
lsd = as.character(substitute(lsd))
if(!is.Numeric(method.init, allow=1, integ=TRUE, positi=TRUE) ||
method.init > 3.5)
- stop("argument \"method.init\" must be 1 or 2 or 3")
+ stop("argument 'method.init' must be 1 or 2 or 3")
new("vglmff",
blurb=c("Upper record values from a univariate normal distribution\n\n",
@@ -1970,13 +1942,13 @@ recnormal1 = function(lmean="identity", lsd="loge",
misc$expected = FALSE
}), list( .lmean=lmean, .lsd=lsd ))),
loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
sd = eta2theta(eta[,2], .lsd)
if(residuals) stop("loglikelihood residuals not implemented yet") else {
zedd = (y - mu) / sd
NN = nrow(eta)
sum(w * (-log(sd) - 0.5 * zedd^2)) -
- sum(w[-NN] * (log1p(-pnorm(zedd[-NN]))))
+ sum(w[-NN] * pnorm(zedd[-NN], lower.tail=FALSE, log.p=TRUE))
}
}, list( .lsd=lsd ))),
vfamily=c("recnormal1"),
@@ -2022,14 +1994,14 @@ recexp1.control <- function(save.weight=TRUE, ...)
list(save.weight=save.weight)
}
-recexp1 = function(lrate="loge", irate=NULL, method.init=1)
+ recexp1 = function(lrate="loge", irate=NULL, method.init=1)
{
if(mode(lrate) != "character" && mode(lrate) != "name")
lrate = as.character(substitute(lrate))
if(!is.Numeric(method.init, allow=1, integ=TRUE, positi=TRUE) ||
method.init > 3.5)
- stop("argument \"method.init\" must be 1 or 2 or 3")
+ stop("argument 'method.init' must be 1 or 2 or 3")
new("vglmff",
blurb=c("Upper record values from a 1-parameter exponential distribution\n\n",
@@ -2088,190 +2060,14 @@ recexp1 = function(lrate="loge", irate=NULL, method.init=1)
-if(FALSE)
-exbilogi <- function(zero=c(3,6,7),
- llocation="identity",
- lscale="loge",
- lshape="identity",
- ldependency="logit",
- percentiles=c(95,99),
- ishape1=-0.1, ishape2=-0.1, idependency=1)
-{
- stop("not working")
-
- if(mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
-
- if(mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
-
- if(mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
-
- if(mode(ldependency) != "character" && mode(ldependency) != "name")
- ldependency = as.character(substitute(ldependency))
- if(!is.Numeric(percentiles, posit=TRUE) || max(percentiles) >= 100)
- stop("bad input for argument \"percentiles\"")
-
- new("vglmff",
- blurb=c("Extreme Bivariate Logistic Distribution\n",
- "Links: ",
- namesof("loc1", link=llocation), ", ",
- namesof("scale1", link=lscale), ", ",
- namesof("shape1", link=lshape), ", ",
- namesof("loc2", link=llocation), ", ",
- namesof("scale2", link=lscale), ", ",
- namesof("shape2", link=lshape), ", ",
- namesof("dependency", link=ldependency)),
- constraints=eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
- predictors.names = c(
- namesof("loc1", .llocation, short=TRUE),
- namesof("scale1", .lscale, short=TRUE),
- namesof("shape1", .lshape, short=TRUE),
- namesof("loc2", .llocation, short=TRUE),
- namesof("scale2", .lscale, short=TRUE),
- namesof("shape2", .lshape, short=TRUE),
- namesof("dependency", .ldependency, short=TRUE))
- if(ncol(as.matrix(y)) != 2)
- stop("the response must be a two-column matrix")
- if(!length(etastart)) {
- EulerM = -digamma(1)
- # Initial values for limiting case as xi --> 0, r_i==1
- sig1.init = sqrt(6 * var(y[,1]))/pi
- mu1.init = median(y[,1]) - EulerM * sig1.init
- xi1.init = if(length(.ishape1)) .ishape1 else 0.1
- sig2.init = sqrt(6 * var(y[,2]))/pi
- mu2.init = median(y[,2]) - EulerM * sig2.init
- xi2.init = if(length(.ishape2)) .ishape2 else 0.1
- alpha.init = if(length(.idependency)) .idependency else 1
- etastart = cbind(rep(theta2eta(mu1.init, .llocation), nrow(y)),
- theta2eta(sig1.init, .lscale),
- theta2eta(xi1.init, .lshape),
- rep(theta2eta(mu2.init, .llocation), nrow(y)),
- theta2eta(sig2.init, .lscale),
- theta2eta(xi2.init, .lshape),
- theta2eta(alpha.init, .ldependency))
- bad1 = 1 + xi1.init*(y[,1]-mu1.init)/sig1.init <= 0
- bad2 = 1 + xi2.init*(y[,2]-mu2.init)/sig2.init <= 0
- if(any(bad1, na.rm = TRUE) || any(bad2, na.rm = TRUE))
- stop(paste(sum(bad1, na.rm = TRUE) + sum(bad2, na.rm = TRUE),
- "observations violating",
- "boundary constraints while initializing"))
- }
- }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .ldependency=ldependency,
- .ishape1=ishape1, .ishape2=ishape2, .idependency=idependency ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- mmus = eta2theta(eta[,c(1,4)], .llocation)
- sigmas = eta2theta(eta[,c(2,5)], .lscale)
- xis = eta2theta(eta[,c(3,6)], .lshape)
- alpha = eta2theta(eta[,7], .ldependency)
- cent = .percentiles
- lp = length(cent)
- fv = matrix(as.numeric(NA), nrow(eta), lp)
- dimnames(fv) = list(dimnames(eta)[[1]],
- paste(as.character(.percentiles), "%", sep=""))
- fv
- }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .ldependency=ldependency, .percentiles=percentiles ))),
- last=eval(substitute(expression({
- misc$links = c(loc1 = .llocation, lscale1 = .lscale, shape1 =.lshape,
- loc2 = .llocation, lscale2 = .lscale, shape2 =.lshape,
- dependency= .ldependency)
- misc$true.mu = FALSE # @fitted.value is not a true mu
- misc$percentiles = .percentiles
- }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .ldependency=ldependency, .percentiles=percentiles ))),
- loglikelihood=eval(substitute(
- function(mu,y,w, residuals=FALSE, eta, extra=NULL) {
- mmus = eta2theta(eta[,c(1,4)], .llocation)
- sigmas = eta2theta(eta[,c(2,5)], .lscale)
- xis = eta2theta(eta[,c(3,6)], .lshape)
- alpha = eta2theta(eta[,7], .ldependency)
- zedds = (y - mmus) / sigmas
- V = (zedds[,1]^(1/alpha) + zedds[,2]^(1/alpha))^alpha
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (log1p(-alpha) - log(alpha) -
- log(sigmas[,1]) - log(sigmas[,2]) +
- (xis[,1] + 1/alpha) * log(zedds[,1]) +
- (xis[,2] + 1/alpha) * log(zedds[,2]) +
- (1 - 2/alpha) * log(V) - V))
- }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .ldependency=ldependency ))),
- vfamily=c("exbilogi", "vextremes"),
- deriv=eval(substitute(expression({
- mmus = eta2theta(eta[,c(1,4)], .llocation)
- sigmas = eta2theta(eta[,c(2,5)], .lscale)
- xis = eta2theta(eta[,c(3,6)], .lshape)
- alpha = eta2theta(eta[,7], .ldependency)
- zedds = (y - mmus) / sigmas
- Zee = (1 + zedds)^(-1/xis)
- dZee.dmus =
- dZee.dsigmas =
- dZee.dxis =
- A = (zedds[,1]^(1/alpha) + zedds[,2]^(1/alpha))
- V = A^alpha
- dV.dalpha = V * (log(A) - (zedds[,1]^(1/alpha) *log(zedds[,1]) +
- zedds[,2]^(1/alpha) *log(zedds[,2])) / (alpha*A))
- temp400 = (1/alpha + xis)
- temp300 = ((1-2/alpha)/V - 1)
-
- dzedds.dmus = Zee^(1+xis) / sigmas
- dzedds.dsigmas = (y- zedds) * Zee^(1+xis) / sigmas
- dzedds.dxis = Zee * (log(Zee)/xis - zedds/(1+xis*(y-mmmu)/sigmas)) / xis
- dV.dmus = alpha * V^(1-1/alpha) * dZee.dmus
- dV.dsigmas = alpha * V^(1-1/alpha) * dZee.dsigmas
- dV.dxis = alpha * V^(1-1/alpha) * dZee.dxis
-
- dl.dalpha = -1/(1-alpha) - 1/alpha - (log(zedds[,1]) + log(zedds[,2])) /
- alpha^2 + 2*log(V)/alpha^2 + temp300 * dV.dalpha
- dl.dmus = temp400 * dzedds.dmus / zedds + temp300 * dV.dmus
- dl.dsigmas = -1/sigmas + temp400 * dzedds.dsigmas / zedds +
- temp300 * dV.dsigmas
- dl.dxis = log(zedds) + temp400 * dzedds.dxis / zedds + temp300 * dV.dxis
- dmus.dtheta = dtheta.deta(mmus, .llocation)
- dsigmas.dtheta = dtheta.deta(sigmas, .lscale)
- dxis.dtheta = dtheta.deta(xis, .lshape)
- if(iter == 1) {
- etanew = eta
- } else {
- derivold = derivnew
- etaold = etanew
- etanew = eta
- }
- derivnew = w * cbind(dl.dmus[,1] * dmus.dtheta[,1],
- dl.dsigmas[,1] * dsigmas.dtheta[,1],
- dl.dxis[,1] * dxi.dtheta[,1],
- dl.dmus[,2] * dmus.dtheta[,2],
- dl.dsigmas[,2] * dsigmas.dtheta[,2],
- dl.dxis[,2] * dxi.dtheta[,2],
- dl.dalpha * dtheta.deta(alpha, .ldependency))
- derivnew
- }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .ldependency=ldependency ))),
- weight=eval(substitute(expression({
- if(iter == 1) {
- wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
- } else {
- wzold = wznew
- wznew = qnupdate(w=w, wzold=wzold, dderiv=(derivold - derivnew),
- deta=etanew-etaold, M=M,
- trace=trace) # weights incorporated in args
- }
- wznew
- }), list( .llocation=llocation ))))
-}
-poissonp = function(ostatistic, dimension=2, link="loge", earg=list(),
- idensity=NULL, method.init=1) {
+ poissonp = function(ostatistic, dimension=2, link="loge", earg=list(),
+ idensity=NULL, method.init=1) {
if(!is.Numeric(ostatistic, posit=TRUE, allow=1, integ=TRUE))
stop("argument 'ostatistic' must be a single positive integer")
if(!is.Numeric(dimension, posit=TRUE, allow=1, integ=TRUE) ||
@@ -2282,9 +2078,9 @@ poissonp = function(ostatistic, dimension=2, link="loge", earg=list(),
if(!is.list(earg)) earg = list()
if(!is.Numeric(method.init, allow=1, posit=TRUE, integer=TRUE) ||
method.init > 2.5)
- stop("argument \"method.init\" must be 1 or 2")
+ stop("argument 'method.init' must be 1 or 2")
if(length(idensity) && !is.Numeric(idensity, posit=TRUE))
- stop("bad input for argument \"idensity\"")
+ stop("bad input for argument 'idensity'")
new("vglmff",
blurb=c(if(dimension==2)
@@ -2373,3 +2169,7 @@ poissonp = function(ostatistic, dimension=2, link="loge", earg=list(),
.dimension=dimension ))))
}
+
+
+
+
diff --git a/R/family.fishing.q b/R/family.fishing.q
index 4757edb..c636046 100644
--- a/R/family.fishing.q
+++ b/R/family.fishing.q
@@ -1,8 +1,8 @@
-# These functions are Copyright (C) 1998-2008 T. W. Yee All rights reserved.
+# These functions are Copyright (C) 1998-2009 T. W. Yee All rights reserved.
# "family.fishing.q"
# Last modified: 01/12/08, 02/12/08
-# Copyright Thomas W. Yee (2008)
+# Copyright Thomas W. Yee (2009)
# ====================================================================
diff --git a/R/family.functions.q b/R/family.functions.q
index 2d9b8dc..0323346 100644
--- a/R/family.functions.q
+++ b/R/family.functions.q
@@ -8,8 +8,7 @@ fill =
fill1 = fill2 = fill3 =
function(x, values=0, ncolx=ncol(x)) {
x = as.matrix(x)
- ans = matrix(values, nrow=nrow(x), ncol=ncolx)
- ans
+ matrix(values, nrow=nrow(x), ncol=ncolx, byrow=TRUE)
}
@@ -144,7 +143,7 @@ matrix.power <- function(wz, M, power, fast=TRUE)
evals <- k$values # M x n
evects <- k$vectors # M x M x n
} else {
- stop("sorry, can't handle matrix-band form yet")
+ stop("sorry, cannot handle matrix-band form yet")
k <- unlist(apply(wz,3,eigen), use.names=FALSE)
dim(k) <- c(M,M+1,n)
evals <- k[,1,,drop=TRUE] # M x n
@@ -230,7 +229,7 @@ veigen <- function(x, M)
error.code = integer(1))
if(z$error.code)
- stop(paste("Eigen algorithm (rs) returned error code", z$error.code))
+ stop("eigen algorithm (rs) returned error code ", z$error.code)
ord <- M:1
dim(z$values) <- c(M,n)
z$values <- z$values[ord,,drop=FALSE]
diff --git a/R/family.genetic.q b/R/family.genetic.q
index 575e6e5..1328647 100644
--- a/R/family.genetic.q
+++ b/R/family.genetic.q
@@ -8,634 +8,676 @@
-G1G2G3 <- function(link="logit", earg = list(), ip1=NULL, ip2=NULL, iF=NULL)
+
+
+
+
+ G1G2G3 = function(link="logit", earg = list(), ip1=NULL, ip2=NULL, iF=NULL)
{
if(mode(link) != "character" && mode(link) != "name")
- link <- as.character(substitute(link))
+ link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
new("vglmff",
- blurb=c("G1/G2/G3 phenotype\n\n",
+ blurb=c("G1-G2-G3 phenotype\n\n",
"Links: ",
namesof("p1", link, earg= earg), ", ",
namesof("p2", link, earg= earg), ", ",
- namesof("f", link, earg= earg, tag=FALSE),
- "\n",
- "Variance: multinomial type variance"),
+ namesof("f", link, earg= earg, tag=FALSE)),
+ deviance=Deviance.categorical.data.vgam,
initialize=eval(substitute(expression({
- delete.zero.colns <- FALSE
+ delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
- predictors.names <- c(namesof("p1", .link, earg= .earg, tag=FALSE),
+
+ ok.col.ny = c("G1G1","G1G2","G1G3","G2G2","G2G3","G3G3")
+ if(length(col.ny <- colnames(y)) == length(ok.col.ny) &&
+ setequal(ok.col.ny, col.ny)) {
+ if(!all(ok.col.ny == col.ny))
+ stop("the columns of the response matrix should have names ",
+ "(output of colnames()) ordered as ",
+ "c('G1G1','G1G2','G1G3','G2G2','G2G3','G3G3')")
+ }
+
+ predictors.names = c(namesof("p1", .link, earg= .earg, tag=FALSE),
namesof("p2", .link, earg= .earg, tag=FALSE),
namesof("f", .link, earg= .earg, tag=FALSE))
if(is.null(etastart)) {
- p1 <- if(is.numeric(.ip1)) rep(.ip1, n) else
+ p1 = if(is.numeric(.ip1)) rep(.ip1, n) else
sqrt(mustart[,1])
- f <- if(is.numeric(.iF)) rep(.iF, n) else
+ f = if(is.numeric(.iF)) rep(.iF, n) else
rep(0.01, n) # close to zero
- p2 <- if(is.numeric(.ip2)) rep(.ip2, n) else
+ p2 = if(is.numeric(.ip2)) rep(.ip2, n) else
mustart[,2] / (sqrt(mustart[,1]) * 2)
if(any(p1 <= 0) || any(p1 >= 1))
- stop("bad initial value for p1")
+ stop("bad initial value for 'p1'")
if(any(p2 <= 0) || any(p2 >= 1))
- stop("bad initial value for p2")
- etastart <- cbind(theta2eta(p1, .link, earg= .earg),
+ stop("bad initial value for 'p2'")
+ etastart = cbind(theta2eta(p1, .link, earg= .earg),
theta2eta(p2, .link, earg= .earg),
theta2eta(f, .link, earg= .earg))
}
- }), list(.link=link, .ip1=ip1, .ip2=ip2, .iF=iF,
- .earg=earg ))),
+ }), list( .link=link, .ip1=ip1, .ip2=ip2, .iF=iF, .earg=earg ))),
inverse=eval(substitute(function(eta, extra=NULL){
- p1 <- eta2theta(eta[,1], link=.link, earg= .earg)
- p2 <- eta2theta(eta[,2], link=.link, earg= .earg)
- p3 <- 1-p1-p2
- f <- eta2theta(eta[,3], link=.link, earg= .earg)
- cbind("G1/G1"=f*p1+(1-f)*p1^2,
- "G1/G2"=2*p1*p2*(1-f),
- "G1/G3"=2*p1*p3*(1-f),
- "G2/G2"=f*p2+(1-f)*p2^2,
- "G2/G3"=2*p2*p3*(1-f),
- "G3/G3"=f*p3+(1-f)*p3^2)
- }, list(.link=link,
- .earg=earg ))),
+ p1 = eta2theta(eta[,1], link=.link, earg= .earg)
+ p2 = eta2theta(eta[,2], link=.link, earg= .earg)
+ p3 = 1-p1-p2
+ f = eta2theta(eta[,3], link=.link, earg= .earg)
+ cbind("G1G1"=f*p1+(1-f)*p1^2,
+ "G1G2"=2*p1*p2*(1-f),
+ "G1G3"=2*p1*p3*(1-f),
+ "G2G2"=f*p2+(1-f)*p2^2,
+ "G2G3"=2*p2*p3*(1-f),
+ "G3G3"=f*p3+(1-f)*p3^2)
+ }, list( .link=link, .earg=earg ))),
last=eval(substitute(expression({
- misc$link <- c(p1= .link, p2= .link, f= .link)
+ misc$link = c(p1= .link, p2= .link, f= .link)
misc$earg = list(p1= .earg, p2= .earg, f= .earg )
- }), list(.link=link,
- .earg=earg ))),
- loglikelihood=function(mu,y,w,residuals=FALSE,eta,extra=NULL)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum((w*y)*log(mu)),
+ }), list( .link=link, .earg=earg ))),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ },
vfamily=c("G1G2G3", "vgenetic"),
deriv=eval(substitute(expression({
- p1 <- eta2theta(eta[,1], link=.link, earg= .earg)
- p2 <- eta2theta(eta[,2], link=.link, earg= .earg)
- p3 <- 1-p1-p2
- f <- eta2theta(eta[,3], link=.link, earg= .earg)
- dP1 <- cbind(f + 2*p1*(1-f), 2*(1-f)*p2, 2*(1-f)*(1-p2-2*p1), 0,
+ p1 = eta2theta(eta[,1], link=.link, earg= .earg)
+ p2 = eta2theta(eta[,2], link=.link, earg= .earg)
+ p3 = 1-p1-p2
+ f = eta2theta(eta[,3], link=.link, earg= .earg)
+ dP1 = cbind(f + 2*p1*(1-f), 2*(1-f)*p2, 2*(1-f)*(1-p2-2*p1), 0,
-2*(1-f)*p2, -f - 2*p3*(1-f))
- dP2 <- cbind(0, 2*p1*(1-f), -2*(1-f)*p1, f+2*p2*(1-f),
+ dP2 = cbind(0, 2*p1*(1-f), -2*(1-f)*p1, f+2*p2*(1-f),
2*(1-f)*(1-p1-2*p2), -f - 2*p3*(1-f))
- dP3 <- cbind(p1*(1-p1), -2*p1*p2, -2*p1*p3, p2*(1-p2), -2*p2*p3,
+ dP3 = cbind(p1*(1-p1), -2*p1*p2, -2*p1*p3, p2*(1-p2), -2*p2*p3,
p3*(1-p3))
- dl1 <- apply(y * dP1 / mu, 1, sum)
- dl2 <- apply(y * dP2 / mu, 1, sum)
- dl3 <- apply(y * dP3 / mu, 1, sum)
- dPP.deta <- dtheta.deta(cbind(p1,p2,f), link=.link, earg= .earg)
+ dl1 = rowSums(y * dP1 / mu)
+ dl2 = rowSums(y * dP2 / mu)
+ dl3 = rowSums(y * dP3 / mu)
+ dPP.deta = dtheta.deta(cbind(p1,p2,f), link=.link, earg= .earg)
w * cbind(dPP.deta[,1] * dl1, dPP.deta[,2] * dl2,
dPP.deta[,3] * dl3)
- }), list(.link=link,
- .earg=earg ))),
+ }), list( .link=link, .earg=earg ))),
weight=eval(substitute(expression({
- dPP <- array(c(dP1,dP2,dP3), c(n,6,3))
+ dPP = array(c(dP1,dP2,dP3), c(n,6,3))
- wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
+ wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
for(i1 in 1:M)
for(i2 in i1:M) {
- index <- iam(i1,i2,M)
- wz[,index] <- apply(dPP[,,i1,drop=TRUE] * dPP[,,i2,drop=TRUE] /
- mu, 1, sum) * dPP.deta[,i1] * dPP.deta[,i2]
+ index = iam(i1,i2,M)
+ wz[,index] = rowSums(dPP[,,i1,drop=TRUE] *
+ dPP[,,i2,drop=TRUE] / mu) *
+ dPP.deta[,i1] * dPP.deta[,i2]
}
w * wz
- }), list(.link=link,
- .earg=earg ))))
+ }), list( .link=link, .earg=earg ))))
}
-AAaa.nohw <- function(link="logit", earg = list(), ipA=NULL, iF=NULL)
+ AAaa.nohw = function(link="logit", earg = list(), ipA=NULL, iF=NULL)
{
if(mode(link) != "character" && mode(link) != "name")
- link <- as.character(substitute(link))
+ link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
new("vglmff",
blurb=c("AA-Aa-aa phenotype (without Hardy-Weinberg assumption)\n\n",
"Links: ",
namesof("pA", link, earg= earg), ", ",
- namesof("f", "identity", tag=FALSE),
- "\n",
- "Variance: multinomial type variance"),
+ namesof("f", "identity", tag=FALSE)),
+ deviance=Deviance.categorical.data.vgam,
initialize=eval(substitute(expression({
- delete.zero.colns <- FALSE
+ delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
- predictors.names <- c(namesof("pA", .link, earg= .earg, tag=FALSE),
+
+ ok.col.ny = c("AA","Aa","aa")
+ if(length(col.ny <- colnames(y)) == length(ok.col.ny) &&
+ setequal(ok.col.ny, col.ny)) {
+ if(!all(ok.col.ny == col.ny))
+ stop("the columns of the response matrix should have names ",
+ "(output of colnames()) ordered as c('AA','Aa','aa')")
+ }
+
+ predictors.names = c(namesof("pA", .link, earg= .earg, tag=FALSE),
namesof("f", "identity", tag=FALSE))
if(is.null(etastart)) {
- pA <- if(is.numeric(.ipA)) rep(.ipA, n) else
+ pA = if(is.numeric(.ipA)) rep(.ipA, n) else
c(sqrt(mustart[,1] - mustart[,2]/2))
- f <- if(is.numeric(.iF)) rep(.iF, n) else
+ f = if(is.numeric(.iF)) rep(.iF, n) else
rep(0.01, n) # 1- mustart[,2]/(2*pA*(1-pA))
if(any(pA <= 0) || any(pA >= 1))
- stop("bad initial value for pA")
- etastart <- cbind(theta2eta(pA, .link, earg= .earg),
+ stop("bad initial value for 'pA'")
+ etastart = cbind(theta2eta(pA, .link, earg= .earg),
theta2eta(f, "identity"))
}
- }), list(.link=link, .ipA=ipA, .iF=iF,
- .earg=earg ))),
+ }), list( .link=link, .ipA=ipA, .iF=iF, .earg=earg ))),
inverse=eval(substitute(function(eta, extra=NULL){
- pA <- eta2theta(eta[,1], link=.link, earg= .earg)
- f <- eta2theta(eta[,2], link="identity")
- cbind(AA=pA^2+pA*(1-pA)*f, Aa=2*pA*(1-pA)*(1-f),
+ pA = eta2theta(eta[,1], link=.link, earg= .earg)
+ f = eta2theta(eta[,2], link="identity")
+ cbind(AA=pA^2+pA*(1-pA)*f,
+ Aa=2*pA*(1-pA)*(1-f),
aa=(1-pA)^2 + pA*(1-pA)*f)
- }, list(.link=link,
- .earg=earg ))),
+ }, list( .link=link, .earg=earg ))),
last=eval(substitute(expression({
- misc$link <- c(pA= .link, f= "identity")
+ misc$link = c(pA= .link, f= "identity")
misc$earg = list(pA= .earg, f= list() )
- }), list(.link=link,
- .earg=earg ))),
+ }), list( .link=link, .earg=earg ))),
link=eval(substitute(function(mu, extra=NULL){
- pA <- sqrt(mu[,1] - mu[,2]/2)
- f <- 1- mu[,2]/(2*pA*(1-pA))
+ pA = sqrt(mu[,1] - mu[,2]/2)
+ f = 1- mu[,2]/(2*pA*(1-pA))
cbind(theta2eta(pA, .link, earg= .earg),
theta2eta(f, "identity"))
- }, list(.link=link,
- .earg=earg ))),
- loglikelihood=function(mu,y,w,residuals=FALSE,eta,extra=NULL)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum((w*y)*log(mu)),
+ }, list( .link=link, .earg=earg ))),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ },
vfamily=c("AAaa.nohw", "vgenetic"),
deriv=eval(substitute(expression({
- pA <- eta2theta(eta[,1], link=.link, earg= .earg)
- f <- eta2theta(eta[,2], link="identity")
- dP1 <- cbind(f + 2*pA*(1-f), 2*(1-f)*(1-2*pA), -2*(1-pA) +f*(1-2*pA))
- dP2 <- cbind(pA*(1-pA), -2*pA*(1-pA), pA*(1-pA))
- dl1 <- apply(y * dP1 / mu, 1, sum)
- dl2 <- apply(y * dP2 / mu, 1, sum)
- dPP.deta <- dtheta.deta(pA, link=.link, earg= .earg)
+ pA = eta2theta(eta[,1], link=.link, earg= .earg)
+ f = eta2theta(eta[,2], link="identity")
+ dP1 = cbind(f + 2*pA*(1-f), 2*(1-f)*(1-2*pA), -2*(1-pA) +f*(1-2*pA))
+ dP2 = cbind(pA*(1-pA), -2*pA*(1-pA), pA*(1-pA))
+ dl1 = rowSums(y * dP1 / mu)
+ dl2 = rowSums(y * dP2 / mu)
+ dPP.deta = dtheta.deta(pA, link=.link, earg= .earg)
w * cbind(dPP.deta * dl1, dl2)
- }), list(.link=link,
- .earg=earg ))),
+ }), list( .link=link, .earg=earg ))),
weight=eval(substitute(expression({
- dPP <- array(c(dP1,dP2), c(n,3,2))
- dPP.deta <- cbind(dtheta.deta(pA, link=.link, earg= .earg),
+ dPP = array(c(dP1,dP2), c(n,3,2))
+ dPP.deta = cbind(dtheta.deta(pA, link=.link, earg= .earg),
dtheta.deta(f, link="identity"))
- wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
+ wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
for(i1 in 1:M)
for(i2 in i1:M) {
- index <- iam(i1,i2,M)
- wz[,index] <- apply(dPP[,,i1,drop=T] * dPP[,,i2,drop=T] /
- mu, 1, sum) * dPP.deta[,i1] * dPP.deta[,i2]
+ index = iam(i1,i2,M)
+ wz[,index] = rowSums(dPP[,,i1,drop=TRUE] *
+ dPP[,,i2,drop=TRUE] / mu) *
+ dPP.deta[,i1] * dPP.deta[,i2]
}
w * wz
- }), list(.link=link,
- .earg=earg ))))
+ }), list( .link=link, .earg=earg ))))
}
-AB.Ab.aB.ab2 <- function(link="logit", earg = list(), init.p=NULL)
+
+
+ AB.Ab.aB.ab2 = function(link="logit", earg = list(), init.p=NULL)
{
if(mode(link) != "character" && mode(link) != "name")
- link <- as.character(substitute(link))
+ link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
new("vglmff",
blurb=c("AB-Ab-aB-ab2 phenotype\n\n",
"Links: ",
- namesof("p", link, earg= earg),
- "\n",
- "Variance: multinomial type variance"),
+ namesof("p", link, earg= earg)),
+ deviance=Deviance.categorical.data.vgam,
initialize=eval(substitute(expression({
- delete.zero.colns <- FALSE
+ delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
- predictors.names <- namesof("p", .link, earg= .earg, tag=FALSE)
-
+ predictors.names = namesof("p", .link, earg= .earg, tag=FALSE)
+
+ ok.col.ny = c("AB","Ab","aB","ab")
+ if(length(col.ny <- colnames(y)) == length(ok.col.ny) &&
+ setequal(ok.col.ny, col.ny)) {
+ if(!all(ok.col.ny == col.ny))
+ stop("the columns of the response matrix should have names ",
+ "(output of colnames()) ordered as c('AB','Ab','aB','ab')")
+ }
+
if(is.null(etastart)) {
- p.init <- if(is.numeric(.init.p)) rep(.init.p, n) else
+ p.init = if(is.numeric(.init.p)) rep(.init.p, n) else
c(1 - 2 * sqrt(mustart[,4]))
- etastart <- theta2eta(p.init, .link, earg= .earg)
+ etastart = theta2eta(p.init, .link, earg= .earg)
}
- }), list(.link=link, .init.p=init.p,
- .earg=earg ))),
+ }), list( .link=link, .init.p=init.p, .earg=earg ))),
inverse=eval(substitute(function(eta,extra=NULL){
- p <- eta2theta(eta, link=.link, earg= .earg)
- cbind("A-B-"=(2+(1-p)^2),
- "A-bb"=(1-(1-p)^2),
- "aaB-"=(1-(1-p)^2),
- "aabb"=(1-p)^2) / 4
- }, list(.link=link,
- .earg=earg ) )),
+ p = eta2theta(eta, link=.link, earg= .earg)
+ cbind("AB"=(2+(1-p)^2),
+ "Ab"=(1-(1-p)^2),
+ "aB"=(1-(1-p)^2),
+ "ab"=(1-p)^2) / 4
+ }, list( .link=link, .earg=earg ) )),
last=eval(substitute(expression({
- misc$link <- c(p = .link)
+ misc$link = c(p = .link)
misc$earg = list(p= .earg )
- }), list(.link=link,
- .earg=earg ) )),
+ }), list( .link=link, .earg=earg ) )),
link=eval(substitute(function(mu, extra=NULL){
- p <- 1 - 2 * sqrt(mu[,4])
+ p = 1 - 2 * sqrt(mu[,4])
theta2eta(p, .link, earg= .earg)
- }, list(.link=link,
- .earg=earg ) )),
- loglikelihood= function(mu,y,w,residuals=FALSE,eta,extra=NULL)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum((w*y)*log(mu)),
+ }, list( .link=link, .earg=earg ) )),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ },
vfamily=c("AB.Ab.aB.ab2", "vgenetic"),
deriv=eval(substitute(expression({
- pp <- eta2theta(eta, link=.link, earg= .earg)
- dP1 <- cbind(-0.5*(1-pp), 0.5*(1-pp), 0.5*(1-pp), -0.5*(1-pp))
- dl1 <- apply(y * dP1 / mu, 1, sum)
- dPP.deta <- dtheta.deta(pp, link=.link, earg= .earg)
+ pp = eta2theta(eta, link=.link, earg= .earg)
+ dP1 = cbind(-0.5*(1-pp), 0.5*(1-pp), 0.5*(1-pp), -0.5*(1-pp))
+ dl1 = rowSums(y * dP1 / mu)
+ dPP.deta = dtheta.deta(pp, link=.link, earg= .earg)
w * dPP.deta * dl1
- }), list(.link=link,
- .earg=earg ) )),
+ }), list( .link=link, .earg=earg ) )),
weight=eval(substitute(expression({
- wz <- apply(dP1 * dP1 / mu, 1, sum) * dPP.deta^2
+ wz = rowSums(dP1 * dP1 / mu) * dPP.deta^2
w * wz
- }), list(.link=link,
- .earg=earg ) )))
+ }), list( .link=link, .earg=earg ) )))
}
-A1A2A3 <- function(link="logit", earg = list(), ip1=NULL, ip2=NULL)
+ A1A2A3 = function(link="logit", earg = list(), ip1=NULL, ip2=NULL)
{
if(mode(link) != "character" && mode(link) != "name")
- link <- as.character(substitute(link))
+ link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
new("vglmff",
blurb=c("A1A2A3 Allele System (A1A1, A1A2, A2A2, A1A3, A2A3, A3A3)\n\n",
"Links: ",
- namesof("p1",link, earg= earg), ", ",
- namesof("p2", link, earg= earg, tag=FALSE),
- "\n",
- "Variance: multinomial type variance"),
+ namesof("p1", link, earg= earg), ", ",
+ namesof("p2", link, earg= earg, tag=FALSE)),
+ deviance=Deviance.categorical.data.vgam,
initialize=eval(substitute(expression({
- delete.zero.colns <- FALSE
+ delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
- predictors.names <- c(namesof("pA", .link, earg= .earg,tag=FALSE),
- namesof("pB", .link, earg= .earg,tag=FALSE))
+
+ ok.col.ny = c("A1A1","A1A2","A2A2","A1A3","A2A3","A3A3")
+ if(length(col.ny <- colnames(y)) == length(ok.col.ny) &&
+ setequal(ok.col.ny, col.ny)) {
+ if(!all(ok.col.ny == col.ny))
+ stop("the columns of the response matrix should have names ",
+ "(output of colnames()) ordered as ",
+ "c('A1A1','A1A2','A2A2','A1A3','A2A3','A3A3')")
+ }
+
+ predictors.names = c(namesof("pA", .link, earg= .earg, tag=FALSE),
+ namesof("pB", .link, earg= .earg, tag=FALSE))
if(is.null(etastart)) {
- p1 <- if(is.numeric(.ip1)) rep(.ip1, n) else
+ p1 = if(is.numeric(.ip1)) rep(.ip1, n) else
c(sqrt(mustart[,1]))
- p2 <- if(is.numeric(.ip2)) rep(.ip2, n) else
+ p2 = if(is.numeric(.ip2)) rep(.ip2, n) else
c(sqrt(mustart[,3]))
- etastart <- cbind(theta2eta(p1, .link, earg= .earg),
+ etastart = cbind(theta2eta(p1, .link, earg= .earg),
theta2eta(p2, .link, earg= .earg))
}
- }), list(.link=link, .ip1=ip1, .ip2=ip2,
- .earg=earg ))),
+ }), list( .link=link, .ip1=ip1, .ip2=ip2, .earg=earg ))),
inverse=eval(substitute(function(eta, extra=NULL){
- p1 <- eta2theta(eta[,1], link=.link, earg= .earg)
- p2 <- eta2theta(eta[,2], link=.link, earg= .earg)
- qq <- 1-p1-p2
+ p1 = eta2theta(eta[,1], link=.link, earg= .earg)
+ p2 = eta2theta(eta[,2], link=.link, earg= .earg)
+ qq = 1-p1-p2
cbind(A1A1=p1*p1, A1A2=2*p1*p2, A2A2=p2*p2, A1A3=2*p1*qq,
A2A3=2*p2*qq, A3A3=qq*qq)
- }, list(.link=link,
- .earg=earg ))),
+ }, list( .link=link, .earg=earg ))),
last=eval(substitute(expression({
- misc$link <- c(p1= .link, p2= .link)
+ misc$link = c(p1= .link, p2= .link)
misc$earg = list(p1= .earg, p2= .earg )
- }), list(.link=link,
- .earg=earg ))),
+ }), list( .link=link, .earg=earg ))),
link=eval(substitute(function(mu, extra=NULL){
- p1 <- sqrt(mu[,1])
- p2 <- sqrt(mu[,3])
- qq <- 1-p1-p2
+ p1 = sqrt(mu[,1])
+ p2 = sqrt(mu[,3])
+ qq = 1-p1-p2
cbind(theta2eta(p1, .link, earg= .earg),
theta2eta(p2, .link, earg= .earg))
- }, list(.link=link,
- .earg=earg ))),
- loglikelihood=function(mu,y,w,residuals=FALSE,eta,extra=NULL)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum((w*y)*log(mu)),
+ }, list( .link=link, .earg=earg ))),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ },
vfamily=c("A1A2A3", "vgenetic"),
deriv=eval(substitute(expression({
- p1 <- eta2theta(eta[,1], link=.link, earg= .earg)
- p2 <- eta2theta(eta[,2], link=.link, earg= .earg)
- dl.dp1 <- (2*y[,1]+y[,2]+y[,4])/p1 - (2*y[,6]+y[,4]+y[,5])/(1-p1-p2)
- dl.dp2 <- (2*y[,3]+y[,2]+y[,5])/p2 - (2*y[,6]+y[,4]+y[,5])/(1-p1-p2)
- dp1.deta <- dtheta.deta(p1, link=.link, earg= .earg)
- dp2.deta <- dtheta.deta(p2, link=.link, earg= .earg)
+ p1 = eta2theta(eta[,1], link=.link, earg= .earg)
+ p2 = eta2theta(eta[,2], link=.link, earg= .earg)
+ dl.dp1 = (2*y[,1]+y[,2]+y[,4])/p1 - (2*y[,6]+y[,4]+y[,5])/(1-p1-p2)
+ dl.dp2 = (2*y[,3]+y[,2]+y[,5])/p2 - (2*y[,6]+y[,4]+y[,5])/(1-p1-p2)
+ dp1.deta = dtheta.deta(p1, link=.link, earg= .earg)
+ dp2.deta = dtheta.deta(p2, link=.link, earg= .earg)
w * cbind(dl.dp1 * dp1.deta, dl.dp2 * dp2.deta)
- }), list(.link=link,
- .earg=earg ))),
+ }), list( .link=link, .earg=earg ))),
weight=eval(substitute(expression({
- qq <- 1-p1-p2
- wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
- ed2l.dp12 <- 2 * (1/p1 + 1/qq)
- ed2l.dp22 <- 2 * (1/p2 + 1/qq)
- ed2l.dp1dp2 <- 2 / qq
- wz[,iam(1,1,M)] <- dp1.deta^2 * ed2l.dp12
- wz[,iam(2,2,M)] <- dp2.deta^2 * ed2l.dp22
- wz[,iam(1,2,M)] <- ed2l.dp1dp2 * dp1.deta * dp2.deta
+ qq = 1-p1-p2
+ wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
+ ed2l.dp12 = 2 * (1/p1 + 1/qq)
+ ed2l.dp22 = 2 * (1/p2 + 1/qq)
+ ed2l.dp1dp2 = 2 / qq
+ wz[,iam(1,1,M)] = dp1.deta^2 * ed2l.dp12
+ wz[,iam(2,2,M)] = dp2.deta^2 * ed2l.dp22
+ wz[,iam(1,2,M)] = ed2l.dp1dp2 * dp1.deta * dp2.deta
w * wz
- }), list(.link=link,
- .earg=earg ))))
+ }), list( .link=link, .earg=earg ))))
}
-MNSs <- function(link="logit", earg = list(), imS=NULL, ims=NULL, inS=NULL)
+ MNSs = function(link="logit", earg = list(), imS=NULL, ims=NULL, inS=NULL)
{
if(mode(link) != "character" && mode(link) != "name")
- link <- as.character(substitute(link))
+ link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
new("vglmff",
blurb=c("MNSs Blood Group System (MS-Ms-MNS-MNs-NS-Ns phenotype)\n\n",
"Links: ",
- namesof("mS",link, earg= earg), ", ",
- namesof("ms",link, earg= earg), ", ",
- namesof("nS", link, earg= earg, tag=FALSE),
- "\n",
- "Variance: multinomial type variance"),
+ namesof("mS", link, earg= earg), ", ",
+ namesof("ms", link, earg= earg), ", ",
+ namesof("nS", link, earg= earg, tag=FALSE)),
+ deviance=Deviance.categorical.data.vgam,
initialize=eval(substitute(expression({
- delete.zero.colns <- FALSE
+ delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
+
+ ok.col.ny = c("MS","Ms","MNS","MNs","NS","Ns")
+ if(length(col.ny <- colnames(y)) == length(ok.col.ny) &&
+ setequal(ok.col.ny, col.ny)) {
+ if(!all(ok.col.ny == col.ny))
+ stop("the columns of the response matrix should have names ",
+ "(output of colnames()) ordered as ",
+ "c('MS','Ms','MNS','MNs','NS','Ns')")
+ }
+
predictors.names <-
- c(namesof("mS", .link, earg= .earg,tag=FALSE),
- namesof("ms", .link, earg= .earg,tag=FALSE),
- namesof("nS", .link, earg= .earg,tag=FALSE))
+ c(namesof("mS", .link, earg= .earg, tag=FALSE),
+ namesof("ms", .link, earg= .earg, tag=FALSE),
+ namesof("nS", .link, earg= .earg, tag=FALSE))
if(is.null(etastart)) {
- ms <- if(is.numeric(.ims)) rep(.ims, n) else
+ ms = if(is.numeric(.ims)) rep(.ims, n) else
c(sqrt(mustart[,2]))
- ns <- c(sqrt(mustart[,6]))
- nS <- if(is.numeric(.inS)) rep(.inS, n) else
+ ns = c(sqrt(mustart[,6]))
+ nS = if(is.numeric(.inS)) rep(.inS, n) else
c(-ns + sqrt(ns^2 + mustart[,5])) # Solve a quadratic eqn
- mS <- if(is.numeric(.imS)) rep(.imS, n) else
+ mS = if(is.numeric(.imS)) rep(.imS, n) else
1-ns-ms-nS
- etastart <- cbind(theta2eta(mS, .link, earg= .earg),
- theta2eta(ms, .link, earg= .earg),
- theta2eta(nS, .link, earg= .earg))
+ etastart = cbind(theta2eta(mS, .link, earg= .earg),
+ theta2eta(ms, .link, earg= .earg),
+ theta2eta(nS, .link, earg= .earg))
}
- }), list(.link=link, .imS=imS, .ims=ims, .inS=inS,
- .earg=earg ))),
+ }), list( .link=link, .imS=imS, .ims=ims, .inS=inS, .earg=earg ))),
inverse=eval(substitute(function(eta, extra=NULL){
- mS <- eta2theta(eta[,1], link=.link, earg= .earg)
- ms <- eta2theta(eta[,2], link=.link, earg= .earg)
- nS <- eta2theta(eta[,3], link=.link, earg= .earg)
- ns <- 1-mS-ms-nS
+ mS = eta2theta(eta[,1], link=.link, earg= .earg)
+ ms = eta2theta(eta[,2], link=.link, earg= .earg)
+ nS = eta2theta(eta[,3], link=.link, earg= .earg)
+ ns = 1-mS-ms-nS
cbind(MS=mS^2+2*mS*ms, Ms=ms^2, MNS=2*(mS*nS+ms*nS+mS*ns),
MNs=2*ms*ns, NS=nS^2 + 2*nS*ns, Ns=ns^2)
- }, list(.link=link,
- .earg=earg ))),
+ }, list( .link=link, .earg=earg ))),
last=eval(substitute(expression({
- misc$link <- c(mS= .link, ms= .link, nS= .link)
+ misc$link = c(mS= .link, ms= .link, nS= .link)
misc$earg = list(mS= .earg, ms= .earg, nS= .earg )
- }), list(.link=link,
- .earg=earg ))),
+ }), list( .link=link, .earg=earg ))),
link=eval(substitute(function(mu, extra=NULL){
- ms <- sqrt(mu[,2])
- ns <- sqrt(mu[,6])
- nS <- c(-nS + sqrt(nS^2 + mu[,5]))
- mS <- 1-ns-ms-nS
+ ms = sqrt(mu[,2])
+ ns = sqrt(mu[,6])
+ nS = c(-nS + sqrt(nS^2 + mu[,5]))
+ mS = 1-ns-ms-nS
cbind(theta2eta(mS, .link, earg= .earg),
theta2eta(ms, .link, earg= .earg),
theta2eta(nS, .link, earg= .earg))
- }, list(.link=link,
- .earg=earg ))),
- loglikelihood=function(mu,y,w,residuals=FALSE,eta,extra=NULL)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum((w*y)*log(mu)),
+ }, list( .link=link, .earg=earg ))),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ },
vfamily=c("MNSs", "vgenetic"),
deriv=eval(substitute(expression({
- mS <- eta2theta(eta[,1], link=.link, earg= .earg)
- ms <- eta2theta(eta[,2], link=.link, earg= .earg)
- nS <- eta2theta(eta[,3], link=.link, earg= .earg)
- ns <- 1-mS-ms-nS
- dP1 <- cbind(2*(mS+ms), 0, 2*(nS+ns-mS), -2*ms, -2*nS, -2*ns)
- dP2 <- cbind(2*mS, 2*ms, 2*(nS-mS), 2*(ns-ms), -2*nS, -2*ns)
- dP3 <- cbind(0, 0, 2*ms, -2*ms, 2*ns, -2*ns) # n x 6
- dl1 <- apply(y * dP1 / mu, 1, sum)
- dl2 <- apply(y * dP2 / mu, 1, sum)
- dl3 <- apply(y * dP3 / mu, 1, sum)
- dPP.deta <- dtheta.deta(cbind(mS,ms,nS), link=.link, earg= .earg)
+ mS = eta2theta(eta[,1], link=.link, earg= .earg)
+ ms = eta2theta(eta[,2], link=.link, earg= .earg)
+ nS = eta2theta(eta[,3], link=.link, earg= .earg)
+ ns = 1-mS-ms-nS
+ dP1 = cbind(2*(mS+ms), 0, 2*(nS+ns-mS), -2*ms, -2*nS, -2*ns)
+ dP2 = cbind(2*mS, 2*ms, 2*(nS-mS), 2*(ns-ms), -2*nS, -2*ns)
+ dP3 = cbind(0, 0, 2*ms, -2*ms, 2*ns, -2*ns) # n x 6
+ dl1 = rowSums(y * dP1 / mu)
+ dl2 = rowSums(y * dP2 / mu)
+ dl3 = rowSums(y * dP3 / mu)
+ dPP.deta = dtheta.deta(cbind(mS,ms,nS), link=.link, earg= .earg)
w * dPP.deta * cbind(dl1, dl2, dl3)
- }), list(.link=link,
- .earg=earg ))),
+ }), list( .link=link, .earg=earg ))),
weight=eval(substitute(expression({
- dPP <- array(c(dP1,dP2,dP3), c(n,6,3))
- wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
+ dPP = array(c(dP1,dP2,dP3), c(n,6,3))
+ wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
for(i1 in 1:M)
for(i2 in i1:M) {
- index <- iam(i1,i2,M)
- wz[,index] <- apply(dPP[,,i1,drop=TRUE] * dPP[,,i2,drop=TRUE] /
- mu, 1, sum) * dPP.deta[,i1] * dPP.deta[,i2]
+ index = iam(i1,i2,M)
+ wz[,index] = rowSums(dPP[,,i1,drop=TRUE] *
+ dPP[,,i2,drop=TRUE] / mu) *
+ dPP.deta[,i1] * dPP.deta[,i2]
}
w * wz
- }), list(.link=link,
- .earg=earg ))))
+ }), list( .link=link, .earg=earg ))))
}
-ABO <- function(link="logit", earg = list(), ir=NULL, ip=NULL)
+
+
+
+
+ ABO = function(link="logit", earg = list(), ipA=NULL, ipO=NULL)
{
if(mode(link) != "character" && mode(link) != "name")
- link <- as.character(substitute(link))
+ link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
new("vglmff",
blurb=c("ABO Blood Group System (A-B-AB-O phenotype)\n\n",
"Links: ",
- namesof("p",link, earg= earg), ", ",
- namesof("q", link, earg= earg, tag=FALSE),
- "\n",
- "Variance: multinomial type variance"),
+ namesof("pA", link, earg= earg), ", ",
+ namesof("pB", link, earg= earg, tag=FALSE)),
+ deviance=Deviance.categorical.data.vgam,
initialize=eval(substitute(expression({
- delete.zero.colns <- FALSE
+ delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
+
+ ok.col.ny = c("A","B","AB","O")
+ if(length(col.ny <- colnames(y)) == length(ok.col.ny) &&
+ setequal(ok.col.ny, col.ny)) {
+ if(!all(ok.col.ny == col.ny))
+ stop("the columns of the response matrix should have names ",
+ "(output of colnames()) ordered as c('A','B','AB','O')")
+ }
+
predictors.names <-
- c(namesof("pA", .link, earg= .earg,tag=FALSE),
- namesof("pB", .link, earg= .earg,tag=FALSE))
+ c(namesof("pA", .link, earg= .earg, tag=FALSE),
+ namesof("pB", .link, earg= .earg, tag=FALSE))
if(is.null(etastart)) {
- r <- if(is.numeric(.ir)) rep(.ir, n) else
- c(sqrt(mustart[,4]))
- p <- if(is.numeric(.ip)) rep(.ip, n) else
+ pO = if(is.Numeric( .ipO)) rep( .ipO, n) else c(sqrt(mustart[,4]))
+ pA = if(is.Numeric( .ipA)) rep( .ipA, n) else
c(1-sqrt(mustart[,2]+mustart[,4]))
- q <- 1-p-r
- etastart <- cbind(theta2eta(p, .link, earg= .earg),
- theta2eta(q, .link, earg= .earg))
+ pB = 1-pA-pO
+ etastart = cbind(theta2eta(pA, .link, earg= .earg),
+ theta2eta(pB, .link, earg= .earg))
}
- }), list(.link=link, .ir=ir, .ip=ip,
- .earg=earg ))),
+ }), list( .link=link, .ipO=ipO, .ipA=ipA, .earg=earg ))),
inverse=eval(substitute(function(eta, extra=NULL){
- p <- eta2theta(eta[,1], link=.link, earg= .earg)
- q <- eta2theta(eta[,2], link=.link, earg= .earg)
- r <- 1-p-q
- cbind(A=p*(p+2*r), B=q*(q+2*r), AB=2*p*q, O=r*r)
- }, list(.link=link,
- .earg=earg ))),
+ pA = eta2theta(eta[,1], link=.link, earg= .earg)
+ pB = eta2theta(eta[,2], link=.link, earg= .earg)
+ pO = 1-pA-pB
+ cbind(A=pA*(pA+2*pO), B=pB*(pB+2*pO), AB=2*pA*pB, O=pO*pO)
+ }, list( .link=link, .earg=earg ))),
last=eval(substitute(expression({
- misc$link <- c(p = .link, q = .link)
- misc$earg = list(p= .earg, q= .earg )
- }), list(.link=link,
- .earg=earg ))),
+ misc$link = c(pA = .link, pB = .link)
+ misc$earg = list(pA = .earg, pB = .earg )
+ }), list( .link=link, .earg=earg ))),
link=eval(substitute(function(mu, extra=NULL){
- r <- sqrt(mu[,4])
- p1 <- ( (1-r)+sqrt((1-r)^2 + 2*mu[,3]) )/2
- p2 <- ( (1-r)-sqrt((1-r)^2 + 2*mu[,3]) )/2
- index <- p2 >= 0 & p2 <= 1
- p <- p1
- p[index] <- p2[index]
- q <- 1-p-r
- cbind(theta2eta(p, .link, earg= .earg),
- theta2eta(q, .link, earg= .earg))
- }, list(.link=link,
- .earg=earg ))),
-
- loglikelihood=function(mu,y,w,residuals=FALSE,eta,extra=NULL)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum((w*y)*log(mu)),
+ pO = sqrt(mu[,4])
+ p1 = ( (1-pO)+sqrt((1-pO)^2 + 2*mu[,3]) )/2
+ p2 = ( (1-pO)-sqrt((1-pO)^2 + 2*mu[,3]) )/2
+ index = p2 >= 0 & p2 <= 1
+ pA = p1
+ pA[index] = p2[index]
+ pB = 1-pA-pO
+ cbind(theta2eta(pA, .link, earg= .earg),
+ theta2eta(pB, .link, earg= .earg))
+ }, list( .link=link, .earg=earg ))),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ },
vfamily=c("ABO", "vgenetic"),
deriv=eval(substitute(expression({
- p <- eta2theta(eta[,1], link=.link, earg= .earg)
- q <- eta2theta(eta[,2], link=.link, earg= .earg)
- r <- 1-p-q
- pbar <- 2*r+p
- qbar <- 2*r+q
- na <- y[,1]
- nb <- y[,2]
- nab <- y[,3]
- no <- y[,4]
- dl.dp <- (na+nab)/p - na/pbar - 2*nb/qbar - 2*no/r
- dl.dq <- (nb+nab)/q - 2*na/pbar - nb/qbar - 2*no/r
- dp.deta <- dtheta.deta(p, link=.link, earg= .earg)
- dq.deta <- dtheta.deta(q, link=.link, earg= .earg)
+ p = eta2theta(eta[,1], link=.link, earg= .earg)
+ q = eta2theta(eta[,2], link=.link, earg= .earg)
+ r = 1-p-q
+ pbar = 2*r+p
+ qbar = 2*r+q
+ na = y[,1]
+ nb = y[,2]
+ nab = y[,3]
+ no = y[,4]
+ dl.dp = (na+nab)/p - na/pbar - 2*nb/qbar - 2*no/r
+ dl.dq = (nb+nab)/q - 2*na/pbar - nb/qbar - 2*no/r
+ dp.deta = dtheta.deta(p, link=.link, earg= .earg)
+ dq.deta = dtheta.deta(q, link=.link, earg= .earg)
w * cbind(dl.dp * dp.deta, dl.dq * dq.deta)
- }), list(.link=link,
- .earg=earg ))),
+ }), list( .link=link, .earg=earg ))),
weight=eval(substitute(expression({
- wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
- ed2l.dp2 <- w * (1 + 2/p + 4*q/qbar + p/pbar)
- ed2l.dq2 <- w * (1 + 2/q + 4*p/pbar + q/qbar)
- ed2l.dpdq <- 2 * w * (1 + q/qbar + p/pbar)
- wz[,iam(1,1,M)] <- dp.deta^2 * ed2l.dp2
- wz[,iam(2,2,M)] <- dq.deta^2 * ed2l.dq2
- wz[,iam(1,2,M)] <- ed2l.dpdq * dp.deta * dq.deta
- wz
- }), list(.link=link,
- .earg=earg ))))
+ wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
+ ed2l.dp2 = (1 + 2/p + 4*q/qbar + p/pbar)
+ ed2l.dq2 = (1 + 2/q + 4*p/pbar + q/qbar)
+ ed2l.dpdq = 2 * (1 + q/qbar + p/pbar)
+ wz[,iam(1,1,M)] = dp.deta^2 * ed2l.dp2
+ wz[,iam(2,2,M)] = dq.deta^2 * ed2l.dq2
+ wz[,iam(1,2,M)] = ed2l.dpdq * dp.deta * dq.deta
+ w * wz
+ }), list( .link=link, .earg=earg ))))
}
-AB.Ab.aB.ab <- function(link="logit", earg = list(), init.p=NULL)
+ AB.Ab.aB.ab = function(link="logit", earg = list(), init.p=NULL)
{
if(mode(link) != "character" && mode(link) != "name")
- link <- as.character(substitute(link))
+ link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
new("vglmff",
blurb=c("AB-Ab-aB-ab phenotype\n\n",
- "Links: ", namesof("p", link, earg= earg, tag=TRUE), "\n",
- "Variance: multinomial type variance"),
-
+ "Links: ", namesof("p", link, earg= earg, tag=TRUE)),
+ deviance=Deviance.categorical.data.vgam,
initialize=eval(substitute(expression({
- delete.zero.colns <- FALSE
+ delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
- predictors.names <- namesof("p", .link, earg= .earg, tag=FALSE)
+
+ ok.col.ny = c("AB","Ab","aB","ab")
+ if(length(col.ny <- colnames(y)) == length(ok.col.ny) &&
+ setequal(ok.col.ny, col.ny)) {
+ if(!all(ok.col.ny == col.ny))
+ stop("the columns of the response matrix should have names ",
+ "(output of colnames()) ordered as c('AB','Ab','aB','ab')")
+ }
+
+ predictors.names = namesof("p", .link, earg= .earg, tag=FALSE)
if(is.null(etastart)) {
- p <- if(is.numeric(.init.p)) rep(.init.p,n) else
+ p = if(is.numeric(.init.p)) rep(.init.p,n) else
c(sqrt(4*mustart[,4]))
- etastart <- cbind(theta2eta(p, .link, earg= .earg))
+ etastart = cbind(theta2eta(p, .link, earg= .earg))
}
- }), list(.link=link, .init.p=init.p,
- .earg=earg ))),
+ }), list( .link=link, .init.p=init.p, .earg=earg ))),
inverse=eval(substitute(function(eta,extra=NULL){
- p <- eta2theta(eta, link=.link, earg= .earg)
- pp4 <- p*p/4
+ p = eta2theta(eta, link=.link, earg= .earg)
+ pp4 = p*p/4
cbind(AB=0.5+pp4, Ab=0.25-pp4, aB=0.25-pp4, ab=pp4)
- }, list(.link=link,
- .earg=earg ))),
+ }, list( .link=link, .earg=earg ))),
last=eval(substitute(expression({
- misc$link <- c(p = .link)
+ misc$link = c(p = .link)
misc$earg = list(p= .earg )
- }), list(.link=link,
- .earg=earg ))),
+ }), list( .link=link, .earg=earg ))),
link=eval(substitute(function(mu, extra=NULL){
- p <- sqrt(4* mu[,4])
+ p = sqrt(4* mu[,4])
theta2eta(p, .link, earg= .earg)
- }, list(.link=link,
- .earg=earg ))),
-
- loglikelihood=function(mu,y,w,residuals=FALSE,eta,extra=NULL)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum((w*y)*log(mu)),
+ }, list( .link=link, .earg=earg ))),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ },
vfamily=c("AB.Ab.aB.ab", "vgenetic"),
deriv=eval(substitute(expression({
- pp <- eta2theta(eta, link=.link, earg= .earg)
- p2 <- pp*pp
- nAB <- w*y[,1]
- nAb <- w*y[,2]
- naB <- w*y[,3]
- nab <- w*y[,4]
- dl.dp <- 8 * pp * (nAB/(2+p2) - (nAb+naB)/(1-p2) + nab/p2)
- dp.deta <- dtheta.deta(pp, link=.link, earg= .earg)
+ pp = eta2theta(eta, link=.link, earg= .earg)
+ p2 = pp*pp
+ nAB = w*y[,1]
+ nAb = w*y[,2]
+ naB = w*y[,3]
+ nab = w*y[,4]
+ dl.dp = 8 * pp * (nAB/(2+p2) - (nAb+naB)/(1-p2) + nab/p2)
+ dp.deta = dtheta.deta(pp, link=.link, earg= .earg)
dl.dp * dp.deta
- }), list(.link=link,
- .earg=earg ))),
+ }), list( .link=link, .earg=earg ))),
weight=eval(substitute(expression({
- ed2l.dp2 <- 4 * w * p2 * (1/(2+p2) + 2/(1-p2) + 1/p2)
- wz <- cbind((dp.deta^2) * ed2l.dp2)
+ ed2l.dp2 = 4 * w * p2 * (1/(2+p2) + 2/(1-p2) + 1/p2)
+ wz = cbind((dp.deta^2) * ed2l.dp2)
wz
- }), list(.link=link,
- .earg=earg ))))
+ }), list( .link=link, .earg=earg ))))
}
-AA.Aa.aa <- function(link="logit", earg = list(), init.pA=NULL)
+ AA.Aa.aa = function(link="logit", earg = list(), init.pA=NULL)
{
if(mode(link) != "character" && mode(link) != "name")
- link <- as.character(substitute(link))
+ link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
new("vglmff",
blurb=c("AA-Aa-aa phenotype\n\n",
- "Links: ", namesof("pA",link, earg= earg), "\n",
- "Variance: multinomial type variance"),
+ "Links: ", namesof("pA", link, earg= earg)),
+ deviance=Deviance.categorical.data.vgam,
initialize=eval(substitute(expression({
- delete.zero.colns <- FALSE
+ delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
- predictors.names <- namesof("pA", .link, earg= .earg, tag=FALSE)
+
+ ok.col.ny = c("AA","Aa","aa")
+ if(length(col.ny <- colnames(y)) == length(ok.col.ny) &&
+ setequal(ok.col.ny, col.ny)) {
+ if(!all(ok.col.ny == col.ny))
+ stop("the columns of the response matrix should have names ",
+ "(output of colnames()) ordered as c('AA','Aa','aa')")
+ }
+
+ predictors.names = namesof("pA", .link, earg= .earg, tag=FALSE)
if(is.null(etastart)) {
- pA <- if(is.numeric(.init.pA)) rep(.init.pA, n) else
+ pA = if(is.numeric(.init.pA)) rep(.init.pA, n) else
c(sqrt(mustart[,1]))
- etastart <- cbind(theta2eta(pA, .link, earg= .earg))
+ etastart = cbind(theta2eta(pA, .link, earg= .earg))
}
- }), list(.link=link, .init.pA=init.pA,
- .earg=earg ))),
+ }), list( .link=link, .init.pA=init.pA, .earg=earg ))),
inverse=eval(substitute(function(eta,extra=NULL){
- pA <- eta2theta(eta, link=.link, earg= .earg)
- pp <- pA*pA
+ pA = eta2theta(eta, link=.link, earg= .earg)
+ pp = pA*pA
cbind(AA=pp, Aa=2*pA*(1-pA), aa=(1-pA)^2)
- }, list(.link=link,
- .earg=earg ))),
+ }, list( .link=link, .earg=earg ))),
last=eval(substitute(expression({
- misc$link <- c("pA" = .link)
+ misc$link = c("pA" = .link)
misc$earg = list("pA" = .earg )
- }), list(.link=link,
- .earg=earg ))),
+ }), list( .link=link, .earg=earg ))),
link=eval(substitute(function(mu, extra=NULL){
- pA <- sqrt(mu[,1])
+ pA = sqrt(mu[,1])
theta2eta(pA, .link, earg= .earg)
- }, list(.link=link,
- .earg=earg ))),
- loglikelihood=function(mu,y,w,residuals=FALSE,eta,extra=NULL)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum((w*y)*log(mu)),
+ }, list( .link=link, .earg=earg ))),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ },
vfamily=c("AA.Aa.aa", "vgenetic"),
deriv=eval(substitute(expression({
- pA <- eta2theta(eta, link=.link, earg= .earg)
- nAA <- w*y[,1]
- nAa <- w*y[,2]
- naa <- w*y[,3]
- dl.dpA <- (2*nAA+nAa)/pA - (nAa+2*naa)/(1-pA)
- dpA.deta <- dtheta.deta(pA, link=.link, earg= .earg)
+ pA = eta2theta(eta, link=.link, earg= .earg)
+ nAA = w*y[,1]
+ nAa = w*y[,2]
+ naa = w*y[,3]
+ dl.dpA = (2*nAA+nAa)/pA - (nAa+2*naa)/(1-pA)
+ dpA.deta = dtheta.deta(pA, link=.link, earg= .earg)
dl.dpA * dpA.deta
- }), list(.link=link,
- .earg=earg ))),
+ }), list( .link=link, .earg=earg ))),
weight=eval(substitute(expression({
- d2l.dp2 <- (2*nAA+nAa)/pA^2 + (nAa+2*naa)/(1-pA)^2
- wz <- cbind((dpA.deta^2) * d2l.dp2)
+ d2l.dp2 = (2*nAA+nAa)/pA^2 + (nAa+2*naa)/(1-pA)^2
+ wz = cbind((dpA.deta^2) * d2l.dp2)
wz
- }), list(.link=link,
- .earg=earg ))))
+ }), list( .link=link, .earg=earg ))))
}
+
+
+
diff --git a/R/family.glmgam.q b/R/family.glmgam.q
index 78c06ce..48c9171 100644
--- a/R/family.glmgam.q
+++ b/R/family.glmgam.q
@@ -8,10 +8,10 @@
-binomialff <- function(link="logit", earg=list(),
+
+ binomialff = function(link="logit", earg=list(),
dispersion=1, mv=FALSE, onedpar=!mv,
- parallel = FALSE,
- zero=NULL)
+ parallel = FALSE, zero=NULL)
{
@@ -33,26 +33,9 @@ binomialff <- function(link="logit", earg=list(),
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .parallel=parallel, .zero=zero ))),
deviance=function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- devy <- y
- nz <- y != 0
- devy[nz] <- y[nz] * log(y[nz])
- nz <- (1 - y) != 0
- devy[nz] <- devy[nz] + (1 - y[nz]) * log1p(-y[nz])
- devmu <- y * log(mu) + (1 - y) * log1p(-mu)
- if(any(small <- mu * (1 - mu) < .Machine$double.eps)) {
- warning("fitted values close to 0 or 1")
- smu <- mu[small]
- sy <- y[small]
- smu <- ifelse(smu < .Machine$double.eps,
- .Machine$double.eps, smu)
- onemsmu <- ifelse((1 - smu) < .Machine$
- double.eps, .Machine$double.eps, 1 - smu)
- devmu[small] <- sy * log(smu) + (1 - sy) * log(onemsmu)
- }
- devi <- 2 * (devy - devmu)
- if(residuals) {
- sign(y - mu) * sqrt(abs(devi) * w)
- } else sum(w * devi)
+ Deviance.categorical.data.vgam(mu=cbind(mu,1-mu), y=cbind(y,1-y),
+ w=w, residuals = residuals,
+ eta=eta, extra=extra)
},
initialize=eval(substitute(expression({
if(is.R()) {
@@ -129,8 +112,8 @@ binomialff <- function(link="logit", earg=list(),
dpar = rep(as.numeric(NA), len=M)
temp87 = cbind(temp87)
nrow.mu = if(is.matrix(mu)) nrow(mu) else length(mu)
- for(i in 1:M)
- dpar[i] = sum(temp87[,i]) / (nrow.mu - ncol(x))
+ for(ii in 1:M)
+ dpar[ii] = sum(temp87[,ii]) / (nrow.mu - ncol(x))
if(is.matrix(y) && length(dimnames(y)[[2]])==length(dpar))
names(dpar) = dimnames(y)[[2]]
} else
@@ -154,8 +137,9 @@ binomialff <- function(link="logit", earg=list(),
theta2eta(mu, .link, earg = .earg )
, list( .link=link, .earg = earg ))),
loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- if(residuals) w*(y/mu - (1-y)/(1-mu)) else
- sum(w*(y*log(mu) + (1-y)*log1p(-mu)))
+ if(residuals) w*(y/mu - (1-y)/(1-mu)) else {
+ sum(dbinom(x=w*y, size=w, prob=mu, log=TRUE))
+ }
},
vfamily=c("binomialff", "vcategorical"),
deriv=eval(substitute(expression({
@@ -193,8 +177,7 @@ binomialff <- function(link="logit", earg=list(),
-gammaff <- function(link="nreciprocal", earg=list(),
- dispersion=0)
+ gammaff = function(link="nreciprocal", earg=list(), dispersion=0)
{
estimated.dispersion <- dispersion==0
if(mode(link )!= "character" && mode(link )!= "name")
@@ -274,7 +257,7 @@ gammaff <- function(link="nreciprocal", earg=list(),
-inverse.gaussianff <- function(link="natural.ig", dispersion=0)
+ inverse.gaussianff = function(link="natural.ig", dispersion=0)
{
estimated.dispersion <- dispersion==0
warning("@deviance() not finished")
@@ -337,17 +320,20 @@ inverse.gaussianff <- function(link="natural.ig", dispersion=0)
-dinv.gaussian = function(x, mu, lambda) {
- if(any(mu <=0)) stop("mu must be positive")
- if(any(lambda <=0)) stop("lambda must be positive")
- ans = x
- mu = rep(mu, len=length(x))
- lambda = rep(lambda, len=length(x))
- ans[x <= 0] = 0
- bb = x > 0
- ans[bb] = sqrt(lambda[bb]/(2*pi*x[bb]^3)) *
- exp(-lambda[bb]*(x[bb]-mu[bb])^2/(2*mu[bb]^2*x[bb]))
- ans
+dinv.gaussian = function(x, mu, lambda, log=FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ LLL = max(length(x), length(mu), length(lambda))
+ x = rep(x, len=LLL); mu = rep(mu, len=LLL); lambda = rep(lambda, len=LLL)
+ logdensity = rep(log(0), len=LLL)
+ xok = (x > 0)
+ logdensity[xok] = 0.5 * log(lambda[xok] / (2 * pi * x[xok]^3)) -
+ lambda[xok] * (x[xok]-mu[xok])^2 / (2*mu[xok]^2 * x[xok])
+ logdensity[mu <= 0] = NaN
+ logdensity[lambda <= 0] = NaN
+ if(log.arg) logdensity else exp(logdensity)
}
@@ -367,24 +353,23 @@ pinv.gaussian = function(q, mu, lambda) {
rinv.gaussian = function(n, mu, lambda) {
- if(!is.Numeric(n, positive=TRUE, integer=TRUE, allow=1))
- stop("'n' must be a single positive integer")
- if(!is.Numeric(mu, positive=TRUE))
- stop("'mu' must have positive values only")
- if(!is.Numeric(lambda, positive=TRUE))
- stop("'lambda' must have positive values only")
- mu = rep(mu, len=n)
- lambda = rep(lambda, len=n)
- u = runif(n)
- z = rnorm(n)^2
+ use.n = if((length.n <- length(n)) > 1) length.n else
+ if(!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ stop("bad input for argument 'n'") else n
+ mu = rep(mu, len=use.n); lambda = rep(lambda, len=use.n)
+ u = runif(use.n)
+ Z = rnorm(use.n)^2 # rchisq(use.n, df=1)
phi = lambda / mu
- y1 = 1 - 0.5 * (sqrt(z^2 + 4*phi*z) - z) / phi
- mu * ifelse((1+y1)*u > 1, 1/y1, y1)
+ y1 = 1 - 0.5 * (sqrt(Z^2 + 4*phi*Z) - Z) / phi
+ ans = mu * ifelse((1+y1)*u > 1, 1/y1, y1)
+ ans[mu <= 0] = NaN
+ ans[lambda <= 0] = NaN
+ ans
}
-inv.gaussianff <- function(lmu="loge", llambda="loge",
+ inv.gaussianff = function(lmu="loge", llambda="loge",
emu=list(), elambda=list(),
ilambda=1,
zero=NULL)
@@ -433,9 +418,9 @@ inv.gaussianff <- function(lmu="loge", llambda="loge",
loglikelihood=eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
lambda <- eta2theta(eta[,2], link=.llambda, earg= .elambda)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w*(0.5 * log(lambda / (2 * pi * y^3)) -
- lambda *(y-mu)^2 / (2*mu^2 * y)))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dinv.gaussian(x=y, mu=mu, lambda=lambda, log=TRUE))
+ }
}, list( .llambda=llambda, .emu=emu, .elambda=elambda ))),
vfamily="inv.gaussianff",
deriv=eval(substitute(expression({
@@ -455,7 +440,7 @@ inv.gaussianff <- function(lmu="loge", llambda="loge",
-poissonff <- function(link="loge", earg=list(),
+ poissonff = function(link="loge", earg=list(),
dispersion=1, onedpar=FALSE,
imu=NULL, method.init=1,
parallel=FALSE, zero=NULL)
@@ -480,9 +465,9 @@ poissonff <- function(link="loge", earg=list(),
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .parallel=parallel, .zero=zero ))),
deviance= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- nz <- y > 0
- devi <- - (y - mu)
- devi[nz] <- devi[nz] + y[nz] * log(y[nz]/mu[nz])
+ nz = y > 0
+ devi = -(y - mu)
+ devi[nz] = devi[nz] + y[nz] * log(y[nz]/mu[nz])
if(residuals) sign(y - mu) * sqrt(2 * abs(devi) * w) else
2 * sum(w * devi)
},
@@ -537,8 +522,8 @@ poissonff <- function(link="loge", earg=list(),
dpar = rep(as.numeric(NA), len=M)
temp87 = cbind(temp87)
nrow.mu = if(is.matrix(mu)) nrow(mu) else length(mu)
- for(i in 1:M)
- dpar[i] = sum(temp87[,i]) / (nrow.mu - ncol(x))
+ for(ii in 1:M)
+ dpar[ii] = sum(temp87[,ii]) / (nrow.mu - ncol(x))
if(is.matrix(y) && length(dimnames(y)[[2]])==length(dpar))
names(dpar) = dimnames(y)[[2]]
} else
@@ -562,7 +547,9 @@ poissonff <- function(link="loge", earg=list(),
theta2eta(mu, link= .link, earg= .earg)
}, list( .link=link, .earg=earg ))),
loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- if(residuals) w*(y/mu - 1) else sum(w*(-mu + y*log(mu) - lgamma(y+1)))
+ if(residuals) w*(y/mu - 1) else {
+ sum(w * dpois(x=y, lambda=mu, log=TRUE))
+ }
},
vfamily="poissonff",
deriv=eval(substitute(expression({
@@ -589,8 +576,8 @@ poissonff <- function(link="loge", earg=list(),
}
-quasibinomialff = function(link = "logit", mv = FALSE, onedpar = !mv,
- parallel = FALSE, zero = NULL) {
+ quasibinomialff = function(link = "logit", mv = FALSE, onedpar = !mv,
+ parallel = FALSE, zero = NULL) {
dispersion = 0 # Estimated; this is the only difference with binomialff()
ans =
binomialff(link = link, dispersion=dispersion, mv=mv, onedpar=onedpar,
@@ -599,8 +586,8 @@ quasibinomialff = function(link = "logit", mv = FALSE, onedpar = !mv,
ans
}
-quasipoissonff = function(link = "loge", onedpar = FALSE, parallel = FALSE,
- zero = NULL) {
+ quasipoissonff = function(link = "loge", onedpar = FALSE, parallel = FALSE,
+ zero = NULL) {
dispersion = 0 # Estimated; this is the only difference with poissonff()
ans =
poissonff(link = link, dispersion=dispersion, onedpar=onedpar,
@@ -612,28 +599,13 @@ quasipoissonff = function(link = "loge", onedpar = FALSE, parallel = FALSE,
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
poissonqn.control <- function(save.weight=TRUE, ...)
{
list(save.weight=save.weight)
}
-poissonqn <- function(link="loge", earg=list(),
+ poissonqn = function(link="loge", earg=list(),
dispersion=1, onedpar=FALSE,
parallel=FALSE, zero=NULL,
wwts=c("expected","observed","qn"))
@@ -655,9 +627,9 @@ poissonqn <- function(link="loge", earg=list(),
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .parallel=parallel, .zero=zero ))),
deviance= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- nz <- y > 0
- devi <- - (y - mu)
- devi[nz] <- devi[nz] + y[nz] * log(y[nz]/mu[nz])
+ nz = y > 0
+ devi = -(y - mu)
+ devi[nz] = devi[nz] + y[nz] * log(y[nz]/mu[nz])
if(residuals) sign(y - mu) * sqrt(2 * abs(devi) * w) else
2 * sum(w * devi)
},
@@ -683,7 +655,7 @@ poissonqn <- function(link="loge", earg=list(),
last=eval(substitute(expression({
dpar <- .dispersion
if(!dpar) {
- temp87 = (y-mu)^2 * wz / (dtheta.deta(mu, link= .link, earg= .earg)^2) # w cancel
+ temp87= (y-mu)^2 * wz/(dtheta.deta(mu, link= .link, earg= .earg)^2)
if(M > 1 && ! .onedpar) {
dpar = rep(as.numeric(NA), len=M)
temp87 = cbind(temp87)
@@ -715,7 +687,9 @@ poissonqn <- function(link="loge", earg=list(),
}, list( .link=link,
.earg=earg ))),
loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- if(residuals) w*(y/mu - 1) else sum(w*(-mu + y*log(mu) - lgamma(y+1)))
+ if(residuals) w*(y/mu - 1) else {
+ sum(w * dpois(x=y, lambda=mu, log=TRUE))
+ }
},
vfamily="poissonqn",
deriv=eval(substitute(expression({
@@ -775,9 +749,7 @@ poissonqn <- function(link="loge", earg=list(),
-
-
-dexppoisson <- function(lmean="loge", emean=list(),
+ dexppoisson = function(lmean="loge", emean=list(),
ldispersion="logit", edispersion=list(),
idispersion=0.8,
zero=NULL)
@@ -867,7 +839,7 @@ dexppoisson <- function(lmean="loge", emean=list(),
-dexpbinomial <- function(lmean="logit", ldispersion="logit",
+ dexpbinomial = function(lmean="logit", ldispersion="logit",
emean=list(), edispersion=list(),
idispersion=0.25,
zero=2)
@@ -988,7 +960,7 @@ dexpbinomial <- function(lmean="logit", ldispersion="logit",
-mbinomial <- function(mvar=NULL, link="logit", earg=list(),
+ mbinomial = function(mvar=NULL, link="logit", earg=list(),
parallel = TRUE, smallno = .Machine$double.eps^(3/4))
{
if(mode(link )!= "character" && mode(link )!= "name")
@@ -1081,8 +1053,9 @@ mbinomial <- function(mvar=NULL, link="logit", earg=list(),
matrix(temp, extra$n, extra$M)
}, list( .link=link, .earg = earg ))),
loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- if(residuals) w*(y/mu - (1-y)/(1-mu)) else
+ if(residuals) w*(y/mu - (1-y)/(1-mu)) else {
sum(w*(y*log(mu) + (1-y)*log1p(-mu)))
+ }
},
vfamily=c("mbinomial", "vcategorical"),
deriv=eval(substitute(expression({
@@ -1123,15 +1096,16 @@ mbinomial <- function(mvar=NULL, link="logit", earg=list(),
mypool = function(x, index) {
answer = x
uindex = unique(index)
- for(i in uindex) {
- ind0 = index == i
+ for(ii in uindex) {
+ ind0 = index == ii
answer[ind0] = sum(x[ind0])
}
answer
}
-mbino <- function()
+if(FALSE)
+ mbino = function()
{
link = "logit"
earg = list()
@@ -1210,8 +1184,9 @@ mbino <- function()
misc$expected = TRUE
}), list( .link=link, .earg = earg ))),
loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- if(residuals) w*(y/mu - (1-y)/(1-mu)) else
+ if(residuals) w*(y/mu - (1-y)/(1-mu)) else {
sum(w*(y*log(mu) + (1-y)*log1p(-mu)))
+ }
},
vfamily=c("mbin", "vcategorical"),
deriv=eval(substitute(expression({
@@ -1234,3 +1209,7 @@ mbino <- function()
}
+
+
+
+
diff --git a/R/family.mixture.q b/R/family.mixture.q
index ad76ff6..6ba9270 100644
--- a/R/family.mixture.q
+++ b/R/family.mixture.q
@@ -31,24 +31,24 @@ mix2normal1 = function(lphi="logit",
if(mode(lsd) != "character" && mode(lsd) != "name")
lsd = as.character(substitute(lsd))
if(!is.Numeric(qmu, allow=2, positive=TRUE) || any(qmu >= 1))
- stop("bad input for argument \"qmu\"")
+ stop("bad input for argument 'qmu'")
if(length(iphi) && (!is.Numeric(iphi, allow=1, positive=TRUE) || iphi>= 1))
- stop("bad input for argument \"iphi\"")
+ stop("bad input for argument 'iphi'")
if(length(imu1) && !is.Numeric(imu1))
- stop("bad input for argument \"imu1\"")
+ stop("bad input for argument 'imu1'")
if(length(imu2) && !is.Numeric(imu2))
- stop("bad input for argument \"imu2\"")
+ stop("bad input for argument 'imu2'")
if(length(isd1) && !is.Numeric(isd1, positive=TRUE))
- stop("bad input for argument \"isd1\"")
+ stop("bad input for argument 'isd1'")
if(length(isd2) && !is.Numeric(isd2, positive=TRUE))
- stop("bad input for argument \"isd2\"")
+ stop("bad input for argument 'isd2'")
if(!is.list(ephi)) ephi = list()
if(!is.list(emu1)) emu1 = list()
if(!is.list(emu2)) emu2 = list()
if(!is.list(esd1)) esd1 = list()
if(!is.list(esd2)) esd2 = list()
if(!is.logical(equalsd) || length(equalsd) != 1)
- stop("bad input for argument \"equalsd\"")
+ stop("bad input for argument 'equalsd'")
if(!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 10)
stop("'nsimEIM' should be an integer greater than 10")
@@ -191,7 +191,7 @@ mix2normal1 = function(lphi="logit",
run.mean = ((ii-1) * run.mean + temp3) / ii
}
wz = if(intercept.only)
- matrix(apply(run.mean,2,mean), n, dimm(M), byrow=TRUE) else run.mean
+ matrix(colMeans(run.mean), n, dimm(M), byrow=TRUE) else run.mean
dtheta.detas = cbind(dphi.deta,dmu1.deta,dsd1.deta,dmu2.deta,dsd2.deta)
index0 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
@@ -219,13 +219,13 @@ mix2poisson = function(lphi="logit", llambda="loge",
if(mode(llambda) != "character" && mode(llambda) != "name")
llambda = as.character(substitute(llambda))
if(!is.Numeric(qmu, allow=2, positive=TRUE) || any(qmu >= 1))
- stop("bad input for argument \"qmu\"")
+ stop("bad input for argument 'qmu'")
if(length(iphi) && (!is.Numeric(iphi, allow=1, positive=TRUE) || iphi>= 1))
- stop("bad input for argument \"iphi\"")
+ stop("bad input for argument 'iphi'")
if(length(il1) && !is.Numeric(il1))
- stop("bad input for argument \"il1\"")
+ stop("bad input for argument 'il1'")
if(length(il2) && !is.Numeric(il2))
- stop("bad input for argument \"il2\"")
+ stop("bad input for argument 'il2'")
if(!is.list(ephi)) ephi = list()
if(!is.list(el1)) el1 = list()
if(!is.list(el2)) el2 = list()
@@ -344,7 +344,7 @@ mix2poisson = function(lphi="logit", llambda="loge",
run.mean = ((ii-1) * run.mean + temp3) / ii
}
wz = if(intercept.only)
- matrix(apply(run.mean,2,mean), n, dimm(M), byrow=TRUE) else run.mean
+ matrix(colMeans(run.mean), n, dimm(M), byrow=TRUE) else run.mean
dtheta.detas = cbind(dphi.deta, dlambda1.deta, dlambda2.deta)
index0 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
@@ -373,13 +373,13 @@ mix2exp = function(lphi="logit", llambda="loge",
if(mode(llambda) != "character" && mode(llambda) != "name")
llambda = as.character(substitute(llambda))
if(!is.Numeric(qmu, allow=2, positive=TRUE) || any(qmu >= 1))
- stop("bad input for argument \"qmu\"")
+ stop("bad input for argument 'qmu'")
if(length(iphi) && (!is.Numeric(iphi, allow=1, positive=TRUE) || iphi>= 1))
- stop("bad input for argument \"iphi\"")
+ stop("bad input for argument 'iphi'")
if(length(il1) && !is.Numeric(il1))
- stop("bad input for argument \"il1\"")
+ stop("bad input for argument 'il1'")
if(length(il2) && !is.Numeric(il2))
- stop("bad input for argument \"il2\"")
+ stop("bad input for argument 'il2'")
if(!is.list(ephi)) ephi = list()
if(!is.list(el1)) el1 = list()
if(!is.list(el2)) el2 = list()
@@ -495,7 +495,7 @@ mix2exp = function(lphi="logit", llambda="loge",
run.mean = ((ii-1) * run.mean + temp3) / ii
}
wz = if(intercept.only)
- matrix(apply(run.mean,2,mean), n, dimm(M), byrow=TRUE) else run.mean
+ matrix(colMeans(run.mean), n, dimm(M), byrow=TRUE) else run.mean
dtheta.detas = cbind(dphi.deta, dlambda1.deta, dlambda2.deta)
index0 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
diff --git a/R/family.nonlinear.q b/R/family.nonlinear.q
index f4fc928..24b5e5e 100644
--- a/R/family.nonlinear.q
+++ b/R/family.nonlinear.q
@@ -61,8 +61,8 @@ micmen <- function(rpar=0.001, divisor=10,
if(!length(Xm2))
stop("regressor not found")
if(ncol(as.matrix(Xm2)) != 1)
- stop(paste("regressor not found or is not a vector. Use the",
- "'form2' argument without an intercept"))
+ stop("regressor not found or is not a vector. Use the ",
+ "'form2' argument without an intercept")
Xm2 = as.vector(Xm2) # Make sure
extra$Xm2 = Xm2 # Needed for @inverse
@@ -71,7 +71,7 @@ micmen <- function(rpar=0.001, divisor=10,
namesof("theta2", .link2, earg= .earg2, tag=FALSE))
if(length(mustart) || length(coefstart))
- stop("can't handle mustart or coefstart")
+ stop("cannot handle mustart or coefstart")
if(!length(etastart)) {
index <- (1:n)[Xm2>quantile(Xm2, prob=.85)]
init1 <- median(y[index])
@@ -100,13 +100,13 @@ micmen <- function(rpar=0.001, divisor=10,
misc$link <- c(theta1= .link1, theta2= .link2)
misc$earg = list(theta1= .earg1, theta2= .earg2 )
misc$rpar <- rpar
- fit$df.residual <- n - rank # Not n.big - rank
- fit$df.total <- n # Not n.big
+ fit$df.residual <- n - rank # Not nrow_X_vlm - rank
+ fit$df.total <- n # Not nrow_X_vlm
extra$Xm2 = NULL # Regressor is in control$regressor
dpar <- .dispersion
if(!dpar) {
- dpar <- sum(w * (y-mu)^2) / (n - p.big)
+ dpar <- sum(w * (y-mu)^2) / (n - ncol_X_vlm)
}
misc$dispersion <- dpar
misc$default.dispersion <- 0
diff --git a/R/family.normal.q b/R/family.normal.q
index d9b587f..b71ed66 100644
--- a/R/family.normal.q
+++ b/R/family.normal.q
@@ -27,7 +27,7 @@ VGAM.weights.function = function(w, M, n) {
-gaussianff = function(dispersion=0, parallel=FALSE, zero=NULL)
+ gaussianff = function(dispersion=0, parallel=FALSE, zero=NULL)
{
if(!is.Numeric(dispersion, allow=1) || dispersion < 0)
stop("bad input for argument 'dispersion'")
@@ -79,7 +79,7 @@ gaussianff = function(dispersion=0, parallel=FALSE, zero=NULL)
wz = VGAM.weights.function(w=w, M=M, n=n)
temp = rss.vgam(y-mu, wz=wz, M=M)
dpar = temp / (length(y) -
- (if(is.numeric(ncol(xbig.save))) ncol(xbig.save) else 0))
+ (if(is.numeric(ncol(X_vlm_save))) ncol(X_vlm_save) else 0))
}
misc$dispersion = dpar
misc$default.dispersion = 0
@@ -124,7 +124,7 @@ dposnorm = function(x, mean=0, sd=1, log=FALSE) {
log.arg = log
rm(log)
if(!is.logical(log.arg) || length(log.arg)!=1)
- stop("bad input for argument \"log\"")
+ stop("bad input for argument 'log'")
L = max(length(x), length(mean), length(sd))
x = rep(x, len=L); mean = rep(mean, len=L); sd = rep(sd, len=L);
@@ -145,13 +145,13 @@ pposnorm = function(q, mean=0, sd=1) {
qposnorm = function(p, mean=0, sd=1) {
if(!is.Numeric(p, posit=TRUE) || max(p) >= 1)
- stop("bad input for argument \"p\"")
+ stop("bad input for argument 'p'")
qnorm(p=p+(1-p)*pnorm(0, mean=mean, sd=sd), mean=mean, sd=sd)
}
rposnorm = function(n, mean=0, sd=1) {
if(!is.Numeric(n, integ=TRUE, posit=TRUE))
- stop("bad input for argument \"n\"")
+ stop("bad input for argument 'n'")
mean = rep(mean, length=n)
sd = rep(sd, length=n)
qnorm(p=runif(n, min=pnorm(0, m=mean, sd=sd)), m=mean, sd=sd)
@@ -178,9 +178,9 @@ rposnorm = function(n, mean=0, sd=1) {
if(mode(lsd) != "character" && mode(lsd) != "name")
lsd = as.character(substitute(lsd))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(length(isd) && !is.Numeric(isd, posit=TRUE))
- stop("bad input for argument \"isd\"")
+ stop("bad input for argument 'isd'")
if(!is.list(emean)) emean = list()
if(!is.list(esd)) esd = list()
if(length(nsimEIM))
@@ -265,7 +265,7 @@ rposnorm = function(n, mean=0, sd=1) {
temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
}
wz = if(intercept.only)
- matrix(apply(run.varcov, 2, mean),
+ matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow=TRUE) else run.varcov
wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
@@ -293,7 +293,7 @@ dbetanorm = function(x, shape1, shape2, mean=0, sd=1, log=FALSE) {
log.arg = log
rm(log)
if(!is.logical(log.arg) || length(log.arg)!=1)
- stop("bad input for argument \"log\"")
+ stop("bad input for argument 'log'")
ans =
if(is.R() && log.arg) {
dnorm(x=x, mean=mean, sd=sd, log=TRUE) +
@@ -317,129 +317,43 @@ pbetanorm = function(q, shape1, shape2, mean=0, sd=1,
qbetanorm = function(p, shape1, shape2, mean=0, sd=1) {
if(!is.Numeric(p, posit=TRUE) || max(p) >= 1)
- stop("bad input for argument \"p\"")
+ stop("bad input for argument 'p'")
qnorm(p=qbeta(p=p, shape1=shape1, shape2=shape2), mean=mean, sd=sd)
}
rbetanorm = function(n, shape1, shape2, mean=0, sd=1) {
if(!is.Numeric(n, integ=TRUE, posit=TRUE))
- stop("bad input for argument \"n\"")
+ stop("bad input for argument 'n'")
qnorm(p=qbeta(p=runif(n), shape1=shape1, shape2=shape2), mean=mean, sd=sd)
}
-tikuv = function(d, lmean="identity", lsigma="loge",
- emean=list(), esigma=list(),
- isigma=NULL, zero=2)
-{
- if(mode(lmean) != "character" && mode(lmean) != "name")
- lmean = as.character(substitute(lmean))
- if(mode(lsigma) != "character" && mode(lsigma) != "name")
- lsigma = as.character(substitute(lsigma))
- if(length(zero) && (!is.Numeric(zero, integer=TRUE, posit=TRUE) ||
- max(zero) > 2))
- stop("bad input for argument \"zero\"")
- if(!is.Numeric(d, allow=1) || max(d) >= 2)
- stop("bad input for argument \"d\"")
- if(!is.list(emean)) emean = list()
- if(!is.list(esigma)) esigma = list()
-
- new("vglmff",
- blurb=c("Short-tailed symmetric [Tiku and Vaughan (1999)] distribution\n",
- "Link: ",
- namesof("mean", lmean, earg= emean), ", ",
- namesof("sigma", lsigma, earg= esigma),
- "\n",
- "\n",
- "Mean: mean"),
- constraints=eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
- if(ncol(cbind(y)) != 1)
- stop("the response must be a vector or one-column matrix")
- predictors.names =
- c(namesof("mean", .lmean, earg= .emean, tag= FALSE),
- namesof("sigma", .lsigma, earg= .esigma, tag= FALSE))
- if(!length(etastart)) {
- sigma.init = if(length(.isigma)) rep(.isigma, length=n) else {
- hh = 2 - .d
- KK = 1 / (1 + 1/hh + 0.75/hh^2)
- K2 = 1 + 3/hh + 15/(4*hh^2)
- rep(sqrt(var(y) / (KK*K2)), len=n)
- }
- mean.init = rep(weighted.mean(y, w), len=n)
- etastart = cbind(theta2eta(mean.init, .lmean, earg= .emean),
- theta2eta(sigma.init, .lsigma, earg= .esigma))
- }
- }),list( .lmean=lmean, .lsigma=lsigma, .isigma=isigma, .d=d,
- .emean=emean, .esigma=esigma ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- eta2theta(eta[,1], .lmean, earg= .emean)
- }, list( .lmean=lmean,
- .emean=emean, .esigma=esigma ))),
- last=eval(substitute(expression({
- misc$link = c("mean"= .lmean, "sigma"= .lsigma)
- misc$earg = list("mean"= .emean, "sigma"= .esigma )
- misc$expected = TRUE
- misc$d = .d
- }), list( .lmean=lmean, .lsigma=lsigma, .d=d,
- .emean=emean, .esigma=esigma ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- mymu = eta2theta(eta[,1], .lmean, earg= .emean)
- sigma = eta2theta(eta[,2], .lsigma, earg= .esigma)
- if(residuals) stop("loglikelihood residuals not implemented yet") else {
- zedd = (y - mymu) / sigma
- hh = 2 - .d
- sum(w * (-log(sigma) + 2 * log1p(0.5*zedd^2 / hh) - 0.5*zedd^2))
- }
- }, list( .lmean=lmean, .lsigma=lsigma, .d=d,
- .emean=emean, .esigma=esigma ))),
- vfamily=c("tikuv"),
- deriv=eval(substitute(expression({
- mymu = eta2theta(eta[,1], .lmean, earg= .emean)
- sigma = eta2theta(eta[,2], .lsigma, earg= .esigma)
- dmu.deta = dtheta.deta(mymu, .lmean, earg= .emean)
- dsigma.deta = dtheta.deta(sigma, .lsigma, earg= .esigma)
- zedd = (y - mymu) / sigma
- hh = 2 - .d
- gzedd = zedd / (1 + 0.5*zedd^2 / hh)
- dl.dmu = zedd / sigma - 2 * gzedd / (hh*sigma)
- dl.dsigma = (zedd^2 - 1 - 2 * zedd * gzedd / hh) / sigma
- w * cbind(dl.dmu * dmu.deta, dl.dsigma * dsigma.deta)
- }), list( .lmean=lmean, .lsigma=lsigma, .d=d,
- .emean=emean, .esigma=esigma ))),
- weight=eval(substitute(expression({
- ayy = 1 / (2*hh)
- Dnos = 1 - (2/hh) * (1 - ayy) / (1 + 2*ayy + 3*ayy^2)
- Dstar = -1 + 3 * (1 + 2*ayy + 11*ayy^2) / (1 + 2*ayy + 3*ayy^2)
- ed2l.dmymu2 = Dnos / sigma^2
- ed2l.dnu2 = Dstar / sigma^2
- wz = matrix(as.numeric(NA), n, M) # diagonal matrix
- wz[,iam(1,1,M)] = ed2l.dmymu2 * dmu.deta^2
- wz[,iam(2,2,M)] = ed2l.dnu2 * dsigma.deta^2
- w * wz
- }), list( .lmean=lmean, .lsigma=lsigma,
- .emean=emean, .esigma=esigma ))))
-}
+dtikuv = function(x, d, mean=0, sigma=1, log = FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
-dtikuv = function(x, d, mean=0, sigma=1) {
if(!is.Numeric(d, allow=1) || max(d) >= 2)
- stop("bad input for argument \"d\"")
+ stop("bad input for argument 'd'")
L = max(length(x), length(mean), length(sigma))
x = rep(x, len=L); mean = rep(mean, len=L); sigma = rep(sigma, len=L);
hh = 2 - d
KK = 1 / (1 + 1/hh + 0.75/hh^2)
- dnorm(x=x, mean=mean, sd=sigma) * KK * (1 + ((x-mean)/sigma)^2 / (2*hh))^2
+ if(log.arg) {
+ dnorm(x=x, mean=mean, sd=sigma, log=TRUE) + log(KK) +
+ 2 * log1p(((x-mean)/sigma)^2 / (2*hh))
+ } else {
+ dnorm(x=x, mean=mean, sd=sigma) * KK *
+ (1 + ((x-mean)/sigma)^2 / (2*hh))^2
+ }
}
ptikuv = function(q, d, mean=0, sigma=1) {
if(!is.Numeric(d, allow=1) || max(d) >= 2)
- stop("bad input for argument \"d\"")
+ stop("bad input for argument 'd'")
L = max(length(q), length(mean), length(sigma))
q = rep(q, len=L); mean = rep(mean, len=L); sigma = rep(sigma, len=L);
zedd1 = 0.5 * ((q - mean) / sigma)^2
@@ -462,13 +376,13 @@ ptikuv = function(q, d, mean=0, sigma=1) {
qtikuv = function(p, d, mean=0, sigma=1, ...) {
if(!is.Numeric(p, posit=TRUE) || max(p) >= 1)
- stop("bad input for argument \"p\"")
+ stop("bad input for argument 'p'")
if(!is.Numeric(d, allow=1) || max(d) >= 2)
- stop("bad input for argument \"d\"")
+ stop("bad input for argument 'd'")
if(!is.Numeric(mean))
- stop("bad input for argument \"mean\"")
+ stop("bad input for argument 'mean'")
if(!is.Numeric(sigma))
- stop("bad input for argument \"sigma\"")
+ stop("bad input for argument 'sigma'")
L = max(length(p), length(mean), length(sigma))
p = rep(p, len=L); mean = rep(mean, len=L); sigma = rep(sigma, len=L);
ans = rep(0.0, len=L)
@@ -490,16 +404,16 @@ qtikuv = function(p, d, mean=0, sigma=1, ...) {
rtikuv = function(n, d, mean=0, sigma=1, Smallno=1.0e-6) {
if(!is.Numeric(n, posit=TRUE, integ=TRUE))
- stop("bad input for argument \"n\"")
+ stop("bad input for argument 'n'")
if(!is.Numeric(d, allow=1) || max(d) >= 2)
- stop("bad input for argument \"d\"")
+ stop("bad input for argument 'd'")
if(!is.Numeric(mean, allow=1))
- stop("bad input for argument \"mean\"")
+ stop("bad input for argument 'mean'")
if(!is.Numeric(sigma, allow=1))
- stop("bad input for argument \"sigma\"")
+ stop("bad input for argument 'sigma'")
if(!is.Numeric(Smallno, posit=TRUE, allow=1) || Smallno > 0.01 ||
Smallno < 2 * .Machine$double.eps)
- stop("bad input for argument \"Smallno\"")
+ stop("bad input for argument 'Smallno'")
ans = rep(0.0, len=n)
ptr1 = 1; ptr2 = 0
@@ -529,6 +443,103 @@ rtikuv = function(n, d, mean=0, sigma=1, Smallno=1.0e-6) {
+
+ tikuv = function(d, lmean="identity", lsigma="loge",
+ emean=list(), esigma=list(),
+ isigma=NULL, zero=2)
+{
+ if(mode(lmean) != "character" && mode(lmean) != "name")
+ lmean = as.character(substitute(lmean))
+ if(mode(lsigma) != "character" && mode(lsigma) != "name")
+ lsigma = as.character(substitute(lsigma))
+ if(length(zero) && (!is.Numeric(zero, integer=TRUE, posit=TRUE) ||
+ max(zero) > 2))
+ stop("bad input for argument 'zero'")
+ if(!is.Numeric(d, allow=1) || max(d) >= 2)
+ stop("bad input for argument 'd'")
+ if(!is.list(emean)) emean = list()
+ if(!is.list(esigma)) esigma = list()
+
+ new("vglmff",
+ blurb=c("Short-tailed symmetric [Tiku and Vaughan (1999)] distribution\n",
+ "Link: ",
+ namesof("mean", lmean, earg= emean), ", ",
+ namesof("sigma", lsigma, earg= esigma),
+ "\n",
+ "\n",
+ "Mean: mean"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ if(ncol(cbind(y)) != 1)
+ stop("the response must be a vector or one-column matrix")
+ predictors.names =
+ c(namesof("mean", .lmean, earg= .emean, tag= FALSE),
+ namesof("sigma", .lsigma, earg= .esigma, tag= FALSE))
+ if(!length(etastart)) {
+ sigma.init = if(length(.isigma)) rep(.isigma, length=n) else {
+ hh = 2 - .d
+ KK = 1 / (1 + 1/hh + 0.75/hh^2)
+ K2 = 1 + 3/hh + 15/(4*hh^2)
+ rep(sqrt(var(y) / (KK*K2)), len=n)
+ }
+ mean.init = rep(weighted.mean(y, w), len=n)
+ etastart = cbind(theta2eta(mean.init, .lmean, earg= .emean),
+ theta2eta(sigma.init, .lsigma, earg= .esigma))
+ }
+ }),list( .lmean=lmean, .lsigma=lsigma, .isigma=isigma, .d=d,
+ .emean=emean, .esigma=esigma ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta[,1], .lmean, earg= .emean)
+ }, list( .lmean=lmean,
+ .emean=emean, .esigma=esigma ))),
+ last=eval(substitute(expression({
+ misc$link = c("mean"= .lmean, "sigma"= .lsigma)
+ misc$earg = list("mean"= .emean, "sigma"= .esigma )
+ misc$expected = TRUE
+ misc$d = .d
+ }), list( .lmean=lmean, .lsigma=lsigma, .d=d,
+ .emean=emean, .esigma=esigma ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ mymu = eta2theta(eta[,1], .lmean, earg= .emean)
+ sigma = eta2theta(eta[,2], .lsigma, earg= .esigma)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dtikuv(x=y, d= .d, mean=mymu, sigma=sigma, log = TRUE))
+ }
+ }, list( .lmean=lmean, .lsigma=lsigma, .d=d,
+ .emean=emean, .esigma=esigma ))),
+ vfamily=c("tikuv"),
+ deriv=eval(substitute(expression({
+ mymu = eta2theta(eta[,1], .lmean, earg= .emean)
+ sigma = eta2theta(eta[,2], .lsigma, earg= .esigma)
+ dmu.deta = dtheta.deta(mymu, .lmean, earg= .emean)
+ dsigma.deta = dtheta.deta(sigma, .lsigma, earg= .esigma)
+ zedd = (y - mymu) / sigma
+ hh = 2 - .d
+ gzedd = zedd / (1 + 0.5*zedd^2 / hh)
+ dl.dmu = zedd / sigma - 2 * gzedd / (hh*sigma)
+ dl.dsigma = (zedd^2 - 1 - 2 * zedd * gzedd / hh) / sigma
+ w * cbind(dl.dmu * dmu.deta, dl.dsigma * dsigma.deta)
+ }), list( .lmean=lmean, .lsigma=lsigma, .d=d,
+ .emean=emean, .esigma=esigma ))),
+ weight=eval(substitute(expression({
+ ayy = 1 / (2*hh)
+ Dnos = 1 - (2/hh) * (1 - ayy) / (1 + 2*ayy + 3*ayy^2)
+ Dstar = -1 + 3 * (1 + 2*ayy + 11*ayy^2) / (1 + 2*ayy + 3*ayy^2)
+ ed2l.dmymu2 = Dnos / sigma^2
+ ed2l.dnu2 = Dstar / sigma^2
+ wz = matrix(as.numeric(NA), n, M) # diagonal matrix
+ wz[,iam(1,1,M)] = ed2l.dmymu2 * dmu.deta^2
+ wz[,iam(2,2,M)] = ed2l.dnu2 * dsigma.deta^2
+ w * wz
+ }), list( .lmean=lmean, .lsigma=lsigma,
+ .emean=emean, .esigma=esigma ))))
+}
+
+
+
dfnorm = function(x, mean=0, sd=1, a1=1, a2=1) {
if(!is.Numeric(a1, posit=TRUE) || !is.Numeric(a2, posit=TRUE))
stop("bad input for arguments 'a1' and 'a2'")
@@ -553,7 +564,7 @@ pfnorm = function(q, mean=0, sd=1, a1=1, a2=1) {
qfnorm = function(p, mean=0, sd=1, a1=1, a2=1, ...) {
if(!is.Numeric(p, posit=TRUE) || max(p) >= 1)
- stop("bad input for argument \"p\"")
+ stop("bad input for argument 'p'")
if(!is.Numeric(a1, posit=TRUE) || !is.Numeric(a2, posit=TRUE))
stop("bad input for arguments 'a1' and 'a2'")
if(any(a1 <= 0 | a2 <= 0))
@@ -580,7 +591,7 @@ qfnorm = function(p, mean=0, sd=1, a1=1, a2=1, ...) {
rfnorm = function(n, mean=0, sd=1, a1=1, a2=1) {
if(!is.Numeric(n, integ=TRUE, posit=TRUE))
- stop("bad input for argument \"n\"")
+ stop("bad input for argument 'n'")
if(!is.Numeric(a1, posit=TRUE) || !is.Numeric(a2, posit=TRUE))
stop("bad input for arguments 'a1' and 'a2'")
if(any(a1 <= 0 | a2 <= 0))
@@ -590,9 +601,9 @@ rfnorm = function(n, mean=0, sd=1, a1=1, a2=1) {
}
-fnormal1 = function(lmean="identity", lsd="loge", emean=list(), esd=list(),
- imean=NULL, isd=NULL, a1=1, a2=1, nsimEIM=500,
- method.init=1, zero=NULL)
+ fnormal1 = function(lmean="identity", lsd="loge", emean=list(), esd=list(),
+ imean=NULL, isd=NULL, a1=1, a2=1, nsimEIM=500,
+ method.init=1, zero=NULL)
{
if(!is.Numeric(a1, posit=TRUE, allow=1) ||
!is.Numeric(a2, posit=TRUE, allow=1))
@@ -608,7 +619,7 @@ fnormal1 = function(lmean="identity", lsd="loge", emean=list(), esd=list(),
if(mode(lsd) != "character" && mode(lsd) != "name")
lsd = as.character(substitute(lsd))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(emean)) emean = list()
if(!is.list(esd)) esd = list()
if(!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 10)
@@ -726,7 +737,7 @@ if(FALSE) {
}
wz = if(intercept.only)
- matrix(apply(run.mean,2,mean), n, dimm(M), byrow=TRUE) else run.mean
+ matrix(colMeans(run.mean), n, dimm(M), byrow=TRUE) else run.mean
index0 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
wz = wz * dtheta.detas[,index0$row] * dtheta.detas[,index0$col]
@@ -756,7 +767,7 @@ lqnorm = function(qpower=2, link="identity", earg=list(),
method.init > 3)
stop("'method.init' must be 1 or 2 or 3")
if(!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
- shrinkage.init > 1) stop("bad input for argument \"shrinkage.init\"")
+ shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
new("vglmff",
blurb=c("Minimizing the q-norm of residuals\n",
diff --git a/R/family.positive.q b/R/family.positive.q
index a661399..8a91a3d 100644
--- a/R/family.positive.q
+++ b/R/family.positive.q
@@ -57,22 +57,23 @@ qposnegbin = function(p, size, prob=NULL, munb=NULL) {
prob <- size/(size + munb)
}
if(!is.Numeric(p, posit=TRUE) || any(p >= 1))
- stop("bad input for argument \"p\"")
+ stop("bad input for argument 'p'")
qnbinom(p * pnbinom(q=p*0, size=size, prob=prob, lower.tail = FALSE) +
dnbinom(x=p*0, size=size, prob=prob), size=size, prob=prob)
}
rposnegbin = function(n, size, prob=NULL, munb=NULL) {
+ use.n = if((length.n <- length(n)) > 1) length.n else
+ if(!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ stop("bad input for argument 'n'") else n
+
if (length(munb)) {
if (length(prob))
stop("'prob' and 'munb' both specified")
prob <- size/(size + munb)
}
- if(!is.Numeric(n, positive=TRUE, integer=TRUE, allow=1))
- stop("'n' must be a single positive integer")
- ans = rnbinom(n, size=size, prob=prob)
-
+ ans = rnbinom(use.n, size=size, prob=prob)
index = (ans == 0)
size = rep(size, len=length(ans))
prob = rep(prob, len=length(ans))
@@ -96,13 +97,13 @@ rposnegbin = function(n, size, prob=NULL, munb=NULL) {
shrinkage.init=0.95, method.init=1)
{
if(!is.Numeric(cutoff, allow=1) || cutoff<0.8 || cutoff>=1)
- stop("range error in the argument \"cutoff\"")
+ stop("range error in the argument 'cutoff'")
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 2) stop("argument \"method.init\" must be 1 or 2")
+ method.init > 2) stop("argument 'method.init' must be 1 or 2")
if(length(ik) && !is.Numeric(ik, posit=TRUE))
- stop("bad input for argument \"ik\"")
+ stop("bad input for argument 'ik'")
if(!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
- shrinkage.init > 1) stop("bad input for argument \"shrinkage.init\"")
+ shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
if(mode(lmunb) != "character" && mode(lmunb) != "name")
lmunb = as.character(substitute(lmunb))
@@ -268,8 +269,9 @@ rposnegbin = function(n, size, prob=NULL, munb=NULL) {
dpospois = function(x, lambda, log=FALSE) {
if(!is.logical(log.arg <- log)) stop("bad input for 'log'")
rm(log)
+
if(!is.Numeric(lambda, posit=TRUE))
- stop("bad input for argument \"lambda\"")
+ stop("bad input for argument 'lambda'")
L = max(length(x), length(lambda))
x = rep(x, len=L); lambda = rep(lambda, len=L);
ans = if(log.arg) {
@@ -283,7 +285,7 @@ dpospois = function(x, lambda, log=FALSE) {
ppospois = function(q, lambda) {
if(!is.Numeric(lambda, posit=TRUE))
- stop("bad input for argument \"lambda\"")
+ stop("bad input for argument 'lambda'")
L = max(length(q), length(lambda))
q = rep(q, len=L); lambda = rep(lambda, len=L);
ifelse(q<1, 0, (ppois(q, lambda) - exp(-lambda)) / (-expm1(-lambda)))
@@ -291,23 +293,25 @@ ppospois = function(q, lambda) {
qpospois = function(p, lambda) {
if(!is.Numeric(lambda, posit=TRUE))
- stop("bad input for argument \"lambda\"")
+ stop("bad input for argument 'lambda'")
if(!is.Numeric(p, posit=TRUE) || any(p >= 1))
- stop("bad input for argument \"p\"")
+ stop("bad input for argument 'p'")
qpois(p * (-expm1(-lambda)) + exp(-lambda), lambda)
}
rpospois = function(n, lambda) {
- if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE))
- stop("bad input for argument \"n\"")
- if(!is.Numeric(lambda, posit=TRUE))
- stop("bad input for argument \"lambda\"")
- ans = rpois(n, lambda)
- lambda = rep(lambda, len=n)
+ use.n = if((length.n <- length(n)) > 1) length.n else
+ if(!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ stop("bad input for argument 'n'") else n
+
+ if(any(lambda == 0))
+ stop("no zero values allowed for argument 'lambda'")
+ ans = rpois(use.n, lambda)
+ lambda = rep(lambda, len=use.n)
index = (ans == 0)
while(any(index)) {
- more = rpois(sum(index), lambda[index])
+ more = rpois(n=sum(index), lambda[index])
ans[index] = more
index = (ans == 0)
}
@@ -324,11 +328,11 @@ rpospois = function(n, lambda) {
link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
if(!is.logical(expected) || length(expected) != 1)
- stop("bad input for argument \"expected\"")
+ stop("bad input for argument 'expected'")
if(length( ilambda) && !is.Numeric(ilambda, posit=TRUE))
- stop("bad input for argument \"ilambda\"")
+ stop("bad input for argument 'ilambda'")
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 3) stop("argument \"method.init\" must be 1 or 2 or 3")
+ method.init > 3) stop("argument 'method.init' must be 1 or 2 or 3")
new("vglmff",
blurb=c("Positive-Poisson distribution\n\n",
@@ -493,7 +497,7 @@ dposbinom = function(x, size, prob, log = FALSE) {
pposbinom = function(q, size, prob, lower.tail = TRUE, log.p = FALSE) {
if(!is.Numeric(prob, positive=TRUE))
- stop("no zero or non-numeric values allowed for argument \"prob\"")
+ stop("no zero or non-numeric values allowed for argument 'prob'")
L = max(length(q), length(size), length(prob))
q = rep(q, len=L); size = rep(size, len=L); prob = rep(prob, len=L);
ifelse(q<1, 0, (pbinom(q=q, size=size, prob=prob, lower.tail=lower.tail,
@@ -502,19 +506,21 @@ pposbinom = function(q, size, prob, lower.tail = TRUE, log.p = FALSE) {
qposbinom = function(p, size, prob, lower.tail = TRUE, log.p = FALSE) {
if(!is.Numeric(prob, positive=TRUE))
- stop("no zero or non-numeric values allowed for argument \"prob\"")
+ stop("no zero or non-numeric values allowed for argument 'prob'")
if(!is.Numeric(p, posit=TRUE) || any(p >= 1))
- stop("bad input for argument \"p\"")
+ stop("bad input for argument 'p'")
qbinom(p=p * (1 - (1-prob)^size) + (1-prob)^size, size=size, prob=prob,
lower.tail=lower.tail, log.p=log.p)
}
rposbinom = function(n, size, prob) {
- if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE))
- stop("bad input for argument \"n\"")
- if(!is.Numeric(prob, positive=TRUE))
- stop("no zero or non-numeric values allowed for argument \"prob\"")
- ans = rbinom(n=n, size=size, prob=prob)
+ use.n = if((length.n <- length(n)) > 1) length.n else
+ if(!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ stop("bad input for argument 'n'") else n
+
+ if(any(prob == 0))
+ stop("no zero values allowed for argument 'prob'")
+ ans = rbinom(n=use.n, size=size, prob=prob)
index = (ans == 0)
size = rep(size, len=length(ans))
prob = rep(prob, len=length(ans))
diff --git a/R/family.qreg.q b/R/family.qreg.q
index e299857..4d3f0de 100644
--- a/R/family.qreg.q
+++ b/R/family.qreg.q
@@ -708,7 +708,7 @@ lms.yjn2 = function(percentiles=c(25,50,75),
}
if(intercept.only)
- run.varcov = matrix(apply(run.varcov, 2, mean),
+ run.varcov = matrix(colMeans(run.varcov),
nr=n, nc=ncol(run.varcov), byrow=TRUE)
@@ -1013,7 +1013,7 @@ lmscreg.control <- function(cdf= TRUE, at.arg=NULL, x0=NULL, ...)
{
if(!is.logical(cdf)) {
- warning("\"cdf\" is not logical; using TRUE instead")
+ warning("'cdf' is not logical; using TRUE instead")
cdf = TRUE
}
list(cdf=cdf, at.arg=at.arg, x0=x0)
@@ -1029,7 +1029,7 @@ Wr1 <- function(r, w) ifelse(r <= 0, 1, w)
Wr2 <- function(r, w) (r <= 0) * 1 + (r > 0) * w
-alsqreg.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+amlnormal.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
M <- length(extra$w.als)
@@ -1052,17 +1052,17 @@ alsqreg.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
-alsqreg <- function(w.als=1, parallel=FALSE,
- lexpectile = "identity", eexpectile = list(),
- iexpectile = NULL,
- method.init=1, digw=4)
+ amlnormal <- function(w.als=1, parallel=FALSE,
+ lexpectile = "identity", eexpectile = list(),
+ iexpectile = NULL,
+ method.init=1, digw=4)
{
if(!is.Numeric(w.als, posit=TRUE))
stop("'w.als' must be a vector of positive values")
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 3) stop("argument \"method.init\" must be 1, 2 or 3")
+ method.init > 3) stop("argument 'method.init' must be 1, 2 or 3")
if(mode(lexpectile) != "character" && mode(lexpectile) != "name")
lexpectile = as.character(substitute(lexpectile))
if(!is.list(eexpectile)) eexpectile = list()
@@ -1077,8 +1077,8 @@ alsqreg <- function(w.als=1, parallel=FALSE,
constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints)
}), list( .parallel=parallel ))),
deviance= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- alsqreg.deviance(mu=mu, y=y, w=w, residuals=residuals,
- eta=eta, extra=extra)
+ amlnormal.deviance(mu=mu, y=y, w=w, residuals=residuals,
+ eta=eta, extra=extra)
},
initialize=eval(substitute(expression({
extra$w.als = .w.als
@@ -1129,12 +1129,12 @@ alsqreg <- function(w.als=1, parallel=FALSE,
names(extra$percentile) = names(misc$link)
extra$individual = TRUE
- extra$deviance = alsqreg.deviance(mu=mu, y=y, w=w,
- residuals=FALSE, eta=eta, extra=extra)
+ extra$deviance = amlnormal.deviance(mu=mu, y=y, w=w, residuals=FALSE,
+ eta=eta, extra=extra)
names(extra$deviance) = extra$y.names
}), list( .lexpectile=lexpectile, .eexpectile=eexpectile,
.parallel=parallel ))),
- vfamily=c("alsqreg"),
+ vfamily=c("amlnormal"),
deriv=eval(substitute(expression({
mymu = eta2theta(eta, .lexpectile, earg= .eexpectile)
dexpectile.deta = dtheta.deta(mymu, .lexpectile, earg= .eexpectile)
@@ -1460,7 +1460,7 @@ amlexponential <- function(w.aml=1, parallel=FALSE, method.init=1, digw=4,
link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 3) stop("argument \"method.init\" must be 1, 2 or 3")
+ method.init > 3) stop("argument 'method.init' must be 1, 2 or 3")
y.names = paste("w.aml=", round(w.aml, dig=digw), sep="")
predictors.names = c(namesof(
@@ -1556,77 +1556,91 @@ amlexponential <- function(w.aml=1, parallel=FALSE, method.init=1, digw=4,
rho1check = function(u, tau=0.5)
u * (tau - (u <= 0))
-dalaplace = function(x, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau))) {
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
- if(!is.Numeric(kappa, posit=TRUE)) stop("\"kappa\" must be positive")
- const = (sqrt(2) / scale) * kappa / (1 + kappa^2)
+dalap = function(x, location=0, scale=1, tau=0.5,
+ kappa=sqrt(tau/(1-tau)), log=FALSE) {
+ if(!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
+ rm(log)
+
+ NN = max(length(x), length(location), length(scale), length(kappa))
+ location = rep(location, len=NN); scale= rep(scale, len=NN)
+ kappa = rep(kappa, len=NN); x = rep(x, len=NN)
+ tau = rep(tau, len=NN)
+
+ logconst = 0.5 * log(2) - log(scale) + log(kappa) - log1p(kappa^2)
exponent = -(sqrt(2) / scale) * abs(x - location) *
ifelse(x >= location, kappa, 1/kappa)
- const * exp(exponent)
+
+ indexTF = (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ logconst[!indexTF] = NaN
+
+ if(log.arg) logconst + exponent else exp(logconst + exponent)
}
-ralaplace = function(n, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau))) {
- if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
- stop("bad input for argument \"n\"")
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
- if(!is.Numeric(kappa, posit=TRUE)) stop("\"kappa\" must be positive")
- location = rep(location, len=n); scale= rep(scale, len=n)
- kappa = rep(kappa, len=n);
- U1 = runif(n)
- U2 = runif(n)
- location + scale * log(U1^kappa / U2^(1/kappa)) / sqrt(2)
+ralap = function(n, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau))) {
+ use.n = if((length.n <- length(n)) > 1) length.n else
+ if(!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ stop("bad input for argument 'n'") else n
+
+ location = rep(location, len=use.n); scale= rep(scale, len=use.n)
+ tau = rep(tau, len=use.n); kappa = rep(kappa, len=use.n);
+ ans = location + scale *
+ log(runif(use.n)^kappa / runif(use.n)^(1/kappa)) / sqrt(2)
+ indexTF = (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ ans[!indexTF] = NaN
+ ans
}
-palaplace = function(q, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau))) {
- if(!is.Numeric(q))
- stop("bad input for argument \"q\"")
- if(!is.Numeric(location))
- stop("bad input for argument \"location\"")
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
- if(!is.Numeric(kappa, posit=TRUE))
- stop("bad input for argument \"kappa\"")
- N = max(length(q), length(location), length(scale), length(kappa))
- location = rep(location, len=N); scale= rep(scale, len=N)
- kappa = rep(kappa, len=N); q= rep(q, len=N)
+palap = function(q, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau))) {
+ NN = max(length(q), length(location), length(scale), length(kappa))
+ location = rep(location, len=NN); scale= rep(scale, len=NN)
+ kappa = rep(kappa, len=NN); q= rep(q, len=NN)
+ tau = rep(tau, len=NN);
+
exponent = -(sqrt(2) / scale) * abs(q - location) *
ifelse(q >= location, kappa, 1/kappa)
- temp = exp(exponent) / (1 + kappa^2)
- ans = 1 - temp
+ temp5 = exp(exponent) / (1 + kappa^2)
+ ans = 1 - temp5
index1 = (q < location)
- ans[index1] = (kappa[index1])^2 * temp[index1]
+ ans[index1] = (kappa[index1])^2 * temp5[index1]
+
+ indexTF = (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ ans[!indexTF] = NaN
ans
}
-qalaplace = function(p, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau))) {
- if(!is.Numeric(p, posit=TRUE) || max(p) >= 1)
- stop("bad input for argument \"p\"")
- if(!is.Numeric(location))
- stop("bad input for argument \"location\"")
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
- if(!is.Numeric(kappa, posit=TRUE))
- stop("bad input for argument \"kappa\"")
- N = max(length(p), length(location), length(scale), length(kappa))
- location = rep(location, len=N); scale= rep(scale, len=N)
- kappa = rep(kappa, len=N); p = rep(p, len=N)
+qalap = function(p, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau))) {
+ NN = max(length(p), length(location), length(scale), length(kappa))
+ location = rep(location, len=NN); scale= rep(scale, len=NN)
+ kappa = rep(kappa, len=NN); p = rep(p, len=NN)
+ tau = rep(tau, len=NN)
ans = p
- temp = kappa^2 / (1 + kappa^2)
- index1 = (p <= temp)
- exponent = p[index1] / temp[index1]
+ temp5 = kappa^2 / (1 + kappa^2)
+ index1 = (p <= temp5)
+ exponent = p[index1] / temp5[index1]
ans[index1] = location[index1] + (scale[index1] * kappa[index1]) *
log(exponent) / sqrt(2)
ans[!index1] = location[!index1] - (scale[!index1] / kappa[!index1]) *
(log1p((kappa[!index1])^2) + log1p(-p[!index1])) / sqrt(2)
+
+ indexTF = (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) &
+ (p >= 0) & (p <= 1)
+ ans[!indexTF] = NaN
+ ans[p == 0 & indexTF] = -Inf
+ ans[p == 1 & indexTF] = Inf
ans
}
+
+
+
if(FALSE)
dqregal = function(x, tau=0.5, location=0, scale=1) {
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
+ if(!is.Numeric(scale, posit=TRUE)) stop("'scale' must be positive")
if(!is.Numeric(tau, posit=TRUE) || max(tau) >= 1)
- stop("\"tau\" must have values in (0,1)")
+ stop("'tau' must have values in (0,1)")
const = tau * (1-tau) / scale
const * exp(-rho1check((x-location)/scale, tau=tau))
}
@@ -1636,10 +1650,10 @@ dqregal = function(x, tau=0.5, location=0, scale=1) {
if(FALSE)
rqregal = function(n, tau=0.5, location=0, scale=1) {
if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
- stop("bad input for argument \"n\"")
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
+ stop("bad input for argument 'n'")
+ if(!is.Numeric(scale, posit=TRUE)) stop("'scale' must be positive")
if(!is.Numeric(tau, posit=TRUE) || max(tau) >= 1)
- stop("\"tau\" must have values in (0,1)")
+ stop("'tau' must have values in (0,1)")
location = rep(location, len=n); scale= rep(scale, len=n)
r = runif(n)
location - sign(r-tau) * scale * log(2*ifelse(r < tau, r, 1-r))
@@ -1652,12 +1666,12 @@ pqregal = function(q, tau=0.5, location=0, scale=1) {
if(!all(scale == 1))
stop("currently can only handle scale == 1")
if(!is.Numeric(q))
- stop("bad input for argument \"q\"")
+ stop("bad input for argument 'q'")
if(!is.Numeric(location))
- stop("bad input for argument \"location\"")
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
+ stop("bad input for argument 'location'")
+ if(!is.Numeric(scale, posit=TRUE)) stop("'scale' must be positive")
if(!is.Numeric(tau, posit=TRUE) || max(tau) >= 1)
- stop("\"tau\" must have values in (0,1)")
+ stop("'tau' must have values in (0,1)")
N = max(length(q), length(tau), length(location), length(scale))
location = rep(location, len=N); scale= rep(scale, len=N)
tau = rep(tau, len=N); q= rep(q, len=N)
@@ -1677,9 +1691,9 @@ qregal = function(tau=c(0.25, 0.5, 0.75),
if(mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 2) stop("argument \"method.init\" must be 1 or 2")
+ method.init > 2) stop("argument 'method.init' must be 1 or 2")
if(!is.Numeric(tau, posit=TRUE) || max(tau) >= 1)
- stop("bad input for argument \"tau\"")
+ stop("bad input for argument 'tau'")
if(!is.list(elocation)) elocation = list()
if(mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
@@ -1797,4 +1811,350 @@ qregal = function(tau=c(0.25, 0.5, 0.75),
+rloglap = function(n, location.ald=0, scale.ald=1, tau=0.5,
+ kappa=sqrt(tau/(1-tau))) {
+ use.n = if((length.n <- length(n)) > 1) length.n else
+ if(!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ stop("bad input for argument 'n'") else n
+ location.ald = rep(location.ald, len=use.n);
+ scale.ald= rep(scale.ald, len=use.n)
+ tau = rep(tau, len=use.n);
+ kappa = rep(kappa, len=use.n);
+ ans = exp(location.ald) *
+ (runif(use.n)^kappa / runif(use.n)^(1/kappa))^(scale.ald / sqrt(2))
+ indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ ans[!indexTF] = NaN
+ ans
+}
+
+
+dloglap = function(x, location.ald=0, scale.ald=1, tau=0.5,
+ kappa=sqrt(tau/(1-tau)), log=FALSE) {
+ if(!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
+ rm(log)
+
+ NN = max(length(x), length(location.ald), length(scale.ald), length(kappa))
+ location = rep(location.ald, len=NN); scale= rep(scale.ald, len=NN)
+ kappa = rep(kappa, len=NN); x = rep(x, len=NN)
+ tau = rep(tau, len=NN)
+
+ Alpha = sqrt(2) * kappa / scale.ald
+ Beta = sqrt(2) / (scale.ald * kappa)
+ Delta = exp(location.ald)
+ exponent = ifelse(x >= Delta, -(Alpha+1), (Beta-1)) * (log(x) - location.ald)
+ logdensity = -location.ald + log(Alpha) + log(Beta) -
+ log(Alpha + Beta) + exponent
+ indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ logdensity[!indexTF] = NaN
+ logdensity[x < 0 & indexTF] = -Inf
+ if(log.arg) logdensity else exp(logdensity)
+}
+
+
+qloglap = function(p, location.ald=0, scale.ald=1,
+ tau=0.5, kappa=sqrt(tau/(1-tau))) {
+ NN = max(length(p), length(location.ald), length(scale.ald), length(kappa))
+ location = rep(location.ald, len=NN); scale= rep(scale.ald, len=NN)
+ kappa = rep(kappa, len=NN); p = rep(p, len=NN)
+ tau = rep(tau, len=NN)
+
+ Alpha = sqrt(2) * kappa / scale.ald
+ Beta = sqrt(2) / (scale.ald * kappa)
+ Delta = exp(location.ald)
+
+ temp9 = Alpha + Beta
+ ans = Delta * (p * temp9 / Alpha)^(1/Beta)
+ index1 = (p > Alpha / temp9)
+ ans[index1] = (Delta * ((1-p) * temp9 / Beta)^(-1/Alpha))[index1]
+ ans[p == 0] = 0
+ ans[p == 1] = Inf
+
+ indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) &
+ (p >= 0) & (p <= 1)
+ ans[!indexTF] = NaN
+ ans
+}
+
+
+
+ploglap = function(q, location.ald=0, scale.ald=1,
+ tau=0.5, kappa=sqrt(tau/(1-tau))) {
+ NN = max(length(q), length(location.ald), length(scale.ald), length(kappa))
+ location = rep(location.ald, len=NN); scale = rep(scale.ald, len=NN)
+ kappa = rep(kappa, len=NN); q = rep(q, len=NN)
+ tau = rep(tau, len=NN)
+
+ Alpha = sqrt(2) * kappa / scale.ald
+ Beta = sqrt(2) / (scale.ald * kappa)
+ Delta = exp(location.ald)
+
+ temp9 = Alpha + Beta
+ ans = (Alpha / temp9) * (q / Delta)^(Beta)
+ ans[q <= 0] = 0
+ index1 = (q >= Delta)
+ ans[index1] = (1 - (Beta/temp9) * (Delta/q)^(Alpha))[index1]
+
+ indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ ans[!indexTF] = NaN
+ ans
+}
+
+
+
+
+rlogitlap = function(n, location.ald=0, scale.ald=1, tau=0.5,
+ kappa=sqrt(tau/(1-tau)), earg=list()) {
+ logit(ralap(n=n, location=location.ald, scale=scale.ald,
+ tau=tau, kappa=kappa), inverse=TRUE, earg=earg)
+}
+
+
+dlogitlap = function(x, location.ald=0, scale.ald=1, tau=0.5,
+ kappa=sqrt(tau/(1-tau)), log=FALSE, earg=list()) {
+ if(!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
+ rm(log)
+
+ NN = max(length(x), length(location.ald), length(scale.ald), length(kappa))
+ location = rep(location.ald, len=NN); scale= rep(scale.ald, len=NN)
+ kappa = rep(kappa, len=NN); x = rep(x, len=NN)
+ tau = rep(tau, len=NN)
+
+ Alpha = sqrt(2) * kappa / scale.ald
+ Beta = sqrt(2) / (scale.ald * kappa)
+ Delta = logit(location.ald, inverse=TRUE, earg=earg)
+
+ exponent = ifelse(x >= Delta, -Alpha, Beta) *
+ (logit(x, earg=earg) - location.ald)
+ logdensity = log(Alpha) + log(Beta) - log(Alpha + Beta) -
+ log(x) - log1p(-x) + exponent
+ indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ logdensity[!indexTF] = NaN
+ logdensity[x < 0 & indexTF] = -Inf
+ logdensity[x > 1 & indexTF] = -Inf
+ if(log.arg) logdensity else exp(logdensity)
+}
+
+
+qlogitlap = function(p, location.ald=0, scale.ald=1,
+ tau=0.5, kappa=sqrt(tau/(1-tau)), earg=list()) {
+ qqq = qalap(p=p, location=location.ald, scale=scale.ald,
+ tau=tau, kappa=kappa)
+ ans = logit(qqq, inverse=TRUE, earg=earg)
+ ans[(p < 0) | (p > 1)] = NaN
+ ans[p == 0] = 0
+ ans[p == 1] = 1
+ ans
+}
+
+
+
+plogitlap = function(q, location.ald=0, scale.ald=1,
+ tau=0.5, kappa=sqrt(tau/(1-tau)), earg=list()) {
+ NN = max(length(q), length(location.ald), length(scale.ald),
+ length(kappa))
+ location.ald = rep(location.ald, len=NN); scale.ald= rep(scale.ald, len=NN)
+ kappa = rep(kappa, len=NN); q= rep(q, len=NN)
+ tau = rep(tau, len=NN);
+
+ indexTF = (q > 0) & (q < 1)
+ qqq = logit(q[indexTF], earg=earg)
+ ans = q
+ ans[indexTF] = palap(q=qqq, location=location.ald[indexTF],
+ scale=scale.ald[indexTF],
+ tau=tau[indexTF], kappa=kappa[indexTF])
+ ans[q >= 1] = 1
+ ans[q <= 0] = 0
+ ans
+}
+
+
+
+rprobitlap = function(n, location.ald=0, scale.ald=1, tau=0.5,
+ kappa=sqrt(tau/(1-tau)), earg=list()) {
+ probit(ralap(n=n, location=location.ald, scale=scale.ald,
+ tau=tau, kappa=kappa), inverse=TRUE, earg=earg)
+}
+
+
+dprobitlap = function(x, location.ald=0, scale.ald=1, tau=0.5,
+ kappa=sqrt(tau/(1-tau)), log=FALSE,
+ earg=list(), meth2=TRUE) {
+ if(!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
+ rm(log)
+
+ NN = max(length(x), length(location.ald), length(scale.ald), length(kappa))
+ location.ald = rep(location.ald, len=NN); scale.ald= rep(scale.ald, len=NN)
+ kappa = rep(kappa, len=NN); x = rep(x, len=NN)
+ tau = rep(tau, len=NN)
+
+ logdensity = x * NaN
+ index1 = (x > 0) & (x < 1)
+ indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ if(meth2) {
+ dx.dy = x
+ use.x = probit(x[index1], earg=earg)
+ logdensity[index1] = dalap(x=use.x, location=location.ald[index1],
+ scale=scale.ald[index1], tau=tau[index1],
+ kappa=kappa[index1], log=TRUE)
+ } else {
+ Alpha = sqrt(2) * kappa / scale.ald
+ Beta = sqrt(2) / (scale.ald * kappa)
+ Delta = pnorm(location.ald)
+ use.x = qnorm(x) # qnorm(x[index1])
+ log.dy.dw = dnorm(use.x, log=TRUE)
+
+ exponent = ifelse(x >= Delta, -Alpha, Beta) * (use.x - location.ald) -
+ log.dy.dw
+
+ logdensity[index1] = (log(Alpha) + log(Beta) -
+ log(Alpha + Beta) + exponent)[index1]
+ }
+ logdensity[!indexTF] = NaN
+ logdensity[x < 0 & indexTF] = -Inf
+ logdensity[x > 1 & indexTF] = -Inf
+
+ if(meth2) {
+ dx.dy[index1] = probit(x[index1], earg=earg, inverse=FALSE, deriv=1)
+ dx.dy[!index1] = 0 # zz 0 seems to work. -Inf
+ dx.dy[!indexTF] = NaN
+ if(log.arg) logdensity - log(abs(dx.dy)) else exp(logdensity) / abs(dx.dy)
+ } else {
+ if(log.arg) logdensity else exp(logdensity)
+ }
+}
+
+
+qprobitlap = function(p, location.ald=0, scale.ald=1,
+ tau=0.5, kappa=sqrt(tau/(1-tau)), earg=list()) {
+ qqq = qalap(p=p, location=location.ald, scale=scale.ald,
+ tau=tau, kappa=kappa)
+ ans = probit(qqq, inverse=TRUE, earg=earg)
+ ans[(p < 0) | (p > 1)] = NaN
+ ans[p == 0] = 0
+ ans[p == 1] = 1
+ ans
+}
+
+
+
+pprobitlap = function(q, location.ald=0, scale.ald=1,
+ tau=0.5, kappa=sqrt(tau/(1-tau)), earg=list()) {
+ NN = max(length(q), length(location.ald), length(scale.ald),
+ length(kappa))
+ location.ald = rep(location.ald, len=NN); scale.ald= rep(scale.ald, len=NN)
+ kappa = rep(kappa, len=NN); q= rep(q, len=NN)
+ tau = rep(tau, len=NN);
+
+ indexTF = (q > 0) & (q < 1)
+ qqq = probit(q[indexTF], earg=earg)
+ ans = q
+ ans[indexTF] = palap(q=qqq, location=location.ald[indexTF],
+ scale=scale.ald[indexTF],
+ tau=tau[indexTF], kappa=kappa[indexTF])
+ ans[q >= 1] = 1
+ ans[q <= 0] = 0
+ ans
+}
+
+
+rclogloglap = function(n, location.ald=0, scale.ald=1, tau=0.5,
+ kappa=sqrt(tau/(1-tau)), earg=list()) {
+ cloglog(ralap(n=n, location=location.ald, scale=scale.ald,
+ tau=tau, kappa=kappa), inverse=TRUE, earg=earg)
+}
+
+
+dclogloglap = function(x, location.ald=0, scale.ald=1, tau=0.5,
+ kappa=sqrt(tau/(1-tau)), log=FALSE,
+ earg=list(), meth2=TRUE) {
+ if(!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
+ rm(log)
+
+ NN = max(length(x), length(location.ald), length(scale.ald), length(kappa))
+ location.ald = rep(location.ald, len=NN); scale.ald= rep(scale.ald, len=NN)
+ kappa = rep(kappa, len=NN); x = rep(x, len=NN)
+ tau = rep(tau, len=NN)
+
+ logdensity = x * NaN
+ index1 = (x > 0) & (x < 1)
+ indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ if(meth2) {
+ dx.dy = x
+ use.w = cloglog(x[index1], earg=earg)
+ logdensity[index1] = dalap(x=use.w, location=location.ald[index1],
+ scale=scale.ald[index1], tau=tau[index1],
+ kappa=kappa[index1], log=TRUE)
+
+ } else {
+ Alpha = sqrt(2) * kappa / scale.ald
+ Beta = sqrt(2) / (scale.ald * kappa)
+ Delta = cloglog(location.ald, inverse=TRUE)
+
+ exponent = ifelse(x >= Delta, -(Alpha+1), Beta-1) * log(-log1p(-x)) +
+ ifelse(x >= Delta, Alpha, -Beta) * location.ald
+ logdensity[index1] = (log(Alpha) + log(Beta) -
+ log(Alpha + Beta) - log1p(-x) + exponent)[index1]
+ }
+ logdensity[!indexTF] = NaN
+ logdensity[x < 0 & indexTF] = -Inf
+ logdensity[x > 1 & indexTF] = -Inf
+
+ if(meth2) {
+ dx.dy[index1] = cloglog(x[index1], earg=earg, inverse=FALSE, deriv=1)
+ dx.dy[!index1] = 0 # zz 0 seems to work. -Inf
+ dx.dy[!indexTF] = NaN
+ if(log.arg) logdensity - log(abs(dx.dy)) else exp(logdensity) / abs(dx.dy)
+ } else {
+ if(log.arg) logdensity else exp(logdensity)
+ }
+}
+
+
+qclogloglap = function(p, location.ald=0, scale.ald=1,
+ tau=0.5, kappa=sqrt(tau/(1-tau)), earg=list()) {
+ qqq = qalap(p=p, location=location.ald, scale=scale.ald,
+ tau=tau, kappa=kappa)
+ ans = cloglog(qqq, inverse=TRUE, earg=earg)
+ ans[(p < 0) | (p > 1)] = NaN
+ ans[p == 0] = 0
+ ans[p == 1] = 1
+ ans
+}
+
+
+
+pclogloglap = function(q, location.ald=0, scale.ald=1,
+ tau=0.5, kappa=sqrt(tau/(1-tau)), earg=list()) {
+ NN = max(length(q), length(location.ald), length(scale.ald),
+ length(kappa))
+ location.ald = rep(location.ald, len=NN); scale.ald= rep(scale.ald, len=NN)
+ kappa = rep(kappa, len=NN); q= rep(q, len=NN)
+ tau = rep(tau, len=NN);
+
+ indexTF = (q > 0) & (q < 1)
+ qqq = cloglog(q[indexTF], earg=earg)
+ ans = q
+ ans[indexTF] = palap(q=qqq, location=location.ald[indexTF],
+ scale=scale.ald[indexTF],
+ tau=tau[indexTF], kappa=kappa[indexTF])
+ ans[q >= 1] = 1
+ ans[q <= 0] = 0
+ ans
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/R/family.rcqo.q b/R/family.rcqo.q
index 33992da..2f0d95a 100644
--- a/R/family.rcqo.q
+++ b/R/family.rcqo.q
@@ -16,7 +16,7 @@ rcqo <- function(n, p, S,
ESOptima = FALSE,
loabundance = if(EqualMaxima) hiabundance else 10,
hiabundance = 100,
- sdlv = c(1.5/2^(0:3))[1:Rank],
+ sdlv = head(1.5/2^(0:3), Rank),
sdOptima = ifelse(ESOptima, 1.5/Rank, 1) *
ifelse(scalelv, sdlv, 1),
sdTolerances = 0.25,
@@ -63,8 +63,8 @@ rcqo <- function(n, p, S,
if(!is.Numeric(sdOptima, posit=TRUE))
stop("bad input for argument 'sdOptima'")
if(EqualMaxima && loabundance != hiabundance)
- stop(paste("arguments 'loabundance)' and 'hiabundance)' must",
- "be equal when EqualTolerances=TRUE"))
+ stop("arguments 'loabundance' and 'hiabundance' must ",
+ "be equal when 'EqualTolerances=TRUE'")
if(any(loabundance > hiabundance))
stop("loabundance > hiabundance is not allowed")
if(!is.logical(Crow1positive)) {
@@ -301,8 +301,8 @@ dcqo <- function(x, p, S,
if(!is.logical(EqualTolerances) || length(EqualTolerances)>1)
stop("bad input for argument 'EqualTolerances)'")
if(EqualMaxima && loabundance != hiabundance)
- stop(paste("'loabundance)' and 'hiabundance)' must",
- "be equal when EqualTolerances=TRUE"))
+ stop("'loabundance' and 'hiabundance' must ",
+ "be equal when 'EqualTolerances=TRUE'")
if(length(seed)) set.seed(seed)
xmat = matrix(rnorm(n*(p-1)), n, p-1, dimnames=list(as.character(1:n),
diff --git a/R/family.rrr.q b/R/family.rrr.q
index fa98d11..b4a114e 100644
--- a/R/family.rrr.q
+++ b/R/family.rrr.q
@@ -5,11 +5,10 @@
-replace.constraints <- function(Blist, cm, index)
-{
+replace.constraints = function(Blist, cm, index) {
- for(i in index)
- Blist[[i]] = cm
+ for(iii in index)
+ Blist[[iii]] = cm
Blist
}
@@ -18,7 +17,7 @@ valt.control <- function(
Alphavec=c(2, 4, 6, 9, 12, 16, 20, 25, 30, 40, 50,
60, 80, 100, 125, 2^(8:12)),
Criterion = c("rss", "coefficients"),
- Linesearch= FALSE, Maxit=7,
+ Linesearch = FALSE, Maxit=7,
Suppress.warning=TRUE,
Tolerance=1e-7, ...)
{
@@ -68,9 +67,9 @@ valt <- function(x, z, U, Rank=1,
Maxit=20,
Structural.zero=NULL,
SD.Cinit=0.02,
- Suppress.warning= FALSE,
+ Suppress.warning=FALSE,
Tolerance=1e-6,
- trace= FALSE,
+ trace=FALSE,
xij=NULL)
{
@@ -110,8 +109,8 @@ valt <- function(x, z, U, Rank=1,
if(length(Structural.zero))
diag(M)[,-Structural.zero,drop=FALSE] else diag(M), 1:Rank)
if(p1)
- for(k in 1:p1)
- cmat2[[Rank+k]] <- Blist[[colx1.index[k]]]
+ for(kk in 1:p1)
+ cmat2[[Rank+kk]] <- Blist[[colx1.index[kk]]]
if(is.null(Cinit))
Cinit <- matrix(rnorm(p2*Rank, sd=SD.Cinit), p2, Rank)
@@ -127,13 +126,13 @@ valt <- function(x, z, U, Rank=1,
lv.mat <- x[,colx2.index,drop=FALSE] %*% C
new.lv.model.matrix = cbind(lv.mat, if(p1) x[,colx1.index] else NULL)
- fit = vlm.wfit(new.lv.model.matrix, z, Blist=cmat2, U=U,
- matrix.out=TRUE, XBIG=FALSE, rss=FALSE, qr=FALSE, xij=xij)
+ fit = vlm.wfit(xmat=new.lv.model.matrix, z, Blist=cmat2, U=U,
+ matrix.out=TRUE, is.vlmX=FALSE, rss=FALSE, qr=FALSE, xij=xij)
A <- t(fit$mat.coef[1:Rank,,drop=FALSE])
cmat1 = replace.constraints(Blist, A, colx2.index)
- fit = vlm.wfit(x, z, Blist=cmat1, U=U,
- matrix.out=TRUE, XBIG= FALSE, rss=TRUE, qr= FALSE, xij=xij)
+ fit = vlm.wfit(xmat=x, z, Blist=cmat1, U=U,
+ matrix.out=TRUE, is.vlmX=FALSE, rss=TRUE, qr=FALSE, xij=xij)
C = fit$mat.coef[colx2.index,,drop=FALSE] %*% A %*% solve(t(A) %*% A)
numat = x[,colx2.index,drop=FALSE] %*% C
@@ -156,8 +155,7 @@ valt <- function(x, z, U, Rank=1,
" ratio =", format(ratio), "\n")
if(!is.null(fit$rss))
cat(" rss =", fit$rss, "\n")
- if(exists("flush.console"))
- flush.console()
+ flush.console()
}
if(ratio < Tolerance) {
@@ -180,9 +178,9 @@ valt <- function(x, z, U, Rank=1,
try.new.lv.model.matrix = cbind(try.lv.mat,
if(p1) x[,colx1.index] else NULL)
- try = vlm.wfit(try.new.lv.model.matrix, z, Blist=cmat2, U=U,
- matrix.out=TRUE, XBIG= FALSE, rss=TRUE, qr= FALSE,
- xij=xij)
+ try = vlm.wfit(xmat=try.new.lv.model.matrix, z, Blist=cmat2,
+ U=U, matrix.out=TRUE, is.vlmX=FALSE,
+ rss=TRUE, qr=FALSE, xij=xij)
if(try$rss < ftemp) {
use.alpha <- Alphavec[itter]
fit <- try
@@ -195,8 +193,7 @@ valt <- function(x, z, U, Rank=1,
if(trace && use.alpha>0) {
cat(" Finished line search using Alpha =",
use.alpha, "\n")
- if(exists("flush.console"))
- flush.console()
+ flush.console()
}
fini.linesearch = TRUE
}
@@ -247,8 +244,8 @@ lm2qrrvlm.model.matrix = function(x, Blist, C, control, assign=TRUE,
if(length(Dzero)) diag(M)[,-Dzero,drop=FALSE] else diag(M)},
Aoffset + (1:Qoffset))
if(p1)
- for(k in 1:p1)
- cmat2[[Aoffset+Qoffset+k]] <- Blist[[colx1.index[k]]]
+ for(kk in 1:p1)
+ cmat2[[Aoffset+Qoffset+kk]] <- Blist[[colx1.index[kk]]]
if(!no.thrills) {
i63 = iam(NA, NA, M=Rank, both=TRUE)
names(cmat2) = c(
@@ -274,8 +271,8 @@ lm2qrrvlm.model.matrix = function(x, Blist, C, control, assign=TRUE,
asx = attr(x, "assign")
asx = vector("list", ncol(new.lv.model.matrix))
names(asx) = names(cmat2)
- for(i in 1:length(names(asx))) {
- asx[[i]] = i
+ for(ii in 1:length(names(asx))) {
+ asx[[ii]] = ii
}
attr(new.lv.model.matrix, "assign") = asx
}
@@ -292,8 +289,8 @@ valt.2iter <- function(x, z, U, Blist, A, control) {
cmat1 = replace.constraints(Blist, A, control$colx2.index)
- fit <- vlm.wfit(x, z, Blist=cmat1, U=U, matrix.out=TRUE,
- XBIG= FALSE, rss=TRUE, qr= FALSE, xij=control$xij)
+ fit <- vlm.wfit(xmat=x, z, Blist=cmat1, U=U, matrix.out=TRUE,
+ is.vlmX=FALSE, rss=TRUE, qr=FALSE, xij=control$xij)
C = fit$mat.coef[control$colx2.index,,drop=FALSE] %*% A %*% solve(t(A) %*% A)
list(A=A, C=C, fitted=fit$fitted, new.coeffs = fit$coef,
@@ -331,18 +328,18 @@ valt.1iter = function(x, z, U, Blist, C, control, lp.names=NULL, nice31=FALSE,
for(ii in 1:NOS) {
i5 = i5 + 1:MSratio
- tmp100 = vlm.wfit(new.lv.model.matrix, zedd[,i5,drop=FALSE],
+ tmp100 = vlm.wfit(xmat=new.lv.model.matrix, zedd[,i5,drop=FALSE],
Blist=cmat2, U=U[i5,,drop=FALSE],
- matrix.out=TRUE, XBIG=FALSE, rss=TRUE, qr=FALSE,
- Eta.range = control$Eta.range,
+ matrix.out=TRUE, is.vlmX=FALSE, rss=TRUE,
+ qr=FALSE, Eta.range = control$Eta.range,
xij=control$xij, lp.names=lp.names[i5])
fit$rss = fit$rss + tmp100$rss
fit$mat.coef = cbind(fit$mat.coef, tmp100$mat.coef)
fit$fitted.values = cbind(fit$fitted.values, tmp100$fitted.values)
}
} else {
- fit = vlm.wfit(new.lv.model.matrix, zedd, Blist=cmat2, U=U,
- matrix.out=TRUE, XBIG= FALSE, rss=TRUE, qr= FALSE,
+ fit = vlm.wfit(xmat=new.lv.model.matrix, zedd, Blist=cmat2, U=U,
+ matrix.out=TRUE, is.vlmX=FALSE, rss=TRUE, qr=FALSE,
Eta.range = control$Eta.range,
xij=control$xij, lp.names=lp.names)
}
@@ -376,15 +373,15 @@ valt.1iter = function(x, z, U, Blist, C, control, lp.names=NULL, nice31=FALSE,
rrr.init.expression <- expression({
- if(backchat || control$Quadratic)
- copyxbig <- TRUE
+ if(control$Quadratic)
+ copy_X_vlm <- TRUE
modelno = switch(family at vfamily[1], "poissonff"=2,
"quasipoissonff"=2, "quasipoisson"=2,
"binomialff"=1, "quasibinomialff"=1,
"quasibinomial"=1, "negbinomial"=3,
"gamma2"=5, "gaussianff"=8,
- 0) # stop("can't fit this model using fast algorithm")
+ 0) # stop("cannot fit this model using fast algorithm")
if(modelno == 1) modelno = get("modelno", envir = VGAMenv)
rrcontrol$modelno = control$modelno = modelno
if(modelno==3 || modelno==5) {
@@ -507,13 +504,8 @@ rrr.normalize = function(rrcontrol, A, C, x, Dmat=NULL) {
rrr.end.expression = expression({
- if(is.R()) {
- if(exists(".VGAM.etamat", envir = VGAMenv))
- rm(".VGAM.etamat", envir = VGAMenv)
- } else {
- while(exists(".VGAM.etamat", inherits=TRUE))
- rm(".VGAM.etamat", inherits=TRUE)
- }
+ if(exists(".VGAM.etamat", envir = VGAMenv))
+ rm(".VGAM.etamat", envir = VGAMenv)
if(control$Quadratic) {
@@ -525,7 +517,7 @@ rrr.end.expression = expression({
Blist = replace.constraints(Blist.save, Amat, colx2.index)
}
- xbig.save = if(control$Quadratic) {
+ X_vlm_save = if(control$Quadratic) {
tmp300 = lm2qrrvlm.model.matrix(x=x, Blist=Blist.save,
C=Cmat, control=control)
lv.mat = tmp300$lv.mat # Needed at the top of new.s.call
@@ -567,18 +559,15 @@ rrr.derivative.expression <- expression({
- which.optimizer = if(is.R()) {
- if(control$Quadratic && control$FastAlgorithm) {
- "BFGS"
- } else {
- if(iter <= rrcontrol$Switch.optimizer) "Nelder-Mead" else "BFGS"
- }
- } else "Quasi-Newton"
+ which.optimizer = if(control$Quadratic && control$FastAlgorithm) {
+ "BFGS"
+ } else {
+ if(iter <= rrcontrol$Switch.optimizer) "Nelder-Mead" else "BFGS"
+ }
if(trace && control$OptimizeWrtC) {
cat("\n\n")
cat("Using", which.optimizer, "\n")
- if(exists("flush.console"))
- flush.console()
+ flush.console()
}
constraints=replace.constraints(constraints,diag(M),rrcontrol$colx2.index)
@@ -586,33 +575,18 @@ rrr.derivative.expression <- expression({
all(trivial.constraints(constraints) == 1)
theta0 <- c(Cmat)
- if(is.R()) assign(".VGAM.dot.counter", 0, envir = VGAMenv) else
- .VGAM.dot.counter <<- 0
-if(control$OptimizeWrtC) {
- if(is.R()) {
+ assign(".VGAM.dot.counter", 0, envir = VGAMenv)
+ if(control$OptimizeWrtC) {
if(control$Quadratic && control$FastAlgorithm) {
if(iter == 2) {
- if(is.R()) {
- if(exists(".VGAM.etamat", envir = VGAMenv))
- rm(".VGAM.etamat", envir = VGAMenv)
- } else {
- if(exists(".VGAM.etamat", inherits=TRUE))
- rm(".VGAM.etamat", inherits=TRUE)
- }
+ if(exists(".VGAM.etamat", envir = VGAMenv))
+ rm(".VGAM.etamat", envir = VGAMenv)
}
if(iter > 2 && !quasi.newton$convergence) {
- if(is.R()) {
- if(zthere <- exists(".VGAM.z", envir = VGAMenv)) {
- ..VGAM.z = get(".VGAM.z", envir = VGAMenv)
- ..VGAM.U = get(".VGAM.U", envir = VGAMenv)
- ..VGAM.beta = get(".VGAM.beta", envir = VGAMenv)
- }
- } else {
- if(zthere <- exists(".VGAM.z")) {
- ..VGAM.z = .VGAM.z
- ..VGAM.U = .VGAM.U
- ..VGAM.beta = .VGAM.beta
- }
+ if(zthere <- exists(".VGAM.z", envir = VGAMenv)) {
+ ..VGAM.z = get(".VGAM.z", envir = VGAMenv)
+ ..VGAM.U = get(".VGAM.U", envir = VGAMenv)
+ ..VGAM.beta = get(".VGAM.beta", envir = VGAMenv)
}
if(zthere) {
z = matrix(..VGAM.z, n, M) # minus any offset
@@ -624,18 +598,16 @@ if(control$OptimizeWrtC) {
if(iter == 2 || quasi.newton$convergence) {
NOS = ifelse(modelno==3 || modelno==5, M/2, M)
- canfitok = if(is.R())
- (exists("CQO.FastAlgorithm", envir=VGAMenv) &&
- get("CQO.FastAlgorithm", envir = VGAMenv)) else
- (exists("CQO.FastAlgorithm",inherits=TRUE) && CQO.FastAlgorithm)
+ canfitok = (exists("CQO.FastAlgorithm", envir=VGAMenv) &&
+ get("CQO.FastAlgorithm", envir = VGAMenv))
if(!canfitok)
- stop("can't fit this model using fast algorithm")
+ stop("cannot fit this model using fast algorithm")
p2star = if(nice31)
- ifelse(control$IToleran, Rank, Rank+0.5*Rank*(Rank+1)) else
- (NOS*Rank + Rank*(Rank+1)/2 * ifelse(control$EqualTol,1,NOS))
+ ifelse(control$IToleran, Rank, Rank+0.5*Rank*(Rank+1)) else
+ (NOS*Rank + Rank*(Rank+1)/2 * ifelse(control$EqualTol,1,NOS))
p1star = if(nice31) p1 * ifelse(modelno==3 || modelno==5,2,1) else
- (ncol(xbig.save) - p2star)
- xbig.save1 = if(p1star > 0) xbig.save[,-(1:p2star)] else NULL
+ (ncol(X_vlm_save) - p2star)
+ X_vlm_1save = if(p1star > 0) X_vlm_save[,-(1:p2star)] else NULL
quasi.newton = optim(par=Cmat, fn=callcqof,
gr=if(control$GradientFunction) calldcqof else NULL,
method=which.optimizer,
@@ -643,36 +615,23 @@ if(control$OptimizeWrtC) {
parscale=rep(control$Parscale, len=length(Cmat)),
maxit=250),
etamat=eta, xmat=x, ymat=y, wvec=w,
- xbig.save1 = if(nice31) NULL else xbig.save1,
+ X_vlm_1save = if(nice31) NULL else X_vlm_1save,
modelno=modelno, Control=control,
n=n, M=M, p1star=p1star, p2star=p2star, nice31=nice31)
- if(is.R()) {
- if(zthere <- exists(".VGAM.z", envir = VGAMenv)) {
- ..VGAM.z = get(".VGAM.z", envir = VGAMenv)
- ..VGAM.U = get(".VGAM.U", envir = VGAMenv)
- ..VGAM.beta = get(".VGAM.beta", envir = VGAMenv)
- }
- } else {
- if(zthere <- exists(".VGAM.z")) {
- ..VGAM.z = .VGAM.z
- ..VGAM.U = .VGAM.U
- ..VGAM.beta = .VGAM.beta
- }
+ if(zthere <- exists(".VGAM.z", envir = VGAMenv)) {
+ ..VGAM.z = get(".VGAM.z", envir = VGAMenv)
+ ..VGAM.U = get(".VGAM.U", envir = VGAMenv)
+ ..VGAM.beta = get(".VGAM.beta", envir = VGAMenv)
}
if(zthere) {
z = matrix(..VGAM.z, n, M) # minus any offset
U = matrix(..VGAM.U, M, n)
}
} else {
- if(is.R()) {
- if(exists(".VGAM.offset", envir = VGAMenv))
- rm(".VGAM.offset", envir = VGAMenv)
- } else {
- while(exists(".VGAM.offset", inherits=TRUE))
- rm(".VGAM.offset", inherits=TRUE)
- }
+ if(exists(".VGAM.offset", envir = VGAMenv))
+ rm(".VGAM.offset", envir = VGAMenv)
}
} else {
use.reltol = if(length(rrcontrol$Reltol) >= iter)
@@ -689,21 +648,7 @@ if(control$OptimizeWrtC) {
M=M, xmat=x, # varbix2=varbix2,
Blist=Blist, rrcontrol=rrcontrol)
}
- } else {
- quasi.newton <-
- nlminb(start=theta0,
- objective=rrr.derivC.rss,
- control = nlminb.control(x.tol = rrcontrol$X.tol,
- eval.max = rrcontrol$Eval.max,
- iter.max = rrcontrol$Iter.max,
- abs.tol = rrcontrol$Abs.tol,
- rel.tol = rrcontrol$Rel.tol,
- step.min = rrcontrol$Step.min,
- rel.err = rrcontrol$Rel.err),
- U=U, z= if(control$ITolerances) z+offset else z,
- M=M, xmat=x, # varbix2=varbix2,
- Blist=Blist, rrcontrol=rrcontrol)
- }
+
@@ -714,7 +659,7 @@ if(control$OptimizeWrtC) {
evnu = eigen(var(numat))
Cmat = Cmat %*% evnu$vector
numat = x[,rrcontrol$colx2.index,drop=FALSE] %*% Cmat
- offset = if(Rank > 1) -0.5*apply(numat^2, 1, sum) else -0.5*numat^2
+ offset = if(Rank > 1) -0.5*rowSums(numat^2) else -0.5*numat^2
}
}
@@ -735,25 +680,17 @@ if(control$OptimizeWrtC) {
if(trace && control$OptimizeWrtC) {
cat("\n")
- cat(which.optimizer, "using",
- if(is.R()) "optim():" else "nlminb():", "\n")
- cat("Objective =", if(is.R())
- quasi.newton$value else format(quasi.newton$objective), "\n")
+ cat(which.optimizer, "using optim():\n")
+ cat("Objective =", quasi.newton$value, "\n")
cat("Parameters (= c(C)) = ", if(length(quasi.newton$par) < 5)
"" else "\n")
- cat(if(is.R()) alt$Cmat else format(alt$Cmat), fill=TRUE)
+ cat(alt$Cmat, fill=TRUE)
cat("\n")
- if(!is.R())
- cat("Gradient norm =", format(quasi.newton$grad.norm), "\n")
- cat("Number of function evaluations =", if(is.R())
- quasi.newton$count[1] else quasi.newton$f.evals, "\n")
- if(!is.R())
- cat("Number of gradient evaluations =", quasi.newton$g.evals, "\n")
+ cat("Number of function evaluations =", quasi.newton$count[1], "\n")
if(length(quasi.newton$message))
cat("Message =", quasi.newton$message, "\n")
cat("\n")
- if(exists("flush.console"))
- flush.console()
+ flush.console()
}
@@ -771,28 +708,19 @@ rrr.derivC.rss = function(theta, U, z, M, xmat, Blist, rrcontrol,
if(rrcontrol$trace) {
cat(".")
- if(exists("flush.console"))
- flush.console()
+ flush.console()
}
- alreadyThere = if(is.R())
- exists(".VGAM.dot.counter", envir = VGAMenv) else
- exists(".VGAM.dot.counter")
+ alreadyThere = exists(".VGAM.dot.counter", envir = VGAMenv)
if(alreadyThere) {
- if(is.R()) {
- VGAM.dot.counter = get(".VGAM.dot.counter", envir = VGAMenv)
- VGAM.dot.counter = VGAM.dot.counter + 1
- assign(".VGAM.dot.counter", VGAM.dot.counter, envir = VGAMenv)
- } else {
- .VGAM.dot.counter <<- .VGAM.dot.counter + 1
- }
+ VGAM.dot.counter = get(".VGAM.dot.counter", envir = VGAMenv)
+ VGAM.dot.counter = VGAM.dot.counter + 1
+ assign(".VGAM.dot.counter", VGAM.dot.counter, envir = VGAMenv)
if(VGAM.dot.counter > max(50, options()$width - 5)) {
if(rrcontrol$trace) {
cat("\n")
- if(exists("flush.console"))
- flush.console()
+ flush.console()
}
- if(is.R()) assign(".VGAM.dot.counter", 0, envir = VGAMenv) else
- .VGAM.dot.counter <<- 0
+ assign(".VGAM.dot.counter", 0, envir = VGAMenv)
}
}
@@ -801,7 +729,7 @@ rrr.derivC.rss = function(theta, U, z, M, xmat, Blist, rrcontrol,
tmp700 = lm2qrrvlm.model.matrix(x=xmat, Blist=Blist,
no.thrills = !rrcontrol$Corner,
- C=Cmat, control=rrcontrol, assign= FALSE)
+ C=Cmat, control=rrcontrol, assign=FALSE)
Blist = tmp700$constraints # Doesn't contain \bI_{Rank} \bnu
if(rrcontrol$Corner) {
@@ -812,9 +740,9 @@ rrr.derivC.rss = function(theta, U, z, M, xmat, Blist, rrcontrol,
if(length(tmp700$offset)) z = z - tmp700$offset
- vlm.wfit(x=tmp700$new.lv.model.matrix, z=z,
+ vlm.wfit(xmat=tmp700$new.lv.model.matrix, zmat=z,
Blist=Blist, ncolx=ncol(xmat), U=U, only.rss=TRUE,
- matrix.out= FALSE, XBIG= FALSE, rss= TRUE, qr= FALSE,
+ matrix.out=FALSE, is.vlmX=FALSE, rss= TRUE, qr=FALSE,
Eta.range = rrcontrol$Eta.range,
xij=rrcontrol$xij)$rss
}
@@ -841,7 +769,6 @@ rrvglm.optim.control = function(Fnscale=1,
-if(is.R())
nlminbcontrol = function(Abs.tol = 10^(-6),
Eval.max=91,
Iter.max=91,
@@ -869,13 +796,13 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
if(length(varlvI) != 1 || !is.logical(varlvI))
- stop("\"varlvI\" must be TRUE or FALSE")
- if(length(reference) > 1) stop("\"reference\" must be of length 0 or 1")
+ stop("'varlvI' must be TRUE or FALSE")
+ if(length(reference) > 1) stop("'reference' must be of length 0 or 1")
if(length(reference) && is.Numeric(reference))
if(!is.Numeric(reference, allow=1, integ=TRUE))
- stop("bad input for argument \"reference\"")
+ stop("bad input for argument 'reference'")
if(!is.logical(ConstrainedQO <- object at control$ConstrainedQO))
- stop("can't determine whether the model is constrained or not")
+ stop("cannot determine whether the model is constrained or not")
ocontrol = object at control
coef.object = object at coefficients
Rank = ocontrol$Rank
@@ -913,18 +840,18 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
td.expression = expression({
Tolerance = Darray = m2adefault(Dmat, M=Rank)
- for(i in 1:M)
- if(length(Dzero) && any(Dzero == i)) {
- Tolerance[,,i] = NA # Darray[,,i] == O
- bellshaped[i] = FALSE
+ for(ii in 1:M)
+ if(length(Dzero) && any(Dzero == ii)) {
+ Tolerance[,,ii] = NA # Darray[,,ii] == O
+ bellshaped[ii] = FALSE
} else {
- Tolerance[,,i] = -0.5 * solve(Darray[,,i])
- bellshaped[i] = all(eigen(Tolerance[,,i])$values > 0)
+ Tolerance[,,ii] = -0.5 * solve(Darray[,,ii])
+ bellshaped[ii] = all(eigen(Tolerance[,,ii])$values > 0)
}
optimum = matrix(as.numeric(NA),Rank,M) # dimnames=list(lv.names,ynames)
- for(i in 1:M)
- if(bellshaped[i])
- optimum[,i] = Tolerance[,,i] %*% cbind(Amat[i,])
+ for(ii in 1:M)
+ if(bellshaped[ii])
+ optimum[,ii] = Tolerance[,,ii] %*% cbind(Amat[ii,])
})
Amat = object at extra$Amat # M x Rank
Cmat = object at extra$Cmat # p2 x Rank
@@ -935,7 +862,7 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
if(is.character(reference)) {
reference = (1:NOS)[reference == ynames]
if(length(reference) != 1)
- stop("could not match argument \"reference\" with any response")
+ stop("could not match argument 'reference' with any response")
}
ptr1 = 1
candidates = if(length(reference)) reference else {
@@ -971,10 +898,10 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
eval(td.expression)
} else {
if(length(reference) == 1)
- stop(paste("tolerance matrix specified by \"reference\"",
- "is not positive-definite")) else
- warning(paste("could not find any positive-definite",
- "tolerance matrix"))
+ stop("tolerance matrix specified by 'reference' ",
+ "is not positive-definite") else
+ warning("could not find any positive-definite ",
+ "tolerance matrix")
}
@@ -1013,11 +940,11 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
cx1i = ocontrol$colx1.index
maximum = if(length(cx1i)==1 && names(cx1i)=="(Intercept)") {
eta.temp = B1
- for(i in 1:M)
- eta.temp[i] = eta.temp[i] +
- Amat[i,,drop=FALSE] %*% optimum[,i,drop=FALSE] +
- t(optimum[,i,drop=FALSE]) %*%
- Darray[,,i,drop= TRUE] %*% optimum[,i,drop=FALSE]
+ for(ii in 1:M)
+ eta.temp[ii] = eta.temp[ii] +
+ Amat[ii,,drop=FALSE] %*% optimum[,ii,drop=FALSE] +
+ t(optimum[,ii,drop=FALSE]) %*%
+ Darray[,,ii,drop= TRUE] %*% optimum[,ii,drop=FALSE]
mymax = object at family@inverse(rbind(eta.temp), extra=object at extra)
c(mymax) # Convert from matrix to vector
} else {
@@ -1051,10 +978,10 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
Tolerance=Tolerance)
if(ConstrainedQO) {ans at C = Cmat} else {Cmat = NULL}
- for(r in 1:Rank)
- ans at OptimumOrder[r,] = order(ans at Optimum[r,])
- for(r in 1:Rank)
- ans at lvOrder[,r] = order(ans at lv[,r])
+ for(rrr in 1:Rank)
+ ans at OptimumOrder[rrr,] = order(ans at Optimum[rrr,])
+ for(rrr in 1:Rank)
+ ans at lvOrder[,rrr] = order(ans at lv[,rrr])
if(length(object at misc$estimated.dispersion) &&
object at misc$estimated.dispersion) {
@@ -1112,8 +1039,9 @@ setClass(Class="Coef.uqo", representation(
"Dzero" = "logical",
"Tolerance" = "array"))
-setClass(Class="Coef.qrrvglm", representation("Coef.uqo",
- "C" = "matrix"))
+setClass(Class="Coef.qrrvglm", representation(
+ "C" = "matrix"),
+ contains = "Coef.uqo")
printCoef.qrrvglm = function(x, ...) {
@@ -1121,40 +1049,40 @@ printCoef.qrrvglm = function(x, ...) {
Rank = object at Rank
M = nrow(object at A)
NOS = object at NOS
- iii = matrix(as.numeric(NA), NOS, Rank)
+ mymat = matrix(as.numeric(NA), NOS, Rank)
if(Rank == 1) { # || object at Diagonal
- for(i in 1:NOS) {
- fred = if(Rank>1) diag(object at Tolerance[,,i,drop=F]) else
- object at Tolerance[,,i]
+ for(ii in 1:NOS) {
+ fred = if(Rank>1) diag(object at Tolerance[,,ii,drop=F]) else
+ object at Tolerance[,,ii]
if(all(fred > 0))
- iii[i,] = sqrt(fred)
+ mymat[ii,] = sqrt(fred)
}
- dimnames(iii) = list(dimnames(object at Tolerance)[[3]],
- if(Rank==1) "lv" else
- paste("Tolerance", dimnames(iii)[[2]], sep=""))
+ dimnames(mymat) = list(dimnames(object at Tolerance)[[3]],
+ if(Rank==1) "lv" else
+ paste("Tolerance", dimnames(mymat)[[2]], sep=""))
} else {
- for(i in 1:NOS) {
- fred = eigen(object at Tolerance[,,i])
+ for(ii in 1:NOS) {
+ fred = eigen(object at Tolerance[,,ii])
if(all(fred$value > 0))
- iii[i,] = sqrt(fred$value)
+ mymat[ii,] = sqrt(fred$value)
}
- dimnames(iii) = list(dimnames(object at Tolerance)[[3]],
- paste("tol", 1:Rank, sep=""))
+ dimnames(mymat) = list(dimnames(object at Tolerance)[[3]],
+ paste("tol", 1:Rank, sep=""))
}
dimnames(object at A) = list(dimnames(object at A)[[1]],
if(Rank > 1) paste("A", dimnames(object at A)[[2]], sep=".") else "A")
Maximum = if(length(object at Maximum)) cbind(Maximum=object at Maximum) else NULL
- if(length(Maximum) && length(iii) && Rank==1)
- Maximum[is.na(iii),] = NA
+ if(length(Maximum) && length(mymat) && Rank==1)
+ Maximum[is.na(mymat),] = NA
optmat = cbind(t(object at Optimum))
dimnames(optmat) = list(dimnames(optmat)[[1]],
if(Rank > 1) paste("Optimum", dimnames(optmat)[[2]], sep=".")
else "Optimum")
- if(length(optmat) && length(iii) && Rank==1)
- optmat[is.na(iii),] = NA
+ if(length(optmat) && length(mymat) && Rank==1)
+ optmat[is.na(mymat),] = NA
if( object at Constrained ) {
cat("\nC matrix (constrained/canonical coefficients)\n")
@@ -1169,7 +1097,7 @@ printCoef.qrrvglm = function(x, ...) {
if(Rank > 1) { # !object at Diagonal && Rank > 1
cat("\nTolerances\n") } else
cat("\nTolerance\n")
- print(iii, ...)
+ print(mymat, ...)
cat("\nStandard deviation of the latent variables (site scores)\n")
print(sd(object at lv))
@@ -1177,12 +1105,12 @@ printCoef.qrrvglm = function(x, ...) {
}
- setMethod("show", "Coef.qrrvglm", function(object)
- printCoef.qrrvglm(object))
- setMethod("print", "Coef.qrrvglm", function(x, ...)
- printCoef.qrrvglm(x, ...))
- setMethod("summary", "qrrvglm", function(object, ...)
- summary.qrrvglm(object, ...))
+setMethod("show", "Coef.qrrvglm", function(object)
+ printCoef.qrrvglm(object))
+setMethod("print", "Coef.qrrvglm", function(x, ...)
+ printCoef.qrrvglm(x, ...))
+setMethod("summary", "qrrvglm", function(object, ...)
+ summary.qrrvglm(object, ...))
predict.qrrvglm <- function(object,
@@ -1195,7 +1123,7 @@ predict.qrrvglm <- function(object,
varlvI = FALSE, reference = NULL, ...)
{
if(se.fit)
- stop("can't handle se.fit==TRUE yet")
+ stop("cannot handle se.fit==TRUE yet")
if(deriv != 0)
stop("derivative is not equal to 0")
@@ -1203,9 +1131,9 @@ predict.qrrvglm <- function(object,
type <- as.character(substitute(type))
type <- match.arg(type, c("link", "response", "lv", "terms"))[1]
if(type=="lv")
- stop("can't handle type='lv' yet")
+ stop("cannot handle type='lv' yet")
if(type=="terms")
- stop("can't handle type='terms' yet")
+ stop("cannot handle type='terms' yet")
M = object at misc$M
Rank = object at control$Rank
@@ -1221,28 +1149,12 @@ predict.qrrvglm <- function(object,
}
}
- attrassignlm <- function(object, ...)
- attrassigndefault(model.matrix(object), object at terms)
-
- attrassigndefault <- function(mmat, tt) {
- if (!inherits(tt, "terms"))
- stop("need terms object")
- aa <- attr(mmat, "assign")
- if (is.null(aa))
- stop("argument is not really a model matrix")
- ll <- attr(tt, "term.labels")
- if (attr(tt, "intercept") > 0)
- ll <- c("(Intercept)", ll)
- aaa <- factor(aa, labels = ll)
- split(order(aa), aaa)
- }
-
if(!length(newdata)) {
X <- model.matrixvlm(object, type="lm", ...)
offset <- object at offset
tt <- object at terms$terms # terms(object)
- if(is.R() && !length(object at x))
- attr(X, "assign") <- attrassignlm(X, tt)
+ if(!length(object at x))
+ attr(X, "assign") = attrassignlm(X, tt)
} else {
if(is.smart(object) && length(object at smart.prediction)) {
setup.smart("read", smart.prediction=object at smart.prediction)
@@ -1253,7 +1165,7 @@ predict.qrrvglm <- function(object,
if(length(object at contrasts)) object at contrasts else NULL,
xlev = object at xlevels)
- if(is.R() && nrow(X)!=nrow(newdata)) {
+ if(nrow(X) != nrow(newdata)) {
as.save = attr(X, "assign")
X = X[rep(1, nrow(newdata)),,drop=FALSE]
dimnames(X) = list(dimnames(newdata)[[1]], "(Intercept)")
@@ -1271,8 +1183,7 @@ predict.qrrvglm <- function(object,
wrapup.smart()
}
- if(is.R())
- attr(X, "assign") <- attrassigndefault(X, tt)
+ attr(X, "assign") = attrassigndefault(X, tt)
}
ocontrol = object at control
@@ -1335,10 +1246,10 @@ setMethod("predict", "qrrvglm", function(object, ...)
predict.qrrvglm(object, ...))
coefqrrvglm = function(object, matrix.out = FALSE,
- label = TRUE, compress = TRUE) {
+ label = TRUE) {
if(matrix.out)
- stop("currently can't handle matrix.out=TRUE")
- coefvlm(object, matrix.out = matrix.out, label = label, compress = compress)
+ stop("currently cannot handle matrix.out=TRUE")
+ coefvlm(object, matrix.out = matrix.out, label = label)
}
@@ -1359,22 +1270,23 @@ setMethod("residuals", "qrrvglm", function(object, ...)
printrrvglm <- function(x, ...)
{
if(!is.null(cl <- x at call)) {
- cat("Call:\n")
- dput(cl)
- }
- coef <- x at coefficients
- if(any(nas <- is.na(coef))) {
- if(is.null(names(coef)))
- names(coef) <- paste("b", 1:length(coef), sep = "")
- cat("\nCoefficients: (", sum(nas),
- " not defined because of singularities)\n", sep = "")
+ cat("Call:\n")
+ dput(cl)
+ }
+ vecOfBetas <- x at coefficients
+ if(any(nas <- is.na(vecOfBetas))) {
+ if(is.null(names(vecOfBetas)))
+ names(vecOfBetas) = paste("b", 1:length(vecOfBetas), sep="")
+ cat("\nCoefficients: (", sum(nas),
+ " not defined because of singularities)\n", sep = "")
} else
cat("\nCoefficients:\n")
+ print.default(vecOfBetas, ...) # used to be print()
if(FALSE) {
- Rank <- x at Rank
- if(!length(Rank))
- Rank <- sum(!nas)
+ Rank <- x at Rank
+ if(!length(Rank))
+ Rank <- sum(!nas)
}
if(FALSE) {
@@ -1392,9 +1304,9 @@ printrrvglm <- function(x, ...)
if(length(x at criterion)) {
ncrit <- names(x at criterion)
- for(i in ncrit)
- if(i!="loglikelihood" && i!="deviance")
- cat(paste(i, ":", sep=""), format(x at criterion[[i]]), "\n")
+ for(iii in ncrit)
+ if(iii != "loglikelihood" && iii != "deviance")
+ cat(paste(iii, ":", sep=""), format(x at criterion[[iii]]), "\n")
}
invisible(x)
@@ -1405,26 +1317,26 @@ printrrvglm <- function(x, ...)
setMethod("print", "rrvglm", function(x, ...) printrrvglm(x, ...))
- setMethod("show", "rrvglm", function(object) printrrvglm(object))
+setMethod("show", "rrvglm", function(object) printrrvglm(object))
-rrvglm.control.Gaussian <- function(backchat= FALSE, half.stepsizing= FALSE,
+rrvglm.control.Gaussian <- function(half.stepsizing= FALSE,
save.weight= TRUE, ...)
{
- list(backchat= FALSE, half.stepsizing= FALSE,
+ list(half.stepsizing= FALSE,
save.weight=as.logical(save.weight)[1])
}
-summary.rrvglm <- function(object, correlation= FALSE,
+summary.rrvglm <- function(object, correlation=FALSE,
dispersion=NULL, digits=NULL,
numerical= TRUE,
h.step = 0.0001,
- kill.all= FALSE, omit13= FALSE, fixA= FALSE, ...)
+ kill.all=FALSE, omit13=FALSE, fixA=FALSE, ...)
{
@@ -1432,7 +1344,7 @@ summary.rrvglm <- function(object, correlation= FALSE,
if(!is.Numeric(h.step, allow=1) || abs(h.step)>1)
- stop("bad input for \"h.step\"")
+ stop("bad input for 'h.step'")
if(!object at control$Corner)
stop("this function works with corner constraints only")
@@ -1440,8 +1352,9 @@ summary.rrvglm <- function(object, correlation= FALSE,
if(is.null(dispersion))
dispersion <- object at misc$dispersion
- newobject <- object
- class(newobject) <- "vglm" # 6/2/02; For Splus6
+ newobject <- as(object, "vglm")
+
+
stuff <- summaryvglm(newobject, correlation=correlation,
dispersion=dispersion)
@@ -1515,16 +1428,16 @@ printsummary.rrvglm <- function(x, digits=NULL, quote= TRUE, prefix="")
-get.rrvglm.se1 <- function(fit, omit13= FALSE, kill.all= FALSE,
- numerical= TRUE,
- fixA= FALSE, h.step=0.0001,
- trace.arg= FALSE, ...) {
+get.rrvglm.se1 = function(fit, omit13=FALSE, kill.all=FALSE,
+ numerical=TRUE,
+ fixA=FALSE, h.step=0.0001,
+ trace.arg=FALSE, ...) {
if(length(fit at control$Nested) && fit at control$Nested)
- stop("sorry, can't handle nested models yet")
+ stop("sorry, cannot handle nested models yet")
Structural.zero = fit at control$Structural.zero
@@ -1552,7 +1465,7 @@ get.rrvglm.se1 <- function(fit, omit13= FALSE, kill.all= FALSE,
wz <- weights(fit, type="w") # old: wweights(fit) #fit at weights
if(!length(wz))
- stop("can't get fit at weights")
+ stop("cannot get fit at weights")
M <- fit at misc$M
n <- fit at misc$n
@@ -1580,8 +1493,13 @@ get.rrvglm.se1 <- function(fit, omit13= FALSE, kill.all= FALSE,
}
- newobject <- fit
- class(newobject) <- "vglm" # 6/2/02; For Splus6
+
+
+ newobject <- as(fit, "vglm")
+
+
+
+
sfit2233 <- summaryvglm(newobject)
d8 <- dimnames(sfit2233 at cov.unscaled)[[1]]
cov2233 <- solve(sfit2233 at cov.unscaled) # Includes any intercepts
@@ -1591,7 +1509,7 @@ get.rrvglm.se1 <- function(fit, omit13= FALSE, kill.all= FALSE,
nassign = names(fit at constraints)
choose.from = varassign(fit at constraints, nassign)
for(ii in nassign)
- if(any(ii== names(colx2.index))) {
+ if(any(ii == names(colx2.index))) {
log.vec33 = c(log.vec33, choose.from[[ii]])
}
cov33 = cov2233[ log.vec33, log.vec33, drop=FALSE] # r*p2 by r*p2
@@ -1603,14 +1521,14 @@ get.rrvglm.se1 <- function(fit, omit13= FALSE, kill.all= FALSE,
offs = matrix(0, n, M) # The "0" handles Structural.zero's
offs[,Index.corner] = lv.mat
if(M == (Rank+length(Structural.zero)))
- stop("can't handle full-rank models yet")
+ stop("cannot handle full-rank models yet")
cm = matrix(0, M, M-Rank-length(Structural.zero))
cm[-c(Index.corner,Structural.zero),] = diag(M-Rank-length(Structural.zero))
Blist = vector("list", length(colx1.index)+1)
names(Blist) = c(names(colx1.index), "I(lv.mat)")
for(ii in names(colx1.index))
- Blist[[ii]] = fit at constraints[[ii]]
+ Blist[[ii]] = fit at constraints[[ii]]
Blist[["I(lv.mat)"]] = cm
@@ -1637,33 +1555,24 @@ get.rrvglm.se1 <- function(fit, omit13= FALSE, kill.all= FALSE,
}
- if(( is.R() && fit at misc$dataname == "list") ||
- (!is.R() && fit at misc$dataname == "sys.parent")) {
+ if(fit at misc$dataname == "list") {
dspec = FALSE
} else {
- if(is.R()) {
- mytext1 = "exists(x=fit at misc$dataname, envir = VGAMenv)"
- myexp1 = parse(text=mytext1)
- is.there = eval(myexp1)
- bbdata= if(is.there) get(fit at misc$dataname, envir=VGAMenv) else
- get(fit at misc$dataname)
- } else {
- bbdata = get(fit at misc$dataname)
- }
+ mytext1 = "exists(x=fit at misc$dataname, envir = VGAMenv)"
+ myexp1 = parse(text=mytext1)
+ is.there = eval(myexp1)
+ bbdata= if(is.there) get(fit at misc$dataname, envir=VGAMenv) else
+ get(fit at misc$dataname)
dspec = TRUE
}
- if(!is.R()) {
-
- stop("26-9-2007: uncomment out the following lines to run it in Splus")
- }
fit1122 <- if(dspec) vlm(bb,
constraint=Blist, crit="d", weight=wz, data=bbdata,
- save.weight= TRUE, smart= FALSE, trace=trace.arg, x= TRUE) else
+ save.weight=TRUE, smart=FALSE, trace=trace.arg, x=TRUE) else
vlm(bb,
constraint=Blist, crit="d", weight=wz,
- save.weight= TRUE, smart= FALSE, trace=trace.arg, x= TRUE)
+ save.weight=TRUE, smart=FALSE, trace=trace.arg, x=TRUE)
@@ -1737,15 +1646,15 @@ num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
dct.da <- matrix(as.numeric(NA), (M-r-length(Structural.zero))*r, r*p2)
if((length(Index.corner) + length(Structural.zero)) == M)
- stop("can't handle full rank models yet")
+ stop("cannot handle full rank models yet")
cbindex = (1:M)[-c(Index.corner, Structural.zero)]
ptr = 1
- for(s in 1:r)
+ for(sss in 1:r)
for(tt in cbindex) {
small.Blist = vector("list", p2)
pAmat = Aimat
- pAmat[tt,s] = pAmat[tt,s] + h.step # Perturb it
+ pAmat[tt,sss] = pAmat[tt,sss] + h.step # Perturb it
for(ii in 1:p2)
small.Blist[[ii]] = pAmat
@@ -1760,7 +1669,7 @@ num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
fred = weights(fit, type="w", deriv= TRUE, ignore.slot= TRUE)
if(!length(fred))
- stop("can't get @weights and $deriv from object")
+ stop("cannot get @weights and $deriv from object")
wz = fred$weights
deriv.mu <- fred$deriv
@@ -1768,11 +1677,11 @@ num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=nn)
newzmat <- neweta + vbacksub(U, tvfor, M=M, n=nn) - offset
- newfit = vlm.wfit(x=x2mat, z=newzmat - x1mat %*% Bmat,
- Blist=small.Blist, U = U,
- matrix.out = FALSE, XBIG = FALSE,
- rss = TRUE, qr = FALSE, x.ret = FALSE, offset = NULL,
- xij=xij)
+ newfit = vlm.wfit(xmat=x2mat, zmat=newzmat - x1mat %*% Bmat,
+ Blist=small.Blist, U = U,
+ matrix.out = FALSE, is.vlmX = FALSE,
+ rss = TRUE, qr = FALSE, x.ret = FALSE,
+ offset = NULL, xij=xij)
dct.da[ptr,] <- (newfit$coef - t(Cimat)) / h.step
ptr = ptr + 1
}
@@ -1783,69 +1692,69 @@ num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
-dctda.fast.only <- function(theta, wz, U, zmat, M, r, x1mat, x2mat,
- p2, Index.corner, Aimat, Bmat, Cimat,
- xij=NULL,
- Structural.zero=NULL)
+dctda.fast.only = function(theta, wz, U, zmat, M, r, x1mat, x2mat,
+ p2, Index.corner, Aimat, Bmat, Cimat,
+ xij=NULL,
+ Structural.zero=NULL)
{
if(length(Structural.zero))
- stop("can't handle Structural.zero in dctda.fast.only()")
+ stop("cannot handle Structural.zero in dctda.fast.only()")
- nn <- nrow(x2mat)
- if(nrow(Cimat)!=p2 || ncol(Cimat)!=r)
+ nn = nrow(x2mat)
+ if(nrow(Cimat) != p2 || ncol(Cimat) != r)
stop("Cimat wrong shape")
- fred <- kronecker(matrix(1,1,r), x2mat)
- fred <- kronecker(fred, matrix(1,M,1))
- barney <- kronecker(Aimat, matrix(1,1,p2))
- barney <- kronecker(matrix(1,nn,1), barney)
+ fred = kronecker(matrix(1,1,r), x2mat)
+ fred = kronecker(fred, matrix(1,M,1))
+ barney = kronecker(Aimat, matrix(1,1,p2))
+ barney = kronecker(matrix(1,nn,1), barney)
- temp <- array(t(barney*fred), c(p2*r, M, nn))
- temp <- aperm(temp, c(2,1,3)) # M by p2*r by nn
- temp <- mux5(wz, temp, M=M, matrix.arg= TRUE)
- temp <- m2adefault(temp, M=p2*r) # Note M != M here!
- G <- solve(apply(temp,1:2,sum)) # p2*r by p2*r
+ temp = array(t(barney*fred), c(p2*r, M, nn))
+ temp = aperm(temp, c(2,1,3)) # M by p2*r by nn
+ temp = mux5(wz, temp, M=M, matrix.arg= TRUE)
+ temp = m2adefault(temp, M=p2*r) # Note M != M here!
+ G = solve(rowSums(temp, dims=2)) # p2*r by p2*r
- dc.da <- array(NA, c(p2, r, M, r)) # different from other functions
+ dc.da = array(NA, c(p2, r, M, r)) # different from other functions
if(length(Index.corner) == M)
- stop("can't handle full rank models yet")
- cbindex <- (1:M)[-Index.corner] # complement of Index.corner
- resid2 <- if(length(x1mat))
- mux22(t(wz), zmat - x1mat %*% Bmat, M=M, upper= FALSE, as.mat= TRUE) else
- mux22(t(wz), zmat , M=M, upper= FALSE, as.mat= TRUE)
-
- for(s in 1:r)
- for(tt in cbindex) {
- fred <- t(x2mat) * matrix(resid2[,tt], p2, nn, byrow= TRUE) # p2 * nn
- temp2 <- kronecker(ei(s,r), apply(fred,1,sum))
- for(k in 1:r) {
- Wiak <- mux22(t(wz), matrix(Aimat[,k], nn, M, byrow= TRUE),
- M=M, upper= FALSE, as.mat= TRUE) # nn * M
- wxx <- Wiak[,tt] * x2mat
- blocki <- t(x2mat) %*% wxx
- temp4a <- blocki %*% Cimat[,k]
- if(k==1) {
- temp4b <- blocki %*% Cimat[,s]
+ stop("cannot handle full rank models yet")
+ cbindex = (1:M)[-Index.corner] # complement of Index.corner
+ resid2 = if(length(x1mat))
+ mux22(t(wz), zmat - x1mat %*% Bmat, M=M, upper=FALSE, as.mat=TRUE) else
+ mux22(t(wz), zmat , M=M, upper=FALSE, as.mat=TRUE)
+
+ for(sss in 1:r)
+ for(ttt in cbindex) {
+ fred = t(x2mat) * matrix(resid2[,ttt], p2, nn, byrow= TRUE) # p2 * nn
+ temp2 = kronecker(ei(sss,r), rowSums(fred))
+ for(kkk in 1:r) {
+ Wiak = mux22(t(wz), matrix(Aimat[,kkk], nn, M, byrow= TRUE),
+ M=M, upper= FALSE, as.mat= TRUE) # nn * M
+ wxx = Wiak[,ttt] * x2mat
+ blocki = t(x2mat) %*% wxx
+ temp4a = blocki %*% Cimat[,kkk]
+ if(kkk==1) {
+ temp4b = blocki %*% Cimat[,sss]
}
- temp2 = temp2 - kronecker(ei(s,r), temp4a) -
- kronecker(ei(k,r), temp4b)
+ temp2 = temp2 - kronecker(ei(sss,r), temp4a) -
+ kronecker(ei(kkk,r), temp4b)
}
- dc.da[,,tt,s] <- G %*% temp2
+ dc.da[,,ttt,sss] = G %*% temp2
}
- ans1 <- dc.da[,,cbindex,,drop=FALSE] # p2 x r x (M-r) x r
- ans1 <- aperm(ans1, c(2,1,3,4)) # r x p2 x (M-r) x r
+ ans1 = dc.da[,,cbindex,,drop=FALSE] # p2 x r x (M-r) x r
+ ans1 = aperm(ans1, c(2,1,3,4)) # r x p2 x (M-r) x r
- ans1 <- matrix(c(ans1), r*p2, (M-r)*r)
- ans1 <- t(ans1)
+ ans1 = matrix(c(ans1), r*p2, (M-r)*r)
+ ans1 = t(ans1)
ans1
}
-dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
- intercept= TRUE, xij=NULL)
+dcda.fast = function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
+ intercept= TRUE, xij=NULL)
{
@@ -1859,16 +1768,16 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
if(intercept) {
Blist <- vector("list", pp+1)
Blist[[1]] <- diag(M)
- for(i in 2:(pp+1))
- Blist[[i]] <- Aimat
+ for(ii in 2:(pp+1))
+ Blist[[ii]] = Aimat
} else {
Blist <- vector("list", pp)
- for(i in 1:(pp))
- Blist[[i]] <- Aimat
+ for(ii in 1:pp)
+ Blist[[ii]] = Aimat
}
- coeffs <- vlm.wfit(xmat, z, Blist, U=U, matrix.out= TRUE,
- xij=xij)$mat.coef
+ coeffs = vlm.wfit(xmat=xmat, z, Blist, U=U, matrix.out=TRUE,
+ xij=xij)$mat.coef
c3 <- coeffs <- t(coeffs) # transpose to make M x (pp+1)
@@ -1887,7 +1796,7 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
temp <- aperm(temp, c(2,1,3))
temp <- mux5(wz, temp, M=M, matrix.arg= TRUE)
temp <- m2adefault(temp, M=r*pp) # Note M != M here!
- G <- solve(apply(temp,1:2,sum))
+ G = solve(rowSums(temp, dims=2))
dc.da <- array(NA, c(pp,r,M,r)) # different from other functions
cbindex <- (1:M)[-Index.corner]
@@ -1898,7 +1807,7 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
for(tt in cbindex) {
fred <- (if(intercept) t(xmat[,-1,drop=FALSE]) else
t(xmat)) * matrix(resid2[,tt],pp,nn,byrow= TRUE)
- temp2 <- kronecker(ei(s,r), apply(fred,1,sum))
+ temp2 <- kronecker(ei(s,r), rowSums(fred))
temp4 <- rep(0,pp)
for(k in 1:r) {
@@ -1928,17 +1837,17 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
etastar <- (if(intercept) xmat[,-1,drop=FALSE] else xmat) %*% Cimat
eta <- matrix(int.vec, nn, M, byrow= TRUE) + etastar %*% t(Aimat)
- sumWinv <- solve((m2adefault(t(apply(wz, 2, sum)), M=M))[,,1])
+ sumWinv <- solve((m2adefault(t(colSums(wz)), M=M))[,,1])
deta0.da <- array(0,c(M,M,r))
AtWi <- kronecker(matrix(1,nn,1), Aimat)
AtWi <- mux111(t(wz), AtWi, M=M, upper= FALSE) # matrix.arg= TRUE,
AtWi <- array(t(AtWi), c(r,M,nn))
for(ss in 1:r) {
- temp90 <- (m2adefault(t(apply(etastar[,ss]*wz,2,sum)), M=M))[,,1] #MxM
+ temp90 <- (m2adefault(t(colSums(etastar[,ss]*wz)), M=M))[,,1] #MxM
temp92 <- array(detastar.da[,,ss,], c(M,r,nn))
temp93 <- mux7(temp92, AtWi)
- temp91 <- apply(temp93, 1:2, sum) # M x M
+ temp91 = rowSums(temp93, dims=2) # M x M
deta0.da[,,ss] <- -(temp90 + temp91) %*% sumWinv
}
ans2 <- deta0.da[-(1:r),,,drop=FALSE] # (M-r) x M x r
@@ -1950,141 +1859,142 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
-rrr.deriv.rss <- function(theta, wz, U, z, M, r, xmat,
- pp, Index.corner, intercept= TRUE,
- xij=NULL)
+rrr.deriv.rss = function(theta, wz, U, z, M, r, xmat,
+ pp, Index.corner, intercept= TRUE,
+ xij=NULL)
{
- Amat <- matrix(as.numeric(NA), M, r)
- Amat[Index.corner,] <- diag(r)
- Amat[-Index.corner,] <- theta # [-(1:M)]
+ Amat = matrix(as.numeric(NA), M, r)
+ Amat[Index.corner,] = diag(r)
+ Amat[-Index.corner,] = theta # [-(1:M)]
if(intercept) {
- Blist <- vector("list", pp+1)
- Blist[[1]] <- diag(M)
- for(i in 2:(pp+1))
- Blist[[i]] <- Amat
+ Blist = vector("list", pp+1)
+ Blist[[1]] = diag(M)
+ for(ii in 2:(pp+1))
+ Blist[[ii]] = Amat
} else {
- Blist <- vector("list", pp)
- for(i in 1:(pp))
- Blist[[i]] <- Amat
+ Blist = vector("list", pp)
+ for(ii in 1:pp)
+ Blist[[ii]] = Amat
}
- vlm.wfit(xmat, z, Blist, U=U, matrix.out= FALSE, rss= TRUE, xij=xij)$rss
+ vlm.wfit(xmat=xmat, z, Blist, U=U, matrix.out=FALSE,
+ rss=TRUE, xij=xij)$rss
}
-rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
- pp, Index.corner, intercept= TRUE)
+rrr.deriv.gradient.fast = function(theta, wz, U, z, M, r, xmat,
+ pp, Index.corner, intercept= TRUE)
{
- nn <- nrow(xmat)
+ nn = nrow(xmat)
- Aimat <- matrix(as.numeric(NA), M, r)
- Aimat[Index.corner,] <- diag(r)
- Aimat[-Index.corner,] <- theta # [-(1:M)]
+ Aimat = matrix(as.numeric(NA), M, r)
+ Aimat[Index.corner,] = diag(r)
+ Aimat[-Index.corner,] = theta # [-(1:M)]
if(intercept) {
- Blist <- vector("list", pp+1)
- Blist[[1]] <- diag(M)
+ Blist = vector("list", pp+1)
+ Blist[[1]] = diag(M)
for(i in 2:(pp+1))
- Blist[[i]] <- Aimat
+ Blist[[i]] = Aimat
} else {
- Blist <- vector("list", pp)
+ Blist = vector("list", pp)
for(i in 1:(pp))
- Blist[[i]] <- Aimat
+ Blist[[i]] = Aimat
}
- coeffs <- vlm.wfit(xmat, z, Blist, U=U, matrix.out= TRUE,
+ coeffs = vlm.wfit(xmat, z, Blist, U=U, matrix.out= TRUE,
xij=NULL)$mat.coef
- c3 <- coeffs <- t(coeffs) # transpose to make M x (pp+1)
+ c3 = coeffs = t(coeffs) # transpose to make M x (pp+1)
- int.vec <- if(intercept) c3[,1] else 0 # \boldeta_0
- Cimat <- if(intercept) t(c3[Index.corner,-1,drop=FALSE]) else
+ int.vec = if(intercept) c3[,1] else 0 # \boldeta_0
+ Cimat = if(intercept) t(c3[Index.corner,-1,drop=FALSE]) else
t(c3[Index.corner,,drop=FALSE])
if(nrow(Cimat)!=pp || ncol(Cimat)!=r)
stop("Cimat wrong shape")
fred = kronecker(matrix(1,1,r), if(intercept) xmat[,-1,drop=FALSE] else xmat)
- fred <- kronecker(fred, matrix(1,M,1))
- barney <- kronecker(Aimat, matrix(1,1,pp))
- barney <- kronecker(matrix(1,nn,1), barney)
-
- temp <- array(t(barney*fred), c(r*pp,M,nn))
- temp <- aperm(temp, c(2,1,3))
- temp <- mux5(wz, temp, M=M, matrix.arg= TRUE)
- temp <- m2adefault(temp, M=r*pp) # Note M != M here!
- G <- solve(apply(temp,1:2,sum))
-
- dc.da <- array(NA,c(pp,r,r,M))
- cbindex <- (1:M)[-Index.corner]
- resid2 <- mux22(t(wz), z - matrix(int.vec,nn,M,byrow= TRUE), M=M,
+ fred = kronecker(fred, matrix(1,M,1))
+ barney = kronecker(Aimat, matrix(1,1,pp))
+ barney = kronecker(matrix(1,nn,1), barney)
+
+ temp = array(t(barney*fred), c(r*pp,M,nn))
+ temp = aperm(temp, c(2,1,3))
+ temp = mux5(wz, temp, M=M, matrix.arg= TRUE)
+ temp = m2adefault(temp, M=r*pp) # Note M != M here!
+ G = solve(rowSums(temp, dims=2))
+
+ dc.da = array(NA,c(pp,r,r,M))
+ cbindex = (1:M)[-Index.corner]
+ resid2 = mux22(t(wz), z - matrix(int.vec,nn,M,byrow= TRUE), M=M,
upper= FALSE, as.mat= TRUE) # mat= TRUE,
for(s in 1:r)
for(tt in cbindex) {
- fred <- (if(intercept) t(xmat[,-1,drop=FALSE]) else
+ fred = (if(intercept) t(xmat[,-1,drop=FALSE]) else
t(xmat)) * matrix(resid2[,tt],pp,nn,byrow= TRUE)
- temp2 <- kronecker(ei(s,r), apply(fred,1,sum))
+ temp2 = kronecker(ei(s,r), rowSums(fred))
- temp4 <- rep(0,pp)
+ temp4 = rep(0,pp)
for(k in 1:r) {
- Wiak <- mux22(t(wz), matrix(Aimat[,k],nn,M,byrow= TRUE),
+ Wiak = mux22(t(wz), matrix(Aimat[,k],nn,M,byrow= TRUE),
M=M, upper= FALSE, as.mat= TRUE) # mat= TRUE,
- wxx <- Wiak[,tt] * (if(intercept) xmat[,-1,drop=FALSE] else xmat)
- blocki <- (if(intercept) t(xmat[,-1,drop=FALSE]) else t(xmat)) %*% wxx
- temp4 <- temp4 + blocki %*% Cimat[,k]
+ wxx = Wiak[,tt] * (if(intercept) xmat[,-1,drop=FALSE] else xmat)
+ blocki = (if(intercept) t(xmat[,-1,drop=FALSE]) else t(xmat)) %*% wxx
+ temp4 = temp4 + blocki %*% Cimat[,k]
}
- dc.da[,,s,tt] <- G %*% (temp2 - 2 * kronecker(ei(s,r),temp4))
+ dc.da[,,s,tt] = G %*% (temp2 - 2 * kronecker(ei(s,r),temp4))
}
- detastar.da <- array(0,c(M,r,r,nn))
+ detastar.da = array(0,c(M,r,r,nn))
for(s in 1:r)
for(j in 1:r) {
- t1 <- t(dc.da[,j,s,])
- t1 <- matrix(t1, M, pp)
- detastar.da[,j,s,] <- t1 %*% (if(intercept)
+ t1 = t(dc.da[,j,s,])
+ t1 = matrix(t1, M, pp)
+ detastar.da[,j,s,] = t1 %*% (if(intercept)
t(xmat[,-1,drop=FALSE]) else t(xmat))
}
- etastar <- (if(intercept) xmat[,-1,drop=FALSE] else xmat) %*% Cimat
- eta <- matrix(int.vec, nn, M, byrow= TRUE) + etastar %*% t(Aimat)
+ etastar = (if(intercept) xmat[,-1,drop=FALSE] else xmat) %*% Cimat
+ eta = matrix(int.vec, nn, M, byrow= TRUE) + etastar %*% t(Aimat)
- sumWinv <- solve((m2adefault(t(apply(wz, 2, sum)), M=M))[,,1])
+ sumWinv = solve((m2adefault(t(colSums(wz)), M=M))[,,1])
- deta0.da <- array(0,c(M,M,r))
+ deta0.da = array(0,c(M,M,r))
- AtWi <- kronecker(matrix(1,nn,1), Aimat)
- AtWi <- mux111(t(wz), AtWi, M=M, upper= FALSE) # matrix.arg= TRUE,
- AtWi <- array(t(AtWi), c(r,M,nn))
+ AtWi = kronecker(matrix(1,nn,1), Aimat)
+ AtWi = mux111(t(wz), AtWi, M=M, upper= FALSE) # matrix.arg= TRUE,
+ AtWi = array(t(AtWi), c(r,M,nn))
for(ss in 1:r) {
- temp90 <- (m2adefault(t(apply(etastar[,ss]*wz,2,sum)), M=M))[,,1] # M x M
- temp92 <- array(detastar.da[,,ss,],c(M,r,nn))
- temp93 <- mux7(temp92,AtWi)
- temp91 <- apply(temp93,1:2,sum) # M x M
- deta0.da[,,ss] <- -(temp90 + temp91) %*% sumWinv
+ temp90 = (m2adefault(t(colSums(etastar[,ss]*wz)), M=M))[,,1] # M x M
+ temp92 = array(detastar.da[,,ss,],c(M,r,nn))
+ temp93 = mux7(temp92,AtWi)
+ temp91 = rowSums(temp93, dims=2) # M x M
+ deta0.da[,,ss] = -(temp90 + temp91) %*% sumWinv
}
- ans <- matrix(0,M,r)
- fred <- mux22(t(wz), z-eta, M=M, upper= FALSE, as.mat= TRUE) # mat= TRUE,
- fred.array <- array(t(fred %*% Aimat),c(r,1,nn))
+ ans = matrix(0,M,r)
+ fred = mux22(t(wz), z-eta, M=M, upper= FALSE, as.mat= TRUE) # mat= TRUE,
+ fred.array = array(t(fred %*% Aimat),c(r,1,nn))
for(s in 1:r) {
- a1 <- apply(fred %*% t(deta0.da[,,s]),2,sum)
- a2 <- apply(fred * etastar[,s],2,sum)
- temp92 <- array(detastar.da[,,s,],c(M,r,nn))
- temp93 <- mux7(temp92, fred.array)
- a3 <- apply(temp93,1:2,sum)
- ans[,s] <- a1 + a2 + a3
+ a1 = colSums(fred %*% t(deta0.da[,,s]))
+ a2 = colSums(fred * etastar[,s])
+ temp92 = array(detastar.da[,,s,],c(M,r,nn))
+ temp93 = mux7(temp92, fred.array)
+ a3 = rowSums(temp93, dims=2)
+ ans[,s] = a1 + a2 + a3
}
- ans <- -2 * c(ans[cbindex,])
+ ans = -2 * c(ans[cbindex,])
ans
}
@@ -2231,8 +2141,8 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
if(length(ellipse)) {
ellipse.temp = if(ellipse > 0) ellipse else 0.95
if(ellipse < 0 && (!object@control$EqualTolerances || varlvI))
- stop(paste("an equal-tolerances assumption and varlvI=FALSE",
- "is needed for \"ellipse\" < 0"))
+ stop("an equal-tolerances assumption and 'varlvI=FALSE' ",
+ "is needed for 'ellipse' < 0")
if( check.ok ) {
colx1.index = object@control$colx1.index
if(!(length(colx1.index)==1 &&
@@ -2364,7 +2274,7 @@ lvplot.rrvglm = function(object,
Aadj = rep(Aadj, len=length(index.nosz))
Acex = rep(Acex, len=length(index.nosz))
Acol = rep(Acol, len=length(index.nosz))
- if(length(Alabels) != M) stop(paste("Alabels must be of length", M))
+ if(length(Alabels) != M) stop("'Alabels' must be of length ", M)
if(length(Apch)) {
Apch = rep(Apch, len=length(index.nosz))
for(i in index.nosz)
@@ -2385,15 +2295,13 @@ lvplot.rrvglm = function(object,
Clwd = rep(Clwd, len=p2)
Clty = rep(Clty, len=p2)
if(length(Clabels) != p2)
- stop(paste("length(Clabels) must be equal to", p2))
- for(i in 1:p2) {
- if(is.R()) arrows(0, 0, Cmat[i,1], Cmat[i,2],
- lwd=Clwd[i], lty=Clty[i], col=Ccol[i]) else
- arrows(0,0,Cmat[i,1],Cmat[i,2],open=TRUE,
- lwd=Clwd[i], lty=Clty[i], col=Ccol[i])
- const = 1 + gapC[i] / sqrt(Cmat[i,1]^2 + Cmat[i,2]^2)
- text(const*Cmat[i,1], const*Cmat[i,2], Clabels[i], cex=Ccex[i],
- adj=Cadj[i], col=Ccol[i])
+ stop("'length(Clabels)' must be equal to ", p2)
+ for(ii in 1:p2) {
+ arrows(0, 0, Cmat[ii,1], Cmat[ii,2],
+ lwd=Clwd[ii], lty=Clty[ii], col=Ccol[ii])
+ const = 1 + gapC[ii] / sqrt(Cmat[ii,1]^2 + Cmat[ii,2]^2)
+ text(const*Cmat[ii,1], const*Cmat[ii,2], Clabels[ii], cex=Ccex[ii],
+ adj=Cadj[ii], col=Ccol[ii])
}
}
@@ -2407,13 +2315,13 @@ lvplot.rrvglm = function(object,
spch = rep(spch, len=n)
scol = rep(scol, len=n)
scex = rep(scex, len=n)
- for(i in ugrp) {
- gp = groups==i
+ for(ii in ugrp) {
+ gp = groups == ii
if(nlev > 1 && (length(unique(spch[gp])) != 1 ||
length(unique(scol[gp])) != 1 ||
length(unique(scex[gp])) != 1))
- warning(paste("spch/scol/scex is different for individuals",
- "from the same group"))
+ warning("spch/scol/scex is different for individuals ",
+ "from the same group")
temp = nuhat[gp,,drop=FALSE]
if(length(spch)) {
@@ -2426,8 +2334,8 @@ lvplot.rrvglm = function(object,
if(chull.arg) {
hull = chull(temp[,1],temp[,2])
hull = c(hull, hull[1])
- lines(temp[hull,1], temp[hull,2], type="b", lty=clty[i],
- col=ccol[i], lwd=clwd[i], pch=" ")
+ lines(temp[hull,1], temp[hull,2], type="b", lty=clty[ii],
+ col=ccol[ii], lwd=clwd[ii], pch=" ")
}
}
}
@@ -2497,10 +2405,8 @@ printCoef.rrvglm = function(x, ...) {
}
-if(is.R()) {
- if(!isGeneric("biplot"))
+if(!isGeneric("biplot"))
setGeneric("biplot", function(x, ...) standardGeneric("biplot"))
-}
setMethod("Coef", "qrrvglm", function(object, ...) Coef.qrrvglm(object, ...))
@@ -2541,8 +2447,7 @@ summary.qrrvglm = function(object,
answer@dispersion =
answer@misc$dispersion = (answer@post$Coef)@dispersion
- class(answer) = "summary.qrrvglm"
- answer
+ as(answer, "summary.qrrvglm")
}
printsummary.qrrvglm = function(x, ...) {
@@ -2568,19 +2473,23 @@ printsummary.qrrvglm = function(x, ...) {
}
-setClass(Class="summary.qrrvglm", representation("qrrvglm"))
+
+ setClass("summary.qrrvglm", contains = "qrrvglm")
+
+
+
setMethod("summary", "qrrvglm",
function(object, ...)
summary.qrrvglm(object, ...))
-setMethod("print", "summary.qrrvglm",
- function(x, ...)
- invisible(printsummary.qrrvglm(x, ...)))
+ setMethod("print", "summary.qrrvglm",
+ function(x, ...)
+ invisible(printsummary.qrrvglm(x, ...)))
-setMethod("show", "summary.qrrvglm",
- function(object)
- invisible(printsummary.qrrvglm(object)))
+ setMethod("show", "summary.qrrvglm",
+ function(object)
+ invisible(printsummary.qrrvglm(object)))
setMethod("print", "Coef.rrvglm", function(x, ...)
invisible(printCoef.rrvglm(x, ...)))
@@ -2604,7 +2513,7 @@ grc = function(y, Rank=1, Index.corner=2:(1+Rank), Structural.zero=1,
y = object.save@y
} else {
y = as.matrix(y)
- class(y) = "matrix" # Needed in R
+ y = as(y, "matrix")
}
if(length(dim(y)) != 2 || nrow(y) < 3 || ncol(y) < 3)
stop("y must be a matrix with >= 3 rows & columns, or a rrvglm() object")
@@ -2621,30 +2530,22 @@ grc = function(y, Rank=1, Index.corner=2:(1+Rank), Structural.zero=1,
options(warn=warn.save)
Row = factor(1:nrow(y))
- modmat.row = if(is.R()) model.matrix(~ Row) else {
- tmp3 = contrasts(Row)
- dimnames(tmp3) = list(dimnames(tmp3)[[1]], paste("Row", 2:nrow(y), sep=""))
- cbind("(Intercept)"=1, tmp3)
- }
+ modmat.row = model.matrix( ~ Row)
Col = factor(1:ncol(y))
- modmat.col = if(is.R()) model.matrix(~ Col) else {
- tmp3 = contrasts(Col)
- dimnames(tmp3) = list(dimnames(tmp3)[[1]], paste("Col", 2:ncol(y), sep=""))
- cbind("(Intercept)"=1, tmp3)
- }
+ modmat.col = model.matrix( ~ Col)
cms = list("(Intercept)" = matrix(1, ncol(y), 1))
- for(i in 2:nrow(y)) {
- cms[[paste("Row", i, sep="")]] = matrix(1, ncol(y), 1)
- .grc.df[[paste("Row", i, sep="")]] = modmat.row[,i]
+ for(ii in 2:nrow(y)) {
+ cms[[paste("Row", ii, sep="")]] = matrix(1, ncol(y), 1)
+ .grc.df[[paste("Row", ii, sep="")]] = modmat.row[,ii]
}
- for(i in 2:ncol(y)) {
- cms[[paste("Col", i, sep="")]] = modmat.col[,i,drop=FALSE]
- .grc.df[[paste("Col", i, sep="")]] = rep(1, nrow(y))
+ for(ii in 2:ncol(y)) {
+ cms[[paste("Col", ii, sep="")]] = modmat.col[,ii,drop=FALSE]
+ .grc.df[[paste("Col", ii, sep="")]] = rep(1, nrow(y))
}
- for(i in 2:nrow(y)) {
- cms[[yn1[i]]] = diag(ncol(y))
- .grc.df[[yn1[i]]] = ei(i, nrow(y))
+ for(ii in 2:nrow(y)) {
+ cms[[yn1[ii]]] = diag(ncol(y))
+ .grc.df[[yn1[ii]]] = ei(ii, nrow(y))
}
dimnames(.grc.df) = list(if(length(dimnames(y)[[1]])) dimnames(y)[[1]] else
@@ -2653,17 +2554,16 @@ grc = function(y, Rank=1, Index.corner=2:(1+Rank), Structural.zero=1,
str1 = "~ Row2"
if(nrow(y)>2)
- for(i in 3:nrow(y))
- str1 = paste(str1, paste("Row", i, sep=""), sep=" + ")
- for(i in 2:ncol(y))
- str1 = paste(str1, paste("Col", i, sep=""), sep=" + ")
+ for(ii in 3:nrow(y))
+ str1 = paste(str1, paste("Row", ii, sep=""), sep=" + ")
+ for(ii in 2:ncol(y))
+ str1 = paste(str1, paste("Col", ii, sep=""), sep=" + ")
str2 = paste("y ", str1)
- for(i in 2:nrow(y))
- str2 = paste(str2, yn1[i], sep=" + ")
+ for(ii in 2:nrow(y))
+ str2 = paste(str2, yn1[ii], sep=" + ")
myrrcontrol$Norrr = as.formula(str1) # Overwrite this
- if(is.R()) assign(".grc.df", .grc.df, envir = VGAMenv) else
- .grc.df <<- .grc.df
+ assign(".grc.df", .grc.df, envir = VGAMenv)
warn.save = options()$warn
options(warn=-3) # Suppress the warnings (hopefully, temporarily)
@@ -2673,18 +2573,15 @@ grc = function(y, Rank=1, Index.corner=2:(1+Rank), Structural.zero=1,
options(warn=warn.save)
if(summary.arg) {
- class(answer) = "rrvglm"
+ answer = as(answer, "rrvglm")
+
answer = summary.rrvglm(answer, h.step=h.step)
} else {
- class(answer) = "grc"
+ answer = as(answer, "grc")
}
- if(is.R()) {
- if(exists(".grc.df", envir = VGAMenv))
- rm(".grc.df", envir = VGAMenv)
- } else {
- remove(".grc.df")
- }
+ if(exists(".grc.df", envir = VGAMenv))
+ rm(".grc.df", envir = VGAMenv)
answer
}
@@ -2818,7 +2715,7 @@ vcovqrrvglm = function(object,
MaxScale <- as.character(substitute(MaxScale))
MaxScale <- match.arg(MaxScale, c("predictors", "response"))[1]
if(MaxScale != "predictors")
- stop("can currently only handle MaxScale=\"predictors\"")
+ stop("can currently only handle MaxScale='predictors'")
sobj = summary(object)
cobj = Coef(object, ITolerances = ITolerances, ...)
@@ -3109,11 +3006,11 @@ setMethod("Tol", "Coef.qrrvglm", function(object, ...) Tol.Coef.qrrvglm(object,
cgo <- function(...) {
- stop("The function \"cgo\" has been renamed \"cqo\". Ouch! Sorry!")
+ stop("The function 'cgo' has been renamed 'cqo'. Ouch! Sorry!")
}
clo <- function(...) {
- stop("Constrained linear ordination is fitted with the function \"rrvglm\"")
+ stop("Constrained linear ordination is fitted with the function 'rrvglm'")
}
diff --git a/R/family.survival.q b/R/family.survival.q
index ce4acf9..f398cd0 100644
--- a/R/family.survival.q
+++ b/R/family.survival.q
@@ -7,9 +7,11 @@
-dcnormal1 = function(r1=0, r2=0, link.sd="loge",
- earg=list(),
- isd=NULL, zero=NULL)
+
+
+ dcnormal1 = function(r1=0, r2=0, link.sd="loge",
+ earg=list(),
+ isd=NULL, zero=NULL)
{
if(!is.Numeric(r1, allow=1, integ=TRUE) || r1<0) stop("bad input for r1")
if(!is.Numeric(r2, allow=1, integ=TRUE) || r2<0) stop("bad input for r2")
@@ -32,8 +34,8 @@ dcnormal1 = function(r1=0, r2=0, link.sd="loge",
if(ncol(y <- cbind(y)) != 1)
stop("the response must be a vector or a one-column matrix")
if(length(w) != n || !is.Numeric(w, integ=TRUE, posit=TRUE))
- stop(paste("the argument \"weights\" must be a vector",
- "of positive integers"))
+ stop("the argument 'weights' must be a vector ",
+ "of positive integers")
sumw = sum(w)
extra$bign = sumw + .r1 + .r2 # Tot num of censored & uncensored obsns
if(!length(etastart)) {
@@ -109,24 +111,27 @@ dcnormal1 = function(r1=0, r2=0, link.sd="loge",
dbisa = function(x, shape, scale=1, log = FALSE) {
- if(!is.Numeric(x)) stop("bad input for argument \"x\"")
- if(!is.Numeric(shape, pos=TRUE)) stop("bad input for argument \"shape\"")
- if(!is.Numeric(scale, pos=TRUE)) stop("bad input for argument \"scale\"")
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ L = max(length(x), length(shape), length(scale))
+ x = rep(x, len=L); shape = rep(shape, len=L); scale = rep(scale, len=L);
+ logdensity = rep(log(0), len=L)
+ xok = (x > 0)
xifun = function(x) {temp <- sqrt(x); temp - 1/temp}
- ans = if(log)
- dnorm(xifun(x/scale) / shape, log=TRUE) + log1p(scale/x) - log(2) -
- 0.5 * log(x) - 0.5 * log(scale) - log(shape) else
- dnorm(xifun(x/scale) / shape) * (1 + scale/x) / (2 * sqrt(x) *
- sqrt(scale) * shape)
- ans[scale < 0 | shape < 0] = NA
- ans[x <= 0] = if(log) log(0) else 0
- ans
+ logdensity[xok] = dnorm(xifun(x[xok]/scale[xok]) / shape[xok], log=TRUE) +
+ log1p(scale[xok]/x[xok]) - log(2) - log(shape[xok]) -
+ 0.5 * log(x[xok]) - 0.5 * log(scale[xok])
+ logdensity[scale <= 0] = NaN
+ logdensity[shape <= 0] = NaN
+ if(log.arg) logdensity else exp(logdensity)
}
pbisa = function(q, shape, scale=1) {
- if(!is.Numeric(q)) stop("bad input for argument \"q\"")
- if(!is.Numeric(shape, pos=TRUE)) stop("bad input for argument \"shape\"")
- if(!is.Numeric(scale, pos=TRUE)) stop("bad input for argument \"scale\"")
+ if(!is.Numeric(q)) stop("bad input for argument 'q'")
+ if(!is.Numeric(shape, pos=TRUE)) stop("bad input for argument 'shape'")
+ if(!is.Numeric(scale, pos=TRUE)) stop("bad input for argument 'scale'")
ans = pnorm(((temp <- sqrt(q/scale)) - 1/temp) / shape)
ans[scale < 0 | shape < 0] = NA
ans[q <= 0] = 0
@@ -135,9 +140,9 @@ pbisa = function(q, shape, scale=1) {
qbisa = function(p, shape, scale=1) {
if(!is.Numeric(p, posit=TRUE) || any(p >= 1))
- stop("argument \"p\" must have values inside the interval (0,1)")
- if(!is.Numeric(shape, pos=TRUE)) stop("bad input for argument \"shape\"")
- if(!is.Numeric(scale, pos=TRUE)) stop("bad input for argument \"scale\"")
+ stop("argument 'p' must have values inside the interval (0,1)")
+ if(!is.Numeric(shape, pos=TRUE)) stop("bad input for argument 'shape'")
+ if(!is.Numeric(scale, pos=TRUE)) stop("bad input for argument 'scale'")
A = qnorm(p)
temp1 = A * shape * sqrt(4 + A^2 * shape^2)
ans1 = (2 + A^2 * shape^2 + temp1) * scale / 2
@@ -146,34 +151,38 @@ qbisa = function(p, shape, scale=1) {
}
rbisa = function(n, shape, scale=1) {
- if(!is.Numeric(n,integ=TRUE, allow=1)) stop("bad input for argument \"n\"")
- if(!is.Numeric(shape, pos=TRUE)) stop("bad input for argument \"shape\"")
- if(!is.Numeric(scale, pos=TRUE)) stop("bad input for argument \"scale\"")
- A = rnorm(n)
+ use.n = if((length.n <- length(n)) > 1) length.n else
+ if(!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ stop("bad input for argument 'n'") else n
+
+ A = rnorm(use.n)
temp1 = A * shape
temp1 = temp1 * sqrt(4 + temp1^2)
ans1 = (2 + A^2 * shape^2 + temp1) * scale / 2
ans2 = (2 + A^2 * shape^2 - temp1) * scale / 2
- ifelse(A < 0, pmin(ans1, ans2), pmax(ans1, ans2))
+ ans = ifelse(A < 0, pmin(ans1, ans2), pmax(ans1, ans2))
+ ans[shape <= 0] = NaN
+ ans[scale <= 0] = NaN
+ ans
}
-bisa = function(lshape = "loge", lscale = "loge",
- eshape = list(), escale = list(),
- ishape = NULL, iscale=1,
- method.init=1, zero=NULL)
+ bisa = function(lshape = "loge", lscale = "loge",
+ eshape = list(), escale = list(),
+ ishape = NULL, iscale=1,
+ method.init=1, zero=NULL)
{
if(mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
if(mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if(length(ishape) && !is.Numeric(ishape, posit=TRUE))
- stop("bad input for argument \"ishape\"")
+ stop("bad input for argument 'ishape'")
if(!is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument \"iscale\"")
+ stop("bad input for argument 'iscale'")
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
method.init > 3)
stop("method.init must be 1 or 2 or 3")
@@ -230,10 +239,9 @@ bisa = function(lshape = "loge", lscale = "loge",
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
sh = eta2theta(eta[,1], .lshape, earg= .eshape)
sc = eta2theta(eta[,2], .lscale, earg= .escale)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (-log(sh) - 0.5 * log(sc) + log1p(sc/y) -
- 0.5*log(8*pi) - 0.5 * log(y) -
- (y/sc - 2 + sc/y) / (2*sh^2)))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dbisa(x=y, shape=sh, scale=sc, log = TRUE))
+ }
} , list( .lshape=lshape, .lscale=lscale,
.eshape=eshape, .escale=escale ))),
vfamily=c("bisa"),
diff --git a/R/family.ts.q b/R/family.ts.q
index 95e275c..efa3e53 100644
--- a/R/family.ts.q
+++ b/R/family.ts.q
@@ -133,7 +133,7 @@ rrar <- function(Ranks=1, coefstart=NULL)
pp <- ncol(x)
indices <- 1:plag
- copyxbig <- TRUE # xbig.save matrix changes at each iteration
+ copy_X_vlm <- TRUE # X_vlm_save matrix changes at each iteration
dsrank <- -sort(-Ranks.) # ==rev(sort(Ranks.))
if(any(dsrank != Ranks.))
@@ -154,7 +154,7 @@ rrar <- function(Ranks=1, coefstart=NULL)
warning("ignoring explanatory variables")
if(any(MM < Ranks.))
- stop(paste("max(Ranks) can only be", MM, "or less"))
+ stop("'max(Ranks)' can only be ", MM, " or less")
y.save <- y # Save the original
if(any(w != 1))
stop("all weights should be 1")
@@ -164,10 +164,10 @@ rrar <- function(Ranks=1, coefstart=NULL)
rep(new.coeffs, len=aa+sum(Ranks.)*MM) else
runif(aa+sum(Ranks.)*MM)
temp8 <- rrar.Wmat(y.save,Ranks.,MM,ki,plag,aa,uu,nn,new.coeffs)
- xbig.save <- temp8$UU %*% temp8$Ht
+ X_vlm_save <- temp8$UU %*% temp8$Ht
if(!length(etastart)) {
- etastart <- xbig.save %*% new.coeffs
+ etastart <- X_vlm_save %*% new.coeffs
etastart <- matrix(etastart, ncol=ncol(y), byrow=TRUE) # So M=ncol(y)
}
@@ -220,7 +220,7 @@ rrar <- function(Ranks=1, coefstart=NULL)
vfamily="rrar",
deriv=expression({
temp8 <- rrar.Wmat(y.save,Ranks.,MM,ki,plag,aa,uu,nn,new.coeffs)
- xbig.save <- temp8$UU %*% temp8$Ht
+ X_vlm_save <- temp8$UU %*% temp8$Ht
extra$coeffs <- new.coeffs
@@ -270,9 +270,9 @@ garma <- function(link=c("identity","loge","reciprocal",
link = match.arg(link, c("identity","loge","reciprocal",
"logit","probit","cloglog","cauchit"))[1]
if(!is.Numeric(p.ar.lag, integer=TRUE))
- stop("bad input for argument \"p.ar.lag\"")
+ stop("bad input for argument 'p.ar.lag'")
if(!is.Numeric(q.lag.ma, integer=TRUE))
- stop("bad input for argument \"q.lag.ma\"")
+ stop("bad input for argument 'q.lag.ma'")
if(q.lag.ma != 0)
stop("sorry, only q.lag.ma=0 is currently implemented")
if(!is.list(earg)) earg = list()
@@ -289,7 +289,7 @@ garma <- function(link=c("identity","loge","reciprocal",
tt <- (1+plag):nrow(x)
pp <- ncol(x)
- copyxbig <- TRUE # x matrix changes at each iteration
+ copy_X_vlm <- TRUE # x matrix changes at each iteration
if( .link == "logit" || .link == "probit" || .link == "cloglog" ||
.link == "cauchit") {
@@ -380,7 +380,7 @@ garma <- function(link=c("identity","loge","reciprocal",
if(iter==1)
old.coeffs <- new.coeffs
- xbig.save <- lm2vlm.model.matrix(x, Blist, xij=control$xij)
+ X_vlm_save <- lm2vlm.model.matrix(x, Blist, xij=control$xij)
vary = switch( .link,
identity=1,
diff --git a/R/family.univariate.q b/R/family.univariate.q
index 1cb39f4..0bdae67 100644
--- a/R/family.univariate.q
+++ b/R/family.univariate.q
@@ -15,9 +15,16 @@
+
+
+
+
+
+
+
getMaxMin = function(vov, objfun, y, x, w, extraargs=NULL, maximize=TRUE,
abs.arg=FALSE) {
- if(!is.vector(vov)) stop("vov must be a vector")
+ if(!is.vector(vov)) stop("'vov' must be a vector")
objvals = vov
for(ii in 1:length(vov))
objvals[ii] = objfun(vov[ii], y=y, x=x, w=w, extraargs=extraargs)
@@ -34,7 +41,7 @@ getMaxMin = function(vov, objfun, y, x, w, extraargs=NULL, maximize=TRUE,
-mccullagh89 = function(ltheta="rhobit", lnu="logoff",
+ mccullagh89 = function(ltheta="rhobit", lnu="logoff",
itheta=NULL, inu=NULL,
etheta=list(),
enu=if(lnu == "logoff") list(offset=0.5) else list(),
@@ -45,7 +52,7 @@ mccullagh89 = function(ltheta="rhobit", lnu="logoff",
if(mode(lnu) != "character" && mode(lnu) != "name")
lnu = as.character(substitute(lnu))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(etheta)) etheta = list()
if(!is.list(enu)) enu = list()
@@ -67,6 +74,7 @@ mccullagh89 = function(ltheta="rhobit", lnu="logoff",
y = as.numeric(y)
if(any(y <= -1 | y >= 1))
stop("all y values must be in (-1,1)")
+
predictors.names= c(namesof("theta", .ltheta, earg= .etheta,tag=FALSE),
namesof("nu", .lnu, earg= .enu, tag=FALSE))
if(!length(etastart)) {
@@ -105,8 +113,8 @@ mccullagh89 = function(ltheta="rhobit", lnu="logoff",
Theta = eta2theta(eta[,1], .ltheta, earg= .etheta )
nu = eta2theta(eta[,2], .lnu, earg= .enu )
if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * ((nu-0.5)*log1p(-y^2) - nu * log1p(-2*Theta*y + Theta^2) -
- lbeta(nu+0.5,0.5 )))
+ sum(w * ((nu-0.5)*log1p(-y^2) - nu * log1p(-2*Theta*y + Theta^2) -
+ lbeta(nu+0.5,0.5 )))
}, list( .ltheta=ltheta, .lnu=lnu, .etheta = etheta, .enu=enu ))),
vfamily=c("mccullagh89"),
deriv=eval(substitute(expression({
@@ -139,12 +147,11 @@ hzeta.control <- function(save.weight=TRUE, ...)
-hzeta = function(link="loglog", earg=list(), ialpha=NULL, nsimEIM=100)
+ hzeta = function(link="loglog", earg=list(), ialpha=NULL, nsimEIM=100)
{
- if(length(ialpha) && !is.Numeric(ialpha, positive=TRUE))
- stop("'ialpha' must be > 0")
- if(!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 10)
- stop("'nsimEIM' should be an integer greater than 10")
+
+ stopifnot(ialpha > 0)
+ stopifnot(nsimEIM > 10, length(nsimEIM)==1, nsimEIM==round(nsimEIM))
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -189,8 +196,9 @@ hzeta = function(link="loglog", earg=list(), ialpha=NULL, nsimEIM=100)
loglikelihood=eval(substitute(
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
alpha = eta2theta(eta, .link, earg= .earg )
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * log((2*y-1)^(-alpha) - (2*y+1)^(-alpha)))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dhzeta(x=y, alpha=alpha, log=TRUE))
+ }
}, list( .link=link, .earg=earg ))),
vfamily=c("hzeta"),
deriv=eval(substitute(expression({
@@ -215,7 +223,7 @@ hzeta = function(link="loglog", earg=list(), ialpha=NULL, nsimEIM=100)
run.var = ((ii-1) * run.var + temp3^2) / ii
}
wz = if(intercept.only)
- matrix(apply(cbind(run.var), 2, mean),
+ matrix(colMeans(cbind(run.var)),
n, dimm(M), byrow=TRUE) else cbind(run.var)
wz = wz * dalpha.deta^2
@@ -226,18 +234,21 @@ hzeta = function(link="loglog", earg=list(), ialpha=NULL, nsimEIM=100)
-dhzeta = function(x, alpha)
+dhzeta = function(x, alpha, log = FALSE)
{
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
if(!is.Numeric(alpha, posit=TRUE))
stop("'alpha' must be numeric and have positive values")
nn = max(length(x), length(alpha))
- x = rep(x, len=nn)
- alpha = rep(alpha, len=nn)
+ x = rep(x, len=nn); alpha = rep(alpha, len=nn)
ox = !is.finite(x)
zero = ox | round(x) != x | x < 1
- ans = 0 * x
+ ans = rep(0, len=nn)
ans[!zero] = (2*x[!zero]-1)^(-alpha[!zero]) - (2*x[!zero]+1)^(-alpha[!zero])
- ans
+ if(log.arg) log(ans) else ans
}
@@ -262,7 +273,7 @@ qhzeta = function(p, alpha)
if(!is.Numeric(alpha, posit=TRUE))
stop("'alpha' must be numeric and have positive values")
if(!is.Numeric(p, posit=TRUE) || any(p >= 1))
- stop("argument \"p\" must have values inside the interval (0,1)")
+ stop("argument 'p' must have values inside the interval (0,1)")
nn = max(length(p), length(alpha))
p = rep(p, len=nn)
alpha = rep(alpha, len=nn)
@@ -275,13 +286,13 @@ rhzeta = function(n, alpha)
if(!is.Numeric(alpha, posit=TRUE))
stop("'alpha' must be numeric and have positive values")
if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
- stop("argument \"n\" must be a positive integer")
+ stop("argument 'n' must be a positive integer")
ans = ((runif(n)^(-1/alpha) - 1) / 2)
floor(ans+1)
}
-dirmultinomial = function(lphi="logit", ephi = list(),
+ dirmultinomial = function(lphi="logit", ephi = list(),
iphi = 0.10, parallel= FALSE, zero="M")
{
@@ -289,9 +300,9 @@ dirmultinomial = function(lphi="logit", ephi = list(),
lphi = as.character(substitute(lphi))
if(length(zero) &&
!(is.Numeric(zero, integer=TRUE, posit=TRUE) || is.character(zero )))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.Numeric(iphi, positive=TRUE) || max(iphi) >= 1.0)
- stop("bad input for argument \"iphi\"")
+ stop("bad input for argument 'iphi'")
if(!is.list(ephi)) ephi = list()
new("vglmff",
@@ -329,7 +340,7 @@ dirmultinomial = function(lphi="logit", ephi = list(),
namesof("phi", .lphi, short=TRUE))
extra$n2 = w # aka omega, must be integer # as.vector(apply(y, 1, sum))
if(!length(etastart)) {
- prob.init = apply(ycount, 2, sum)
+ prob.init = colSums(ycount)
prob.init = prob.init / sum(prob.init)
prob.init = matrix(prob.init, n, M, byrow=TRUE)
phi.init = rep( .iphi, len=n)
@@ -551,7 +562,7 @@ dirmul.old = function(link="loge", earg=list(), init.alpha = 0.01,
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.Numeric(init.alpha, posit=TRUE))
stop("'init.alpha' must contain positive values only")
if(!is.list(earg)) earg = list()
@@ -574,7 +585,7 @@ dirmul.old = function(link="loge", earg=list(), init.alpha = 0.01,
predictors.names = namesof(paste("shape", 1:M, sep=""), .link,
earg=.earg, short=TRUE)
- extra$n2 = as.vector(apply(y, 1, sum)) # Nb. don't multiply by 2
+ extra$n2 = rowSums(y) # Nb. don't multiply by 2
extra$y = y
if(!length(etastart)) {
yy = if(is.numeric(.init.alpha))
@@ -644,22 +655,21 @@ rdiric = function(n, shape, dimension=NULL) {
dimension = length(shape)
shape = rep(shape, len=dimension)
- ans = if(is.R()) rgamma(n*dimension, rep(shape, rep(n, dimension ))) else
- rgamma(n*dimension, rep(shape, each=n))
+ ans = rgamma(n*dimension, rep(shape, rep(n, dimension)))
dim(ans) = c(n, dimension)
- ans = ans / apply(ans, 1, sum)
- ans
+ ans = ans / rowSums(ans)
+ ans
}
-dirichlet = function(link="loge", earg=list(), zero=NULL)
+ dirichlet = function(link="loge", earg=list(), parallel= FALSE, zero=NULL)
{
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(earg)) earg = list()
new("vglmff",
@@ -668,8 +678,9 @@ dirichlet = function(link="loge", earg=list(), zero=NULL)
namesof("shapej", link, earg=earg), "\n\n",
"Mean: shape_j/(1 + sum(shape_j)), j=1,..,ncol(y)"),
constraints=eval(substitute(expression({
+ constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints, int= TRUE)
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
+ }), list( .parallel=parallel, .zero=zero ))),
initialize=eval(substitute(expression({
y = as.matrix(y)
M = ncol(y)
@@ -685,7 +696,7 @@ dirichlet = function(link="loge", earg=list(), zero=NULL)
inverse=eval(substitute(function(eta, extra=NULL) {
shape = eta2theta(eta, .link, earg= .earg )
M = if(is.matrix(eta)) ncol(eta) else 1
- sumshape = as.vector(shape %*% rep(1, len=M)) # apply(shape, 1, sum)
+ sumshape = rowSums(shape)
shape / sumshape
}, list( .link=link, .earg=earg ))),
last=eval(substitute(expression({
@@ -725,7 +736,8 @@ dirichlet = function(link="loge", earg=list(), zero=NULL)
-zeta = function(x, deriv=0) {
+ zeta = function(x, deriv=0) {
+
deriv.arg = deriv
@@ -768,16 +780,16 @@ zeta = function(x, deriv=0) {
a=12; k=8 # Markman paper
B = c(1/6, -1/30,1/42,-1/30,5/66,-691/2730,7/6,-3617/510)
ans = 0
- for(i in 1:(a-1))
- ans = ans + 1.0 / i^x
+ for(ii in 1:(a-1))
+ ans = ans + 1.0 / ii^x
ans = ans + 1.0 / ((x-1.0)* a^(x-1.0)) + 1.0 / (2.0 * a^x)
term = (x/2) / a^(x+1)
ans = ans + term * B[1]
- for(m in 2:k) {
- term = term * (x+2*m-2) * (x+2*m-3) / (a*a* 2*m * (2*m-1))
- ans = ans + term * B[m]
+ for(mm in 2:k) {
+ term = term * (x+2*mm-2) * (x+2*mm-3) / (a*a* 2*mm * (2*mm-1))
+ ans = ans + term * B[mm]
}
ans
}
@@ -814,27 +826,37 @@ zeta.derivative = function(x, deriv=0)
}
-dzeta = function(x, p)
+dzeta = function(x, p, log = FALSE)
{
- if(!is.Numeric(p, allow=1, posit=TRUE) || p <= 1)
- stop("'p' must be numeric and > 1")
- p = rep(p, len=length(x))
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ if(!is.Numeric(p, posit=TRUE)) # || min(p) <= 1
+ stop("'p' must be numeric and > 0")
+ LLL = max(length(p), length(x))
+ x = rep(x, len=LLL); p = rep(p, len=LLL)
ox = !is.finite(x)
zero = ox | round(x) != x | x < 1
if(any(zero)) warning("non-integer x and/or x < 1 or NAs")
- ans = rep(0.0, len=length(x))
- if(any(!zero))
- ans[!zero] = x[!zero]^(-p[!zero]) / zeta(p[!zero])
+ ans = rep(if(log.arg) log(0) else 0, len=LLL)
+ if(any(!zero)) {
+ if(log.arg) {
+ ans[!zero] = (-p[!zero]-1)*log(x[!zero]) - log(zeta(p[!zero]+1))
+ } else {
+ ans[!zero] = x[!zero]^(-p[!zero]-1) / zeta(p[!zero]+1)
+ }
+ }
if(any(ox)) ans[ox] = NA
ans
}
-zetaff = function(link="loge", earg=list(), init.p=NULL)
+ zetaff = function(link="loge", earg=list(), init.p=NULL)
{
if(length(init.p) && !is.Numeric(init.p, positi=TRUE))
- stop("argument \"init.p\" must be > 0")
+ stop("argument 'init.p' must be > 0")
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
@@ -850,7 +872,7 @@ zetaff = function(link="loge", earg=list(), init.p=NULL)
if(any(y < 1))
stop("all y values must be in 1,2,3,...")
if(any(y != round(y )))
- warning("y should be integer-valued")
+ warning("'y' should be integer-valued")
if(ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
@@ -858,7 +880,7 @@ zetaff = function(link="loge", earg=list(), init.p=NULL)
if(!length(etastart)) {
zetaff.Loglikfun = function(pp, y, x, w, extraargs) {
- sum(w * (-(pp+1) * log(y) - log(zeta(pp+1))))
+ sum(w * dzeta(x=y, p=pp, log=TRUE))
}
p.grid = seq(0.1, 3.0, len=19)
pp.init = if(length( .init.p )) .init.p else
@@ -881,8 +903,9 @@ zetaff = function(link="loge", earg=list(), init.p=NULL)
loglikelihood=eval(substitute(
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
pp = eta2theta(eta, .link, earg=.earg)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (-(pp+1) * log(y) - log(zeta(pp+1 ))))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dzeta(x=y, p=pp, log=TRUE))
+ }
}, list( .link=link, .earg=earg ))),
vfamily=c("zetaff"),
deriv=eval(substitute(expression({
@@ -905,9 +928,9 @@ zetaff = function(link="loge", earg=list(), init.p=NULL)
gharmonic = function(n, s=1, lognexponent=0) {
if(!is.Numeric(n, integ=TRUE, posit=TRUE))
- stop("bad input for argument \"n\"")
+ stop("bad input for argument 'n'")
if(!is.Numeric(lognexponent, allow=1))
- stop("bad input for argument \"lognexponent\"")
+ stop("bad input for argument 'lognexponent'")
if(length(n) == 1 && length(s) == 1) {
if(lognexponent != 0) sum(log(1:n)^lognexponent * (1:n)^(-s)) else
sum((1:n)^(-s))
@@ -916,30 +939,39 @@ gharmonic = function(n, s=1, lognexponent=0) {
n = rep(n, len=LEN)
ans = s = rep(s, len=LEN)
if(lognexponent != 0) {
- for(i in 1:LEN)
- ans[i] = sum(log(1:n[i])^lognexponent * (1:n[i])^(-s[i]))
+ for(ii in 1:LEN)
+ ans[ii] = sum(log(1:n[ii])^lognexponent * (1:n[ii])^(-s[ii]))
} else
- for(i in 1:LEN)
- ans[i] = sum((1:n[i])^(-s[i]))
+ for(ii in 1:LEN)
+ ans[ii] = sum((1:n[ii])^(-s[ii]))
ans
}
}
-dzipf = function(x, N, s)
+dzipf = function(x, N, s, log=FALSE)
{
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
if(!is.Numeric(x))
- stop("bad input for argument \"x\"")
+ stop("bad input for argument 'x'")
if(!is.Numeric(N, integ=TRUE, posit=TRUE))
- stop("bad input for argument \"N\"")
+ stop("bad input for argument 'N'")
if(!is.Numeric(s, posit=TRUE))
- stop("bad input for argument \"s\"")
+ stop("bad input for argument 's'")
nn = max(length(x), length(N), length(s))
x = rep(x, len=nn); N = rep(N, len=nn); s = rep(s, len=nn);
ox = !is.finite(x)
zero = ox | round(x) != x | x < 1 | x > N
- ans = 0 * x
+ ans = (if(log.arg) log(0) else 0) * x
if(any(!zero))
- ans[!zero] = x[!zero]^(-s[!zero]) / gharmonic(N[!zero], s[!zero])
+ if(log.arg) {
+ ans[!zero] = (-s[!zero]) * log(x[!zero]) -
+ log(gharmonic(N[!zero], s[!zero]))
+ } else {
+ ans[!zero] = x[!zero]^(-s[!zero]) / gharmonic(N[!zero], s[!zero])
+ }
ans
}
@@ -947,11 +979,11 @@ dzipf = function(x, N, s)
pzipf = function(q, N, s) {
if(!is.Numeric(q))
- stop("bad input for argument \"q\"")
+ stop("bad input for argument 'q'")
if(!is.Numeric(N, integ=TRUE, posit=TRUE))
- stop("bad input for argument \"N\"")
+ stop("bad input for argument 'N'")
if(!is.Numeric(s, posit=TRUE))
- stop("bad input for argument \"s\"")
+ stop("bad input for argument 's'")
nn = max(length(q), length(N), length(s))
q = rep(q, len=nn); N = rep(N, len=nn); s = rep(s, len=nn);
@@ -967,14 +999,14 @@ pzipf = function(q, N, s) {
}
-zipf = function(N=NULL, link="loge", earg=list(), init.s=NULL)
+ zipf = function(N=NULL, link="loge", earg=list(), init.s=NULL)
{
if(length(N) &&
(!is.Numeric(N, positi=TRUE, integ=TRUE, allow=1) || N <= 1))
- stop("bad input for argument \"N\"")
+ stop("bad input for argument 'N'")
enteredN = length(N)
if(length(init.s) && !is.Numeric(init.s, positi=TRUE))
- stop("argument \"init.s\" must be > 0")
+ stop("argument 'init.s' must be > 0")
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -999,13 +1031,13 @@ zipf = function(N=NULL, link="loge", earg=list(), init.s=NULL)
if(!is.Numeric(NN, allow=1, posit=TRUE, integ=TRUE))
NN = max(y)
if(max(y) > NN)
- stop("maximum of the response is greater than argument \"N\"")
+ stop("maximum of the response is greater than argument 'N'")
if(any(y < 1))
- stop(paste("all response values must be in 1,2,3,...,N=",NN,sep=""))
+ stop("all response values must be in 1,2,3,...,N(=", NN,")")
extra$N = NN
if(!length(etastart)) {
llfun = function(ss, y, N, w) {
- sum(w * ((-ss) * log(y) - log(gharmonic(N, ss))))
+ sum(w * dzipf(x=y, N=extra$N, s=ss, log=TRUE))
}
ss.init = if(length( .init.s )) .init.s else
getInitVals(gvals=seq(0.1, 3.0, len=19), llfun=llfun,
@@ -1028,8 +1060,9 @@ zipf = function(N=NULL, link="loge", earg=list(), init.s=NULL)
loglikelihood=eval(substitute(
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
ss = eta2theta(eta, .link, earg= .earg)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * ((-ss) * log(y) - log(gharmonic(extra$N, ss ))))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dzipf(x=y, N=extra$N, s=ss, log=TRUE))
+ }
}, list( .link=link, .earg=earg ))),
vfamily=c("zipf"),
deriv=eval(substitute(expression({
@@ -1055,7 +1088,7 @@ cauchy.control <- function(save.weight=TRUE, ...)
list(save.weight=save.weight)
}
-cauchy = function(llocation="identity", lscale="loge",
+ cauchy = function(llocation="identity", lscale="loge",
elocation=list(), escale=list(),
ilocation=NULL, iscale=NULL,
iprobs = seq(0.2, 0.8, by=0.2),
@@ -1071,14 +1104,14 @@ cauchy = function(llocation="identity", lscale="loge",
if(!is.list(elocation)) elocation = list()
if(!is.list(escale)) escale = list()
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(length(nsimEIM) &&
(!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 50))
stop("'nsimEIM' should be an integer greater than 50")
if(length(iscale) && !is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument \"iscale\"")
+ stop("bad input for argument 'iscale'")
if(!is.Numeric(iprobs, posit=TRUE) || max(iprobs) >= 1)
- stop("bad input for argument \"iprobs\"")
+ stop("bad input for argument 'iprobs'")
new("vglmff",
blurb=c("Two parameter Cauchy distribution (location & scale unknown)\n\n",
@@ -1108,7 +1141,7 @@ cauchy = function(llocation="identity", lscale="loge",
btry = (qy - loc) / ztry
scal = median(btry, na.rm = TRUE)
if(scal <= 0) scal = 0.1
- sum(w * (-log1p(((y-loc)/scal)^2) - log(scal)))
+ sum(w * dcauchy(x=y, loc=loc, scale=scal, log=TRUE))
}
loc.grid = c(quantile(y, probs=seq(0.1, 0.9, by=0.05)))
try.this = getMaxMin(loc.grid, objfun=cauchy2.Loglikfun,
@@ -1153,10 +1186,10 @@ cauchy = function(llocation="identity", lscale="loge",
loglikelihood=eval(substitute(
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
location = eta2theta(eta[,1], .llocation, earg=.elocation)
- myscale = eta2theta(eta[,2], .lscale, earg=.escale)
- Z = (y-location)/ myscale
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (-log1p(Z^2) - log(pi) - log(myscale)))
+ myscale = eta2theta(eta[,2], .lscale, earg=.escale)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dcauchy(x=y, loc=location, sc=myscale, log=TRUE))
+ }
}, list( .escale=escale, .lscale=lscale,
.elocation=elocation, .llocation=llocation ))),
vfamily=c("cauchy"),
@@ -1188,7 +1221,7 @@ cauchy = function(llocation="identity", lscale="loge",
temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
}
wz = if(intercept.only)
- matrix(apply(run.varcov, 2, mean),
+ matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow=TRUE) else run.varcov
wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
@@ -1210,13 +1243,13 @@ cauchy = function(llocation="identity", lscale="loge",
-cauchy1 = function(scale.arg=1, llocation="identity",
- elocation=list(),
- ilocation=NULL, method.init=1)
+ cauchy1 = function(scale.arg=1, llocation="identity",
+ elocation=list(),
+ ilocation=NULL, method.init=1)
{
if(mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
- if(!is.Numeric(scale.arg, posit=TRUE)) stop("bad input for \"scale.arg\"")
+ if(!is.Numeric(scale.arg, posit=TRUE)) stop("bad input for 'scale.arg'")
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
method.init > 3)
stop("'method.init' must be 1 or 2 or 3")
@@ -1241,7 +1274,7 @@ cauchy1 = function(scale.arg=1, llocation="identity",
if( .method.init == 3) y else {
cauchy1.Loglikfun = function(loc, y, x, w, extraargs) {
scal = extraargs
- sum(w * (-log1p(((y-loc)/scal)^2) - log(scal)))
+ sum(w * dcauchy(x=y, loc=loc, scale=scal, log=TRUE))
}
loc.grid = quantile(y, probs=seq(0.1, 0.9, by=0.05))
try.this = getMaxMin(loc.grid, objfun=cauchy1.Loglikfun,
@@ -1271,9 +1304,9 @@ cauchy1 = function(scale.arg=1, llocation="identity",
loglikelihood=eval(substitute(
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
location = eta2theta(eta, .llocation, earg=.elocation)
- temp = (y-location)/ .scale.arg
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (-log1p(temp^2) - log(pi) - log(.scale.arg )))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dcauchy(x=y, loc=location, scale= .scale.arg, log=TRUE))
+ }
}, list( .scale.arg=scale.arg, .elocation=elocation,
.llocation=llocation ))),
vfamily=c("cauchy1"),
@@ -1297,7 +1330,7 @@ cauchy1 = function(scale.arg=1, llocation="identity",
-logistic1 = function(llocation="identity",
+ logistic1 = function(llocation="identity",
elocation=list(),
scale.arg=1, method.init=1)
{
@@ -1343,8 +1376,10 @@ logistic1 = function(llocation="identity",
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
location = eta2theta(eta, .llocation, earg= .elocation)
zedd = (y-location)/.scale.arg
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (-zedd - 2 * log1p(exp(-zedd)) - log(.scale.arg )))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dlogis(x=y, location = location,
+ scale = .scale.arg, log = TRUE))
+ }
}, list( .llocation=llocation,
.elocation=elocation, .scale.arg=scale.arg ))),
vfamily=c("logistic1"),
@@ -1365,14 +1400,14 @@ logistic1 = function(llocation="identity",
-erlang = function(shape.arg, link="loge", earg=list(), method.init=1)
+ erlang = function(shape.arg, link="loge", earg=list(), method.init=1)
{
if(!is.Numeric(shape.arg, allow=1, integer=TRUE, positi=TRUE))
- stop("\"shape\" must be a positive integer")
+ stop("'shape' must be a positive integer")
if(!is.Numeric(method.init, allow=1, integer=TRUE, positi=TRUE) ||
method.init > 2)
- stop("\"method.init\" must be 1 or 2")
+ stop("'method.init' must be 1 or 2")
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -1415,9 +1450,10 @@ erlang = function(shape.arg, link="loge", earg=list(), method.init=1)
loglikelihood=eval(substitute(
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
sc = eta2theta(eta, .link, earg=.earg)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * ((.shape.arg - 1) * log(y) - y / sc - .shape.arg * log(sc) -
- lgamma( .shape.arg )))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * ((.shape.arg - 1) * log(y) - y / sc - .shape.arg * log(sc) -
+ lgamma( .shape.arg )))
+ }
}, list( .link=link, .earg=earg, .shape.arg=shape.arg ))),
vfamily=c("erlang"),
deriv=eval(substitute(expression({
@@ -1437,32 +1473,38 @@ erlang = function(shape.arg, link="loge", earg=list(), method.init=1)
-dbort = function(x, Qsize=1, a=0.5) {
- if(!is.Numeric(x)) stop("bad input for argument \"x\"")
+dbort = function(x, Qsize=1, a=0.5, log=FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ if(!is.Numeric(x)) stop("bad input for argument 'x'")
if(!is.Numeric(Qsize, allow=1, integ=TRUE, posit=TRUE))
- stop("bad input for argument \"Qsize\"")
+ stop("bad input for argument 'Qsize'")
if(!is.Numeric(a, posit=TRUE) || max(a) >= 1)
- stop("bad input for argument \"a\"")
+ stop("bad input for argument 'a'")
N = max(length(x), length(Qsize), length(a))
x = rep(x, len=N); Qsize = rep(Qsize, len=N); a = rep(a, len=N);
xok = (x >= Qsize) & (x == round(x)) & (a > 0) & (a < 1)
- ans = x * 0
- loglik = lgamma(1 + Qsize[xok]) - lgamma(x[xok] + 1 - Qsize[xok]) +
- (x[xok] - 1 - Qsize[xok]) * log(x[xok]) +
- (x[xok] - Qsize[xok]) * log(a[xok]) - a[xok] * x[xok]
- ans[xok] = exp(loglik)
+ ans = rep(if(log.arg) log(0) else 0, len=N) # loglikelihood
+ ans[xok] = lgamma(1 + Qsize[xok]) - lgamma(x[xok] + 1 - Qsize[xok]) +
+ (x[xok] - 1 - Qsize[xok]) * log(x[xok]) +
+ (x[xok] - Qsize[xok]) * log(a[xok]) - a[xok] * x[xok]
+ if(!log.arg) {
+ ans[xok] = exp(ans[xok])
+ }
ans
}
rbort = function(n, Qsize=1, a=0.5) {
if(!is.Numeric(n, integ=TRUE, posit=TRUE, allow=1))
- stop("bad input for argument \"n\"")
+ stop("bad input for argument 'n'")
if(!is.Numeric(Qsize, allow=1, integ=TRUE, posit=TRUE))
- stop("bad input for argument \"Qsize\"")
+ stop("bad input for argument 'Qsize'")
if(!is.Numeric(a, posit=TRUE) || max(a) >= 1)
- stop("bad input for argument \"a\"")
+ stop("bad input for argument 'a'")
N = n
qsize = rep(Qsize, len=N); a = rep(a, len=N)
totqsize = qsize
@@ -1478,10 +1520,10 @@ rbort = function(n, Qsize=1, a=0.5) {
}
-borel.tanner = function(Qsize=1, link="logit", earg=list(), method.init=1)
+ borel.tanner = function(Qsize=1, link="logit", earg=list(), method.init=1)
{
if(!is.Numeric(Qsize, allow=1, integ=TRUE, posit=TRUE))
- stop("bad input for argument \"Qsize\"")
+ stop("bad input for argument 'Qsize'")
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
@@ -1500,7 +1542,7 @@ borel.tanner = function(Qsize=1, link="logit", earg=list(), method.init=1)
if(ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
if(any(y < .Qsize))
- stop(paste("all y values must be >=", .Qsize))
+ stop("all y values must be >= ", .Qsize)
if(any(y != round(y)))
warning("response should be integer-valued")
@@ -1528,11 +1570,10 @@ borel.tanner = function(Qsize=1, link="logit", earg=list(), method.init=1)
}), list( .link=link, .earg=earg, .Qsize=Qsize ))),
loglikelihood=eval(substitute(
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- a = eta2theta(eta, .link, earg=.earg)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (lgamma( 1 + .Qsize) - lgamma( y + 1 - .Qsize) +
- (y - 1 - .Qsize) * log(y) +
- (y- .Qsize) * log(a) - a * y))
+ aa = eta2theta(eta, .link, earg=.earg)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dbort(x=y, Qsize= .Qsize, a=aa, log=TRUE))
+ }
}, list( .link=link, .earg=earg, .Qsize=Qsize ))),
vfamily=c("borel.tanner"),
deriv=eval(substitute(expression({
@@ -1550,23 +1591,29 @@ borel.tanner = function(Qsize=1, link="logit", earg=list(), method.init=1)
-dfelix = function(x, a=0.25) {
- if(!is.Numeric(x)) stop("bad input for argument \"x\"")
- if(!is.Numeric(a, posit=TRUE)) stop("bad input for argument \"a\"")
+dfelix = function(x, a=0.25, log = FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ if(!is.Numeric(x)) stop("bad input for argument 'x'")
+ if(!is.Numeric(a, posit=TRUE)) stop("bad input for argument 'a'")
N = max(length(x), length(a))
x = rep(x, len=N); a = rep(a, len=N);
xok = (x %% 2 == 1) & (x == round(x)) & (x >= 1) & (a > 0) & (a < 0.5)
- ans = x * 0
- loglik = ((x[xok]-3)/2) * log(x[xok]) + ((x[xok]-1)/2) * log(a[xok]) -
- lgamma( x[xok]/2 + 0.5) - a[xok] * x[xok]
- ans[xok] = exp(loglik)
+ ans = rep(if(log.arg) log(0) else 0, len=N) # loglikelihood
+ ans[xok] = ((x[xok]-3)/2) * log(x[xok]) + ((x[xok]-1)/2) * log(a[xok]) -
+ lgamma(x[xok]/2 + 0.5) - a[xok] * x[xok]
+ if(!log.arg) {
+ ans[xok] = exp(ans[xok])
+ }
ans
}
-felix = function(link="elogit",
+ felix = function(link="elogit",
earg=if(link=="elogit") list(min=0, max=0.5) else list(),
method.init=1)
{
@@ -1612,10 +1659,10 @@ felix = function(link="elogit",
}), list( .link=link, .earg=earg ))),
loglikelihood=eval(substitute(
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- a = eta2theta(eta, .link, earg=.earg)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (((y-3)/2) * log(y) + ((y-1)/2) * log(a) -
- lgamma( y/2 + 0.5) - a * y))
+ aa = eta2theta(eta, .link, earg=.earg)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dfelix(x=y, a=aa, log = TRUE))
+ }
}, list( .link=link, .earg=earg ))),
vfamily=c("felix"),
deriv=eval(substitute(expression({
@@ -1631,21 +1678,31 @@ felix = function(link="elogit",
}), list( .link=link ))))
}
-dsnorm = function(x, location=0, scale=1, shape=0) {
+dsnorm = function(x, location=0, scale=1, shape=0, log=FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
if(!is.Numeric(scale, posit=TRUE))
- stop("bad input for argument \"scale\"")
+ stop("bad input for argument 'scale'")
zedd = (x - location) / scale
- 2 * dnorm(zedd) * pnorm(shape * zedd) / scale
+ loglik = log(2) + dnorm(zedd, log=TRUE) + pnorm(shape * zedd, log.p=TRUE) -
+ log(scale)
+ if(log.arg) {
+ loglik
+ } else {
+ exp(loglik)
+ }
}
rsnorm = function(n, location=0, scale=1, shape=0) {
if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
- stop("bad input for argument \"n\"")
+ stop("bad input for argument 'n'")
if(!is.Numeric(scale, posit=TRUE))
- stop("bad input for argument \"scale\"")
- if(!is.Numeric(shape)) stop("bad input for argument \"shape\"")
+ stop("bad input for argument 'scale'")
+ if(!is.Numeric(shape)) stop("bad input for argument 'shape'")
rho = shape / sqrt(1 + shape^2)
u0 = rnorm(n)
v = rnorm(n)
@@ -1654,8 +1711,8 @@ rsnorm = function(n, location=0, scale=1, shape=0) {
}
-skewnormal1 = function(lshape="identity", earg = list(), ishape=NULL,
- nsimEIM=NULL)
+ skewnormal1 = function(lshape="identity", earg = list(), ishape=NULL,
+ nsimEIM=NULL)
{
if(mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
@@ -1703,8 +1760,9 @@ skewnormal1 = function(lshape="identity", earg = list(), ishape=NULL,
loglikelihood=eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
alpha = eta2theta(eta, .lshape, earg=.earg)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (pnorm(y*alpha, log=TRUE )))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dsnorm(x=y, location=0, scale=1, shape=alpha, log=TRUE))
+ }
}, list( .earg=earg, .lshape=lshape ))),
vfamily=c("skewnormal1"),
deriv=eval(substitute(expression({
@@ -1745,11 +1803,11 @@ skewnormal1 = function(lshape="identity", earg = list(), ishape=NULL,
-betaff = function(A=0, B=1,
- lmu=if(A==0 & B==1) "logit" else "elogit", lphi="loge",
- emu=if(lmu=="elogit") list(min=A,max=B) else list(),
- ephi=list(),
- imu=NULL, iphi=NULL, method.init=1, zero=NULL)
+ betaff = function(A=0, B=1,
+ lmu=if(A==0 & B==1) "logit" else "elogit", lphi="loge",
+ emu=if(lmu=="elogit") list(min=A,max=B) else list(),
+ ephi=list(),
+ imu=NULL, iphi=NULL, method.init=1, zero=NULL)
{
if(!is.Numeric(A, allow=1) || !is.Numeric(B, allow=1) || A >= B)
stop("A must be < B, and both must be of length one")
@@ -1760,12 +1818,12 @@ betaff = function(A=0, B=1,
if(mode(lphi) != "character" && mode(lphi) != "name")
lphi = as.character(substitute(lphi))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(length(imu) && (!is.Numeric(imu, posit=TRUE) ||
any(imu <= A) || any(imu >= B)))
- stop("bad input for argument \"imu\"")
+ stop("bad input for argument 'imu'")
if(length(iphi) && !is.Numeric(iphi, posit=TRUE))
- stop("bad input for argument \"iphi\"")
+ stop("bad input for argument 'iphi'")
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
method.init > 2)
stop("'method.init' must be 1 or 2")
@@ -1826,10 +1884,11 @@ betaff = function(A=0, B=1,
m1u = if( .stdbeta ) mu else (mu - .A) / (.B - .A)
phi = eta2theta(eta[,2], .lphi, .ephi )
if(residuals) stop("loglikelihood residuals not implemented yet") else {
- if( .stdbeta ) sum(w * (-lbeta(mu*phi,(1-mu)*phi) +
- (phi*mu-1)*log(y) + ((1-mu)*phi-1)*log1p(-y))) else
- sum(w * (-lbeta(m1u*phi,(1-m1u)*phi) + (phi*m1u-1)*log(y-.A) +
- ((1-m1u)*phi-1)*log(.B-y) - (phi-1)*log(.B-.A)))
+ shape1 = phi * m1u
+ shape2 = (1 - m1u) * phi
+ zedd = (y - .A) / ( .B - .A)
+ sum(w * (dbeta(x=zedd, shape1=shape1, shape2=shape2, log=TRUE) -
+ log( abs( .B - .A ))))
}
}, list( .lmu=lmu, .lphi=lphi, .A=A, .B=B, .emu=emu, .ephi=ephi,
.stdbeta = stdbeta ))),
@@ -1874,21 +1933,21 @@ betaff = function(A=0, B=1,
-beta.ab = function(lshape1="loge", lshape2="loge",
- eshape1=list(), eshape2=list(),
- i1=NULL, i2=NULL, trim=0.05,
- A=0, B=1, parallel=FALSE, zero=NULL)
+ beta.ab = function(lshape1="loge", lshape2="loge",
+ eshape1=list(), eshape2=list(),
+ i1=NULL, i2=NULL, trim=0.05,
+ A=0, B=1, parallel=FALSE, zero=NULL)
{
if(mode(lshape1) != "character" && mode(lshape1) != "name")
lshape1 = as.character(substitute(lshape1))
if(mode(lshape2) != "character" && mode(lshape2) != "name")
lshape2 = as.character(substitute(lshape2))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(length( i1 ) && !is.Numeric( i1, posit=TRUE))
- stop("bad input for argument \"i1\"")
+ stop("bad input for argument 'i1'")
if(length( i2 ) && !is.Numeric( i2, posit=TRUE))
- stop("bad input for argument \"i2\"")
+ stop("bad input for argument 'i2'")
if(!is.Numeric(A, allow=1) || !is.Numeric(B, allow=1) || A >= B)
stop("A must be < B, and both must be of length one")
@@ -1953,15 +2012,14 @@ beta.ab = function(lshape1="loge", lshape2="loge",
}), list( .lshape1=lshape1, .lshape2=lshape2, .A=A, .B=B,
.eshape1=eshape1, .eshape2=eshape2 ))),
loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE, eta, extra=NULL){
+ function(mu, y, w, residuals= FALSE, eta, extra=NULL){
shapes = cbind(eta2theta(eta[,1], .lshape1, earg= .eshape1 ),
eta2theta(eta[,2], .lshape2, earg= .eshape2 ))
- temp = if(is.R()) lbeta(shapes[,1], shapes[,2]) else
- lgamma(shapes[,1]) + lgamma(shapes[,2]) -
- lgamma(shapes[,1]+shapes[,2])
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * ((shapes[,1]-1) * log(y- .A) + (shapes[,2]-1) * log( .B -y) -
- temp - (shapes[,1]+shapes[,2]-1) * log( .B - .A )))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ zedd = (y - .A) / ( .B - .A)
+ sum(w * (dbeta(x=zedd, shape1=shapes[,1], shape2=shapes[,2],
+ log=TRUE) - log( abs( .B - .A ))))
+ }
}, list( .lshape1=lshape1, .lshape2=lshape2, .A=A, .B=B,
.eshape1=eshape1, .eshape2=eshape2 ))),
vfamily="beta.ab",
@@ -1992,8 +2050,8 @@ beta.ab = function(lshape1="loge", lshape2="loge",
-beta4 = function(link="loge", earg=list(),
- i1=2.3, i2=2.4, iA=NULL, iB=NULL)
+ beta4 = function(link="loge", earg=list(),
+ i1=2.3, i2=2.4, iA=NULL, iB=NULL)
{
@@ -2046,9 +2104,7 @@ beta4 = function(link="loge", earg=list(),
shapes = eta2theta(eta[,1:2], .link, earg=.earg)
.A = eta[,3]
.B = eta[,4]
- temp = if(is.R()) lbeta(shapes[,1], shapes[,2]) else
- lgamma(shapes[,1]) + lgamma(shapes[,2]) -
- lgamma(shapes[,1]+shapes[,2])
+ temp = lbeta(shapes[,1], shapes[,2])
if(residuals) stop("loglikelihood residuals not implemented yet") else
sum(w * ((shapes[,1]-1)*log(y-.A) + (shapes[,2]-1)*log(.B-y) - temp -
(shapes[,1]+shapes[,2]-1)*log(.B-.A )))
@@ -2104,7 +2160,7 @@ beta4 = function(link="loge", earg=list(),
-simple.exponential = function()
+ simple.exponential = function()
{
new("vglmff",
blurb=c("Simple Exponential distribution\n",
@@ -2138,16 +2194,15 @@ simple.exponential = function()
}
-exponential = function(link="loge", earg=list(), location=0, expected=TRUE)
-{
+ exponential = function(link="loge", earg=list(), location=0, expected=TRUE) {
if(!is.Numeric(location, allow=1))
- stop("bad input for argument \"location\"")
+ stop("bad input for argument 'location'")
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
if(!is.logical(expected) || length(expected) != 1)
- stop("bad input for argument \"expected\"")
+ stop("bad input for argument 'expected'")
new("vglmff",
blurb=c("Exponential distribution\n\n",
@@ -2172,7 +2227,7 @@ exponential = function(link="loge", earg=list(), location=0, expected=TRUE)
stop("response must be a vector or a one-column matrix")
extra$loc = .location # This is passed into, e.g., link, deriv etc.
if(any(y <= extra$loc))
- stop(paste("all responses must be greater than", extra$loc))
+ stop("all responses must be greater than ", extra$loc)
predictors.names = namesof("rate", .link, tag=FALSE)
mu = y + (y == extra$loc) / 8
if(!length(etastart))
@@ -2212,7 +2267,7 @@ exponential = function(link="loge", earg=list(), location=0, expected=TRUE)
-gamma1 = function(link="loge", earg=list())
+ gamma1 = function(link="loge", earg=list())
{
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -2249,8 +2304,9 @@ gamma1 = function(link="loge", earg=list())
theta2eta(mu, .link, earg=.earg)),
list( .link=link, .earg=earg )),
loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * ((mu-1)*log(y) - y - lgamma(mu))),
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dgamma(x=y, shape=mu, scale=1, log=TRUE))
+ },
vfamily=c("gamma1"),
deriv=eval(substitute(expression({
shape = mu
@@ -2266,22 +2322,22 @@ gamma1 = function(link="loge", earg=list())
}
-gamma2.ab = function(lrate="loge", lshape="loge",
- erate=list(), eshape=list(),
- irate=NULL, ishape=NULL, expected=TRUE, zero=2)
+ gamma2.ab = function(lrate="loge", lshape="loge",
+ erate=list(), eshape=list(),
+ irate=NULL, ishape=NULL, expected=TRUE, zero=2)
{
if(mode(lrate) != "character" && mode(lrate) != "name")
lrate = as.character(substitute(lrate))
if(mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
if(length( irate) && !is.Numeric(irate, posit=TRUE))
- stop("bad input for argument \"irate\"")
+ stop("bad input for argument 'irate'")
if(length( ishape) && !is.Numeric(ishape, posit=TRUE))
- stop("bad input for argument \"ishape\"")
+ stop("bad input for argument 'ishape'")
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.logical(expected) || length(expected) != 1)
- stop("bad input for argument \"expected\"")
+ stop("bad input for argument 'expected'")
if(!is.list(erate)) erate = list()
if(!is.list(eshape)) eshape = list()
@@ -2332,8 +2388,9 @@ gamma2.ab = function(lrate="loge", lshape="loge",
function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
rate = eta2theta(eta[,1], .lrate, earg=.erate)
shape = eta2theta(eta[,2], .lshape, earg=.eshape)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w*(-rate * y + (shape-1)*log(y) + shape*log(rate) - lgamma(shape )))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dgamma(x=y, shape=shape, rate=rate, log=TRUE))
+ }
}, list( .lrate=lrate, .lshape=lshape,
.erate=erate, .eshape=eshape ))),
vfamily=c("gamma2.ab"),
@@ -2368,7 +2425,7 @@ gamma2.ab = function(lrate="loge", lshape="loge",
-gamma2 = function(lmu="loge", lshape="loge",
+ gamma2 = function(lmu="loge", lshape="loge",
emu=list(), eshape=list(),
method.init=1,
deviance.arg=FALSE, ishape=NULL, zero=-2)
@@ -2378,9 +2435,9 @@ gamma2 = function(lmu="loge", lshape="loge",
if(mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
if(length(zero) && !is.Numeric(zero, integer=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(length( ishape) && !is.Numeric(ishape, posit=TRUE))
- stop("bad input for argument \"ishape\"")
+ stop("bad input for argument 'ishape'")
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
method.init > 2)
stop("'method.init' must be 1 or 2")
@@ -2403,9 +2460,8 @@ gamma2 = function(lmu="loge", lshape="loge",
constraints = cm.zero.vgam(constraints, x, temp752, M)
}), list( .zero=zero ))),
initialize=eval(substitute(expression({
- if(is.R()) assign("CQO.FastAlgorithm",
- ( .lmu == "loge" && .lshape == "loge"), envir = VGAMenv) else
- CQO.FastAlgorithm <<- ( .lmu == "loge" && .lshape == "loge")
+ assign("CQO.FastAlgorithm", ( .lmu == "loge" && .lshape == "loge"),
+ envir = VGAMenv)
if(any(function.name == c("cqo","cao")) &&
is.Numeric( .zero, allow=1) && .zero != -2)
stop("argument zero=-2 is required")
@@ -2452,13 +2508,8 @@ gamma2 = function(lmu="loge", lshape="loge",
eta2theta(eta[,2*(1:NOS)-1,drop=FALSE], .lmu, earg=.emu )
}, list( .lmu=lmu, .emu=emu ))),
last=eval(substitute(expression({
- if(is.R()) {
- if(exists("CQO.FastAlgorithm", envir = VGAMenv))
- rm("CQO.FastAlgorithm", envir = VGAMenv)
- } else {
- while(exists("CQO.FastAlgorithm"))
- remove("CQO.FastAlgorithm")
- }
+ if(exists("CQO.FastAlgorithm", envir = VGAMenv))
+ rm("CQO.FastAlgorithm", envir = VGAMenv)
tmp34 = c(rep( .lmu, length=NOS), rep( .lshape, length=NOS))
names(tmp34) = c(if(NOS==1) "mu" else paste("mu", 1:NOS, sep=""),
if(NOS==1) "shape" else paste("shape", 1:NOS, sep=""))
@@ -2483,9 +2534,10 @@ gamma2 = function(lmu="loge", lshape="loge",
NOS = ncol(eta) / 2
mymu = mu # eta2theta(eta[,2*(1:NOS)-1], .lmu, earg=.emu )
shapemat = eta2theta(eta[,2*(1:NOS),drop=FALSE], .lshape, earg=.eshape )
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w*((shapemat - 1) * log(y) + shapemat *
- (log(shapemat) - y / mymu - log(mymu)) - lgamma(shapemat )))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dgamma(x=y, shape=c(shapemat), scale=c(mymu/shapemat),
+ log=TRUE))
+ }
}, list( .lmu=lmu, .lshape=lshape,
.emu=emu, .eshape=eshape ))),
vfamily=c("gamma2"),
@@ -2533,10 +2585,10 @@ gamma2 = function(lmu="loge", lshape="loge",
}
-geometric =function(link="logit", earg=list(), expected=TRUE)
+ geometric =function(link="logit", earg=list(), expected=TRUE)
{
if(!is.logical(expected) || length(expected) != 1)
- stop("bad input for argument \"expected\"")
+ stop("bad input for argument 'expected'")
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
@@ -2571,8 +2623,7 @@ geometric =function(link="logit", earg=list(), expected=TRUE)
function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
prob = eta2theta(eta, .link, earg= .earg)
if(residuals) stop("loglikelihood residuals not implemented yet") else {
- if(is.R()) sum(w * dgeom(x=y, prob=prob, log=TRUE)) else
- sum(w*(y * log1p(-prob) + log(prob )))
+ sum(w * dgeom(x=y, prob=prob, log=TRUE))
}
}, list( .link=link, .earg=earg ))),
vfamily=c("geometric"),
@@ -2593,25 +2644,30 @@ geometric =function(link="logit", earg=list(), expected=TRUE)
dbetageom = function(x, shape1, shape2, log=FALSE) {
- if(!is.Numeric(x)) stop("bad input for argument \"x\"")
- if(!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument \"shape1\"")
- if(!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument \"shape2\"")
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ if(!is.Numeric(x)) stop("bad input for argument 'x'")
+ if(!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument 'shape1'")
+ if(!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument 'shape2'")
N = max(length(x), length(shape1), length(shape2))
x = rep(x, len=N); shape1 = rep(shape1, len=N); shape2 = rep(shape2, len=N)
- if(!is.R()) {
- beta = function(x,y) gamma(x) * gamma(y) / gamma(x+y)
- lbeta = function(x,y) lgamma(x) + lgamma(y) - lgamma(x+y)
+ loglik = lbeta(1+shape1, shape2+abs(x)) - lbeta(shape1, shape2)
+ xok = (x == round(x) & x >= 0)
+ loglik[!xok] = log(0)
+ if(log.arg) {
+ loglik
+ } else {
+ exp(loglik)
}
- ans = if(log) lbeta(1+shape1, shape2+abs(x)) - lbeta(shape1, shape2) else
- beta(1+shape1, shape2+abs(x)) / beta(shape1, shape2)
- ifelse(x == round(x) & x >= 0, ans, if(log) -Inf else 0)
}
pbetageom = function(q, shape1, shape2, log.p=FALSE) {
- if(!is.Numeric(q)) stop("bad input for argument \"q\"")
- if(!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument \"shape1\"")
- if(!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument \"shape2\"")
+ if(!is.Numeric(q)) stop("bad input for argument 'q'")
+ if(!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument 'shape1'")
+ if(!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument 'shape2'")
N = max(length(q), length(shape1), length(shape2))
q = rep(q, len=N); shape1 = rep(shape1, len=N); shape2 = rep(shape2, len=N)
ans = q * 0 # Retains names(q)
@@ -2626,25 +2682,25 @@ pbetageom = function(q, shape1, shape2, log.p=FALSE) {
ans[index] = if(i >= 0) sum(temp[1:(1+i)]) else 0
}
} else
- for(i in 1:N) {
- qstar = floor(q[i])
- ans[i] = if(qstar >= 0) sum(dbetageom(x=0:qstar,
- shape1=shape1[i], shape2=shape2[i])) else 0
+ for(ii in 1:N) {
+ qstar = floor(q[ii])
+ ans[ii] = if(qstar >= 0) sum(dbetageom(x=0:qstar,
+ shape1=shape1[ii], shape2=shape2[ii])) else 0
}
if(log.p) log(ans) else ans
}
rbetageom = function(n, shape1, shape2) {
- if(!is.Numeric(n, integ=TRUE,allow=1)) stop("bad input for argument \"n\"")
- if(!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument \"shape1\"")
- if(!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument \"shape2\"")
+ if(!is.Numeric(n, integ=TRUE,allow=1)) stop("bad input for argument 'n'")
+ if(!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument 'shape1'")
+ if(!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument 'shape2'")
rgeom(n=n, prob = rbeta(n=n, shape1=shape1, shape2=shape2))
}
-tobit = function(Lower = 0, Upper = Inf, lmu="identity",
+ tobit = function(Lower = 0, Upper = Inf, lmu="identity",
lsd="loge", emu=list(), esd=list(), imethod=1, zero=2)
{
if(mode(lmu) != "character" && mode(lmu) != "name")
@@ -2657,7 +2713,7 @@ tobit = function(Lower = 0, Upper = Inf, lmu="identity",
!is.numeric(Lower) || !is.numeric(Upper) || Lower >= Upper)
stop("Lower and Upper must have length 1 and be numeric with Lower < Upper")
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(emu)) emu = list()
if(!is.list(esd)) esd = list()
@@ -2675,13 +2731,13 @@ tobit = function(Lower = 0, Upper = Inf, lmu="identity",
extra$censoredL = (y <= .Lower)
extra$censoredU = (y >= .Upper)
if(min(y) < .Lower) {
- warning(paste("replacing response values less than the value ",
- .Lower, "by", .Lower))
+ warning("replacing response values less than the value ",
+ .Lower, " by ", .Lower)
y[y < .Lower] = .Lower
}
if(max(y) > .Upper) {
- warning(paste("replacing response values greater than the value",
- .Upper, "by", .Upper))
+ warning("replacing response values greater than the value ",
+ .Upper, " by ", .Upper)
y[y > .Upper] = .Upper
}
predictors.names = c(namesof("mu", .lmu, earg=.emu, tag=FALSE),
@@ -2689,10 +2745,10 @@ tobit = function(Lower = 0, Upper = Inf, lmu="identity",
if(!length(etastart)) {
anyc = extra$censoredL | extra$censoredU
i11 = if( .imethod == 1) anyc else FALSE # can be all data
- junk=if(is.R()) lm.wfit(x=cbind(x[!i11,]),y=y[!i11],w=w[!i11]) else
- lm.wfit(x=cbind(x[!i11,]), y=y[!i11], w=w[!i11],method="qr")
+ junk = lm.wfit(x=cbind(x[!i11,]), y=y[!i11], w=w[!i11])
sd.y.est = sqrt( sum(w[!i11] * junk$resid^2) / junk$df.residual )
- etastart = cbind(mu=y, rep(theta2eta(sd.y.est, .lsd, earg= .esd), length=n))
+ etastart = cbind(mu=y, rep(theta2eta(sd.y.est, .lsd, earg= .esd),
+ length=n))
if(any(anyc)) etastart[anyc,1] = x[anyc,,drop=FALSE] %*% junk$coeff
}
}), list( .Lower=Lower, .Upper=Upper, .lmu=lmu, .lsd=lsd,
@@ -2806,16 +2862,17 @@ tobit = function(Lower = 0, Upper = Inf, lmu="identity",
-normal1 = function(lmean="identity", lsd="loge",
+ normal1 = function(lmean="identity", lsd="loge",
emean=list(), esd=list(), zero=NULL)
{
+ print("20090416; in normal1()")
if(mode(lmean) != "character" && mode(lmean) != "name")
lmean = as.character(substitute(lmean))
if(mode(lsd) != "character" && mode(lsd) != "name")
lsd = as.character(substitute(lsd))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(emean)) emean = list()
if(!is.list(esd)) esd = list()
@@ -2835,8 +2892,7 @@ normal1 = function(lmean="identity", lsd="loge",
if(ncol(y <- cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
if(!length(etastart)) {
- junk = if(is.R()) lm.wfit(x=x, y=y, w=w) else
- lm.wfit(x=x, y=y, w=w, method="qr")
+ junk = lm.wfit(x=x, y=y, w=w)
sd.y.est = sqrt( sum(w * junk$resid^2) / junk$df.residual )
mean.init = if( .lmean == "loge") pmax(1/1024, y) else y
etastart = cbind(theta2eta(mean.init, .lmean, earg= .emean),
@@ -2855,9 +2911,7 @@ normal1 = function(lmean="identity", lsd="loge",
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
sd = eta2theta(eta[,2], .lsd, earg= .esd)
if(residuals) stop("loglikelihood residuals not implemented yet") else {
- if(is.R())
- sum(w*dnorm(y, m=mu, sd=sd, log=TRUE)) else
- sum(w * (-log(sd*sqrt(2*pi)) - 0.5 * ((y - mu)/sd)^2))
+ sum(w * dnorm(y, m=mu, sd=sd, log=TRUE))
}
}, list( .lsd=lsd, .emean=emean, .esd=esd ))),
vfamily=c("normal1"),
@@ -2884,15 +2938,15 @@ normal1 = function(lmean="identity", lsd="loge",
-lognormal = function(lmeanlog="identity", lsdlog="loge",
- emeanlog=list(), esdlog=list(), zero=NULL)
+ lognormal = function(lmeanlog="identity", lsdlog="loge",
+ emeanlog=list(), esdlog=list(), zero=NULL)
{
if(mode(lmeanlog) != "character" && mode(lmeanlog) != "name")
lmeanlog = as.character(substitute(lmeanlog))
if(mode(lsdlog) != "character" && mode(lsdlog) != "name")
lsdlog = as.character(substitute(lsdlog))
if(length(zero) && (!is.Numeric(zero, integer=TRUE, posit=TRUE) ||
- zero > 2)) stop("bad input for argument argument \"zero\"")
+ zero > 2)) stop("bad input for argument argument 'zero'")
if(!is.list(emeanlog)) emeanlog = list()
if(!is.list(esdlog)) esdlog = list()
@@ -2910,8 +2964,7 @@ lognormal = function(lmeanlog="identity", lsdlog="loge",
predictors.names = c(namesof("meanlog", .lmeanlog, earg=.emeanlog, tag=FALSE),
namesof("sdlog", .lsdlog, earg=.esdlog, tag=FALSE))
if(!length(etastart)) {
- junk = if(is.R()) lm.wfit(x=x, y=log(y), w=w) else
- lm.wfit(x=x, y=log(y), w=w, method="qr")
+ junk = lm.wfit(x=x, y=log(y), w=w)
sdlog.y.est = sqrt( sum(w * junk$resid^2) / junk$df.residual )
etastart = cbind(
meanlog= rep(theta2eta(log(median(y)), .lmeanlog, earg= .emeanlog), length=n),
@@ -2936,9 +2989,7 @@ lognormal = function(lmeanlog="identity", lsdlog="loge",
mulog = eta2theta(eta[,1], .lmeanlog, earg= .emeanlog)
sdlog = eta2theta(eta[,2], .lsdlog, earg= .esdlog)
if(residuals) stop("loglikelihood residuals not implemented yet") else {
- if(is.R())
- sum(w*dlnorm(y, meanlog=mulog, sdlog=sdlog, log=TRUE)) else
- sum(w * (-log(y*sdlog*sqrt(2*pi)) - 0.5 * ((log(y) - mulog)/sdlog)^2))
+ sum(w * dlnorm(y, meanlog=mulog, sdlog=sdlog, log=TRUE))
}
}, list( .lmeanlog = lmeanlog, .lsdlog=lsdlog,
.emeanlog = emeanlog, .esdlog=esdlog ))),
@@ -2966,39 +3017,25 @@ lognormal = function(lmeanlog="identity", lsdlog="loge",
}))
}
-if(!is.R()) {
-
- qlognormal = function(p, meanlog=0, sdlog=1, lambda=0)
- lambda + exp(qnorm(p=p, mean=meanlog, sd=sdlog))
-
- dlognormal = function(x, meanlog=0, sdlog=1, lambda=0)
- (x > lambda) *
- dnorm(x=log(abs(x-lambda)), mean=meanlog, sd=sdlog) / (x-lambda)
-
- rlognormal = function(n, meanlog=0, sdlog=1, lambda=0)
- lambda + exp(rnorm(n, mean=meanlog, sd=sdlog))
- plognormal = function(q, meanlog=0, sdlog=1, lambda=0)
- (q>lambda) * pnorm(q=log(abs(q-lambda)), mean=meanlog, sd=sdlog)
-}
-lognormal3 = function(lmeanlog="identity", lsdlog="loge",
+ lognormal3 = function(lmeanlog="identity", lsdlog="loge",
emeanlog=list(), esdlog=list(),
powers.try = (-3):3,
delta=NULL, zero=NULL)
{
if(length(delta) && !is.Numeric(delta, positive=TRUE))
- stop("bad input for argument argument \"delta\"")
+ stop("bad input for argument argument 'delta'")
if(mode(lmeanlog) != "character" && mode(lmeanlog) != "name")
lmeanlog = as.character(substitute(lmeanlog))
if(mode(lsdlog) != "character" && mode(lsdlog) != "name")
lsdlog = as.character(substitute(lsdlog))
if(length(zero) && (!is.Numeric(zero, integer=TRUE, posit=TRUE) ||
zero > 3))
- stop("bad input for argument argument \"zero\"")
+ stop("bad input for argument argument 'zero'")
if(!is.list(emeanlog)) emeanlog = list()
if(!is.list(esdlog)) esdlog = list()
@@ -3033,8 +3070,7 @@ lognormal3 = function(lmeanlog="identity", lsdlog="loge",
index.lambda=(1:length(powers.try))[pvalue.vec==max(pvalue.vec)]
lambda.init = miny - 10^powers.try[index.lambda]
}
- junk = if(is.R()) lm.wfit(x=x, y=log(y-lambda.init), w=w) else
- lm.wfit(x=x, y=log(y-lambda.init), w=w, method="qr")
+ junk = lm.wfit(x=x, y=log(y-lambda.init), w=w)
sdlog.y.est = sqrt( sum(w * junk$resid^2) / junk$df.residual )
etastart = cbind(mu=log(median(y - lambda.init)),
sdlog=rep(theta2eta(sdlog.y.est, .lsdlog, earg= .esdlog), length=n),
@@ -3063,12 +3099,9 @@ lognormal3 = function(lmeanlog="identity", lsdlog="loge",
sdlog = eta2theta(eta[,2], .lsdlog, earg= .esdlog)
lambda = eta2theta(eta[,3], "identity", earg=list())
if(any(y < lambda))
- cat("warning: bad y\n")
+ warning("bad 'y'")
if(residuals) stop("loglikelihood residuals not implemented yet") else {
- if(is.R())
- sum(w*dlnorm(y-lambda, meanlog=mymu, sdlog=sdlog, log=TRUE)) else
- sum(w * (-log((y-lambda)*sdlog*sqrt(2*pi)) -
- 0.5 * ((log(y-lambda) - mymu)/sdlog)^2))
+ sum(w*dlnorm(y-lambda, meanlog=mymu, sdlog=sdlog, log=TRUE))
}
}, list( .lmeanlog=lmeanlog, .lsdlog=lsdlog,
.emeanlog = emeanlog, .esdlog=esdlog ))),
@@ -3078,7 +3111,7 @@ lognormal3 = function(lmeanlog="identity", lsdlog="loge",
sdlog = eta2theta(eta[,2], .lsdlog, earg= .esdlog)
lambda = eta2theta(eta[,3], "identity", earg=list())
if(any(y < lambda))
- cat("warning: bad y\n")
+ warning("bad 'y'")
dl.dmymu = (log(y-lambda)-mymu) / sdlog^2
dl.dsdlog = -1/sdlog + (log(y-lambda)-mymu)^2 / sdlog^3
dl.dlambda = (1 + (log(y-lambda)-mymu) / sdlog^2) / (y-lambda)
@@ -3108,11 +3141,6 @@ lognormal3 = function(lmeanlog="identity", lsdlog="loge",
-
-
-
-
-
interleave.VGAM = function(L, M) c(matrix(1:L, nrow=M, byrow=TRUE))
negbinomial.control <- function(save.weight=TRUE, ...)
@@ -3130,17 +3158,16 @@ negbinomial.control <- function(save.weight=TRUE, ...)
-
if(length(ik) && !is.Numeric(ik, posit=TRUE))
- stop("bad input for argument \"ik\"")
+ stop("bad input for argument 'ik'")
if(!is.Numeric(cutoff, allow=1) || cutoff<0.8 || cutoff>=1)
- stop("range error in the argument \"cutoff\"")
+ stop("range error in the argument 'cutoff'")
if(!is.Numeric(Maxiter, integ=TRUE, allow=1) || Maxiter < 100)
- stop("bad input for argument \"Maxiter\"")
+ stop("bad input for argument 'Maxiter'")
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 2) stop("argument \"method.init\" must be 1 or 2")
+ method.init > 3) stop("argument 'method.init' must be 1 or 2 or 3")
if(!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
- shrinkage.init > 1) stop("bad input for argument \"shrinkage.init\"")
+ shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
if(!is.null(nsimEIM)) {
if(!is.Numeric(nsimEIM, allow=1, integ=TRUE))
stop("bad input for argument 'nsimEIM'")
@@ -3170,10 +3197,8 @@ negbinomial.control <- function(save.weight=TRUE, ...)
constraints = cm.zero.vgam(constraints, x, temp752, M)
}), list( .zero=zero ))),
initialize=eval(substitute(expression({
- if(is.R())
- assign("CQO.FastAlgorithm", ( .lmu == "loge") &&
- ( .lk == "loge"), envir = VGAMenv) else
- CQO.FastAlgorithm <<- ( .lmu == "loge") && ( .lk == "loge")
+ assign("CQO.FastAlgorithm",
+ ( .lmu == "loge") && ( .lk == "loge"), envir = VGAMenv)
if(any(function.name == c("cqo","cao")) &&
is.Numeric( .zero, allow=1) && .zero != -2)
stop("argument zero=-2 is required")
@@ -3198,16 +3223,17 @@ negbinomial.control <- function(save.weight=TRUE, ...)
if(!length(etastart)) {
mu.init = y
for(iii in 1:ncol(y)) {
- use.this = if( .method.init == 2) {
+ use.this = if( .method.init == 1) {
weighted.mean(y[,iii], w) + 1/16
+ } else if( .method.init == 3) {
+ c(quantile(y[,iii], probs = 0.75) + 1/16)
} else {
median(y[,iii]) + 1/16
}
- mu.init[,iii] = (1- .sinit)*(y[,iii]+1/16) + .sinit * use.this
- max.use.this = 7 * use.this + 10
- vecTF = (mu.init[,iii] > max.use.this)
- if(any(vecTF))
- mu.init[vecTF,iii] = max.use.this
+ medabsres = median(abs(y[,iii] - use.this)) + 1/32
+ allowfun = function(z, maxtol=1) sign(z) * pmin(abs(z), maxtol)
+ mu.init[,iii] = use.this +
+ (1- .sinit)*allowfun(y[,iii] - use.this, maxtol=medabsres)
}
if( is.Numeric( .k.init )) {
@@ -3239,13 +3265,8 @@ negbinomial.control <- function(save.weight=TRUE, ...)
eta2theta(eta[,2*(1:NOS)-1,drop=FALSE], .lmu, earg= .emu)
}, list( .lmu=lmu, .emu=emu, .ek=ek ))),
last=eval(substitute(expression({
- if(is.R()) {
- if(exists("CQO.FastAlgorithm", envir = VGAMenv))
- rm("CQO.FastAlgorithm", envir = VGAMenv)
- } else {
- while(exists("CQO.FastAlgorithm"))
- remove("CQO.FastAlgorithm")
- }
+ if(exists("CQO.FastAlgorithm", envir = VGAMenv))
+ rm("CQO.FastAlgorithm", envir = VGAMenv)
temp0303 = c(rep( .lmu, length=NOS), rep( .lk, length=NOS))
names(temp0303) = c(if(NOS==1) "mu" else paste("mu", 1:NOS, sep=""),
if(NOS==1) "k" else paste("k", 1:NOS, sep=""))
@@ -3326,7 +3347,7 @@ negbinomial.control <- function(save.weight=TRUE, ...)
}
run.varcov = cbind(run.varcov / .nsimEIM)
wz[,2*(1:NOS)] = if(intercept.only)
- matrix(apply(run.varcov, 2, mean),
+ matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow=TRUE) else run.varcov
wz[,2*(1:NOS)] = wz[,2*(1:NOS)] * dk.deta^2
@@ -3356,7 +3377,7 @@ negbinomial.control <- function(save.weight=TRUE, ...)
}
-negbin.ab = function(link.alpha ="loge", link.k ="loge",
+ negbin.ab = function(link.alpha ="loge", link.k ="loge",
ealpha=list(), ek=list(),
k.init=1,
zero=2,
@@ -3368,7 +3389,7 @@ negbin.ab = function(link.alpha ="loge", link.k ="loge",
if(!is.Numeric(cutoff, allow=1) || cutoff<0.8 || cutoff>=1)
stop("range error in the argument cutoff")
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(mode(link.alpha) != "character" && mode(link.alpha) != "name")
link.alpha = as.character(substitute(link.alpha))
@@ -3417,10 +3438,10 @@ negbin.ab = function(link.alpha ="loge", link.k ="loge",
loglikelihood=eval(substitute(
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
alpha = eta2theta(eta[,1], .link.alpha, earg= .ealpha)
- k = eta2theta(eta[,2], .link.k, earg= .ek)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (y * log(alpha) - (y+k)*log1p(alpha) + lgamma(y+k) -
- lgamma(k) - lgamma(y+1 )))
+ kvec = eta2theta(eta[,2], .link.k, earg= .ek)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dnbinom(x=y, mu=kvec*alpha, size=kvec, log=TRUE))
+ }
}, list( .link.alpha=link.alpha, .link.k=link.k,
.ealpha=ealpha, .ek=ek ))),
vfamily=c("negbin.ab"),
@@ -3482,7 +3503,7 @@ nbmud = function(lmu = c("loge","identity","reciprocal"),
if(FALSE)
-neg.binomial = function(link.p="logit", link.k="loge",
+ neg.binomial = function(link.p="logit", link.k="loge",
ep=list(), ek=list(),
zero=2,
ik=NULL,
@@ -3618,11 +3639,11 @@ neg.binomial = function(link.p="logit", link.k="loge",
if(FALSE)
-neg.binomial.k = function(k, link="logit", earg=list(), expected=TRUE, ...)
+ neg.binomial.k = function(k, link="logit", earg=list(), expected=TRUE, ...)
{
if(!is.Numeric(k, allow=1, posit=TRUE))
- stop("bad input for argument argument \"k\"")
+ stop("bad input for argument argument 'k'")
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
@@ -3693,7 +3714,7 @@ neg.binomial.k = function(k, link="logit", earg=list(), expected=TRUE, ...)
-simple.poisson = function()
+ simple.poisson = function()
{
new("vglmff",
blurb=c("Poisson distribution\n\n",
@@ -3738,7 +3759,7 @@ simple.poisson = function()
-studentt = function(link.df="loglog", earg=list(),
+ studentt = function(link.df="loglog", earg=list(),
idf=NULL, nsimEIM=100)
{
@@ -3783,9 +3804,7 @@ studentt = function(link.df="loglog", earg=list(),
function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
df = eta2theta(eta, .link.df, earg= .earg)
if(residuals) stop("loglikelihood residuals not implemented yet") else {
- if(is.R()) sum(w * dt(x=y, df=df, log=TRUE)) else
- sum(w * (-log(pi*df)/2 - (df+1)*log1p(y^2 / df)/2 +
- lgamma((df+1)/2) - lgamma(df/2)))
+ sum(w * dt(x=y, df=df, log=TRUE))
}
}, list( .link.df=link.df, .earg=earg ))),
vfamily=c("studentt"),
@@ -3819,7 +3838,7 @@ studentt = function(link.df="loglog", earg=list(),
}
-chisq = function(link = "loge", earg=list())
+ chisq = function(link = "loge", earg=list())
{
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -3849,7 +3868,7 @@ chisq = function(link = "loge", earg=list())
function(mu,y,w,residuals= FALSE,eta,extra=NULL) {
df = eta2theta(eta, .link, earg= .earg)
if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * dchisq(x=y, df=df, ncp = 0, log = TRUE))
+ sum(w * dchisq(x=y, df=df, ncp = 0, log = TRUE))
}, list( .link = link, .earg=earg ))),
vfamily="chisq",
deriv=eval(substitute(expression({
@@ -3871,7 +3890,7 @@ chisq = function(link = "loge", earg=list())
-simplex = function(lmu="logit", lsigma="loge",
+ simplex = function(lmu="logit", lsigma="loge",
emu=list(), esigma=list(), imu=NULL, isigma=NULL)
{
@@ -3980,7 +3999,7 @@ simplex = function(lmu="logit", lsigma="loge",
-rig = function(lmu="identity", llambda="loge",
+ rig = function(lmu="identity", llambda="loge",
emu=list(), elambda=list(), imu=NULL, ilambda=1)
{
@@ -3989,7 +4008,7 @@ rig = function(lmu="identity", llambda="loge",
if(mode(llambda) != "character" && mode(llambda) != "name")
llambda = as.character(substitute(llambda))
if(!is.Numeric(ilambda, posit=TRUE))
- stop("bad input for \"ilambda\"")
+ stop("bad input for 'ilambda'")
if(!is.list(emu)) emu = list()
if(!is.list(elambda)) elambda = list()
@@ -4091,7 +4110,7 @@ rig = function(lmu="identity", llambda="loge",
-hypersecant = function(link.theta="elogit",
+ hypersecant = function(link.theta="elogit",
earg=if(link.theta=="elogit") list(min=-pi/2, max=pi/2) else list(),
init.theta=NULL)
{
@@ -4148,7 +4167,7 @@ hypersecant = function(link.theta="elogit",
-hypersecant.1 = function(link.theta="elogit",
+ hypersecant.1 = function(link.theta="elogit",
earg=if(link.theta=="elogit") list(min=-pi/2, max=pi/2) else list(),
init.theta=NULL)
{
@@ -4212,8 +4231,8 @@ hypersecant.1 = function(link.theta="elogit",
-leipnik = function(lmu="logit", llambda="loge",
- emu=list(), elambda=list(), imu=NULL, ilambda=NULL)
+ leipnik = function(lmu="logit", llambda="loge",
+ emu=list(), elambda=list(), imu=NULL, ilambda=NULL)
{
@@ -4260,8 +4279,6 @@ leipnik = function(lmu="logit", llambda="loge",
}, list( .lmu=lmu,
.emu=emu, .elambda=elambda ))),
last=eval(substitute(expression({
- if(!is.R())
- misc$d3 = d3 # because save.weights=FALSE
misc$link = c(mu= .lmu, lambda= .llambda)
misc$earg = list(mu= .emu, lambda= .elambda)
misc$pooled.weight = pooled.weight
@@ -4280,21 +4297,10 @@ leipnik = function(lmu="logit", llambda="loge",
vfamily=c("leipnik"),
deriv=eval(substitute(expression({
lambda = eta2theta(eta[,2], .llambda, earg= .elambda)
- if(is.R()) {
- dl.dthetas = w * cbind(dl.dmu=lambda*(y-mu)/(y*(1-y)+(y-mu)^2),
- dl.dlambda=-0.5*log1p((y-mu)^2 / (y*(1-y))) -
- 0.5*digamma((lambda+1)/2) +
- 0.5*digamma(1+lambda/2))
- } else {
- if(iter==1) {
- d3 = dfun(~ w * (-0.5*log(y*(1-y)) - 0.5 * lambda * log(1 +
- (y-mu)^2 / (y*(1-y ))) - lgamma((lambda+1)/2) +
- lgamma(1+ lambda/2)),
- c("mu", "lambda"), hessian= TRUE)
- }
- eval.d3 = eval(d3)
- dl.dthetas = attr(eval.d3, "gradient")
- }
+ dl.dthetas = w * cbind(dl.dmu = lambda*(y-mu) / (y*(1-y)+(y-mu)^2),
+ dl.dlambda= -0.5 * log1p((y-mu)^2 / (y*(1-y))) -
+ 0.5*digamma((lambda+1)/2) +
+ 0.5*digamma(1+lambda/2))
dmu.deta = dtheta.deta(mu, .lmu, earg= .emu)
dlambda.deta = dtheta.deta(lambda, .llambda, earg= .elambda)
dtheta.detas = cbind(dmu.deta, dlambda.deta)
@@ -4344,8 +4350,8 @@ leipnik = function(lmu="logit", llambda="loge",
-invbinomial = function(lrho="elogit", llambda="loge",
- erho=if(lrho=="elogit") list(min = 0.5, max = 1) else list(),
+ invbinomial = function(lrho="elogit", llambda="loge",
+ erho=if(lrho=="elogit") list(min = 0.5, max = 1) else list(),
elambda=list(),
irho=NULL,
ilambda=NULL,
@@ -4357,7 +4363,7 @@ invbinomial = function(lrho="elogit", llambda="loge",
if(mode(llambda) != "character" && mode(llambda) != "name")
llambda = as.character(substitute(llambda))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(erho)) erho = list()
if(!is.list(elambda)) elambda = list()
@@ -4455,7 +4461,7 @@ invbinomial = function(lrho="elogit", llambda="loge",
-genpoisson = function(llambda="elogit", ltheta="loge",
+ genpoisson = function(llambda="elogit", ltheta="loge",
elambda=if(llambda=="elogit") list(min=-1,max=1) else list(),
etheta=list(),
ilambda=NULL, itheta=NULL,
@@ -4463,12 +4469,13 @@ genpoisson = function(llambda="elogit", ltheta="loge",
method.init=1, zero=1)
{
+
if(mode(llambda) != "character" && mode(llambda) != "name")
llambda = as.character(substitute(llambda))
if(mode(ltheta) != "character" && mode(ltheta) != "name")
ltheta = as.character(substitute(ltheta))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(elambda)) elambda = list()
if(!is.list(etheta)) etheta = list()
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
@@ -4598,7 +4605,47 @@ genpoisson = function(llambda="elogit", ltheta="loge",
-lgammaff = function(link="loge", earg=list(), init.k=NULL)
+
+
+
+dlgamma = function(x, location=0, scale=1, k=1, log=FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument 'scale'")
+ if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument 'k'")
+ z = (x-location) / scale
+ if(log.arg) {
+ k * z - exp(z) - log(scale) - lgamma(k)
+ } else {
+ exp(k * z - exp(z)) / (scale * gamma(k))
+ }
+}
+plgamma = function(q, location=0, scale=1, k=1) {
+ if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument 'scale'")
+ if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument 'k'")
+ z = (q-location)/scale
+ pgamma(exp(z), k)
+}
+qlgamma = function(p, location=0, scale=1, k=1) {
+ if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument 'scale'")
+ if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument 'k'")
+ q = qgamma(p, k)
+ location + scale * log(q)
+}
+rlgamma = function(n, location=0, scale=1, k=1) {
+ if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ stop("bad input for argument 'n'")
+ if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument 'scale'")
+ if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument 'k'")
+ y = rgamma(n, k)
+ location + scale * log(y)
+}
+
+
+
+ lgammaff = function(link="loge", earg=list(), init.k=NULL)
{
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -4632,9 +4679,10 @@ lgammaff = function(link="loge", earg=list(), init.k=NULL)
}), list( .link=link, .earg=earg ))),
loglikelihood=eval(substitute(
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- k = eta2theta(eta, .link, earg= .earg)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (k * y - exp(y) - lgamma(k )))
+ kk = eta2theta(eta, .link, earg= .earg)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dlgamma(x=y, location=0, scale=1, k=kk, log=TRUE))
+ }
}, list( .link=link, .earg=earg ))),
vfamily=c("lgammaff"),
deriv=eval(substitute(expression({
@@ -4651,35 +4699,7 @@ lgammaff = function(link="loge", earg=list(), init.k=NULL)
}
-dlgamma = function(x, location=0, scale=1, k=1) {
- if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument \"scale\"")
- if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument \"k\"")
- z = (x-location) / scale
- exp(k * z - exp(z)) / (scale * gamma(k))
-}
-plgamma = function(q, location=0, scale=1, k=1) {
- if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument \"scale\"")
- if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument \"k\"")
- z = (q-location)/scale
- pgamma(exp(z), k)
-}
-qlgamma = function(p, location=0, scale=1, k=1) {
- if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument \"scale\"")
- if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument \"k\"")
- q = qgamma(p, k)
- location + scale * log(q)
-}
-rlgamma = function(n, location=0, scale=1, k=1) {
- if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
- stop("bad input for argument \"n\"")
- if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument \"scale\"")
- if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument \"k\"")
- y = rgamma(n, k)
- location + scale * log(y)
-}
-
-
-lgamma3ff = function(llocation="identity", lscale="loge", lshape="loge",
+ lgamma3ff = function(llocation="identity", lscale="loge", lshape="loge",
elocation=list(), escale=list(), eshape=list(),
ilocation=NULL, iscale=NULL, ishape=1, zero=NULL)
{
@@ -4690,9 +4710,9 @@ lgamma3ff = function(llocation="identity", lscale="loge", lshape="loge",
if(mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(length(iscale) && !is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument \"iscale\"")
+ stop("bad input for argument 'iscale'")
if(!is.list(elocation)) elocation = list()
if(!is.list(escale)) escale = list()
if(!is.list(eshape)) eshape = list()
@@ -4746,12 +4766,12 @@ lgamma3ff = function(llocation="identity", lscale="loge", lshape="loge",
.elocation=elocation, .escale=escale, .eshape=eshape ))),
loglikelihood=eval(substitute(
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- a = eta2theta(eta[,1], .llocation, earg= .elocation)
- b = eta2theta(eta[,2], .lscale, earg= .escale)
- k = eta2theta(eta[,3], .lshape, earg= .eshape)
- zedd = (y-a)/b
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (k * zedd - exp(zedd) - lgamma(k) - log(b )))
+ aa = eta2theta(eta[,1], .llocation, earg= .elocation)
+ bb = eta2theta(eta[,2], .lscale, earg= .escale)
+ kk = eta2theta(eta[,3], .lshape, earg= .eshape)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dlgamma(x=y, location=aa, scale=bb, k=kk, log=TRUE))
+ }
}, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
.elocation=elocation, .escale=escale, .eshape=eshape ))),
vfamily=c("lgamma3ff"),
@@ -4789,7 +4809,7 @@ lgamma3ff = function(llocation="identity", lscale="loge", lshape="loge",
.elocation=elocation, .escale=escale, .eshape=eshape ))))
}
-prentice74 = function(llocation="identity", lscale="loge", lshape="identity",
+ prentice74 = function(llocation="identity", lscale="loge", lshape="identity",
elocation=list(), escale=list(), eshape=list(),
ilocation=NULL, iscale=NULL, ishape=NULL, zero=NULL)
{
@@ -4800,9 +4820,9 @@ prentice74 = function(llocation="identity", lscale="loge", lshape="identity",
if(mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(length(iscale) && !is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument \"iscale\"")
+ stop("bad input for argument 'iscale'")
if(!is.list(elocation)) elocation = list()
if(!is.list(escale)) escale = list()
if(!is.list(eshape)) eshape = list()
@@ -4907,47 +4927,60 @@ prentice74 = function(llocation="identity", lscale="loge", lshape="identity",
-dggamma = function(x, scale=1, d=1, k=1) {
- if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument \"scale\"")
- if(!is.Numeric(d, posit=TRUE)) stop("bad input for argument \"d\"")
- if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument \"k\"")
+dggamma = function(x, scale=1, d=1, k=1, log = FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument 'scale'")
+ if(!is.Numeric(d, posit=TRUE)) stop("bad input for argument 'd'")
+ if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument 'k'")
N = max(length(x), length(scale), length(d), length(k))
x = rep(x, len=N); scale = rep(scale, len=N);
d = rep(d, len=N); k = rep(k, len=N);
- ans = rep(0.0, len=N)
- ind = x > 0
- if(any(ind)) {
- z = (x[ind]/scale[ind])^d[ind]
- ans[ind] = d[ind] * scale[ind]^(-d[ind]*k[ind]) *
- x[ind]^(d[ind]*k[ind]-1) * exp(-z) / gamma(k[ind])
+
+ Loglik = rep(log(0), len=N)
+ xok = x > 0
+ if(any(xok)) {
+ zedd = (x[xok]/scale[xok])^d[xok]
+ Loglik[xok] = log(d[xok]) + (-d[xok]*k[xok]) * log(scale[xok]) +
+ (d[xok]*k[xok]-1) * log(x[xok]) - zedd - lgamma(k[xok])
+ }
+ if(log.arg) {
+ Loglik
+ } else {
+ exp(Loglik)
}
- ans
}
+
+
+
+
pggamma = function(q, scale=1, d=1, k=1) {
- if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument \"scale\"")
- if(!is.Numeric(d, posit=TRUE)) stop("bad input for argument \"d\"")
- if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument \"k\"")
+ if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument 'scale'")
+ if(!is.Numeric(d, posit=TRUE)) stop("bad input for argument 'd'")
+ if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument 'k'")
z = (q/scale)^d
pgamma(z, k)
}
qggamma = function(p, scale=1, d=1, k=1) {
- if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument \"scale\"")
- if(!is.Numeric(d, posit=TRUE)) stop("bad input for argument \"d\"")
- if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument \"k\"")
+ if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument 'scale'")
+ if(!is.Numeric(d, posit=TRUE)) stop("bad input for argument 'd'")
+ if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument 'k'")
q = qgamma(p, k)
scale * q^(1/d)
}
rggamma = function(n, scale=1, d=1, k=1) {
if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
- stop("bad input for \"n\"")
- if(!is.Numeric(scale, posit=TRUE)) stop("bad input for \"scale\"")
- if(!is.Numeric(d, posit=TRUE)) stop("bad input for \"d\"")
- if(!is.Numeric(k, posit=TRUE)) stop("bad input for \"k\"")
+ stop("bad input for 'n'")
+ if(!is.Numeric(scale, posit=TRUE)) stop("bad input for 'scale'")
+ if(!is.Numeric(d, posit=TRUE)) stop("bad input for 'd'")
+ if(!is.Numeric(k, posit=TRUE)) stop("bad input for 'k'")
y = rgamma(n, k)
scale * y^(1/d)
}
-ggamma = function(lscale="loge", ld="loge", lk="loge",
+ ggamma = function(lscale="loge", ld="loge", lk="loge",
escale=list(), ed=list(), ek=list(),
iscale=NULL, id=NULL, ik=NULL, zero=NULL)
{
@@ -4958,9 +4991,9 @@ ggamma = function(lscale="loge", ld="loge", lk="loge",
if(mode(lk) != "character" && mode(lk) != "name")
lk = as.character(substitute(lk))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(length(iscale) && !is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument \"iscale\"")
+ stop("bad input for argument 'iscale'")
if(!is.list(escale)) escale = list()
if(!is.list(ed)) ed = list()
if(!is.list(ek)) ek = list()
@@ -5018,8 +5051,9 @@ ggamma = function(lscale="loge", ld="loge", lk="loge",
b = eta2theta(eta[,1], .lscale, earg= .escale)
d = eta2theta(eta[,2], .ld, earg= .ed)
k = eta2theta(eta[,3], .lk, earg= .ek)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w*(log(d) - lgamma(k) + (d*k-1) * log(y) - d*k*log(b) - (y/b)^d))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dggamma(x=y, scale=b, d=d, k=k, log = TRUE))
+ }
}, list( .lscale=lscale, .ld=ld, .lk=lk,
.escale=escale, .ed=ed, .ek=ek ))),
vfamily=c("ggamma"),
@@ -5060,12 +5094,12 @@ ggamma = function(lscale="loge", ld="loge", lk="loge",
dlog = function(x, prob, log=FALSE) {
- if(!is.logical(log) || length(log) != 1)
+ if(!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
- log.arg = log; rm(log)
+ rm(log)
if(!is.Numeric(prob, posit=TRUE) || max(prob) >= 1)
- stop("bad input for argument \"prob\"")
+ stop("bad input for argument 'prob'")
N = max(length(x), length(prob))
x = rep(x, len=N); prob = rep(prob, len=N);
ox = !is.finite(x)
@@ -5087,9 +5121,9 @@ dlog = function(x, prob, log=FALSE) {
plog = function(q, prob, log.p=FALSE) {
- if(!is.Numeric(q)) stop("bad input for argument \"q\"")
+ if(!is.Numeric(q)) stop("bad input for argument 'q'")
if(!is.Numeric(prob, posit=TRUE) || max(prob) >= 1)
- stop("bad input for argument \"prob\"")
+ stop("bad input for argument 'prob'")
N = max(length(q), length(prob))
q = rep(q, len=N); prob = rep(prob, len=N);
@@ -5130,9 +5164,9 @@ plog = function(q, prob, log.p=FALSE) {
if(FALSE)
plog = function(q, prob, log.p=FALSE) {
- if(!is.Numeric(q)) stop("bad input for argument \"q\"")
+ if(!is.Numeric(q)) stop("bad input for argument 'q'")
if(!is.Numeric(prob, posit=TRUE) || max(prob) >= 1)
- stop("bad input for argument \"prob\"")
+ stop("bad input for argument 'prob'")
N = max(length(q), length(prob))
q = rep(q, len=N); prob = rep(prob, len=N);
ans = q * 0 # Retains names(q)
@@ -5161,12 +5195,12 @@ plog = function(q, prob, log.p=FALSE) {
rlog = function(n, prob, Smallno=1.0e-6) {
if(!is.Numeric(n, posit=TRUE, integ=TRUE))
- stop("bad input for argument \"n\"")
+ stop("bad input for argument 'n'")
if(!is.Numeric(prob, allow=1, posit=TRUE) || max(prob) >= 1)
- stop("bad input for argument \"prob\"")
+ stop("bad input for argument 'prob'")
if(!is.Numeric(Smallno, posit=TRUE, allow=1) || Smallno > 0.01 ||
Smallno < 2 * .Machine$double.eps)
- stop("bad input for argument \"Smallno\"")
+ stop("bad input for argument 'Smallno'")
ans = rep(0.0, len=n)
ptr1 = 1; ptr2 = 0
@@ -5199,7 +5233,7 @@ rlog = function(n, prob, Smallno=1.0e-6) {
-logff = function(link="logit", earg=list(), init.c=NULL)
+ logff = function(link="logit", earg=list(), init.c=NULL)
{
if(length(init.c) &&
(!is.Numeric(init.c, posit=TRUE) || max(init.c) >= 1))
@@ -5242,8 +5276,9 @@ logff = function(link="logit", earg=list(), init.c=NULL)
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
cc = eta2theta(eta, .link, earg= .earg)
a = -1 / log1p(-cc)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (log(a) + y * log(cc) - log(y )))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dlog(x=y, prob=-expm1(-1/a), log=TRUE))
+ }
}, list( .link=link, .earg=earg ))),
vfamily=c("logff"),
deriv=eval(substitute(expression({
@@ -5264,7 +5299,7 @@ logff = function(link="logit", earg=list(), init.c=NULL)
-levy = function(delta=NULL, link.gamma="loge",
+ levy = function(delta=NULL, link.gamma="loge",
earg=list(), idelta=NULL, igamma=NULL)
{
@@ -5381,7 +5416,7 @@ levy = function(delta=NULL, link.gamma="loge",
if(FALSE)
-stoppa = function(y0,
+ stoppa = function(y0,
link.alpha="loge",
link.theta="loge", ealpha=list(), etheta=list(),
ialpha=NULL,
@@ -5396,7 +5431,7 @@ stoppa = function(y0,
if(mode(link.theta) != "character" && mode(link.theta) != "name")
link.theta = as.character(substitute(link.theta))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(ealpha)) ealpha = list()
if(!is.list(etheta)) etheta = list()
@@ -5405,8 +5440,7 @@ stoppa = function(y0,
"Links: ",
namesof("alpha", link.alpha, earg=ealpha), ", ",
namesof("theta", link.theta, earg=etheta), "\n",
- if(is.R()) "Mean: theta*y0*beta(1-1/alpha, theta)" else
- "Mean: theta*y0*beta(1-1/alpha, theta)"),
+ "Mean: theta*y0*beta(1-1/alpha, theta)"),
constraints=eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero=zero ))),
@@ -5482,36 +5516,36 @@ stoppa = function(y0,
-dlino = function(x, shape1, shape2, lambda=1) {
- if(!is.Numeric(x)) stop("bad input for argument \"x\"")
- if(!is.Numeric(shape1, posit=TRUE))
- stop("bad input for argument \"shape1\"")
- if(!is.Numeric(shape2, posit=TRUE))
- stop("bad input for argument \"shape2\"")
- if(!is.Numeric(lambda, posit=TRUE))
- stop("bad input for argument \"lambda\"")
- dbeta(x=x, shape1=shape1, shape2=shape2) * lambda^shape1 /
- (1 - (1-lambda)*x)^(shape1+shape2)
+
+dlino = function(x, shape1, shape2, lambda=1, log = FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ loglik = dbeta(x=x, shape1=shape1, shape2=shape2, log=TRUE) +
+ shape1 * log(lambda) -
+ (shape1+shape2) * log1p(-(1-lambda)*x)
+ if(log.arg) loglik else exp(loglik)
}
plino = function(q, shape1, shape2, lambda=1) {
- if(!is.Numeric(q)) stop("bad input for \"q\"")
+ if(!is.Numeric(q)) stop("bad input for 'q'")
if(!is.Numeric(shape1, posit=TRUE))
- stop("bad input for argument \"shape1\"")
+ stop("bad input for argument 'shape1'")
if(!is.Numeric(shape2, posit=TRUE))
- stop("bad input for argument \"shape2\"")
+ stop("bad input for argument 'shape2'")
if(!is.Numeric(lambda, posit=TRUE))
- stop("bad input for argument \"lambda\"")
+ stop("bad input for argument 'lambda'")
pbeta(q=lambda*q/(1 - (1-lambda)*q), shape1=shape1, shape2=shape2)
}
qlino = function(p, shape1, shape2, lambda=1) {
if(!is.Numeric(p, posit=TRUE) || any(p >= 1))
- stop("bad input for argument \"p\"")
+ stop("bad input for argument 'p'")
if(!is.Numeric(shape1, posit=TRUE))
- stop("bad input for argument \"shape1\"")
+ stop("bad input for argument 'shape1'")
if(!is.Numeric(lambda, posit=TRUE))
- stop("bad input for argument \"lambda\"")
+ stop("bad input for argument 'lambda'")
Y = qbeta(p=p, shape1=shape1, shape2=shape2)
Y / (lambda + (1-lambda)*Y)
}
@@ -5519,24 +5553,24 @@ qlino = function(p, shape1, shape2, lambda=1) {
rlino = function(n, shape1, shape2, lambda=1) {
if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
- stop("bad input for argument \"n\"")
+ stop("bad input for argument 'n'")
if(!is.Numeric(shape1, posit=TRUE))
- stop("bad input for argument \"shape1\"")
+ stop("bad input for argument 'shape1'")
if(!is.Numeric(shape2, posit=TRUE))
- stop("bad input for argument \"shape2\"")
+ stop("bad input for argument 'shape2'")
if(!is.Numeric(lambda, posit=TRUE))
- stop("bad input for argument \"lambda\"")
+ stop("bad input for argument 'lambda'")
Y = rbeta(n=n, shape1=shape1, shape2=shape2)
Y / (lambda + (1-lambda)*Y)
}
-lino = function(lshape1="loge",
- lshape2="loge",
- llambda="loge",
- eshape1=list(), eshape2=list(), elambda=list(),
- ishape1=NULL, ishape2=NULL, ilambda=1, zero=NULL)
+ lino = function(lshape1="loge",
+ lshape2="loge",
+ llambda="loge",
+ eshape1=list(), eshape2=list(), elambda=list(),
+ ishape1=NULL, ishape2=NULL, ilambda=1, zero=NULL)
{
if(mode(lshape1) != "character" && mode(lshape1) != "name")
lshape1 = as.character(substitute(lshape1))
@@ -5545,9 +5579,9 @@ lino = function(lshape1="loge",
if(mode(llambda) != "character" && mode(llambda) != "name")
llambda = as.character(substitute(llambda))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.Numeric(ilambda, positive=TRUE))
- stop("bad input for argument \"ilambda\"")
+ stop("bad input for argument 'ilambda'")
if(!is.list(eshape1)) eshape1 = list()
if(!is.list(eshape2)) eshape2 = list()
if(!is.list(elambda)) elambda = list()
@@ -5606,10 +5640,9 @@ lino = function(lshape1="loge",
sh1 = eta2theta(eta[,1], .lshape1, earg= .eshape1)
sh2 = eta2theta(eta[,2], .lshape2, earg= .eshape2)
lambda = eta2theta(eta[,3], .llambda, earg= .elambda)
- if(!is.R()) lbeta = function(a,b) lgamma(a) + lgamma(b) - lgamma(a+b)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w*(sh1*log(lambda) + (sh1-1)*log(y) + (sh2-1)*log1p(-y) -
- lbeta(sh1,sh2) -(sh1+sh2)*log1p(-(1-lambda)*y)) )
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dlino(y, shape1=sh1, shape2=sh2, lambda=lambda, log=TRUE))
+ }
}, list( .lshape1=lshape1, .lshape2=lshape2, .llambda=llambda,
.eshape1=eshape1, .eshape2=eshape2, .elambda=elambda ))),
vfamily=c("lino"),
@@ -5631,7 +5664,6 @@ lino = function(lshape1="loge",
}), list( .lshape1=lshape1, .lshape2=lshape2, .llambda=llambda,
.eshape1=eshape1, .eshape2=eshape2, .elambda=elambda ))),
weight=eval(substitute(expression({
- if(!is.R()) beta = function(a,b) (gamma(a) / gamma(a+b)) * gamma(b)
temp3 = trigamma(sh1+sh2)
ed2l.dsh1 = trigamma(sh1) - temp3
ed2l.dsh2 = trigamma(sh2) - temp3
@@ -5653,16 +5685,17 @@ lino = function(lshape1="loge",
}
-genbetaII= function(link.a="loge",
- link.scale="loge",
- link.p="loge",
- link.q="loge",
- earg.a=list(), earg.scale=list(), earg.p=list(), earg.q=list(),
- init.a=NULL,
- init.scale=NULL,
- init.p=1.0,
- init.q=1.0,
- zero=NULL)
+ genbetaII= function(link.a="loge",
+ link.scale="loge",
+ link.p="loge",
+ link.q="loge",
+ earg.a=list(), earg.scale=list(),
+ earg.p=list(), earg.q=list(),
+ init.a=NULL,
+ init.scale=NULL,
+ init.p=1.0,
+ init.q=1.0,
+ zero=NULL)
{
if(mode(link.a) != "character" && mode(link.a) != "name")
@@ -5674,7 +5707,7 @@ genbetaII= function(link.a="loge",
if(mode(link.q) != "character" && mode(link.q) != "name")
link.q = as.character(substitute(link.q))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(earg.a)) earg.a = list()
if(!is.list(earg.scale)) earg.scale = list()
if(!is.list(earg.p)) earg.p = list()
@@ -5747,10 +5780,10 @@ genbetaII= function(link.a="loge",
scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
parg = eta2theta(eta[,3], .link.p, earg= .earg.p)
qq = eta2theta(eta[,4], .link.q, earg= .earg.q)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
- (if(is.R()) -lbeta(parg, qq) else lgamma(parg+qq)-lgamma(parg)-lgamma(qq))-
- (parg+qq)*log1p((y/scale)^aa)))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
+ -lbeta(parg, qq) - (parg+qq)*log1p((y/scale)^aa)))
+ }
}, list( .link.a=link.a, .link.scale=link.scale,
.earg.a=earg.a, .earg.scale=earg.scale,
.earg.p=earg.p, .earg.q=earg.q,
@@ -5820,31 +5853,31 @@ genbetaII= function(link.a="loge",
}
-rsinmad = function(n, a, scale, q.arg)
+rsinmad = function(n, a, scale=1, q.arg)
qsinmad(runif(n), a, scale, q.arg)
-rlomax = function(n, scale, q.arg)
+rlomax = function(n, scale=1, q.arg)
rsinmad(n, a=1, scale, q.arg)
-rfisk = function(n, a, scale)
+rfisk = function(n, a, scale=1)
rsinmad(n, a, scale, q.arg=1)
-rparalogistic = function(n, a, scale)
+rparalogistic = function(n, a, scale=1)
rsinmad(n, a, scale, a)
-rdagum = function(n, a, scale, p.arg)
- qdagum(runif(n), a, scale, p.arg)
+rdagum = function(n, a, scale=1, p.arg)
+ qdagum(runif(n), a, scale=1, p.arg)
-rinvlomax = function(n, scale, p.arg)
+rinvlomax = function(n, scale=1, p.arg)
rdagum(n, a=1, scale, p.arg)
-rinvparalogistic = function(n, a, scale)
+rinvparalogistic = function(n, a, scale=1)
rdagum(n, a, scale, a)
-qsinmad = function(p, a, scale, q.arg) {
+qsinmad = function(p, a, scale=1, q.arg) {
bad = (p < 0) | (p > 1)
ans = NA * p
a = rep(a, len=length(p))[!bad]
@@ -5855,16 +5888,16 @@ qsinmad = function(p, a, scale, q.arg) {
ans
}
-qlomax = function(p, scale, q.arg)
+qlomax = function(p, scale=1, q.arg)
qsinmad(p, a=1, scale, q.arg)
-qfisk = function(p, a, scale)
+qfisk = function(p, a, scale=1)
qsinmad(p, a, scale, q.arg=1)
-qparalogistic = function(p, a, scale)
+qparalogistic = function(p, a, scale=1)
qsinmad(p, a, scale, a)
-qdagum = function(p, a, scale, p.arg) {
+qdagum = function(p, a, scale=1, p.arg) {
bad = (p < 0) | (p > 1)
ans = NA * p
a = rep(a, len=length(p))[!bad]
@@ -5875,10 +5908,10 @@ qdagum = function(p, a, scale, p.arg) {
ans
}
-qinvlomax = function(p, scale, p.arg)
+qinvlomax = function(p, scale=1, p.arg)
qdagum(p, a=1, scale, p.arg)
-qinvparalogistic = function(p, a, scale)
+qinvparalogistic = function(p, a, scale=1)
qdagum(p, a, scale, a)
@@ -5886,7 +5919,7 @@ qinvparalogistic = function(p, a, scale)
-psinmad = function(q, a, scale, q.arg) {
+psinmad = function(q, a, scale=1, q.arg) {
zero = q <= 0
a = rep(a, len=length(q))[!zero]
scale = rep(scale, len=length(q))[!zero]
@@ -5897,18 +5930,18 @@ psinmad = function(q, a, scale, q.arg) {
ans
}
-plomax = function(q, scale, q.arg)
+plomax = function(q, scale=1, q.arg)
psinmad(q, a=1, scale, q.arg)
-pfisk = function(q, a, scale)
+pfisk = function(q, a, scale=1)
psinmad(q, a, scale, q.arg=1)
-pparalogistic = function(q, a, scale)
+pparalogistic = function(q, a, scale=1)
psinmad(q, a, scale, a)
-pdagum = function(q, a, scale, p.arg) {
+pdagum = function(q, a, scale=1, p.arg) {
zero = q <= 0
a = rep(a, len=length(q))[!zero]
scale = rep(scale, len=length(q))[!zero]
@@ -5919,56 +5952,70 @@ pdagum = function(q, a, scale, p.arg) {
ans
}
-pinvlomax = function(q, scale, p.arg)
+pinvlomax = function(q, scale=1, p.arg)
pdagum(q, a=1, scale, p.arg)
-pinvparalogistic = function(q, a, scale)
+pinvparalogistic = function(q, a, scale=1)
pdagum(q, a, scale, a)
-dsinmad = function(x, a, scale, q.arg) {
- zero = x <= 0
- a = rep(a, len=length(x))[!zero]
- scale = rep(scale, len=length(x))[!zero]
- q = rep(q.arg, len=length(x))[!zero]
- ans = 0 * x
- xx = x[!zero]
- ans[!zero] = a * q * xx^(a-1) / (scale^a * (1 + (xx/scale)^a)^(1+q))
- ans
+dsinmad = function(x, a, scale=1, q.arg, log=FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+ LLL = max(length(x), length(a), length(scale), length(q.arg))
+ x = rep(x, len=LLL); a = rep(a, len=LLL)
+ scale = rep(scale, len=LLL); q.arg = rep(q.arg, len=LLL)
+
+ Loglik = rep(log(0), len=LLL)
+ xok = (x > 0) # Avoids evaluating log(x) if x is negative.
+ Loglik[xok] = log(a[xok]) + log(q.arg[xok]) + (a[xok]-1)*log(x[xok]) -
+ a[xok]*log(scale[xok]) -
+ (1+q.arg[xok]) * log1p((x[xok]/scale[xok])^a[xok])
+ if(log.arg) Loglik else exp(Loglik)
}
-dlomax = function(x, scale, q.arg)
- dsinmad(x, a=1, scale, q.arg)
+dlomax = function(x, scale=1, q.arg, log=FALSE)
+ dsinmad(x, a=1, scale, q.arg, log=log)
-dfisk = function(x, a, scale)
- dsinmad(x, a, scale, q.arg=1)
+dfisk = function(x, a, scale=1, log=FALSE)
+ dsinmad(x, a, scale, q.arg=1, log=log)
-dparalogistic = function(x, a, scale)
- dsinmad(x, a, scale, a)
+dparalogistic = function(x, a, scale=1, log=FALSE)
+ dsinmad(x, a, scale, a, log=log)
-ddagum = function(x, a, scale, p.arg) {
- zero = x <= 0
- a = rep(a, len=length(x))[!zero]
- scale = rep(scale, len=length(x))[!zero]
- p = rep(p.arg, len=length(x))[!zero]
- ans = 0 * x
- xx = x[!zero]
- ans[!zero] = a * p * xx^(a*p-1) / (scale^(a*p) * (1 + (xx/scale)^a)^(1+p))
- ans
+ddagum = function(x, a, scale=1, p.arg, log=FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ LLL = max(length(x), length(a), length(scale), length(p.arg))
+ x = rep(x, len=LLL); a = rep(a, len=LLL)
+ scale = rep(scale, len=LLL); p.arg = rep(p.arg, len=LLL)
+
+ Loglik = rep(log(0), len=LLL)
+ xok = (x > 0) # Avoids evaluating log(x) if x is negative.
+ Loglik[xok] = log(a[xok]) + log(p.arg[xok]) +
+ (a[xok]*p.arg[xok]-1)*log(x[xok]) -
+ a[xok]*p.arg[xok]*log(scale[xok]) -
+ (1+p.arg[xok]) * log1p((x[xok]/scale[xok])^a[xok])
+ Loglik[p.arg <= 0] = NaN
+ if(log.arg) Loglik else exp(Loglik)
+
}
-dinvlomax = function(x, scale, p.arg)
- ddagum(x, a=1, scale, p.arg)
+dinvlomax = function(x, scale=1, p.arg, log=FALSE)
+ ddagum(x, a=1, scale, p.arg, log=log)
-dinvparalogistic = function(x, a, scale)
- ddagum(x, a, scale, a)
+dinvparalogistic = function(x, a, scale=1, log=FALSE)
+ ddagum(x, a, scale, a, log=log)
-sinmad = function(link.a="loge",
+ sinmad = function(link.a="loge",
link.scale="loge",
link.q="loge",
earg.a=list(), earg.scale=list(), earg.q=list(),
@@ -5985,7 +6032,7 @@ sinmad = function(link.a="loge",
if(mode(link.q) != "character" && mode(link.q) != "name")
link.q = as.character(substitute(link.q))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(earg.a)) earg.a = list()
if(!is.list(earg.scale)) earg.scale = list()
if(!is.list(earg.q)) earg.q = list()
@@ -6053,14 +6100,11 @@ sinmad = function(link.a="loge",
scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
parg = 1
qq = eta2theta(eta[,3], .link.q, earg= .earg)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
- (if(is.R()) -lbeta(parg, qq) else lgamma(parg+qq)-lgamma(parg)-lgamma(qq))-
- (parg+qq)*log1p((y/scale)^aa)))
- }, list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale,
- .earg.q=earg.q,
- .link.q=link.q ))),
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dsinmad(x=y, a=aa, scale=scale, q.arg=qq, log=TRUE))
+ }
+ }, list( .link.a=link.a, .link.scale=link.scale, .link.q=link.q,
+ .earg.a=earg.a, .earg.scale=earg.scale, .earg.q=earg.q ))),
vfamily=c("sinmad"),
deriv=eval(substitute(expression({
aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
@@ -6128,7 +6172,7 @@ sinmad = function(link.a="loge",
if(mode(link.p) != "character" && mode(link.p) != "name")
link.p = as.character(substitute(link.p))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(earg.a)) earg.a = list()
if(!is.list(earg.scale)) earg.scale = list()
if(!is.list(earg.p)) earg.p = list()
@@ -6196,14 +6240,11 @@ sinmad = function(link.a="loge",
scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
parg = eta2theta(eta[,3], .link.p, earg= .earg.p)
qq = 1
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
- (if(is.R()) -lbeta(parg, qq) else lgamma(parg+qq)-lgamma(parg)-lgamma(qq))-
- (parg+qq)*log1p((y/scale)^aa)))
- }, list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale,
- .earg.p=earg.p,
- .link.p=link.p ))),
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * ddagum(x=y, a=aa, scale=scale, p.arg=parg, log=TRUE))
+ }
+ }, list( .link.a=link.a, .link.scale=link.scale, .link.p=link.p,
+ .earg.a=earg.a, .earg.scale=earg.scale, .earg.p=earg.p ))),
vfamily=c("dagum"),
deriv=eval(substitute(expression({
aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
@@ -6255,14 +6296,9 @@ sinmad = function(link.a="loge",
-betaII= function(link.scale="loge",
- link.p="loge",
- link.q="loge",
- earg.scale=list(), earg.p=list(), earg.q=list(),
- init.scale=NULL,
- init.p=1.0,
- init.q=1.0,
- zero=NULL)
+ betaII = function(link.scale="loge", link.p="loge", link.q="loge",
+ earg.scale=list(), earg.p=list(), earg.q=list(),
+ init.scale=NULL, init.p=1.0, init.q=1.0, zero=NULL)
{
if(mode(link.scale) != "character" && mode(link.scale) != "name")
@@ -6272,7 +6308,7 @@ betaII= function(link.scale="loge",
if(mode(link.q) != "character" && mode(link.q) != "name")
link.q = as.character(substitute(link.q))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(earg.scale)) earg.scale = list()
if(!is.list(earg.p)) earg.p = list()
if(!is.list(earg.q)) earg.q = list()
@@ -6341,9 +6377,8 @@ betaII= function(link.scale="loge",
parg = eta2theta(eta[,2], .link.p, earg= .earg.p)
qq = eta2theta(eta[,3], .link.q, earg= .earg.q)
if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
- (if(is.R()) -lbeta(parg, qq) else lgamma(parg+qq)-lgamma(parg)-lgamma(qq))-
- (parg+qq)*log1p((y/scale)^aa)))
+ sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
+ (-lbeta(parg, qq)) - (parg+qq)*log1p((y/scale)^aa)))
}, list( .link.scale=link.scale,
.earg.scale=earg.scale,
.earg.p=earg.p, .earg.q=earg.q,
@@ -6399,7 +6434,7 @@ betaII= function(link.scale="loge",
-lomax = function(link.scale="loge",
+ lomax = function(link.scale="loge",
link.q="loge",
earg.scale=list(), earg.q=list(),
init.scale=NULL,
@@ -6412,7 +6447,7 @@ lomax = function(link.scale="loge",
if(mode(link.q) != "character" && mode(link.q) != "name")
link.q = as.character(substitute(link.q))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(earg.scale)) earg.scale = list()
if(!is.list(earg.q)) earg.q = list()
@@ -6447,41 +6482,31 @@ lomax = function(link.scale="loge",
etastart = cbind(theta2eta(scale, .link.scale, earg= .earg.scale),
theta2eta(qq, .link.q, earg= .earg.q))
}
- }), list( .link.scale=link.scale,
- .link.q=link.q,
- .earg.scale=earg.scale,
- .earg.q=earg.q,
- .init.scale=init.scale,
- .init.q=init.q ))),
+ }), list( .link.scale=link.scale, .link.q=link.q,
+ .earg.scale=earg.scale, .earg.q=earg.q,
+ .init.scale=init.scale, .init.q=init.q ))),
inverse=eval(substitute(function(eta, extra=NULL) {
scale = eta2theta(eta[,1], .link.scale, earg= .earg.scale)
qq = eta2theta(eta[,2], .link.q, earg= .earg.q)
scale/(qq-1)
- }, list( .link.scale=link.scale,
- .earg.scale=earg.scale,
- .earg.q=earg.q,
- .link.q=link.q ))),
+ }, list( .link.scale=link.scale, .link.q=link.q,
+ .earg.scale=earg.scale, .earg.q=earg.q ))),
last=eval(substitute(expression({
misc$link = c(scale= .link.scale, q= .link.q)
misc$earg = list(scale= .earg.scale, q= .earg.q)
- }), list( .link.scale=link.scale,
- .earg.scale=earg.scale,
- .earg.q=earg.q,
- .link.q=link.q ))),
+ }), list( .link.scale=link.scale, .link.q=link.q,
+ .earg.scale=earg.scale, .earg.q=earg.q ))),
loglikelihood=eval(substitute(
function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
aa = 1
scale = eta2theta(eta[,1], .link.scale, earg= .earg.scale)
parg = 1
qq = eta2theta(eta[,2], .link.q, earg= .earg.q)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
- (if(is.R()) -lbeta(parg, qq) else lgamma(parg+qq)-lgamma(parg)-lgamma(qq))-
- (parg+qq)*log1p((y/scale)^aa)))
- }, list( .link.scale=link.scale,
- .earg.scale=earg.scale,
- .earg.q=earg.q,
- .link.q=link.q ))),
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dlomax(x=y, scale=scale, q.arg=qq, log=TRUE))
+ }
+ }, list( .link.scale=link.scale, .link.q=link.q,
+ .earg.scale=earg.scale, .earg.q=earg.q ))),
vfamily=c("lomax"),
deriv=eval(substitute(expression({
aa = 1
@@ -6496,10 +6521,8 @@ lomax = function(link.scale="loge",
dq.deta = dtheta.deta(qq, .link.q, earg= .earg.q)
w * cbind( dl.dscale * dscale.deta,
dl.dq * dq.deta )
- }), list( .link.scale=link.scale,
- .earg.scale=earg.scale,
- .earg.q=earg.q,
- .link.q=link.q ))),
+ }), list( .link.scale=link.scale, .link.q=link.q,
+ .earg.scale=earg.scale, .earg.q=earg.q ))),
weight=eval(substitute(expression({
ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
ed2l.dq = 1/qq^2
@@ -6510,10 +6533,8 @@ lomax = function(link.scale="loge",
wz[,iam(1,2,M)] = ed2l.dscaleq * dscale.deta * dq.deta
wz = w * wz
wz
- }), list( .link.scale=link.scale,
- .earg.scale=earg.scale,
- .earg.q=earg.q,
- .link.q=link.q ))))
+ }), list( .link.scale=link.scale, .link.q=link.q,
+ .earg.scale=earg.scale, .earg.q=earg.q ))))
}
@@ -6530,7 +6551,7 @@ lomax = function(link.scale="loge",
if(mode(link.scale) != "character" && mode(link.scale) != "name")
link.scale = as.character(substitute(link.scale))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(earg.a)) earg.a = list()
if(!is.list(earg.scale)) earg.scale = list()
@@ -6564,16 +6585,14 @@ lomax = function(link.scale="loge",
}
}), list( .link.a=link.a, .link.scale=link.scale,
.earg.a=earg.a, .earg.scale=earg.scale,
- .init.a=init.a, .init.scale=init.scale
- ))),
+ .init.a=init.a, .init.scale=init.scale ))),
inverse=eval(substitute(function(eta, extra=NULL) {
aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
qq = 1
scale*gamma(1 + 1/aa)*gamma(1-1/aa)
}, list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale
- ))),
+ .earg.a=earg.a, .earg.scale=earg.scale ))),
last=eval(substitute(expression({
misc$link = c(a= .link.a, scale= .link.scale)
misc$earg = list(a= .earg.a, scale= .earg.scale)
@@ -6585,12 +6604,11 @@ lomax = function(link.scale="loge",
aa = eta2theta(eta[,1], .link.a, earg= .earg)
scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
parg = qq = 1
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
- (if(is.R()) -lbeta(parg, qq) else lgamma(parg+qq)-lgamma(parg)-lgamma(qq))-
- (parg+qq)*log1p((y/scale)^aa)))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dfisk(x=y, a=aa, scale=scale, log=TRUE))
+ }
}, list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale ))),
+ .earg.a=earg.a, .earg.scale=earg.scale ))),
vfamily=c("fisk"),
deriv=eval(substitute(expression({
aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
@@ -6608,8 +6626,7 @@ lomax = function(link.scale="loge",
dscale.deta = dtheta.deta(scale, .link.scale, earg= .earg.scale)
w * cbind( dl.da * da.deta, dl.dscale * dscale.deta )
}), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale
- ))),
+ .earg.a=earg.a, .earg.scale=earg.scale ))),
weight=eval(substitute(expression({
ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
(temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
@@ -6628,12 +6645,12 @@ lomax = function(link.scale="loge",
}
-invlomax = function(link.scale="loge",
- link.p="loge",
- earg.scale=list(), earg.p=list(),
- init.scale=NULL,
- init.p=1.0,
- zero=NULL)
+ invlomax = function(link.scale="loge",
+ link.p="loge",
+ earg.scale=list(), earg.p=list(),
+ init.scale=NULL,
+ init.p=1.0,
+ zero=NULL)
{
if(mode(link.scale) != "character" && mode(link.scale) != "name")
@@ -6641,7 +6658,7 @@ invlomax = function(link.scale="loge",
if(mode(link.p) != "character" && mode(link.p) != "name")
link.p = as.character(substitute(link.p))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(earg.scale)) earg.scale = list()
if(!is.list(earg.p)) earg.p = list()
@@ -6699,14 +6716,11 @@ invlomax = function(link.scale="loge",
aa = qq = 1
scale = eta2theta(eta[,1], .link.scale, earg= .earg.scale)
parg = eta2theta(eta[,2], .link.p, earg= .earg.p)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
- (if(is.R()) -lbeta(parg, qq) else lgamma(parg+qq)-lgamma(parg)-lgamma(qq))-
- (parg+qq)*log1p((y/scale)^aa)))
- }, list( .link.scale=link.scale,
- .earg.scale=earg.scale,
- .earg.p=earg.p,
- .link.p=link.p ))),
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dinvlomax(x=y, scale=scale, p.arg=parg, log=TRUE))
+ }
+ }, list( .link.scale=link.scale, .link.p=link.p,
+ .earg.scale=earg.scale, .earg.p=earg.p ))),
vfamily=c("invlomax"),
deriv=eval(substitute(expression({
aa = qq = 1
@@ -6722,10 +6736,8 @@ invlomax = function(link.scale="loge",
dp.deta = dtheta.deta(parg, .link.p, earg= .earg.p)
w * cbind( dl.dscale * dscale.deta,
dl.dp * dp.deta )
- }), list( .link.scale=link.scale,
- .earg.scale=earg.scale,
- .earg.p=earg.p,
- .link.p=link.p ))),
+ }), list( .link.scale=link.scale, .link.p=link.p,
+ .earg.scale=earg.scale, .earg.p=earg.p ))),
weight=eval(substitute(expression({
ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
ed2l.dp = 1/parg^2
@@ -6736,19 +6748,17 @@ invlomax = function(link.scale="loge",
wz[,iam(1,2,M)] = ed2l.dscalep * dscale.deta * dp.deta
wz = w * wz
wz
- }), list( .link.scale=link.scale,
- .earg.scale=earg.scale,
- .earg.p=earg.p,
- .link.p=link.p ))))
+ }), list( .link.scale=link.scale, .link.p=link.p,
+ .earg.scale=earg.scale, .earg.p=earg.p ))))
}
-paralogistic = function(link.a="loge",
- link.scale="loge",
- earg.a=list(), earg.scale=list(),
- init.a=1.0,
- init.scale=NULL,
- zero=NULL)
+ paralogistic = function(link.a="loge",
+ link.scale="loge",
+ earg.a=list(), earg.scale=list(),
+ init.a=1.0,
+ init.scale=NULL,
+ zero=NULL)
{
if(mode(link.a) != "character" && mode(link.a) != "name")
@@ -6756,7 +6766,7 @@ paralogistic = function(link.a="loge",
if(mode(link.scale) != "character" && mode(link.scale) != "name")
link.scale = as.character(substitute(link.scale))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(earg.a)) earg.a = list()
if(!is.list(earg.scale)) earg.scale = list()
@@ -6801,27 +6811,23 @@ paralogistic = function(link.a="loge",
qq = aa
scale*gamma(1 + 1/aa)*gamma(qq-1/aa)/(gamma(qq))
}, list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale
- ))),
+ .earg.a=earg.a, .earg.scale=earg.scale ))),
last=eval(substitute(expression({
misc$link = c(a= .link.a, scale= .link.scale)
misc$earg = list(a= .earg.a, scale= .earg.scale )
}), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale
- ))),
+ .earg.a=earg.a, .earg.scale=earg.scale ))),
loglikelihood=eval(substitute(
function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
parg = 1
qq = aa
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
- (if(is.R()) -lbeta(parg, qq) else lgamma(parg+qq)-lgamma(parg)-lgamma(qq))-
- (parg+qq)*log1p((y/scale)^aa)))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dparalogistic(x=y, a=aa, scale=scale, log=TRUE))
+ }
}, list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale
- ))),
+ .earg.a=earg.a, .earg.scale=earg.scale ))),
vfamily=c("paralogistic"),
deriv=eval(substitute(expression({
aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
@@ -6840,8 +6846,7 @@ paralogistic = function(link.a="loge",
dscale.deta = dtheta.deta(scale, .link.scale, earg= .earg.scale)
w * cbind( dl.da * da.deta, dl.dscale * dscale.deta)
}), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale
- ))),
+ .earg.a=earg.a, .earg.scale=earg.scale ))),
weight=eval(substitute(expression({
ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
(temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
@@ -6856,8 +6861,7 @@ paralogistic = function(link.a="loge",
wz = w * wz
wz
}), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale
- ))))
+ .earg.a=earg.a, .earg.scale=earg.scale ))))
}
@@ -6874,7 +6878,7 @@ paralogistic = function(link.a="loge",
if(mode(link.scale) != "character" && mode(link.scale) != "name")
link.scale = as.character(substitute(link.scale))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(earg.a)) earg.a = list()
if(!is.list(earg.scale)) earg.scale = list()
@@ -6931,10 +6935,9 @@ paralogistic = function(link.a="loge",
scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
parg = aa
qq = 1
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
- (if(is.R()) -lbeta(parg, qq) else lgamma(parg+qq)-lgamma(parg)-lgamma(qq))-
- (parg+qq)*log1p((y/scale)^aa)))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dinvparalogistic(x=y, a=aa, scale=scale, log=TRUE))
+ }
}, list( .link.a=link.a, .link.scale=link.scale,
.earg.a=earg.a, .earg.scale=earg.scale ))),
vfamily=c("invparalogistic"),
@@ -6976,22 +6979,22 @@ paralogistic = function(link.a="loge",
if(FALSE)
-genlognormal = function(link.sigma="loge", link.r="loge",
+ genlognormal = function(link.sigma="loge", link.r="loge",
esigma=list(), er=list(),
init.sigma=1, init.r=1, zero=NULL)
{
-warning(paste("2/4/04; doesn't work, possibly because first derivs are",
- "not continuous (sign() is used). Certainly, the derivs wrt",
- "mymu are problematic (run with maxit=4:9 and look at weight",
- "matrices). Possibly fundamentally cannot be estimated by IRLS.",
- "Pooling doesn't seem to help."))
+warning("2/4/04; doesn't work, possibly because first derivs are ",
+ "not continuous (sign() is used). Certainly, the derivs wrt ",
+ "mymu are problematic (run with maxit=4:9 and look at weight ",
+ "matrices). Possibly fundamentally cannot be estimated by IRLS. ",
+ "Pooling doesn't seem to help")
if(mode(link.sigma) != "character" && mode(link.sigma) != "name")
link.sigma = as.character(substitute(link.sigma))
if(mode(link.r) != "character" && mode(link.r) != "name")
link.r = as.character(substitute(link.r))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(esigma)) esigma = list()
if(!is.list(er)) er = list()
@@ -7082,7 +7085,7 @@ warning(paste("2/4/04; doesn't work, possibly because first derivs are",
}
-betaprime = function(link="loge", earg=list(), i1=2, i2=NULL, zero=NULL)
+ betaprime = function(link="loge", earg=list(), i1=2, i2=NULL, zero=NULL)
{
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -7128,13 +7131,12 @@ betaprime = function(link="loge", earg=list(), i1=2, i2=NULL, zero=NULL)
misc$earg = list(shape1 = .earg, shape2 = .earg)
}), list( .link=link, .earg=earg ))),
loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE, eta, extra=NULL){
+ function(mu, y, w, residuals= FALSE, eta, extra=NULL){
shapes = eta2theta(eta, .link, earg= .earg)
- temp = if(is.R()) lbeta(shapes[,1], shapes[,2]) else
- lgamma(shapes[,1]) + lgamma(shapes[,2]) -
- lgamma(shapes[,1]+shapes[,2])
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w *((shapes[,1]-1)*log(y)-(shapes[,2]+shapes[,1])*log1p(y)-temp))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w *((shapes[,1]-1) * log(y) - lbeta(shapes[,1], shapes[,2]) -
+ (shapes[,2]+shapes[,1]) * log1p(y)))
+ }
}, list( .link=link, .earg=earg ))),
vfamily="betaprime",
deriv=eval(substitute(expression({
@@ -7163,7 +7165,47 @@ betaprime = function(link="loge", earg=list(), i1=2, i2=NULL, zero=NULL)
-maxwell = function(link="loge", earg=list()) {
+
+
+
+dmaxwell = function(x, a, log = FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ L = max(length(x), length(a))
+ x = rep(x, len=L); a = rep(a, len=L);
+ logdensity = rep(log(0), len=L)
+ xok = (x > 0)
+ logdensity[xok] = 0.5 * log(2/pi) + 1.5 * log(a[xok]) +
+ 2 * log(x[xok]) - 0.5 * a[xok] * x[xok]^2
+ if(log.arg) logdensity else exp(logdensity)
+}
+
+pmaxwell = function(q, a) {
+ if(any(a <= 0)) stop("argument 'a' must be positive")
+ L = max(length(q), length(a))
+ q = rep(q, len=L); a = rep(a, len=L);
+ ifelse(q > 0, erf(q*sqrt(a/2)) - q*exp(-0.5*a*q^2) * sqrt(2*a/pi), 0)
+}
+
+rmaxwell = function(n, a) {
+ if(!is.Numeric(n, posit=TRUE, allow=1))
+ stop("bad input for argument 'n'")
+ if(any(a <= 0)) stop("argument 'a' must be positive")
+ sqrt(2 * rgamma(n=n, 1.5) / a)
+}
+
+qmaxwell = function(p, a) {
+ if(!is.Numeric(p, posit=TRUE) || any(p>=1))
+ stop("bad input for argument 'p'")
+ if(any(a <= 0)) stop("argument 'a' must be positive")
+ N = max(length(p), length(a)); p = rep(p, len=N); a = rep(a, len=N)
+ sqrt(2 * qgamma(p=p, 1.5) / a)
+}
+
+
+ maxwell = function(link="loge", earg=list()) {
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
@@ -7192,9 +7234,9 @@ maxwell = function(link="loge", earg=list()) {
}), list( .link=link, .earg=earg ))),
loglikelihood=eval(substitute(
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- a = eta2theta(eta, .link, earg= .earg)
+ aa = eta2theta(eta, .link, earg= .earg)
if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (1.5 * log(a) + 2 * log(y) - 0.5 * a * y^2 + 0.5*log(2/pi)))
+ sum(w * dmaxwell(x=y, a=aa, log = TRUE))
}, list( .link=link, .earg=earg ))),
vfamily=c("maxwell"),
deriv=eval(substitute(expression({
@@ -7211,52 +7253,31 @@ maxwell = function(link="loge", earg=list()) {
}
-dmaxwell = function(x, a) {
- if(any(a <= 0)) stop("argument \"a\" must be positive")
- L = max(length(x), length(a))
- x = rep(x, len=L); a = rep(a, len=L);
- ifelse(x>0, sqrt(2/pi) * a^(1.5) * x^2 * exp(-0.5*a*x^2), 0)
-}
-
-pmaxwell = function(q, a) {
- if(any(a <= 0)) stop("argument \"a\" must be positive")
- L = max(length(q), length(a))
- q = rep(q, len=L); a = rep(a, len=L);
- ifelse(q > 0, erf(q*sqrt(a/2)) - q*exp(-0.5*a*q^2) * sqrt(2*a/pi), 0)
-}
-
-rmaxwell = function(n, a) {
- if(!is.Numeric(n, posit=TRUE, allow=1))
- stop("bad input for argument \"n\"")
- if(any(a <= 0)) stop("argument \"a\" must be positive")
- sqrt(2 * rgamma(n=n, 1.5) / a)
-}
-
-qmaxwell = function(p, a) {
- if(!is.Numeric(p, posit=TRUE) || any(p>=1))
- stop("bad input for argument \"p\"")
- if(any(a <= 0)) stop("argument \"a\" must be positive")
- N = max(length(p), length(a)); p = rep(p, len=N); a = rep(a, len=N)
- sqrt(2 * qgamma(p=p, 1.5) / a)
-}
-
-
-dnaka = function(x, shape, scale=1) {
+dnaka = function(x, shape, scale=1, log = FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
L = max(length(x), length(shape), length(scale))
x = rep(x, len=L); shape = rep(shape, len=L); scale = rep(scale, len=L);
- ifelse(x <= 0, 0, dgamma(x=x^2, shape=shape, scale=scale/shape) * 2 * x)
+
+ logdensity = rep(log(0), len=L)
+ xok = (x > 0)
+ logdensity[xok] = dgamma(x=x[xok]^2, shape=shape[xok],
+ scale=scale[xok]/shape[xok], log=TRUE) +
+ log(2) + log(x[xok])
+ if(log.arg) logdensity else exp(logdensity)
}
pnaka = function(q, shape, scale=1) {
if(!is.Numeric(q))
- stop("bad input for argument \"q\"")
+ stop("bad input for argument 'q'")
if(!is.Numeric(shape, posit=TRUE))
- stop("bad input for argument \"shape\"")
+ stop("bad input for argument 'shape'")
if(!is.Numeric(scale, posit=TRUE))
- stop("bad input for argument \"scale\"")
+ stop("bad input for argument 'scale'")
L = max(length(q), length(shape), length(scale))
q = rep(q, len=L); shape = rep(shape, len=L); scale = rep(scale, len=L);
ifelse(q <= 0, 0, pgamma(shape * q^2 / scale, shape))
@@ -7265,11 +7286,11 @@ pnaka = function(q, shape, scale=1) {
qnaka = function(p, shape, scale=1, ...) {
if(!is.Numeric(p, posit=TRUE) || max(p) >= 1)
- stop("bad input for argument \"p\"")
+ stop("bad input for argument 'p'")
if(!is.Numeric(shape, posit=TRUE))
- stop("bad input for argument \"shape\"")
+ stop("bad input for argument 'shape'")
if(!is.Numeric(scale, posit=TRUE))
- stop("bad input for argument \"scale\"")
+ stop("bad input for argument 'scale'")
L = max(length(p), length(shape), length(scale))
p = rep(p, len=L); shape = rep(shape, len=L); scale = rep(scale, len=L);
ans = rep(0.0, len=L)
@@ -7289,14 +7310,14 @@ qnaka = function(p, shape, scale=1, ...) {
rnaka = function(n, shape, scale=1, Smallno=1.0e-6) {
if(!is.Numeric(n, posit=TRUE, integ=TRUE))
- stop("bad input for argument \"n\"")
+ stop("bad input for argument 'n'")
if(!is.Numeric(scale, posit=TRUE, allow=1))
- stop("bad input for argument \"scale\"")
+ stop("bad input for argument 'scale'")
if(!is.Numeric(shape, posit=TRUE, allow=1))
- stop("bad input for argument \"shape\"")
+ stop("bad input for argument 'shape'")
if(!is.Numeric(Smallno, posit=TRUE, allow=1) || Smallno > 0.01 ||
Smallno < 2 * .Machine$double.eps)
- stop("bad input for argument \"Smallno\"")
+ stop("bad input for argument 'Smallno'")
ans = rep(0.0, len=n)
ptr1 = 1; ptr2 = 0
@@ -7323,16 +7344,14 @@ rnaka = function(n, shape, scale=1, Smallno=1.0e-6) {
-
-
-nakagami = function(lshape="loge", lscale="loge",
- eshape=list(), escale=list(), ishape=NULL, iscale=1) {
+ nakagami = function(lshape="loge", lscale="loge",
+ eshape=list(), escale=list(), ishape=NULL, iscale=1) {
if(mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
if(mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if(!is.null(iscale) && !is.Numeric(iscale, positi=TRUE))
- stop("argument \"iscale\" must be a positive number or NULL")
+ stop("argument 'iscale' must be a positive number or NULL")
if(!is.list(eshape)) eshape = list()
if(!is.list(escale)) escale = list()
@@ -7382,8 +7401,7 @@ nakagami = function(lshape="loge", lscale="loge",
shape = eta2theta(eta[,1], .lshape, earg= .eshape)
scale = eta2theta(eta[,2], .lscale, earg= .escale)
if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (log(2) + shape * log(shape/scale) - lgamma(shape) +
- (2*shape-1) * log(y) - shape * y^2 / scale))
+ sum(w * dnaka(x=y, shape=shape, scale=scale, log=TRUE))
}, list( .lscale=lscale, .lshape=lshape,
.escale=escale, .eshape=eshape ))),
vfamily=c("nakagami"),
@@ -7411,8 +7429,42 @@ nakagami = function(lshape="loge", lscale="loge",
+drayleigh = function(x, a, log=FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ L = max(length(x), length(a))
+ x = rep(x, len=L); a = rep(a, len=L);
+ logdensity = rep(log(0), len=L)
+ xok = (x > 0)
+ logdensity[xok] = log(x[xok]) - 0.5 * (x[xok]/a[xok])^2 - 2*log(a[xok])
+ if(log.arg) logdensity else exp(logdensity)
+}
+
+prayleigh = function(q, a) {
+ if(any(a <= 0)) stop("argument 'a' must be positive")
+ L = max(length(q), length(a))
+ q = rep(q, len=L); a = rep(a, len=L);
+ ifelse(q > 0, 1 - exp(-0.5*(q/a)^2), 0)
+}
+
+qrayleigh = function(p, a) {
+ if(any(a <= 0)) stop("argument 'a' must be positive")
+ if(any(p <= 0) || any(p >= 1)) stop("argument 'p' must be between 0 and 1")
+ a * sqrt(-2 * log1p(-p))
+}
+
+rrayleigh = function(n, a) {
+ if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE))
+ stop("bad input for argument 'n'")
+ if(any(a <= 0)) stop("a must be positive")
+ a * sqrt(-2 * log(runif(n )))
+}
+
+
-rayleigh = function(link="loge", earg=list(), nrfs=1/3+0.01) {
+ rayleigh = function(link="loge", earg=list(), nrfs=1/3+0.01) {
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if(!is.list(earg)) earg = list()
@@ -7445,8 +7497,9 @@ rayleigh = function(link="loge", earg=list(), nrfs=1/3+0.01) {
loglikelihood=eval(substitute(
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
a = eta2theta(eta, .link, earg= .earg)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (log(y) - 2 * log(a) - 0.5 * (y/a)^2))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * drayleigh(x=y, a=a, log=TRUE))
+ }
}, list( .link=link, .earg=earg ))),
vfamily=c("rayleigh"),
deriv=eval(substitute(expression({
@@ -7464,64 +7517,37 @@ rayleigh = function(link="loge", earg=list(), nrfs=1/3+0.01) {
}
-drayleigh = function(x, a) {
- if(any(a <= 0)) stop("argument \"a\" must be positive")
- L = max(length(x), length(a))
- x = rep(x, len=L); a = rep(a, len=L);
- ifelse(x>0, x*exp(-0.5*(x/a)^2)/a^2, 0)
-}
-
-prayleigh = function(q, a) {
- if(any(a <= 0)) stop("argument \"a\" must be positive")
- L = max(length(q), length(a))
- q = rep(q, len=L); a = rep(a, len=L);
- ifelse(q > 0, 1 - exp(-0.5*(q/a)^2), 0)
-}
-
-qrayleigh = function(p, a) {
- if(any(a <= 0)) stop("argument \"a\" must be positive")
- if(any(p <= 0) || any(p >= 1)) stop("argument \"p\" must be between 0 and 1")
- a * sqrt(-2 * log1p(-p))
-}
-
-rrayleigh = function(n, a) {
- if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE))
- stop("bad input for argument \"n\"")
- if(any(a <= 0)) stop("a must be positive")
- a * sqrt(-2 * log(runif(n )))
-}
-
+dparetoIV = function(x, location=0, scale=1, inequality=1, shape=1, log=FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
-dparetoIV = function(x, location=0, scale=1, inequality=1, shape=1) {
- if(!is.Numeric(x)) stop("bad input for argument \"x\"")
- if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument \"scale\"")
- if(!is.Numeric(inequality, posi=TRUE))
- stop("bad input for argument \"inequality\"")
- if(!is.Numeric(shape, posit=TRUE)) stop("bad input for argument \"shape\"")
N = max(length(x), length(location), length(scale), length(inequality),
length(shape))
x = rep(x, len=N); location = rep(location, len=N)
scale = rep(scale, len=N); inequality = rep(inequality, len=N)
shape = rep(shape, len=N)
- answer = x * 0
- ii = x > location
- zedd = (x[ii] - location[ii]) / scale[ii]
- answer[ii] = (shape[ii]/(scale[ii]*inequality[ii])) *
- zedd^(1/inequality[ii]-1) / (1+zedd^(1/inequality[ii]))^(shape[ii]+1)
- answer
+
+ logdensity = rep(log(0), len=N)
+ xok = (x > location)
+ zedd = (x - location) / scale
+ logdensity[xok] = log(shape[xok]) - log(scale[xok]) - log(inequality[xok])+
+ (1/inequality[xok]-1) * log(zedd[xok]) -
+ (shape[xok]+1) * log1p(zedd[xok]^(1/inequality[xok]))
+ if(log.arg) logdensity else exp(logdensity)
}
pparetoIV = function(q, location=0, scale=1, inequality=1, shape=1) {
- if(!is.Numeric(q)) stop("bad input for argument \"q\"")
+ if(!is.Numeric(q)) stop("bad input for argument 'q'")
if(!is.Numeric(scale, posit=TRUE))
- stop("bad input for argument \"scale\"")
+ stop("bad input for argument 'scale'")
if(!is.Numeric(inequality, posi=TRUE))
- stop("bad input for argument \"inequality\"")
+ stop("bad input for argument 'inequality'")
if(!is.Numeric(shape, posit=TRUE))
- stop("bad input for argument \"shape\"")
+ stop("bad input for argument 'shape'")
N = max(length(q), length(location), length(scale), length(inequality),
length(shape))
q = rep(q, len=N); location = rep(location, len=N)
@@ -7536,43 +7562,48 @@ pparetoIV = function(q, location=0, scale=1, inequality=1, shape=1) {
qparetoIV = function(p, location=0, scale=1, inequality=1, shape=1) {
if(!is.Numeric(p, posit=TRUE) || any(p >= 1))
- stop("bad input for argument \"p\"")
+ stop("bad input for argument 'p'")
if(!is.Numeric(scale, posit=TRUE))
- stop("bad input for argument \"scale\"")
+ stop("bad input for argument 'scale'")
if(!is.Numeric(inequality, posi=TRUE))
- stop("bad input for argument \"inequality\"")
+ stop("bad input for argument 'inequality'")
if(!is.Numeric(shape, posit=TRUE))
- stop("bad input for argument \"shape\"")
+ stop("bad input for argument 'shape'")
location + scale * (-1 + (1-p)^(-1/shape))^inequality
}
rparetoIV = function(n, location=0, scale=1, inequality=1, shape=1) {
if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
stop("bad input for argument n")
- if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument \"scale\"")
+ if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument 'scale'")
if(!is.Numeric(inequality, posi=TRUE))
- stop("bad input for argument \"inequality\"")
- if(!is.Numeric(shape, posit=TRUE)) stop("bad input for argument \"shape\"")
+ stop("bad input for argument 'inequality'")
+ if(!is.Numeric(shape, posit=TRUE)) stop("bad input for argument 'shape'")
location + scale * (-1 + runif(n)^(-1/shape))^inequality
}
-dparetoIII = function(x, location=0, scale=1, inequality=1)
- dparetoIV(x=x, location=location, scale=scale, inequality=inequality, shape=1)
+dparetoIII = function(x, location=0, scale=1, inequality=1, log=FALSE)
+ dparetoIV(x=x, location=location, scale=scale, inequality=inequality,
+ shape=1, log=log)
pparetoIII = function(q, location=0, scale=1, inequality=1)
- pparetoIV(q=q, location=location, scale=scale, inequality=inequality, shape=1)
+ pparetoIV(q=q, location=location, scale=scale, inequality=inequality,
+ shape=1)
qparetoIII = function(p, location=0, scale=1, inequality=1)
- qparetoIV(p=p, location=location, scale=scale, inequality=inequality, shape=1)
+ qparetoIV(p=p, location=location, scale=scale, inequality=inequality,
+ shape=1)
rparetoIII = function(n, location=0, scale=1, inequality=1)
- rparetoIV(n=n, location=location, scale=scale, inequality=inequality, shape=1)
+ rparetoIV(n=n, location=location, scale=scale, inequality=inequality,
+ shape=1)
-dparetoII = function(x, location=0, scale=1, shape=1)
- dparetoIV(x=x, location=location, scale=scale, inequality=1, shape=shape)
+dparetoII = function(x, location=0, scale=1, shape=1, log=FALSE)
+ dparetoIV(x=x, location=location, scale=scale, inequality=1, shape=shape,
+ log=log)
pparetoII = function(q, location=0, scale=1, shape=1)
pparetoIV(q=q, location=location, scale=scale, inequality=1, shape=shape)
@@ -7598,7 +7629,7 @@ rparetoI = function(n, scale=1, shape=1)
-paretoIV = function(location=0,
+ paretoIV = function(location=0,
lscale="loge",
linequality="loge",
lshape="loge",
@@ -7612,17 +7643,17 @@ paretoIV = function(location=0,
if(mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
if(!is.Numeric(location))
- stop("argument \"location\" must be numeric")
+ stop("argument 'location' must be numeric")
if(is.Numeric(iscale) && any(iscale <= 0))
- stop("argument \"iscale\" must be positive")
+ stop("argument 'iscale' must be positive")
if(is.Numeric(iinequality) && any(iinequality <= 0))
- stop("argument \"iinequality\" must be positive")
+ stop("argument 'iinequality' must be positive")
if(is.Numeric(ishape) && any(ishape <= 0))
- stop("argument \"ishape\" must be positive")
+ stop("argument 'ishape' must be positive")
if(!is.Numeric(method.init, allow=1, integ=TRUE) || method.init>2)
- stop("bad input for argument \"method.init\"")
+ stop("bad input for argument 'method.init'")
if(linequality == "nloge" && location != 0)
- warning("The Burr distribution has location=0 and linequality=nloge")
+ warning("The Burr distribution has 'location=0' and 'linequality=nloge'")
if(!is.list(escale)) escale = list()
if(!is.list(einequality)) einequality = list()
if(!is.list(eshape)) eshape = list()
@@ -7645,7 +7676,7 @@ paretoIV = function(location=0,
namesof("shape", .lshape, earg=.eshape, tag=FALSE))
extra$location = location = .location
if(any(y <= location))
- stop("the response must have values > than the \"location\" argument")
+ stop("the response must have values > than the 'location' argument")
if(!length(etastart)) {
inequality.init = if(length(.iinequality)) .iinequality else 1
scale.init = if(length( .iscale)) .iscale else 1
@@ -7693,9 +7724,10 @@ paretoIV = function(location=0,
inequality = eta2theta(eta[,2], .linequality, earg= .einequality)
shape = eta2theta(eta[,3], .lshape, earg= .eshape)
zedd = (y - location) / Scale
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (log(shape) - log(inequality) - log(Scale) + (1/inequality -1) *
- log(zedd) - (shape+1) * log1p(zedd^(1/inequality))))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dparetoIV(x=y, location=location, scale=Scale,
+ inequality=inequality, shape=shape, log=TRUE))
+ }
}, list( .lscale=lscale, .linequality=linequality, .lshape=lshape,
.escale=escale, .einequality=einequality, .eshape=eshape ))),
vfamily=c("paretoIV"),
@@ -7739,21 +7771,24 @@ paretoIV = function(location=0,
.escale=escale, .einequality=einequality, .eshape=eshape ))))
}
-paretoIII = function(location=0,
- lscale="loge",
- linequality="loge",
- escale=list(), einequality=list(),
- iscale=NULL, iinequality=NULL) {
+
+
+
+ paretoIII = function(location=0,
+ lscale="loge",
+ linequality="loge",
+ escale=list(), einequality=list(),
+ iscale=NULL, iinequality=NULL) {
if(mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if(mode(linequality) != "character" && mode(linequality) != "name")
linequality = as.character(substitute(linequality))
if(!is.Numeric(location))
- stop("argument \"location\" must be numeric")
+ stop("argument 'location' must be numeric")
if(is.Numeric(iscale) && any(iscale <= 0))
- stop("argument \"iscale\" must be positive")
+ stop("argument 'iscale' must be positive")
if(is.Numeric(iinequality) && any(iinequality <= 0))
- stop("argument \"iinequality\" must be positive")
+ stop("argument 'iinequality' must be positive")
if(!is.list(escale)) escale = list()
if(!is.list(einequality)) einequality = list()
@@ -7762,8 +7797,9 @@ paretoIII = function(location=0,
")/scale)^(1/inequality)]^(-1),",
"\n", " y > ",
location, ", scale > 0, inequality > 0, \n",
- "Links: ", namesof("scale", lscale, earg=escale ), ", ",
- namesof("inequality", linequality, earg=einequality ), "\n",
+ "Links: ",
+ namesof("scale", lscale, earg=escale ), ", ",
+ namesof("inequality", linequality, earg=einequality ), "\n",
"Mean: location + scale * NA"),
initialize=eval(substitute(expression({
if(ncol(cbind(y)) != 1)
@@ -7773,7 +7809,7 @@ paretoIII = function(location=0,
namesof("inequality", .linequality, earg=.einequality, tag=FALSE))
extra$location = location = .location
if(any(y <= location))
- stop("the response must have values > than the \"location\" argument")
+ stop("the response must have values > than the 'location' argument")
if(!length(etastart)) {
inequality.init = if(length(.iinequality)) .iinequality else NULL
scale.init = if(length( .iscale)) .iscale else NULL
@@ -7788,7 +7824,8 @@ paretoIII = function(location=0,
}
etastart=cbind(
theta2eta(rep(scale.init, len=n), .lscale, earg= .escale),
- theta2eta(rep(inequality.init, len=n), .linequality, earg= .einequality))
+ theta2eta(rep(inequality.init, len=n), .linequality,
+ earg= .einequality))
}
}), list( .location=location, .lscale=lscale, .linequality=linequality,
.escale=escale, .einequality=einequality,
@@ -7812,9 +7849,10 @@ paretoIII = function(location=0,
Scale = eta2theta(eta[,1], .lscale, earg= .escale)
inequality = eta2theta(eta[,2], .linequality, earg= .einequality)
zedd = (y - location) / Scale
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (-log(inequality) - log(Scale) + (1/inequality -1) *
- log(zedd) - (1+1) * log1p(zedd^(1/inequality))))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dparetoIII(x=y, location=location, scale=Scale,
+ inequality=inequality, log=TRUE))
+ }
}, list( .lscale=lscale, .linequality=linequality,
.escale=escale, .einequality=einequality ))),
vfamily=c("paretoIII"),
@@ -7846,21 +7884,24 @@ paretoIII = function(location=0,
}
-paretoII = function(location=0,
- lscale="loge",
- lshape="loge",
- escale=list(), eshape=list(),
- iscale=NULL, ishape=NULL) {
+
+
+
+ paretoII = function(location=0,
+ lscale="loge",
+ lshape="loge",
+ escale=list(), eshape=list(),
+ iscale=NULL, ishape=NULL) {
if(mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if(mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
if(!is.Numeric(location))
- stop("argument \"location\" must be numeric")
+ stop("argument 'location' must be numeric")
if(is.Numeric(iscale) && any(iscale <= 0))
- stop("argument \"iscale\" must be positive")
+ stop("argument 'iscale' must be positive")
if(is.Numeric(ishape) && any(ishape <= 0))
- stop("argument \"ishape\" must be positive")
+ stop("argument 'ishape' must be positive")
if(!is.list(escale)) escale = list()
if(!is.list(eshape)) eshape = list()
@@ -7880,7 +7921,7 @@ paretoII = function(location=0,
namesof("shape", .lshape, earg=.eshape, tag=FALSE))
extra$location = location = .location
if(any(y <= location))
- stop("the response must have values > than the \"location\" argument")
+ stop("the response must have values > than the 'location' argument")
if(!length(etastart)) {
scale.init = if(length( .iscale)) .iscale else NULL
shape.init = if(length( .ishape)) .ishape else NULL
@@ -7920,8 +7961,10 @@ paretoII = function(location=0,
Scale = eta2theta(eta[,1], .lscale, earg= .escale)
shape = eta2theta(eta[,2], .lshape, earg= .eshape)
zedd = (y - location) / Scale
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (log(shape) - log(Scale) - (shape+1) * log1p(zedd)))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dparetoII(x=y, location=location, scale=Scale,
+ shape=shape, log=TRUE))
+ }
}, list( .lscale=lscale, .lshape=lshape,
.escale=escale, .eshape=eshape ))),
vfamily=c("paretoII"),
@@ -7954,16 +7997,58 @@ paretoII = function(location=0,
-pareto1 = function(lshape="loge", earg=list(), location=NULL) {
+
+
+dpareto = function(x, location, shape, log = FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ L = max(length(x), length(location), length(shape))
+ x = rep(x, len=L); location = rep(location, len=L); shape= rep(shape, len=L)
+
+ logdensity = rep(log(0), len=L)
+ xok = (x > location)
+ logdensity[xok] = log(shape[xok]) + shape[xok] * log(location[xok]) -
+ (shape[xok]+1) * log(x[xok])
+ if(log.arg) logdensity else exp(logdensity)
+}
+
+ppareto = function(q, location, shape) {
+ if(any(location <= 0)) stop("argument 'location' must be positive")
+ if(any(shape <= 0)) stop("argument 'shape' must be positive")
+ L = max(length(q), length(location), length(shape))
+ q = rep(q, len=L); location = rep(location, len=L); shape= rep(shape, len=L)
+ ifelse(q > location, 1 - (location/q)^shape, 0)
+}
+
+qpareto = function(p, location, shape) {
+ if(any(location <= 0)) stop("argument 'location' must be positive")
+ if(any(shape <= 0)) stop("argument 'shape' must be positive")
+ if(any(p <= 0) || any(p >= 1)) stop("argument 'p' must be between 0 and 1")
+ location / (1 - p)^(1/shape)
+}
+
+rpareto = function(n, location, shape) {
+ if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ stop("bad input for argument 'n'")
+ if(any(location <= 0)) stop("argument 'location' must be positive")
+ if(any(shape <= 0)) stop("argument 'shape' must be positive")
+ location / runif(n)^(1/shape)
+}
+
+
+
+ pareto1 = function(lshape="loge", earg=list(), location=NULL) {
if(mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
if(is.Numeric(location) && location <= 0)
- stop("argument \"location\" must be positive")
+ stop("argument 'location' must be positive")
if(!is.list(earg)) earg = list()
new("vglmff",
blurb=c("Pareto distribution f(y) = shape * location^shape / y^(shape+1),",
- " y>location>0, shape>0\n",
+ " 0<location<y, shape>0\n",
"Link: ", namesof("shape", lshape, earg=earg), "\n", "\n",
"Mean: location*shape/(shape-1) for shape>1"),
initialize=eval(substitute(expression({
@@ -7972,7 +8057,7 @@ pareto1 = function(lshape="loge", earg=list(), location=NULL) {
predictors.names = namesof("shape", .lshape, earg=.earg, tag=FALSE)
locationhat = if(!length( .location)) {
locationEstimated = TRUE
- min(y)
+ min(y) # - .smallno
} else {
locationEstimated = FALSE
.location
@@ -7985,7 +8070,8 @@ pareto1 = function(lshape="loge", earg=list(), location=NULL) {
k.init = (y + 1/8) / (y - locationhat + 1/8)
etastart = theta2eta(k.init, .lshape, earg= .earg)
}
- }), list( .lshape=lshape, .earg=earg, .location=location ))),
+ }), list( .lshape=lshape, .earg=earg,
+ .location=location ))),
inverse=eval(substitute(function(eta, extra=NULL) {
k = eta2theta(eta, .lshape, earg= .earg)
location = extra$location
@@ -8000,8 +8086,11 @@ pareto1 = function(lshape="loge", earg=list(), location=NULL) {
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
k = eta2theta(eta, .lshape, earg= .earg)
location = extra$location
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (log(k) + k * log(location) - (k+1) * log(y )))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+
+
+ sum(w * (log(k) + k * log(location) - (k+1) * log(y )))
+ }
}, list( .lshape=lshape, .earg=earg ))),
vfamily=c("pareto1"),
deriv=eval(substitute(expression({
@@ -8019,50 +8108,66 @@ pareto1 = function(lshape="loge", earg=list(), location=NULL) {
}
-dpareto = function(x, location, shape) {
- if(any(location <= 0)) stop("argument \"location\" must be positive")
- if(any(shape <= 0)) stop("argument \"shape\" must be positive")
- L = max(length(x), length(location), length(shape))
- x = rep(x, len=L); location = rep(location, len=L); shape= rep(shape, len=L)
- ifelse(x>location, shape * location^shape / x^(shape+1), 0)
+
+dtpareto = function(x, lower, upper, shape) {
+ if(!is.Numeric(x)) stop("bad input for argument 'x'")
+ if(!is.Numeric(lower, pos=TRUE)) stop("argument 'lower' must be positive")
+ if(!is.Numeric(upper, pos=TRUE)) stop("argument 'upper' must be positive")
+ if(!is.Numeric(shape, pos=TRUE)) stop("argument 'shape' must be positive")
+ L = max(length(x), length(lower), length(upper), length(shape))
+ x = rep(x, len=L); lower = rep(lower, len=L); upper = rep(upper, len=L);
+ shape= rep(shape, len=L)
+ ifelse(x > lower & x < upper,
+ shape * lower^shape / (x^(shape+1) * (1-(lower/upper)^shape)), 0)
}
-ppareto = function(q, location, shape) {
- if(any(location <= 0)) stop("argument \"location\" must be positive")
- if(any(shape <= 0)) stop("argument \"shape\" must be positive")
- L = max(length(q), length(location), length(shape))
- q = rep(q, len=L); location = rep(location, len=L); shape= rep(shape, len=L)
- ifelse(q > location, 1 - (location/q)^shape, 0)
+ptpareto = function(q, lower, upper, shape) {
+ if(!is.Numeric(q)) stop("bad input for argument 'q'")
+ if(!is.Numeric(lower, pos=TRUE)) stop("argument 'lower' must be positive")
+ if(!is.Numeric(upper, pos=TRUE)) stop("argument 'upper' must be positive")
+ if(!is.Numeric(shape, pos=TRUE)) stop("argument 'shape' must be positive")
+ L = max(length(q), length(lower), length(upper), length(shape))
+ q = rep(q, len=L); lower = rep(lower, len=L); upper = rep(upper, len=L);
+ shape= rep(shape, len=L)
+ ans = q * 0
+ ans[q > lower & q < upper] = (1-(lower/q)^shape) / (1-(lower/upper)^shape)
+ ans[q >= upper] = 1
+ ans
}
-qpareto = function(p, location, shape) {
- if(any(location <= 0)) stop("argument \"location\" must be positive")
- if(any(shape <= 0)) stop("argument \"shape\" must be positive")
- if(any(p <= 0) || any(p >= 1)) stop("argument \"p\" must be between 0 and 1")
- location / (1 - p)^(1/shape)
+qtpareto = function(p, lower, upper, shape) {
+ if(!is.Numeric(p, posit=TRUE)) stop("bad input for argument 'p'")
+ if(!is.Numeric(lower, pos=TRUE)) stop("argument 'lower' must be positive")
+ if(!is.Numeric(upper, pos=TRUE)) stop("argument 'upper' must be positive")
+ if(!is.Numeric(shape, pos=TRUE)) stop("argument 'shape' must be positive")
+ if(max(p) >= 1) stop("argument 'p' must be between 0 and 1")
+ lower / (1 - p*(1-(lower/upper)^shape))^(1/shape)
}
-rpareto = function(n, location, shape) {
+rtpareto = function(n, lower, upper, shape) {
if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
- stop("bad input for argument \"n\"")
- if(any(location <= 0)) stop("argument \"location\" must be positive")
- if(any(shape <= 0)) stop("argument \"shape\" must be positive")
- location / runif(n)^(1/shape)
+ stop("bad input for argument 'n'")
+ if(!is.Numeric(lower, pos=TRUE)) stop("argument 'lower' must be positive")
+ if(!is.Numeric(upper, pos=TRUE)) stop("argument 'upper' must be positive")
+ if(!is.Numeric(shape, pos=TRUE)) stop("argument 'shape' must be positive")
+ lower / (1 - runif(n)*(1-(lower/upper)^shape))^(1/shape)
}
-tpareto1 = function(lower, upper, lshape="loge", earg=list(), ishape=NULL,
+
+
+ tpareto1 = function(lower, upper, lshape="loge", earg=list(), ishape=NULL,
method.init=1) {
if(mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
if(!is.Numeric(lower, posit=TRUE, allow=1))
- stop("bad input for argument \"lower\"")
+ stop("bad input for argument 'lower'")
if(!is.Numeric(upper, posit=TRUE, allow=1))
- stop("bad input for argument \"upper\"")
+ stop("bad input for argument 'upper'")
if(lower >= upper)
stop("lower < upper is required")
if(length(ishape) && !is.Numeric(ishape, posit=TRUE))
- stop("bad input for argument \"ishape\"")
+ stop("bad input for argument 'ishape'")
if(!is.list(earg)) earg = list()
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
method.init > 2)
@@ -8080,12 +8185,12 @@ tpareto1 = function(lower, upper, lshape="loge", earg=list(), ishape=NULL,
stop("response must be a vector or a one-column matrix")
predictors.names = namesof("shape", .lshape, earg=.earg, tag=FALSE)
if(any(y <= .lower))
- stop(paste("the value of argument \"lower\" is too high",
- "(requires 0 < lower < min(y))"))
+ stop("the value of argument 'lower' is too high ",
+ "(requires '0 < lower < min(y)')")
extra$lower = .lower
if(any(y >= .upper))
- stop(paste("the value of argument \"upper\" is too low",
- "(requires max(y) < upper)"))
+ stop("the value of argument 'upper' is too low ",
+ "(requires 'max(y) < upper')")
extra$upper = .upper
if(!length(etastart)) {
shape.init = if(is.Numeric( .ishape)) 0 * y + .ishape else
@@ -8146,49 +8251,7 @@ tpareto1 = function(lower, upper, lshape="loge", earg=list(), ishape=NULL,
}), list( .lshape=lshape, .earg=earg, .lower=lower, .upper=upper ))))
}
-dtpareto = function(x, lower, upper, shape) {
- if(!is.Numeric(x)) stop("bad input for argument \"x\"")
- if(!is.Numeric(lower, pos=TRUE)) stop("argument \"lower\" must be positive")
- if(!is.Numeric(upper, pos=TRUE)) stop("argument \"upper\" must be positive")
- if(!is.Numeric(shape, pos=TRUE)) stop("argument \"shape\" must be positive")
- L = max(length(x), length(lower), length(upper), length(shape))
- x = rep(x, len=L); lower = rep(lower, len=L); upper = rep(upper, len=L);
- shape= rep(shape, len=L)
- ifelse(x > lower & x < upper,
- shape * lower^shape / (x^(shape+1) * (1-(lower/upper)^shape)), 0)
-}
-ptpareto = function(q, lower, upper, shape) {
- if(!is.Numeric(q)) stop("bad input for argument \"q\"")
- if(!is.Numeric(lower, pos=TRUE)) stop("argument \"lower\" must be positive")
- if(!is.Numeric(upper, pos=TRUE)) stop("argument \"upper\" must be positive")
- if(!is.Numeric(shape, pos=TRUE)) stop("argument \"shape\" must be positive")
- L = max(length(q), length(lower), length(upper), length(shape))
- q = rep(q, len=L); lower = rep(lower, len=L); upper = rep(upper, len=L);
- shape= rep(shape, len=L)
- ans = q * 0
- ans[q > lower & q < upper] = (1-(lower/q)^shape) / (1-(lower/upper)^shape)
- ans[q >= upper] = 1
- ans
-}
-
-qtpareto = function(p, lower, upper, shape) {
- if(!is.Numeric(p, posit=TRUE)) stop("bad input for argument \"p\"")
- if(!is.Numeric(lower, pos=TRUE)) stop("argument \"lower\" must be positive")
- if(!is.Numeric(upper, pos=TRUE)) stop("argument \"upper\" must be positive")
- if(!is.Numeric(shape, pos=TRUE)) stop("argument \"shape\" must be positive")
- if(max(p) >= 1) stop("argument \"p\" must be between 0 and 1")
- lower / (1 - p*(1-(lower/upper)^shape))^(1/shape)
-}
-
-rtpareto = function(n, lower, upper, shape) {
- if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
- stop("bad input for argument \"n\"")
- if(!is.Numeric(lower, pos=TRUE)) stop("argument \"lower\" must be positive")
- if(!is.Numeric(upper, pos=TRUE)) stop("argument \"upper\" must be positive")
- if(!is.Numeric(shape, pos=TRUE)) stop("argument \"shape\" must be positive")
- lower / (1 - runif(n)*(1-(lower/upper)^shape))^(1/shape)
-}
erf = function(x)
@@ -8199,7 +8262,7 @@ erfc = function(x)
-wald <- function(link.lambda="loge", earg=list(), init.lambda=NULL)
+ wald <- function(link.lambda="loge", earg=list(), init.lambda=NULL)
{
if(mode(link.lambda) != "character" && mode(link.lambda) != "name")
link.lambda = as.character(substitute(link.lambda))
@@ -8238,8 +8301,7 @@ wald <- function(link.lambda="loge", earg=list(), init.lambda=NULL)
function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
lambda = eta2theta(eta, link=.link.lambda, earg= .earg)
if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w*(0.5 * log(lambda / (2 * pi * y^3)) -
- lambda *(y-1)^2 / (2* y )))
+ sum(w * (0.5 * log(lambda/(2*pi*y^3)) - lambda * (y-1)^2 / (2*y)))
}, list( .link.lambda=link.lambda, .earg=earg ))),
vfamily="wald",
deriv=eval(substitute(expression({
@@ -8255,7 +8317,7 @@ wald <- function(link.lambda="loge", earg=list(), init.lambda=NULL)
}
-expexp = function(lshape="loge", lscale="loge",
+ expexp = function(lshape="loge", lscale="loge",
eshape=list(), escale=list(),
ishape=1.1, iscale=NULL, # ishape cannot be 1
tolerance = 1.0e-6,
@@ -8266,13 +8328,13 @@ expexp = function(lshape="loge", lscale="loge",
if(mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.Numeric(tolerance, posit=TRUE, allow=1) || tolerance>1.0e-2)
- stop("bad input for argument \"tolerance\"")
+ stop("bad input for argument 'tolerance'")
if(!is.Numeric(ishape, posit=TRUE))
- stop("bad input for argument \"ishape\"")
+ stop("bad input for argument 'ishape'")
if(length(iscale) && !is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument \"iscale\"")
+ stop("bad input for argument 'iscale'")
ishape[ishape==1] = 1.1 # Fails in @deriv
if(!is.list(escale)) escale = list()
if(!is.list(eshape)) eshape = list()
@@ -8294,7 +8356,7 @@ expexp = function(lshape="loge", lscale="loge",
namesof("scale", .lscale, earg=.escale, short=TRUE))
if(!length(etastart)) {
shape.init = if(!is.Numeric( .ishape, posit=TRUE))
- stop("argument \"ishape\" must be positive") else
+ stop("argument 'ishape' must be positive") else
rep(.ishape, len=n)
scale.init = if(length( .iscale)) rep(.iscale, len=n) else
(digamma(shape.init+1) - digamma(1)) / (y+1/8)
@@ -8379,7 +8441,7 @@ expexp = function(lshape="loge", lscale="loge",
-expexp1 = function(lscale="loge",
+ expexp1 = function(lscale="loge",
escale=list(),
iscale=NULL,
ishape=1) {
@@ -8387,7 +8449,7 @@ expexp1 = function(lscale="loge",
lscale = as.character(substitute(lscale))
if(!is.list(escale)) escale = list()
if(length(iscale) && !is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument \"iscale\"")
+ stop("bad input for argument 'iscale'")
new("vglmff",
blurb=c("Exponentiated Exponential Distribution",
@@ -8408,7 +8470,7 @@ expexp1 = function(lscale="loge",
extra$w = w
if(!length(etastart)) {
shape.init = if(!is.Numeric( .ishape, posit=TRUE))
- stop("argument \"ishape\" must be positive") else
+ stop("argument 'ishape' must be positive") else
rep(.ishape, len=n)
scaleinit = if(length( .iscale)) rep(.iscale, len=n) else
(digamma(shape.init+1) - digamma(1)) / (y+1/8)
@@ -8477,7 +8539,8 @@ betaffqn.control <- function(save.weight=TRUE, ...)
-betaffqn = function(link="loge", earg=list(),
+if(FALSE)
+ betaffqn = function(link="loge", earg=list(),
i1=NULL, i2=NULL, trim=0.05, A=0, B=1)
{
if(mode(link) != "character" && mode(link) != "name")
@@ -8540,12 +8603,14 @@ betaffqn = function(link="loge", earg=list(),
loglikelihood=eval(substitute(
function(mu, y, w, residuals= FALSE, eta, extra=NULL){
shapes = eta2theta(eta, .link, earg= .earg)
- temp = if(is.R()) lbeta(shapes[,1], shapes[,2]) else
- lgamma(shapes[,1]) + lgamma(shapes[,2]) -
- lgamma(shapes[,1]+shapes[,2])
- if(residuals) stop("loglikelihood residuals not implemented yet") else
+ temp = lbeta(shapes[,1], shapes[,2])
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+
+
+
sum(w * ((shapes[,1]-1)*log(y-.A) + (shapes[,2]-1)*log(.B-y) - temp -
(shapes[,1]+shapes[,2]-1)*log(.B-.A )))
+ }
}, list( .link=link, .earg=earg, .A=A, .B=B ))),
vfamily="betaffqn",
deriv=eval(substitute(expression({
@@ -8578,7 +8643,9 @@ betaffqn = function(link="loge", earg=list(),
-logistic2 = function(llocation="identity",
+
+
+ logistic2 = function(llocation="identity",
lscale="loge",
elocation=list(),
escale=list(),
@@ -8589,11 +8656,11 @@ logistic2 = function(llocation="identity",
if(mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 2) stop("argument \"method.init\" must be 1 or 2")
+ method.init > 2) stop("argument 'method.init' must be 1 or 2")
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(length(iscale) && !is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument \"iscale\"")
+ stop("bad input for argument 'iscale'")
if(!is.list(elocation)) elocation = list()
if(!is.list(escale)) escale = list()
@@ -8647,9 +8714,10 @@ logistic2 = function(llocation="identity",
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
location = eta2theta(eta[,1], .llocation, earg= .elocation)
Scale = eta2theta(eta[,2], .lscale, earg= .escale)
- zedd = (y-location) / Scale
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (-zedd - 2 * log1p(exp(-zedd)) - log(Scale )))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dlogis(x=y, location = location,
+ scale = Scale, log = TRUE))
+ }
}, list( .llocation=llocation, .lscale=lscale,
.elocation=elocation, .escale=escale ))),
vfamily=c("logistic2"),
@@ -8682,7 +8750,7 @@ logistic2 = function(llocation="identity",
-alaplace2 = function(tau = NULL,
+ alaplace2 = function(tau = NULL,
llocation="identity", lscale="loge",
elocation=list(), escale=list(),
ilocation=NULL, iscale=NULL,
@@ -8693,7 +8761,7 @@ alaplace2 = function(tau = NULL,
method.init=1, zero="(1 + M/2):M") {
if(!is.Numeric(kappa, posit=TRUE))
- stop("bad input for argument \"kappa\"")
+ stop("bad input for argument 'kappa'")
if(length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
stop("arguments 'kappa' and 'tau' do not match")
if(mode(llocation) != "character" && mode(llocation) != "name")
@@ -8701,23 +8769,23 @@ alaplace2 = function(tau = NULL,
if(mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 4) stop("argument \"method.init\" must be 1, 2 or ... 4")
+ method.init > 4) stop("argument 'method.init' must be 1, 2 or ... 4")
if(length(iscale) && !is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument \"iscale\"")
+ stop("bad input for argument 'iscale'")
if(!is.list(elocation)) elocation = list()
if(!is.list(escale)) escale = list()
if(!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
- shrinkage.init > 1) stop("bad input for argument \"shrinkage.init\"")
+ shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
if(length(zero) &&
!(is.Numeric(zero, integer=TRUE, posit=TRUE) || is.character(zero )))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.logical(sameScale) || length(sameScale) != 1)
- stop("bad input for argument \"sameScale\"")
+ stop("bad input for argument 'sameScale'")
if(!is.logical(parallelLocation) || length(parallelLocation) != 1)
- stop("bad input for argument \"parallelLocation\"")
+ stop("bad input for argument 'parallelLocation'")
fittedMean = FALSE
if(!is.logical(fittedMean) || length(fittedMean) != 1)
- stop("bad input for argument \"fittedMean\"")
+ stop("bad input for argument 'fittedMean'")
new("vglmff",
blurb=c("Two-parameter asymmetric Laplace distribution\n\n",
@@ -8842,10 +8910,10 @@ alaplace2 = function(tau = NULL,
location = eta2theta(eta[,1:(extra$M/2),drop=FALSE],
.llocation, earg= .elocation)
Scale = eta2theta(eta[,(1+extra$M/2):extra$M], .lscale, earg= .escale)
- zedd = ifelse(ymat >= location, kappamat, 1/kappamat) * sqrt(2) *
- abs(ymat-location) / Scale
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w*(-zedd - log(Scale) - log(2)/2 + log(kappa) - log1p(kappamat^2)))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dalap(x=c(ymat), location=c(location),
+ scale=c(Scale), kappa=c(kappamat), log=TRUE))
+ }
}, list( .elocation=elocation, .llocation=llocation,
.escale=escale, .lscale=lscale,
.kappa=kappa ))),
@@ -8887,7 +8955,7 @@ alaplace1.control <- function(maxit=300, ...)
list(maxit=maxit)
}
-alaplace1 = function(tau = NULL,
+ alaplace1 = function(tau = NULL,
llocation="identity",
elocation=list(),
ilocation=NULL,
@@ -8899,26 +8967,26 @@ alaplace1 = function(tau = NULL,
if(!is.Numeric(kappa, posit=TRUE))
- stop("bad input for argument \"kappa\"")
+ stop("bad input for argument 'kappa'")
if(length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
stop("arguments 'kappa' and 'tau' do not match")
if(mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 4) stop("argument \"method.init\" must be 1, 2 or ... 4")
+ method.init > 4) stop("argument 'method.init' must be 1, 2 or ... 4")
if(!is.list(elocation)) elocation = list()
if(!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
- shrinkage.init > 1) stop("bad input for argument \"shrinkage.init\"")
+ shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
if(length(zero) &&
!(is.Numeric(zero, integer=TRUE, posit=TRUE) || is.character(zero )))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.Numeric(Scale.arg, posit=TRUE))
- stop("bad input for argument \"Scale.arg\"")
+ stop("bad input for argument 'Scale.arg'")
if(!is.logical(parallelLocation) || length(parallelLocation) != 1)
- stop("bad input for argument \"parallelLocation\"")
+ stop("bad input for argument 'parallelLocation'")
fittedMean = FALSE
if(!is.logical(fittedMean) || length(fittedMean) != 1)
- stop("bad input for argument \"fittedMean\"")
+ stop("bad input for argument 'fittedMean'")
new("vglmff",
blurb=c("One-parameter asymmetric Laplace distribution\n\n",
@@ -9010,10 +9078,10 @@ alaplace1 = function(tau = NULL,
kappamat = matrix(extra$kappa, extra$n, extra$M, byrow=TRUE)
location = eta2theta(eta, .llocation, earg= .elocation)
Scale = matrix(extra$Scale, extra$n, extra$M, byrow=TRUE)
- zedd = ifelse(ymat >= location, kappamat, 1/kappamat) * sqrt(2) *
- abs(ymat-location) / Scale
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w*(-zedd - log(Scale) - log(2)/2 + log(kappa) - log1p(kappamat^2)))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dalap(x=c(ymat), location=c(location),
+ scale=c(Scale), kappa=c(kappamat), log=TRUE))
+ }
}, list( .elocation=elocation, .llocation=llocation,
.Scale.arg=Scale.arg, .kappa=kappa ))),
vfamily=c("alaplace1"),
@@ -9025,8 +9093,6 @@ alaplace1 = function(tau = NULL,
zedd = abs(ymat-location) / Scale
dl.dlocation = ifelse(ymat >= location, kappamat, 1/kappamat) *
sqrt(2) * sign(ymat-location) / Scale
- dl.dscale = ifelse(ymat >= location, kappamat, 1/kappamat) *
- sqrt(2) * zedd / Scale - 1 / Scale
dlocation.deta = dtheta.deta(location, .llocation, earg= .elocation)
w * cbind(dl.dlocation * dlocation.deta)
}), list( .Scale.arg=Scale.arg, .elocation=elocation,
@@ -9044,7 +9110,7 @@ alaplace1 = function(tau = NULL,
-alaplace3 = function(llocation="identity", lscale="loge", lkappa="loge",
+ alaplace3 = function(llocation="identity", lscale="loge", lkappa="loge",
elocation=list(), escale=list(), ekappa=list(),
ilocation=NULL, iscale=NULL, ikappa=1.0,
method.init=1, zero=2:3) {
@@ -9055,11 +9121,11 @@ alaplace3 = function(llocation="identity", lscale="loge", lkappa="loge",
if(mode(lkappa) != "character" && mode(lkappa) != "name")
lkappa = as.character(substitute(lkappa))
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 2) stop("argument \"method.init\" must be 1 or 2")
+ method.init > 2) stop("argument 'method.init' must be 1 or 2")
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(length(iscale) && !is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument \"iscale\"")
+ stop("bad input for argument 'iscale'")
if(!is.list(elocation)) elocation = list()
if(!is.list(escale)) escale = list()
if(!is.list(ekappa)) ekappa = list()
@@ -9126,11 +9192,11 @@ alaplace3 = function(llocation="identity", lscale="loge", lkappa="loge",
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
location = eta2theta(eta[,1], .llocation, earg= .elocation)
Scale = eta2theta(eta[,2], .lscale, earg= .escale)
- kappa = eta2theta(eta[,3], .lkappa, earg= .ekappa)
- zedd = ifelse(y >= location, kappa, 1/kappa) * sqrt(2) *
- abs(y-location) / Scale
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (-zedd - log(Scale) - log(2)/2 + log(kappa) - log1p(kappa^2)))
+ kappamat = eta2theta(eta[,3], .lkappa, earg= .ekappa)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dalap(x=y, location=location,
+ scale=Scale, kappa=kappamat, log=TRUE))
+ }
}, list( .elocation=elocation, .llocation=llocation,
.escale=escale, .lscale=lscale,
.ekappa=ekappa, .lkappa=lkappa ))),
@@ -9175,9 +9241,43 @@ alaplace3 = function(llocation="identity", lscale="loge", lkappa="loge",
+dlaplace = function(x, location=0, scale=1, log = FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ logdensity = (-abs(x-location)/scale) - log(2*scale)
+ if(log.arg) logdensity else exp(logdensity)
+}
+
+plaplace = function(q, location=0, scale=1) {
+ if(!is.Numeric(scale, posit=TRUE))
+ stop("argument 'scale' must be positive")
+ zedd = (q-location) / scale
+ L = max(length(q), length(location), length(scale))
+ q = rep(q, len=L); location = rep(location, len=L); scale= rep(scale, len=L)
+ ifelse(q < location, 0.5*exp(zedd), 1-0.5*exp(-zedd))
+}
+
+qlaplace = function(p, location=0, scale=1) {
+ if(!is.Numeric(scale, posit=TRUE))
+ stop("argument 'scale' must be positive")
+ L = max(length(p), length(location), length(scale))
+ p = rep(p, len=L); location = rep(location, len=L); scale= rep(scale, len=L)
+ location - sign(p-0.5) * scale * log(2*ifelse(p < 0.5, p, 1-p))
+}
+
+rlaplace = function(n, location=0, scale=1) {
+ if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ stop("bad input for argument 'n'")
+ if(!is.Numeric(scale, posit=TRUE)) stop("'scale' must be positive")
+ location = rep(location, len=n); scale= rep(scale, len=n)
+ r = runif(n)
+ location - sign(r-0.5) * scale * log(2*ifelse(r < 0.5, r, 1-r))
+}
-laplace = function(llocation="identity", lscale="loge",
+ laplace = function(llocation="identity", lscale="loge",
elocation=list(), escale=list(),
ilocation=NULL, iscale=NULL,
method.init=1, zero=2) {
@@ -9186,13 +9286,13 @@ laplace = function(llocation="identity", lscale="loge",
if(mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 3) stop("argument \"method.init\" must be 1 or 2 or 3")
+ method.init > 3) stop("argument 'method.init' must be 1 or 2 or 3")
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(elocation)) elocation = list()
if(!is.list(escale)) escale = list()
if(length(iscale) && !is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument \"iscale\"")
+ stop("bad input for argument 'iscale'")
new("vglmff",
blurb=c("Two-parameter Laplace distribution\n\n",
@@ -9248,9 +9348,9 @@ laplace = function(llocation="identity", lscale="loge",
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
location = eta2theta(eta[,1], .llocation, earg= .elocation)
Scale = eta2theta(eta[,2], .lscale, earg= .escale)
- zedd = abs(y-location) / Scale
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (-zedd - log(Scale) - log(2)))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dlaplace(x=y, location=location, scale=Scale, log=TRUE))
+ }
}, list( .escale=escale, .lscale=lscale,
.elocation=elocation, .llocation=llocation ))),
vfamily=c("laplace"),
@@ -9275,39 +9375,6 @@ laplace = function(llocation="identity", lscale="loge",
.elocation=elocation, .llocation=llocation ))))
}
-dlaplace = function(x, location=0, scale=1) {
- if(!is.Numeric(scale, posit=TRUE))
- stop("argument \"scale\" must be positive")
- exp(-abs(x-location)/scale) / (2*scale)
-}
-
-plaplace = function(q, location=0, scale=1) {
- if(!is.Numeric(scale, posit=TRUE))
- stop("argument \"scale\" must be positive")
- zedd = (q-location) / scale
- L = max(length(q), length(location), length(scale))
- q = rep(q, len=L); location = rep(location, len=L); scale= rep(scale, len=L)
- ifelse(q < location, 0.5*exp(zedd), 1-0.5*exp(-zedd))
-}
-
-qlaplace = function(p, location=0, scale=1) {
- if(!is.Numeric(scale, posit=TRUE))
- stop("argument \"scale\" must be positive")
- L = max(length(p), length(location), length(scale))
- p = rep(p, len=L); location = rep(location, len=L); scale= rep(scale, len=L)
- location - sign(p-0.5) * scale * log(2*ifelse(p < 0.5, p, 1-p))
-}
-
-rlaplace = function(n, location=0, scale=1) {
- if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
- stop("bad input for argument \"n\"")
- if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
- location = rep(location, len=n); scale= rep(scale, len=n)
- r = runif(n)
- location - sign(r-0.5) * scale * log(2*ifelse(r < 0.5, r, 1-r))
-}
-
-
fff.control <- function(save.weight=TRUE, ...)
@@ -9315,15 +9382,15 @@ fff.control <- function(save.weight=TRUE, ...)
list(save.weight=save.weight)
}
-fff = function(link="loge", earg=list(),
+ fff = function(link="loge", earg=list(),
idf1=NULL, idf2=NULL,
method.init=1, zero=NULL) {
if(mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 2) stop("argument \"method.init\" must be 1 or 2")
+ method.init > 2) stop("argument 'method.init' must be 1 or 2")
if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"zero\"")
+ stop("bad input for argument 'zero'")
if(!is.list(earg)) earg = list()
new("vglmff",
@@ -9376,10 +9443,9 @@ fff = function(link="loge", earg=list(),
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
df1 = eta2theta(eta[,1], .link, earg= .earg)
df2 = eta2theta(eta[,2], .link, earg= .earg)
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (lgamma(0.5*(df1+df2)) + 0.5*df1*log(df1/df2) +
- 0.5*(df1-2) * log(y) - lgamma(df1/2) - lgamma(df2/2) -
- 0.5*(df1+df2)*log1p(df1*y/df2)))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * df(x=y, df1=df1, df2=df2, log=TRUE))
+ }
}, list( .link=link, .earg=earg ))),
vfamily=c("fff"),
deriv=eval(substitute(expression({
@@ -9422,17 +9488,18 @@ fff = function(link="loge", earg=list(),
-hyperg = function(N=NULL, D=NULL,
- lprob="logit", earg=list(),
- iprob=NULL) {
+ hyperg = function(N=NULL, D=NULL,
+ lprob="logit", earg=list(),
+ iprob=NULL) {
+ print("hi 20090409")
if(mode(lprob) != "character" && mode(lprob) != "name")
lprob = as.character(substitute(lprob))
inputN = is.Numeric(N, positive=TRUE)
inputD = is.Numeric(D, positive=TRUE)
if(inputD && inputN)
- stop("only one of \"N\" and \"D\" is to be inputted")
+ stop("only one of 'N' and 'D' is to be inputted")
if(!inputD && !inputN)
- stop("one of \"N\" and \"D\" needs to be inputted")
+ stop("one of 'N' and 'D' needs to be inputted")
if(!is.list(earg)) earg = list()
new("vglmff",
@@ -9471,6 +9538,7 @@ hyperg = function(N=NULL, D=NULL,
if(!length(etastart)) {
init.prob = if(length( .iprob)) rep( .iprob, len=n) else mustart
etastart = matrix(init.prob, n, ncol(cbind(y )))
+
}
}), list( .lprob=lprob, .earg=earg, .N=N, .D=D, .iprob=iprob ))),
inverse=eval(substitute(function(eta, extra=NULL) {
@@ -9492,13 +9560,19 @@ hyperg = function(N=NULL, D=NULL,
prob = mu
yvec = w * y
if(residuals) stop("loglikelihood residuals not implemented yet") else {
- if(extra$Nunknown) {
- tmp12 = Dvec * (1-prob) / prob
- sum(lgamma(1+tmp12) + lgamma(1+Dvec/prob-w) -
- lgamma(1+tmp12-w+yvec) - lgamma(1+Dvec/prob))
- } else
- sum(lgamma(1+N*prob) + lgamma(1+N*(1-prob)) -
- lgamma(1+N*prob-yvec) - lgamma(1+N*(1-prob) -w + yvec))
+ if(extra$Nunknown) {
+ tmp12 = Dvec * (1-prob) / prob
+
+
+ sum(lgamma(1+tmp12) + lgamma(1+Dvec/prob-w) -
+ lgamma(1+tmp12-w+yvec) - lgamma(1+Dvec/prob))
+
+ } else {
+
+
+ sum(lgamma(1+N*prob) + lgamma(1+N*(1-prob)) -
+ lgamma(1+N*prob-yvec) - lgamma(1+N*(1-prob) -w + yvec))
+ }
}
}, list( .lprob=lprob, .earg=earg ))),
vfamily=c("hyperg"),
@@ -9548,23 +9622,26 @@ hyperg = function(N=NULL, D=NULL,
-dbenini = function(x, shape, y0) {
- if(!is.Numeric(x)) stop("bad input for argument \"x\"")
- if(!is.Numeric(shape, posit=TRUE)) stop("bad input for argument \"shape\"")
- if(!is.Numeric(y0, posit=TRUE)) stop("bad input for argument \"y0\"")
+dbenini = function(x, shape, y0, log=FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
N = max(length(x), length(shape), length(y0))
x = rep(x, len=N); shape = rep(shape, len=N); y0 = rep(y0, len=N);
- ok = x > y0
- temp = log(x[ok]/y0[ok])
- ans = y0 * 0
- ans[ok] = 2 * shape[ok] * exp(-shape[ok] * temp^2) * temp / x[ok]
- ans
+
+ logdensity = rep(log(0), len=N)
+ xok = (x > y0)
+ tempxok = log(x[xok]/y0[xok])
+ logdensity[xok] = log(2*shape[xok]) - shape[xok] * tempxok^2 +
+ log(tempxok) - log(x[xok])
+ if(log.arg) logdensity else exp(logdensity)
}
pbenini = function(q, shape, y0) {
- if(!is.Numeric(q)) stop("bad input for argument \"q\"")
- if(!is.Numeric(shape, posit=TRUE)) stop("bad input for argument \"shape\"")
- if(!is.Numeric(y0, posit=TRUE)) stop("bad input for argument \"y0\"")
+ if(!is.Numeric(q)) stop("bad input for argument 'q'")
+ if(!is.Numeric(shape, posit=TRUE)) stop("bad input for argument 'shape'")
+ if(!is.Numeric(y0, posit=TRUE)) stop("bad input for argument 'y0'")
N = max(length(q), length(shape), length(y0))
q = rep(q, len=N); shape = rep(shape, len=N); y0 = rep(y0, len=N);
ans = y0 * 0
@@ -9575,29 +9652,29 @@ pbenini = function(q, shape, y0) {
qbenini = function(p, shape, y0) {
if(!is.Numeric(p, posit=TRUE) || any(p >= 1))
- stop("bad input for argument \"p\"")
- if(!is.Numeric(shape, posit=TRUE)) stop("bad input for argument \"shape\"")
- if(!is.Numeric(y0, posit=TRUE)) stop("bad input for argument \"y0\"")
+ stop("bad input for argument 'p'")
+ if(!is.Numeric(shape, posit=TRUE)) stop("bad input for argument 'shape'")
+ if(!is.Numeric(y0, posit=TRUE)) stop("bad input for argument 'y0'")
y0 * exp(sqrt(-log1p(-p) / shape))
}
rbenini = function(n, shape, y0) {
if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
- stop("bad input for argument \"n\"")
- if(!is.Numeric(shape, posit=TRUE)) stop("bad input for argument \"shape\"")
- if(!is.Numeric(y0, posit=TRUE)) stop("bad input for argument \"y0\"")
+ stop("bad input for argument 'n'")
+ if(!is.Numeric(shape, posit=TRUE)) stop("bad input for argument 'shape'")
+ if(!is.Numeric(y0, posit=TRUE)) stop("bad input for argument 'y0'")
y0 * exp(sqrt(-log(runif(n)) / shape))
}
-benini = function(y0=stop("argument \"y0\" must be specified"),
- lshape="loge", earg=list(),
- ishape=NULL, method.init=1) {
+ benini = function(y0=stop("argument 'y0' must be specified"),
+ lshape="loge", earg=list(),
+ ishape=NULL, method.init=1) {
if(mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 2) stop("argument \"method.init\" must be 1 or 2")
+ method.init > 2) stop("argument 'method.init' must be 1 or 2")
if(!is.Numeric(y0, allow=1, posit=TRUE))
- stop("bad input for argument \"y0\"")
+ stop("bad input for argument 'y0'")
if(!is.list(earg)) earg = list()
new("vglmff",
@@ -9610,7 +9687,7 @@ benini = function(y0=stop("argument \"y0\" must be specified"),
stop("response must be a vector or a one-column matrix")
predictors.names = c(namesof("shape", .lshape, earg=.earg, tag=FALSE))
extra$y0 = .y0
- if(min(y) <= extra$y0) stop("argument \"y0\" is too large")
+ if(min(y) <= extra$y0) stop("argument 'y0' is too large")
if(!length(etastart)) {
probs = (1:3) / 4
qofy= quantile(rep(y, times=w), probs=probs) # fails if w != integer
@@ -9640,8 +9717,9 @@ benini = function(y0=stop("argument \"y0\" must be specified"),
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
shape = eta2theta(eta, .lshape, earg= .earg)
y0 = extra$y0
- if(residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * (log(shape) - log(y) - shape*(log(y/y0))^2 + log(log(y/y0 ))))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dbenini(x=y, shape=shape, y0=y0, log=TRUE))
+ }
}, list( .lshape=lshape, .earg=earg ))),
vfamily=c("benini"),
deriv=eval(substitute(expression({
@@ -9674,23 +9752,22 @@ dpolono = function(x, meanlog=0, sdlog=1, bigx=Inf, ...) {
ans = x * 0
integrand = function(t, x, meanlog, sdlog)
exp(t*x - exp(t) - 0.5*((t-meanlog)/sdlog)^2)
- for(i in 1:N) {
- if(x[i] == round(x[i]) && x[i] >= 0) {
- if(x[i] >= bigx) {
- zedd = (log(x[i])-meanlog[i]) / sdlog[i]
- temp = 1 + (zedd^2 + log(x[i]) - meanlog[i] -
- 1) / (2*x[i]*(sdlog[i])^2)
- ans[i] = temp * exp(-0.5*zedd^2)/(sqrt(2*pi) * sdlog[i] * x[i])
+ for(ii in 1:N) {
+ if(x[ii] == round(x[ii]) && x[ii] >= 0) {
+ if(x[ii] >= bigx) {
+ zedd = (log(x[ii])-meanlog[ii]) / sdlog[ii]
+ temp = 1 + (zedd^2 + log(x[ii]) - meanlog[ii] -
+ 1) / (2*x[ii]*(sdlog[ii])^2)
+ ans[ii] = temp * exp(-0.5*zedd^2)/(sqrt(2*pi)*sdlog[ii] * x[ii])
} else {
- temp = integrate(f=integrand, lower=-Inf, upper=Inf,
- x=x[i], meanlog=meanlog[i], sdlog=sdlog[i], ...)
+ temp = integrate(f=integrand, lower=-Inf, upper=Inf, x=x[ii],
+ meanlog=meanlog[ii], sdlog=sdlog[ii], ...)
if(temp$message == "OK") {
- ans[i] = temp$value / (sqrt(2*pi) * sdlog[i] *
- exp(lgamma(x[i]+1)))
+ ans[ii] = temp$value / (sqrt(2*pi) * sdlog[ii] *
+ exp(lgamma(x[ii]+1)))
} else {
- warning(paste("could not integrate",
- " (numerically) observation", i))
- ans[i] = NA
+ warning("could not integrate (numerically) observation ",ii)
+ ans[ii] = NA
}
}
}
@@ -9700,12 +9777,7 @@ dpolono = function(x, meanlog=0, sdlog=1, bigx=Inf, ...) {
rpolono = function(n, meanlog=0, sdlog=1) {
- if(!is.Numeric(n, integ=TRUE,allow=1)) stop("bad input for argument \"n\"")
- if(!is.Numeric(meanlog)) stop("bad input for argument \"meanlog\"")
- if(!is.Numeric(sdlog)) stop("bad input for argument \"sdlog\"")
- meanlog = rep(meanlog, len=n); sdlog = rep(sdlog, len=n);
- lambda = if(is.R()) rlnorm(n=n, meanlog=meanlog, sdlog=sdlog) else
- stop("suppressing a warning message")
+ lambda = rlnorm(n=n, meanlog=meanlog, sdlog=sdlog)
rpois(n=n, lambda=lambda)
}
@@ -9717,32 +9789,36 @@ rpolono = function(n, meanlog=0, sdlog=1) {
-dtriangle = function(x, theta, lower=0, upper=1) {
- if(!is.Numeric(x)) stop("bad input for argument \"x\"")
- if(!is.Numeric(theta)) stop("bad input for argument \"theta\"")
- if(!is.Numeric(lower)) stop("bad input for argument \"lower\"")
- if(!is.Numeric(upper)) stop("bad input for argument \"upper\"")
- if(!all(lower < theta & theta < upper))
- stop("lower < theta < upper values are required")
+
+
+dtriangle = function(x, theta, lower=0, upper=1, log = FALSE) {
+ if(!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
N = max(length(x), length(theta), length(lower), length(upper))
x = rep(x, len=N); lower = rep(lower, len=N); upper = rep(upper, len=N);
theta = rep(theta, len=N)
- ans = x * 0
- neg = (lower <= x) & (x <= theta)
- pos = (theta <= x) & (x <= upper)
+
denom1 = ((upper-lower)*(theta-lower))
denom2 = ((upper-lower)*(upper-theta))
- ans[neg] = pmax(2 * (x-lower) / denom1, 0)[neg]
- ans[pos] = pmax(2 * (upper-x) / denom2, 0)[pos]
- ans
+ logdensity = rep(log(0), len=N)
+ xok.neg = (lower < x) & (x <= theta)
+ xok.pos = (theta <= x) & (x < upper)
+ logdensity[xok.neg] = log(2 * (x[xok.neg]-lower[xok.neg]) / denom1[xok.neg])
+ logdensity[xok.pos] = log(2 * (upper[xok.pos]-x[xok.pos]) / denom2[xok.pos])
+ logdensity[lower >= upper] = NaN
+ logdensity[lower > theta] = NaN
+ logdensity[upper < theta] = NaN
+ if(log.arg) logdensity else exp(logdensity)
}
rtriangle = function(n, theta, lower=0, upper=1) {
- if(!is.Numeric(n, integ=TRUE,allow=1)) stop("bad input for argument \"n\"")
- if(!is.Numeric(theta)) stop("bad input for argument \"theta\"")
- if(!is.Numeric(lower)) stop("bad input for argument \"lower\"")
- if(!is.Numeric(upper)) stop("bad input for argument \"upper\"")
+ if(!is.Numeric(n, integ=TRUE,allow=1)) stop("bad input for argument 'n'")
+ if(!is.Numeric(theta)) stop("bad input for argument 'theta'")
+ if(!is.Numeric(lower)) stop("bad input for argument 'lower'")
+ if(!is.Numeric(upper)) stop("bad input for argument 'upper'")
if(!all(lower < theta & theta < upper))
stop("lower < theta < upper values are required")
N = n
@@ -9757,10 +9833,10 @@ rtriangle = function(n, theta, lower=0, upper=1) {
qtriangle = function(p, theta, lower=0, upper=1) {
- if(!is.Numeric(p, posit=TRUE)) stop("bad input for argument \"p\"")
- if(!is.Numeric(theta)) stop("bad input for argument \"theta\"")
- if(!is.Numeric(lower)) stop("bad input for argument \"lower\"")
- if(!is.Numeric(upper)) stop("bad input for argument \"upper\"")
+ if(!is.Numeric(p, posit=TRUE)) stop("bad input for argument 'p'")
+ if(!is.Numeric(theta)) stop("bad input for argument 'theta'")
+ if(!is.Numeric(lower)) stop("bad input for argument 'lower'")
+ if(!is.Numeric(upper)) stop("bad input for argument 'upper'")
if(!all(lower < theta & theta < upper))
stop("lower < theta < upper values are required")
@@ -9791,10 +9867,10 @@ qtriangle = function(p, theta, lower=0, upper=1) {
ptriangle = function(q, theta, lower=0, upper=1) {
- if(!is.Numeric(q)) stop("bad input for argument \"q\"")
- if(!is.Numeric(theta)) stop("bad input for argument \"theta\"")
- if(!is.Numeric(lower)) stop("bad input for argument \"lower\"")
- if(!is.Numeric(upper)) stop("bad input for argument \"upper\"")
+ if(!is.Numeric(q)) stop("bad input for argument 'q'")
+ if(!is.Numeric(theta)) stop("bad input for argument 'theta'")
+ if(!is.Numeric(lower)) stop("bad input for argument 'lower'")
+ if(!is.Numeric(upper)) stop("bad input for argument 'upper'")
if(!all(lower < theta & theta < upper))
stop("lower < theta < upper values are required")
@@ -9817,12 +9893,12 @@ ptriangle = function(q, theta, lower=0, upper=1) {
-triangle = function(lower=0, upper=1,
+ triangle = function(lower=0, upper=1,
link="elogit", earg=if(link=="elogit")
list(min = lower, max = upper) else list(), itheta=NULL)
{
- if(!is.Numeric(lower)) stop("bad input for argument \"lower\"")
- if(!is.Numeric(upper)) stop("bad input for argument \"upper\"")
+ if(!is.Numeric(lower)) stop("bad input for argument 'lower'")
+ if(!is.Numeric(upper)) stop("bad input for argument 'upper'")
if(!all(lower < upper))
stop("lower < upper values are required")
if(length(itheta) && !is.Numeric(itheta))
@@ -9877,11 +9953,8 @@ triangle = function(lower=0, upper=1,
lower = extra$lower
upper = extra$upper
if(residuals) stop("loglikelihood residuals not implemented yet") else {
- pos = y >= Theta
- neg = y < Theta
- sum(w * (log(2) - log(upper-lower))) +
- sum(w[neg]*(log(y[neg]-lower[neg]) - log(Theta[neg]-lower[neg]))) +
- sum(w[pos]*(log(upper[pos]-y[pos]) - log(upper[pos]-Theta[pos])))
+ sum(w * dtriangle(x=y, theta=Theta, lower=lower,
+ upper=upper, log=TRUE))
}
}, list( .link=link, .earg=earg ))),
vfamily=c("triangle"),
@@ -9906,3 +9979,679 @@ triangle = function(lower=0, upper=1,
+
+
+
+
+adjust0.loglaplace1 = function(ymat, y, w, rep0) {
+ rangey0 = range(y[y > 0])
+ ymat[ymat <= 0] = min(rangey0[1] / 2, rep0)
+ ymat
+}
+
+
+loglaplace1.control <- function(maxit=300, ...)
+{
+ list(maxit=maxit)
+}
+
+ loglaplace1 = function(tau = NULL,
+ llocation="loge",
+ elocation=list(),
+ ilocation=NULL,
+ kappa = sqrt(tau/(1-tau)),
+ Scale.arg=1,
+ shrinkage.init=0.95, parallelLocation=FALSE, digt=4,
+ dfmu.init = 3,
+ rep0 = 0.5, # 0.0001,
+ minquantile = 0, maxquantile = Inf,
+ method.init=1, zero=NULL) {
+
+ print("loglaplace1 20090406 a")
+ if(length(minquantile) != 1)
+ stop("bad input for argument 'minquantile'")
+ if(length(maxquantile) != 1)
+ stop("bad input for argument 'maxquantile'")
+ if(!is.Numeric(rep0, posit=TRUE, allow=1) || rep0 > 1)
+ stop("bad input for argument 'rep0'")
+ if(!is.Numeric(kappa, posit=TRUE))
+ stop("bad input for argument 'kappa'")
+ if(length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
+ stop("arguments 'kappa' and 'tau' do not match")
+ if(mode(llocation) != "character" && mode(llocation) != "name")
+ llocation = as.character(substitute(llocation))
+ if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ method.init > 4) stop("argument 'method.init' must be 1, 2 or ... 4")
+ if(!is.list(elocation)) elocation = list()
+ if(!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
+ shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
+ if(length(zero) &&
+ !(is.Numeric(zero, integer=TRUE, posit=TRUE) || is.character(zero )))
+ stop("bad input for argument 'zero'")
+ if(!is.Numeric(Scale.arg, posit=TRUE))
+ stop("bad input for argument 'Scale.arg'")
+ if(!is.logical(parallelLocation) || length(parallelLocation) != 1)
+ stop("bad input for argument 'parallelLocation'")
+ fittedMean = FALSE
+ if(!is.logical(fittedMean) || length(fittedMean) != 1)
+ stop("bad input for argument 'fittedMean'")
+
+ mystring0 = namesof("location", llocation, earg=elocation)
+ mychars = substring(mystring0, fi=1:nchar(mystring0), la=1:nchar(mystring0))
+ mychars[nchar(mystring0)] = ", inverse=TRUE)"
+ mystring1 = paste(mychars, collapse="")
+
+ new("vglmff",
+ blurb=c("One-parameter ",
+ if(llocation=="loge") "log-Laplace" else c(llocation, "-Laplace"),
+ " distribution\n\n",
+ "Links: ", mystring0, "\n", "\n",
+ "Quantiles: ", mystring1),
+ constraints=eval(substitute(expression({
+ constraints = cm.vgam(matrix(1,M,1), x, .parallelLocation,
+ constraints, intercept=FALSE)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .parallelLocation=parallelLocation,
+ .Scale.arg=Scale.arg, .zero=zero ))),
+ initialize=eval(substitute(expression({
+ extra$M = M = max(length( .Scale.arg ), length( .kappa )) # Recycle
+ extra$Scale = rep( .Scale.arg, length=M)
+ extra$kappa = rep( .kappa, length=M)
+ extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
+ if(ncol(y <- cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ extra$n = n
+ extra$y.names = y.names = paste("tau=", round(extra$tau, dig=.digt),
+ sep="")
+ extra$individual = FALSE
+ predictors.names = namesof(paste("quantile(", y.names, ")", sep=""),
+ .llocation, earg=.elocation, tag=FALSE)
+
+
+ if(FALSE) {
+ if(min(y) < 0) stop("negative response values detected")
+ if((prop.0. <- weighted.mean(1*(y == 0), w)) >= min(extra$tau))
+ stop("sample proportion of 0s == ", round(prop.0., dig=4),
+ " > minimum 'tau' value. Choose larger values for 'tau'.")
+ if(.rep0 == 0.5 &&
+ (ave.tau <- (weighted.mean(1*(y <= 0), w) +
+ weighted.mean(1*(y <= 1), w))/2) >= min(extra$tau))
+ warning("the minimum 'tau' value should be greater than ",
+ round(ave.tau, dig=4))
+ }
+
+ print("extra$tau")
+ print( extra$tau )
+ if(!length(etastart)) {
+ if( .method.init == 1) {
+ location.init = quantile(rep(y, w), probs= extra$tau) + 1/16
+ } else if( .method.init == 2) {
+ location.init = weighted.mean(y, w)
+ } else if( .method.init == 3) {
+ location.init = median(y)
+ } else if( .method.init == 4) {
+ fit500 = vsmooth.spline(x=x[,min(ncol(x),2)], y=y, w=w,
+ df= .dfmu.init)
+ location.init = c(predict(fit500, x=x[,min(ncol(x),2)])$y)
+ } else {
+ use.this = weighted.mean(y, w)
+ location.init = (1- .sinit)*y + .sinit * use.this
+ }
+ location.init = if(length(.ilocation)) rep(.ilocation, len=M) else
+ rep(location.init, len=M)
+ location.init = matrix(location.init, n, M, byrow=TRUE)
+ if( .llocation == "loge")
+ location.init = abs(location.init)
+ etastart =
+ cbind(theta2eta(location.init, .llocation, earg= .elocation))
+ }
+ print("location.init[1:3,]")
+ print( location.init[1:3,] )
+ print("etastart[1:3,]")
+ print( etastart[1:3,] )
+ }), list( .method.init=method.init,
+ .dfmu.init=dfmu.init, .rep0 = rep0,
+ .sinit=shrinkage.init, .digt=digt,
+ .elocation=elocation, .Scale.arg=Scale.arg,
+ .llocation=llocation, .kappa=kappa,
+ .ilocation=ilocation ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ location.y = eta2theta(eta, .llocation, earg= .elocation)
+ if( .fittedMean) {
+ stop("Yet to do: handle 'fittedMean = TRUE'")
+ kappamat = matrix(extra$kappa, extra$n, extra$M, byrow=TRUE)
+ Scale = matrix(extra$Scale, extra$n, extra$M, byrow=TRUE)
+ location.y + Scale * (1/kappamat - kappamat)
+ } else {
+ if(length(location.y) > extra$n)
+ dimnames(location.y) = list(dimnames(eta)[[1]], extra$y.names)
+ location.y
+ }
+ location.y[location.y < .minquantile] = .minquantile
+ location.y[location.y > .maxquantile] = .maxquantile
+ location.y
+ }, list( .elocation=elocation, .llocation=llocation,
+ .minquantile = minquantile, .maxquantile = maxquantile,
+ .fittedMean=fittedMean, .Scale.arg=Scale.arg,
+ .kappa=kappa ))),
+ last=eval(substitute(expression({
+ misc$link = c(location= .llocation)
+ misc$earg = list(location= .elocation)
+ misc$expected = TRUE
+ extra$kappa = misc$kappa = .kappa
+ extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
+ extra$Scale.arg = .Scale.arg
+ misc$true.mu = .fittedMean # @fitted is not a true mu?
+ misc$rep0 = .rep0
+ misc$minquantile = .minquantile
+ misc$maxquantile = .maxquantile
+ extra$percentile = numeric(length(misc$kappa))
+ location.y = as.matrix(location.y)
+ for(ii in 1:length(misc$kappa))
+ extra$percentile[ii] = 100 * weighted.mean(y <= location.y[,ii], w)
+ }), list( .elocation=elocation, .llocation=llocation,
+ .Scale.arg=Scale.arg, .fittedMean=fittedMean,
+ .minquantile = minquantile, .maxquantile = maxquantile,
+ .rep0 = rep0, .kappa=kappa ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ kappamat = matrix(extra$kappa, extra$n, extra$M, byrow=TRUE)
+ Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow=TRUE)
+ ymat = matrix(y, extra$n, extra$M)
+
+
+ if( .llocation == "loge")
+ ymat = adjust0.loglaplace1(ymat=ymat, y=y, w=w, rep0= .rep0)
+ w.mat = theta2eta(ymat, .llocation, earg= .elocation) # e.g., logoff()
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ ALDans =
+ sum(w * dalap(x=c(w.mat), location=c(eta),
+ scale=c(Scale.w), kappa=c(kappamat), log=TRUE))
+ ALDans
+ }
+ }, list( .elocation=elocation, .llocation=llocation,
+ .rep0 = rep0,
+ .Scale.arg=Scale.arg, .kappa=kappa ))),
+ vfamily=c("loglaplace1"),
+ deriv=eval(substitute(expression({
+ ymat = matrix(y, n, M)
+ Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow=TRUE)
+ location.w = eta
+ location.y = eta2theta(location.w, .llocation, earg= .elocation)
+ kappamat = matrix(extra$kappa, n, M, byrow=TRUE)
+
+ ymat = adjust0.loglaplace1(ymat=ymat, y=y, w=w, rep0= .rep0)
+ w.mat = theta2eta(ymat, .llocation, earg= .elocation) # e.g., logit()
+ zedd = abs(w.mat-location.w) / Scale.w
+ dl.dlocation = ifelse(w.mat >= location.w, kappamat, 1/kappamat) *
+ sqrt(2) * sign(w.mat-location.w) / Scale.w
+ dlocation.deta = dtheta.deta(location.w, "identity", earg= .elocation)
+ w * cbind(dl.dlocation * dlocation.deta)
+ }), list( .Scale.arg=Scale.arg, .elocation=elocation,
+ .rep0 = rep0,
+ .llocation=llocation, .kappa=kappa ))),
+ weight=eval(substitute(expression({
+ d2l.dlocation2 = 2 / Scale.w^2
+ wz = cbind(d2l.dlocation2 * dlocation.deta^2)
+ w * wz
+ }), list( .Scale.arg=Scale.arg,
+ .elocation=elocation, .llocation=llocation ))))
+}
+
+
+
+
+
+loglaplace2.control <- function(save.weight=TRUE, ...)
+{
+ list(save.weight=save.weight)
+}
+
+ loglaplace2 = function(tau = NULL,
+ llocation="loge", lscale="loge",
+ elocation=list(), escale=list(),
+ ilocation=NULL, iscale=NULL,
+ kappa = sqrt(tau/(1-tau)),
+ shrinkage.init=0.95, parallelLocation=FALSE, digt=4,
+ sameScale=TRUE,
+ dfmu.init = 3,
+ rep0 = 0.5, nsimEIM=NULL,
+ method.init=1, zero="(1 + M/2):M") {
+ warning("it is best to use loglaplace1()")
+
+ if(length(nsimEIM) &&
+ (!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 10))
+ stop("'nsimEIM' should be an integer greater than 10")
+ if(!is.Numeric(rep0, posit=TRUE, allow=1) || rep0 > 1)
+ stop("bad input for argument 'rep0'")
+ if(!is.Numeric(kappa, posit=TRUE))
+ stop("bad input for argument 'kappa'")
+ if(length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
+ stop("arguments 'kappa' and 'tau' do not match")
+ if(mode(llocation) != "character" && mode(llocation) != "name")
+ llocation = as.character(substitute(llocation))
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ method.init > 4) stop("argument 'method.init' must be 1, 2 or ... 4")
+ if(length(iscale) && !is.Numeric(iscale, posit=TRUE))
+ stop("bad input for argument 'iscale'")
+ if(!is.list(elocation)) elocation = list()
+ if(!is.list(escale)) escale = list()
+ if(!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
+ shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
+ if(length(zero) &&
+ !(is.Numeric(zero, integer=TRUE, posit=TRUE) || is.character(zero )))
+ stop("bad input for argument 'zero'")
+ if(!is.logical(sameScale) || length(sameScale) != 1)
+ stop("bad input for argument 'sameScale'")
+ if(!is.logical(parallelLocation) || length(parallelLocation) != 1)
+ stop("bad input for argument 'parallelLocation'")
+ fittedMean = FALSE
+ if(!is.logical(fittedMean) || length(fittedMean) != 1)
+ stop("bad input for argument 'fittedMean'")
+
+ if(llocation != "loge")
+ stop("argument 'llocation' must be \"loge\"")
+
+ new("vglmff",
+ blurb=c("Two-parameter log-Laplace distribution\n\n",
+ "Links: ",
+ namesof("location", llocation, earg=elocation), ", ",
+ namesof("scale", lscale, earg=escale),
+ "\n", "\n",
+ "Mean: zz location + scale * (1/kappa - kappa) / sqrt(2)", "\n",
+ "Quantiles: location", "\n",
+ "Variance: zz scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
+ constraints=eval(substitute(expression({
+ .ZERO = .zero
+ if(is.character(.ZERO)) .ZERO = eval(parse(text = .ZERO))
+ .PARALLEL = .parallelLocation
+ parHmat = if(is.logical(.PARALLEL) && .PARALLEL)
+ matrix(1, M/2, 1) else diag(M/2)
+ scaleHmat = if(is.logical(.sameScale) && .sameScale)
+ matrix(1, M/2, 1) else diag(M/2)
+ mycmatrix = cbind(rbind(parHmat, 0*parHmat),
+ rbind(0*scaleHmat, scaleHmat))
+ constraints=cm.vgam(mycmatrix, x, .PARALLEL, constraints, int=FALSE)
+ constraints = cm.zero.vgam(constraints, x, .ZERO, M)
+
+ if(.PARALLEL && names(constraints)[1] == "(Intercept)") {
+ parHmat = diag(M/2)
+ mycmatrix = cbind(rbind(parHmat, 0*parHmat),
+ rbind(0*scaleHmat, scaleHmat))
+ constraints[["(Intercept)"]] = mycmatrix
+ }
+ if(is.logical(.sameScale) && .sameScale &&
+ names(constraints)[1] == "(Intercept)") {
+ temp3 = constraints[["(Intercept)"]]
+ temp3 = cbind(temp3[,1:(M/2)], rbind(0*scaleHmat, scaleHmat))
+ constraints[["(Intercept)"]] = temp3
+ }
+ }), list( .sameScale=sameScale, .parallelLocation=parallelLocation,
+ .zero=zero ))),
+ initialize=eval(substitute(expression({
+ extra$kappa = .kappa
+ extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
+ if(ncol(y <- cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ extra$M = M = 2 * length(extra$kappa)
+ extra$n = n
+ extra$y.names = y.names = paste("tau=", round(extra$tau, dig=.digt),
+ sep="")
+ extra$individual = FALSE
+ predictors.names =
+ c(namesof(paste("quantile(", y.names, ")", sep=""),
+ .llocation, earg=.elocation, tag=FALSE),
+ namesof(if(M==2) "scale" else paste("scale", 1:(M/2), sep=""),
+ .lscale, earg=.escale, tag=FALSE))
+ if(weighted.mean(1*(y < 0.001), w) >= min(extra$tau))
+ stop("sample proportion of 0s > minimum 'tau' value. ",
+ "Choose larger values for 'tau'.")
+
+ if(!length(etastart)) {
+ if( .method.init == 1) {
+ location.init.y = weighted.mean(y, w)
+ scale.init = sqrt(var(y) / 2)
+ } else if( .method.init == 2) {
+ location.init.y = median(y)
+ scale.init = sqrt(sum(w*abs(y-median(y))) / (sum(w) *2))
+ } else if( .method.init == 3) {
+ fit500 = vsmooth.spline(x=x[,min(ncol(x),2)], y=y, w=w,
+ df= .dfmu.init)
+ location.init.y = c(predict(fit500, x=x[,min(ncol(x),2)])$y)
+ scale.init = sqrt(sum(w*abs(y-median(y))) / (sum(w) *2))
+ } else {
+ use.this = weighted.mean(y, w)
+ location.init.y = (1- .sinit)*y + .sinit * use.this
+ scale.init = sqrt(sum(w*abs(y-median(y ))) / (sum(w) *2))
+ }
+ location.init.y = if(length(.ilocation)) rep(.ilocation, len=n) else
+ rep(location.init.y, len=n)
+ location.init.y = matrix(location.init.y, n, M/2)
+ scale.init = if(length(.iscale)) rep(.iscale, len=n) else
+ rep(scale.init, len=n)
+ scale.init = matrix(scale.init, n, M/2)
+ etastart =
+ cbind(theta2eta(location.init.y, .llocation, earg= .elocation),
+ theta2eta(scale.init, .lscale, earg= .escale))
+ }
+ }), list( .method.init=method.init,
+ .dfmu.init=dfmu.init,
+ .sinit=shrinkage.init, .digt=digt,
+ .elocation=elocation, .escale=escale,
+ .llocation=llocation, .lscale=lscale, .kappa=kappa,
+ .ilocation=ilocation, .iscale=iscale ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ location.y = eta2theta(eta[,1:(extra$M/2),drop=FALSE],
+ .llocation, earg= .elocation)
+ if( .fittedMean) {
+ kappamat = matrix(extra$kappa, extra$n, extra$M/2, byrow=TRUE)
+ Scale.y = eta2theta(eta[,(1+extra$M/2):extra$M], .lscale, earg= .escale)
+ location.y + Scale.y * (1/kappamat - kappamat)
+ } else {
+ dimnames(location.y) = list(dimnames(eta)[[1]], extra$y.names)
+ location.y
+ }
+ }, list( .elocation=elocation, .llocation=llocation,
+ .fittedMean=fittedMean, .escale=escale, .lscale=lscale,
+ .kappa=kappa ))),
+ last=eval(substitute(expression({
+ misc$link = c(location= .llocation, scale= .lscale)
+ misc$earg = list(location= .elocation, scale= .escale)
+ misc$expected = TRUE
+ extra$kappa = misc$kappa = .kappa
+ extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
+ misc$true.mu = .fittedMean # @fitted is not a true mu?
+ misc$nsimEIM = .nsimEIM
+ misc$rep0 = .rep0
+ extra$percentile = numeric(length(misc$kappa))
+ location = as.matrix(location.y)
+ for(ii in 1:length(misc$kappa))
+ extra$percentile[ii] = 100 * weighted.mean(y <= location.y[,ii], w)
+ }), list( .elocation=elocation, .llocation=llocation,
+ .escale=escale, .lscale=lscale,
+ .fittedMean=fittedMean,
+ .nsimEIM=nsimEIM, .rep0 = rep0,
+ .kappa=kappa ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ kappamat = matrix(extra$kappa, extra$n, extra$M/2, byrow=TRUE)
+ Scale.w = eta2theta(eta[,(1+extra$M/2):extra$M], .lscale, earg= .escale)
+ ymat = matrix(y, extra$n, extra$M/2)
+ ymat[ymat <= 0] = min(min(y[y > 0]), .rep0) # Adjust for 0s
+ ell.mat = matrix(c(dloglaplace(x=c(ymat),
+ location.ald=c(eta[,1:(extra$M/2)]),
+ scale.ald=c(Scale.w),
+ kappa=c(kappamat), log=TRUE)),
+ extra$n, extra$M/2)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * ell.mat)
+ }, list( .elocation=elocation, .llocation=llocation,
+ .escale=escale, .lscale=lscale,
+ .rep0 = rep0, .kappa=kappa ))),
+ vfamily=c("loglaplace2"),
+ deriv=eval(substitute(expression({
+ ymat = matrix(y, n, M/2)
+ Scale.w = eta2theta(eta[,(1+extra$M/2):extra$M], .lscale, earg= .escale)
+ location.w = eta[,1:(extra$M/2),drop=FALSE]
+ location.y = eta2theta(location.w, .llocation, earg= .elocation)
+ kappamat = matrix(extra$kappa, n, M/2, byrow=TRUE)
+ w.mat = ymat
+ w.mat[w.mat <= 0] = min(min(w.mat[w.mat > 0]), .rep0) # Adjust for 0s
+ w.mat= theta2eta(w.mat, .llocation, earg= .elocation) # w.mat=log(w.mat)
+ zedd = abs(w.mat-location.w) / Scale.w
+ dl.dlocation = sqrt(2) *
+ ifelse(w.mat >= location.w, kappamat, 1/kappamat) *
+ sign(w.mat-location.w) / Scale.w
+ dl.dscale = sqrt(2) *
+ ifelse(w.mat >= location.w, kappamat, 1/kappamat) *
+ zedd / Scale.w - 1 / Scale.w
+ dlocation.deta = dtheta.deta(location.w, .llocation, earg= .elocation)
+ dscale.deta = dtheta.deta(Scale.w, .lscale, earg= .escale)
+ w * cbind(dl.dlocation * dlocation.deta,
+ dl.dscale * dscale.deta)
+ }), list( .escale=escale, .lscale=lscale,
+ .elocation=elocation, .llocation=llocation,
+ .rep0 = rep0, .kappa=kappa ))),
+ weight=eval(substitute(expression({
+ run.varcov = 0
+ ind1 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
+ dthetas.detas = cbind(dlocation.deta, dscale.deta)
+ if(length( .nsimEIM )) {
+ for(ii in 1:( .nsimEIM )) {
+ wsim = matrix(rloglap(n*M/2, loc=c(location.w),
+ sca=c(Scale.w),
+ kappa=c(kappamat)), n, M/2)
+ zedd = abs(wsim-location.w) / Scale.w
+ dl.dlocation = sqrt(2) *
+ ifelse(wsim >= location.w, kappamat, 1/kappamat) *
+ sign(wsim-location.w) / Scale.w
+ dl.dscale = sqrt(2) *
+ ifelse(wsim >= location.w, kappamat, 1/kappamat) *
+ zedd / Scale.w - 1 / Scale.w
+
+ rm(wsim)
+ temp3 = cbind(dl.dlocation, dl.dscale) # n x M matrix
+ run.varcov = ((ii-1) * run.varcov +
+ temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
+ }
+ wz = if(intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow=TRUE) else run.varcov
+
+ wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
+ wz = w * matrix(wz, n, dimm(M))
+ wz
+ } else {
+ d2l.dlocation2 = 2 / (Scale.w * location.w)^2
+ d2l.dscale2 = 1 / Scale.w^2
+ wz = cbind(d2l.dlocation2 * dlocation.deta^2,
+ d2l.dscale2 * dscale.deta^2)
+ w * wz
+ }
+ }), list( .escale=escale, .lscale=lscale,
+ .nsimEIM=nsimEIM,
+ .elocation=elocation, .llocation=llocation ))))
+}
+
+
+
+
+logitlaplace1.control <- function(maxit=300, ...)
+{
+ # Control function for logitlaplace1() fits: raises the maximum number
+ # of IRLS iterations to 300 (quantile-type fits can converge slowly).
+ # Arguments passed in '...' are accepted but ignored.
+ list(maxit=maxit)
+}
+
+
+adjust01.logitlaplace1 = function(ymat, y, w, rep01) {
+ # Nudge response values at the boundaries 0 and 1 into the open
+ # interval (0,1) so that the logit (or other) link applied to them is
+ # finite.  All values <= 0 are replaced by a single scalar (the min over
+ # half the smallest interior value and rep01/w at the offending rows),
+ # and similarly all values >= 1 (the max over the midpoint of the
+ # largest interior value and 1, and 1 - rep01/w).
+ # NOTE(review): assumes y has at least one value strictly inside (0,1),
+ # else range() returns +/-Inf — confirm callers guarantee this.
+ rangey01 = range(y[(y > 0) & (y < 1)])
+ ymat[ymat <= 0] = min(rangey01[1] / 2, rep01 / w[y <= 0])
+ ymat[ymat >= 1] = max((1 + rangey01[2]) / 2, 1 - rep01 / w[y >= 1])
+ ymat
+}
+
+ logitlaplace1 = function(tau = NULL,
+                          llocation="logit",
+                          elocation=list(),
+                          ilocation=NULL,
+                          kappa = sqrt(tau/(1-tau)),
+                          Scale.arg=1,
+                          shrinkage.init=0.95, parallelLocation=FALSE, digt=4,
+                          dfmu.init = 3,
+                          rep01 = 0.5,
+                          method.init=1, zero=NULL) {
+
+ # One-parameter asymmetric-Laplace quantile regression for a response in
+ # [0, 1]; the location (quantile) parameter is modelled on the
+ # 'llocation' (default "logit") scale.  Returns a "vglmff" family object
+ # for use with vglm()/vgam().
+ # Changes from the previous revision:
+ #   * leftover debugging print() calls removed from @initialize,
+ #   * the proportion-of-1s stop() message now reads '>' to agree with
+ #     the test performed (prop.1. >= max(tau)),
+ #   * a dead assignment (immediately overwritten) removed from the
+ #     method.init == 2 branch; behaviour is unchanged (median is used).
+
+ if(!is.Numeric(rep01, posit=TRUE, allow=1) || rep01 > 0.5)
+     stop("bad input for argument 'rep01'")
+ if(!is.Numeric(kappa, posit=TRUE))
+     stop("bad input for argument 'kappa'")
+ if(length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
+     stop("arguments 'kappa' and 'tau' do not match")
+ if(mode(llocation) != "character" && mode(llocation) != "name")
+     llocation = as.character(substitute(llocation))
+ if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+    method.init > 4) stop("argument 'method.init' must be 1, 2 or ... 4")
+ if(!is.list(elocation)) elocation = list()
+ if(!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
+    shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
+ if(length(zero) &&
+    !(is.Numeric(zero, integer=TRUE, posit=TRUE) || is.character(zero )))
+     stop("bad input for argument 'zero'")
+ if(!is.Numeric(Scale.arg, posit=TRUE))
+     stop("bad input for argument 'Scale.arg'")
+ if(!is.logical(parallelLocation) || length(parallelLocation) != 1)
+     stop("bad input for argument 'parallelLocation'")
+ fittedMean = FALSE  # @fitted returns quantiles, never the mean (yet)
+ if(!is.logical(fittedMean) || length(fittedMean) != 1)
+     stop("bad input for argument 'fittedMean'")
+
+ # Build "<link>(location...)" and its ", inverse=TRUE)" counterpart for
+ # the blurb shown by print()/summary().
+ mystring0 = namesof("location", llocation, earg=elocation)
+ mychars = substring(mystring0, fi=1:nchar(mystring0), la=1:nchar(mystring0))
+ mychars[nchar(mystring0)] = ", inverse=TRUE)"
+ mystring1 = paste(mychars, collapse="")
+
+ new("vglmff",
+ blurb=c("One-parameter ", llocation, "-Laplace distribution\n\n",
+         "Links: ", mystring0, "\n", "\n",
+         "Quantiles: ", mystring1),
+ constraints=eval(substitute(expression({
+     constraints = cm.vgam(matrix(1,M,1), x, .parallelLocation,
+                           constraints, intercept=FALSE)
+     constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .parallelLocation=parallelLocation,
+           .Scale.arg=Scale.arg, .zero=zero ))),
+ initialize=eval(substitute(expression({
+     extra$M = M = max(length( .Scale.arg ), length( .kappa )) # Recycle
+     extra$Scale = rep( .Scale.arg, length=M)
+     extra$kappa = rep( .kappa, length=M)
+     extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
+     if(ncol(y <- cbind(y)) != 1)
+         stop("response must be a vector or a one-column matrix")
+     extra$n = n
+     extra$y.names = y.names = paste("tau=", round(extra$tau, dig=.digt),
+                                     sep="")
+     extra$individual = FALSE
+     predictors.names = namesof(paste("quantile(", y.names, ")", sep=""),
+                                .llocation, earg=.elocation, tag=FALSE)
+
+     # The response must lie in [0,1], and the point masses at 0 and 1
+     # must not swallow the requested quantiles.
+     if(all(y == 0 | y == 1)) stop("response cannot be all 0s or 1s")
+     if(min(y) < 0) stop("negative response values detected")
+     if(max(y) > 1) stop("response values greater than 1 detected")
+     if((prop.0. <- weighted.mean(1*(y == 0), w)) >= min(extra$tau))
+         stop("sample proportion of 0s == ", round(prop.0., dig=4),
+              " > minimum 'tau' value. Choose larger values for 'tau'.")
+     if((prop.1. <- weighted.mean(1*(y == 1), w)) >= max(extra$tau))
+         stop("sample proportion of 1s == ", round(prop.1., dig=4),
+              " > maximum 'tau' value. Choose smaller values for 'tau'.")
+     if(!length(etastart)) {
+         if( .method.init == 1) {
+             location.init = quantile(rep(y, w), probs= extra$tau)
+         } else if( .method.init == 2) {
+             location.init = median(rep(y, w))
+         } else if( .method.init == 3) {
+             use.this = weighted.mean(y, w)
+             location.init = (1- .sinit)*y + use.this * .sinit
+         } else {
+             stop("this option not implemented")
+         }
+
+         location.init = if(length(.ilocation)) rep(.ilocation, len=M) else
+                         rep(location.init, len=M)
+         location.init = matrix(location.init, n, M, byrow=TRUE)
+         location.init = abs(location.init)
+         etastart =
+             cbind(theta2eta(location.init, .llocation, earg= .elocation))
+     }
+ }), list( .method.init=method.init,
+           .dfmu.init=dfmu.init,
+           .sinit=shrinkage.init, .digt=digt,
+           .elocation=elocation, .Scale.arg=Scale.arg,
+           .llocation=llocation, .kappa=kappa,
+           .ilocation=ilocation ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+     # Fitted values are the tau-quantiles on the response scale.
+     location.y = eta2theta(eta, .llocation, earg= .elocation)
+     if( .fittedMean) {
+         stop("Yet to do: handle 'fittedMean = TRUE'")
+         kappamat = matrix(extra$kappa, extra$n, extra$M, byrow=TRUE)
+         Scale = matrix(extra$Scale, extra$n, extra$M, byrow=TRUE)
+         location.y + Scale * (1/kappamat - kappamat)
+     } else {
+         if(length(location.y) > extra$n)
+             dimnames(location.y) = list(dimnames(eta)[[1]], extra$y.names)
+         location.y
+     }
+ }, list( .elocation=elocation, .llocation=llocation,
+          .fittedMean=fittedMean, .Scale.arg=Scale.arg,
+          .kappa=kappa ))),
+ last=eval(substitute(expression({
+     misc$link = c(location= .llocation)
+     misc$earg = list(location= .elocation)
+     misc$expected = TRUE
+     extra$kappa = misc$kappa = .kappa
+     extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
+     extra$Scale.arg = .Scale.arg
+     misc$true.mu = .fittedMean # @fitted is not a true mu?
+     misc$rep01 = .rep01
+
+     # Achieved sample percentiles, for checking the fit afterwards.
+     extra$percentile = numeric(length(misc$kappa))
+     location.y = eta2theta(eta, .llocation, earg= .elocation)
+     location.y = as.matrix(location.y)
+     for(ii in 1:length(misc$kappa))
+         extra$percentile[ii] = 100 * weighted.mean(y <= location.y[,ii], w)
+
+ }), list( .elocation=elocation, .llocation=llocation,
+           .Scale.arg=Scale.arg, .fittedMean=fittedMean,
+           .rep01 = rep01,
+           .kappa=kappa ))),
+ loglikelihood=eval(substitute(
+     function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+     # ALD log-likelihood evaluated on the link-transformed scale, with
+     # boundary responses nudged into (0,1) first.
+     kappamat = matrix(extra$kappa, extra$n, extra$M, byrow=TRUE)
+     Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow=TRUE)
+     ymat = matrix(y, extra$n, extra$M)
+     ymat = adjust01.logitlaplace1(ymat=ymat, y=y, w=w, rep01= .rep01)
+     w.mat = theta2eta(ymat, .llocation, earg= .elocation) # e.g., logit()
+     if(residuals) stop("loglikelihood residuals not implemented yet") else {
+         ALDans =
+             sum(w * dalap(x=c(w.mat), location=c(eta),
+                           scale=c(Scale.w), kappa=c(kappamat), log=TRUE))
+         ALDans
+     }
+ }, list( .elocation=elocation, .llocation=llocation,
+          .rep01 = rep01,
+          .Scale.arg=Scale.arg, .kappa=kappa ))),
+ vfamily=c("logitlaplace1"),
+ deriv=eval(substitute(expression({
+     ymat = matrix(y, n, M)
+     Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow=TRUE)
+     location.w = eta
+     kappamat = matrix(extra$kappa, n, M, byrow=TRUE)
+     ymat = adjust01.logitlaplace1(ymat=ymat, y=y, w=w, rep01= .rep01)
+     w.mat = theta2eta(ymat, .llocation, earg= .elocation) # e.g., logit()
+     zedd = abs(w.mat-location.w) / Scale.w
+     dl.dlocation = ifelse(w.mat >= location.w, kappamat, 1/kappamat) *
+                    sqrt(2) * sign(w.mat-location.w) / Scale.w
+     # eta IS the location on the link scale, hence "identity" here.
+     dlocation.deta = dtheta.deta(location.w, "identity", earg= .elocation)
+     w * cbind(dl.dlocation * dlocation.deta)
+ }), list( .Scale.arg=Scale.arg, .elocation=elocation,
+           .rep01 = rep01,
+           .llocation=llocation, .kappa=kappa ))),
+ weight=eval(substitute(expression({
+     # Expected information for the location parameter: 2 / Scale^2.
+     d2l.dlocation2 = 2 / Scale.w^2
+     wz = cbind(d2l.dlocation2 * dlocation.deta^2)
+     w * wz
+ }), list( .Scale.arg=Scale.arg,
+           .elocation=elocation, .llocation=llocation ))))
+}
+
+
+
+
+
+
diff --git a/R/family.zeroinf.q b/R/family.zeroinf.q
index 8bc9e49..8bead25 100644
--- a/R/family.zeroinf.q
+++ b/R/family.zeroinf.q
@@ -14,7 +14,7 @@ dzanegbin = function(x, p0, size, prob=NULL, munb=NULL, log=FALSE) {
stop("'prob' and 'munb' both specified")
prob <- size/(size + munb)
}
- if(!is.logical(log.arg <- log)) stop("bad input for 'log'")
+ if(!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
rm(log)
L = max(length(x), length(p0), length(prob), length(size))
@@ -85,16 +85,20 @@ qzanegbin = function(p, p0, size, prob=NULL, munb=NULL) {
}
rzanegbin = function(n, p0, size, prob=NULL, munb=NULL) {
+ use.n = if((length.n <- length(n)) > 1) length.n else
+ if(!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ stop("bad input for argument 'n'") else n
+
if (length(munb)) {
if (length(prob))
stop("'prob' and 'munb' both specified")
prob <- size/(size + munb)
}
- ans = rposnegbin(n, prob=prob, size=size)
+ ans = rposnegbin(use.n, prob=prob, size=size)
p0 = rep(p0, len=length(ans))
if(!is.Numeric(p0) || any(p0 < 0) || any(p0 > 1))
stop("'p0' must be between 0 and 1 inclusive")
- ifelse(runif(n) < p0, 0, ans)
+ ifelse(runif(use.n) < p0, 0, ans)
}
@@ -102,7 +106,7 @@ rzanegbin = function(n, p0, size, prob=NULL, munb=NULL) {
dzapois = function(x, lambda, p0=0, log=FALSE) {
- if(!is.logical(log.arg <- log)) stop("bad input for 'log'")
+ if(!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
rm(log)
L = max(length(x), length(lambda), length(p0))
@@ -151,11 +155,15 @@ qzapois = function(p, lambda, p0=0) {
}
rzapois = function(n, lambda, p0=0) {
- ans = rpospois(n, lambda)
+ use.n = if((length.n <- length(n)) > 1) length.n else
+ if(!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ stop("bad input for argument 'n'") else n
+
+ ans = rpospois(use.n, lambda)
p0 = rep(p0, len=length(ans))
if(!is.Numeric(p0) || any(p0 < 0) || any(p0 > 1))
stop("p0 must be between 0 and 1 inclusive")
- ifelse(runif(n) < p0, 0, ans)
+ ifelse(runif(use.n) < p0, 0, ans)
}
@@ -163,7 +171,7 @@ rzapois = function(n, lambda, p0=0) {
dzipois = function(x, lambda, phi=0, log=FALSE) {
- if(!is.logical(log.arg <- log)) stop("bad input for 'log'")
+ if(!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
rm(log)
L = max(length(x), length(lambda), length(phi))
@@ -188,37 +196,42 @@ dzipois = function(x, lambda, phi=0, log=FALSE) {
pzipois = function(q, lambda, phi=0) {
ans = ppois(q, lambda)
- phi = rep(phi, length=length(ans))
- if(!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
- stop("'phi' must be between 0 and 1 inclusive")
- ifelse(x<0, 0, phi + (1-phi) * ans)
+ LL = max(length(phi), length(ans))
+ phi = rep(phi, length=LL)
+ ans = rep(ans, length=LL)
+ ans = ifelse(q<0, 0, phi + (1-phi) * ans)
+ ans[phi < 0] = NaN
+ ans[phi > 1] = NaN
+ ans
}
qzipois = function(p, lambda, phi=0) {
- nn = max(length(p), length(lambda), length(phi))
- p = rep(p, len=nn)
- lambda = rep(lambda, len=nn)
- phi = rep(phi, len=nn)
- if(!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
- stop("'phi' must be between 0 and 1 inclusive")
- ans = p
+ LL = max(length(p), length(lambda), length(phi))
+ ans = p = rep(p, len=LL)
+ lambda = rep(lambda, len=LL)
+ phi = rep(phi, len=LL)
ans[p<=phi] = 0
ans[p>phi] = qpois((p[p>phi]-phi[p>phi])/(1-phi[p>phi]), lam=lambda[p>phi])
+ ans[phi < 0] = NaN
+ ans[phi > 1] = NaN
ans
}
rzipois = function(n, lambda, phi=0) {
- if(!is.Numeric(n, positive=TRUE, integer=TRUE, allow=1))
- stop("'n' must be a single positive integer")
- ans = rpois(n, lambda)
+ use.n = if((length.n <- length(n)) > 1) length.n else
+ if(!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ stop("bad input for argument 'n'") else n
+
+ ans = rpois(use.n, lambda)
phi = rep(phi, len=length(ans))
- if(!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
- stop("phi must be between 0 and 1 inclusive")
- ifelse(runif(n) < phi, 0, ans)
+ ans = ifelse(runif(use.n) < phi, 0, ans)
+ ans[phi < 0] = NaN
+ ans[phi > 1] = NaN
+ ans
}
-yip88 = function(link.lambda="loge", n.arg=NULL)
+ yip88 = function(link.lambda="loge", n.arg=NULL)
{
if(mode(link.lambda) != "character" && mode(link.lambda) != "name")
link.lambda = as.character(substitute(link.lambda))
@@ -233,8 +246,8 @@ yip88 = function(link.lambda="loge", n.arg=NULL)
if(length(extra)) extra$sumw = sum(w) else
extra = list(sumw=sum(w))
if(is.numeric(.n.arg) && extra$sumw != .n.arg)
- stop(paste("value of n.arg conflicts with data",
- "(it need not be specified anyway)"))
+ stop("value of 'n.arg' conflicts with data ",
+ "(it need not be specified anyway)")
warning("trimming out the zero observations")
axa.save = attr(x, "assign")
@@ -284,8 +297,11 @@ yip88 = function(link.lambda="loge", n.arg=NULL)
loglikelihood=eval(substitute(
function(mu,y,w,residuals=FALSE, eta, extra=NULL) {
lambda = eta2theta(eta, .link.lambda)
- lstar = -lambda + y * log(lambda) - log1p(-exp(-lambda))
- sum(w * lstar)
+ temp5 = exp(-lambda)
+ phi = (1 - temp5 - extra$sumw/extra$narg) / (1 - temp5)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dzipois(x=y, phi=phi, lambda=lambda, log=TRUE))
+ }
}, list( .link.lambda=link.lambda ))),
vfamily=c("yip88"),
deriv=eval(substitute(expression({
@@ -376,7 +392,9 @@ yip88 = function(link.lambda="loge", n.arg=NULL)
NOS = extra$NOS
p0 = cbind(eta2theta(eta[,1:NOS], .lp0, earg= .ep0))
lambda = cbind(eta2theta(eta[,NOS+(1:NOS)], .llambda, earg= .elambda ))
- sum(w * dzapois(x=y, p0=p0, lambda=lambda, log=TRUE))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dzapois(x=y, p0=p0, lambda=lambda, log=TRUE))
+ }
}, list( .lp0=lp0, .llambda=llambda, .ep0= ep0, .elambda= elambda ))),
vfamily=c("zapoisson"),
deriv=eval(substitute(expression({
@@ -444,9 +462,9 @@ yip88 = function(link.lambda="loge", n.arg=NULL)
if(length(ik) && !is.Numeric(ik, positiv=TRUE))
stop("If given, ik must contain positive values only")
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 2) stop("argument \"method.init\" must be 1 or 2")
+ method.init > 2) stop("argument 'method.init' must be 1 or 2")
if(!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
- shrinkage.init > 1) stop("bad input for argument \"shrinkage.init\"")
+ shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
if(mode(lmunb) != "character" && mode(lmunb) != "name")
lmunb = as.character(substitute(lmunb))
@@ -583,7 +601,9 @@ yip88 = function(link.lambda="loge", n.arg=NULL)
p0 = eta2theta(eta[,3*(1:NOS)-2,drop=FALSE], .lp0, earg= .ep0 )
munb = eta2theta(eta[,3*(1:NOS)-1,drop=FALSE], .lmunb, earg= .emunb )
kmat = eta2theta(eta[,3*(1:NOS),drop=FALSE], .lk, earg= .ek )
- sum(w * dzanegbin(x=y, p0=p0, munb=munb, size=kmat, log=TRUE))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dzanegbin(x=y, p0=p0, munb=munb, size=kmat, log=TRUE))
+ }
}, list( .lp0=lp0, .lmunb=lmunb, .lk=lk,
.ep0=ep0, .emunb=emunb, .ek=ek ))),
vfamily=c("zanegbinomial"),
@@ -686,11 +706,11 @@ yip88 = function(link.lambda="loge", n.arg=NULL)
if(FALSE)
rposnegbin = function(n, munb, k) {
if(!is.Numeric(k, posit=TRUE))
- stop("argument \"k\" must be positive")
+ stop("argument 'k' must be positive")
if(!is.Numeric(munb, posit=TRUE))
- stop("argument \"munb\" must be positive")
+ stop("argument 'munb' must be positive")
if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
- stop("argument \"n\" must be a positive integer")
+ stop("argument 'n' must be a positive integer")
ans = rnbinom(n=n, mu=munb, size=k)
munb = rep(munb, len=n)
k = rep(k, len=n)
@@ -708,9 +728,9 @@ rposnegbin = function(n, munb, k) {
if(FALSE)
dposnegbin = function(x, munb, k, log=FALSE) {
if(!is.Numeric(k, posit=TRUE))
- stop("argument \"k\" must be positive")
+ stop("argument 'k' must be positive")
if(!is.Numeric(munb, posit=TRUE))
- stop("argument \"munb\" must be positive")
+ stop("argument 'munb' must be positive")
ans = dnbinom(x=x, mu=munb, size=k, log=log)
ans0 = dnbinom(x=0, mu=munb, size=k, log=FALSE)
ans = if(log) ans - log1p(-ans0) else ans/(1-ans0)
@@ -735,9 +755,9 @@ dposnegbin = function(x, munb, k, log=FALSE) {
if(!is.list(ephi)) ephi = list()
if(!is.list(elambda)) elambda = list()
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 2) stop("argument \"method.init\" must be 1 or 2")
+ method.init > 2) stop("argument 'method.init' must be 1 or 2")
if(!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
- shrinkage.init > 1) stop("bad input for argument \"shrinkage.init\"")
+ shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
new("vglmff",
blurb=c("Zero-inflated Poisson\n\n",
@@ -798,7 +818,9 @@ dposnegbin = function(x, munb, k, log=FALSE) {
phi = pmax(phi, smallno)
phi = pmin(phi, 1.0-smallno)
lambda = eta2theta(eta[,2], .llambda, earg= .elambda )
- sum(w * dzipois(x=y, phi=phi, lambda=lambda, log=TRUE))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dzipois(x=y, phi=phi, lambda=lambda, log=TRUE))
+ }
}, list( .lphi=lphi, .llambda=llambda,
.ephi=ephi, .elambda=elambda ))),
vfamily=c("zipoisson"),
@@ -843,15 +865,15 @@ dposnegbin = function(x, munb, k, log=FALSE) {
- zibinomial = function(lphi="logit", link.mu="logit",
+ zibinomial = function(lphi="logit", lmu="logit",
ephi=list(), emu=list(),
iphi=NULL, zero=1, mv=FALSE)
{
- if(as.logical(mv)) stop("argument \"mv\" must be FALSE")
+ if(as.logical(mv)) stop("argument 'mv' must be FALSE")
if(mode(lphi) != "character" && mode(lphi) != "name")
lphi = as.character(substitute(lphi))
- if(mode(link.mu) != "character" && mode(link.mu) != "name")
- link.mu = as.character(substitute(link.mu))
+ if(mode(lmu) != "character" && mode(lmu) != "name")
+ lmu = as.character(substitute(lmu))
if(is.Numeric(iphi))
if(!is.Numeric(iphi, allow=1, posit=TRUE) || iphi >= 1)
stop("iphi must be a single number inside the interval (0,1)")
@@ -861,7 +883,7 @@ dposnegbin = function(x, munb, k, log=FALSE) {
new("vglmff",
blurb=c("Zero-inflated binomial\n\n",
"Links: ", namesof("phi", lphi, earg= ephi ), ", ",
- namesof("mu", link.mu, earg= emu ), "\n",
+ namesof("mu", lmu, earg= emu ), "\n",
"Mean: (1-phi) * mu / (1 - (1-mu)^w)"),
constraints=eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M)
@@ -893,52 +915,49 @@ dposnegbin = function(x, munb, k, log=FALSE) {
}
predictors.names = c( namesof("phi", .lphi, earg= .ephi, tag=FALSE),
- namesof("mu", .link.mu, earg= .emu, tag=FALSE))
+ namesof("mu", .lmu, earg= .emu, tag=FALSE))
phi.init = if(length( .iphi)) .iphi else {
sum(w[y==0]) / sum(w)
}
if(phi.init <= 0 || phi.init >=1) phi.init = 0.1 # Last resort
mustart = cbind(rep(phi.init, len=n), mustart) # 1st coln not a real mu
- }), list( .lphi=lphi, .link.mu=link.mu,
+ }), list( .lphi=lphi, .lmu=lmu,
.ephi=ephi, .emu=emu,
.iphi=iphi ))),
inverse=eval(substitute(function(eta, extra=NULL) {
phi = eta2theta(eta[,1], .lphi, earg= .ephi )
- mubin = eta2theta(eta[,2], .link.mu, earg= .emu )
+ mubin = eta2theta(eta[,2], .lmu, earg= .emu )
(1-phi) * mubin
- }, list( .lphi=lphi, .link.mu=link.mu,
+ }, list( .lphi=lphi, .lmu=lmu,
.ephi=ephi, .emu=emu ))),
last=eval(substitute(expression({
- misc$link <- c("phi" = .lphi, "mu" = .link.mu)
+ misc$link <- c("phi" = .lphi, "mu" = .lmu)
misc$earg <- list("phi" = .ephi, "mu" = .emu )
if(intercept.only && all(w==w[1])) {
phi = eta2theta(eta[1,1], .lphi, earg= .ephi )
- mubin = eta2theta(eta[1,2], .link.mu, earg= .emu )
+ mubin = eta2theta(eta[1,2], .lmu, earg= .emu )
misc$p0 = phi + (1-phi) * (1-mubin)^w[1] # P(Y=0)
}
- }), list( .lphi=lphi, .link.mu=link.mu,
+ }), list( .lphi=lphi, .lmu=lmu,
.ephi=ephi, .emu=emu ))),
- link=eval(substitute(function(mu, extra=NULL)
+ link=eval(substitute(function(mu, extra=NULL) {
cbind(theta2eta(mu[,1], .lphi, earg= .ephi ),
- theta2eta(mu[,2], .link.mu, earg= .emu ))
- , list( .lphi=lphi, .link.mu=link.mu,
- .ephi=ephi, .emu=emu ))),
+ theta2eta(mu[,2], .lmu, earg= .emu ))
+ }, list( .lphi=lphi, .lmu=lmu,
+ .ephi=ephi, .emu=emu ))),
loglikelihood=eval(substitute(
function(mu,y,w,residuals=FALSE, eta, extra=NULL) {
- phi = eta2theta(eta[,1], .lphi, earg= .ephi )
- mubin = eta2theta(eta[,2], .link.mu, earg= .emu )
- index = (y==0)
- tmp8 = phi + (1-phi)*(1-mubin)^w
- ell0 = log(tmp8[index])
- ell1 = log1p(-phi[!index]) + dbinom(x=round(w[!index]*y[!index]),
- size=w[!index], prob=mubin[!index], log=TRUE)
- sum(ell0) + sum(ell1)
- }, list( .lphi=lphi, .link.mu=link.mu,
+ phi = eta2theta(eta[,1], .lphi, earg= .ephi )
+ mubin = eta2theta(eta[,2], .lmu, earg= .emu )
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(dzibinom(x=round(w*y), size=w, prob=mubin, log=TRUE, phi=phi))
+ }
+ }, list( .lphi=lphi, .lmu=lmu,
.ephi=ephi, .emu=emu ))),
vfamily=c("zibinomial"),
deriv=eval(substitute(expression({
phi = eta2theta(eta[,1], .lphi, earg= .ephi )
- mubin = eta2theta(eta[,2], .link.mu, earg= .emu )
+ mubin = eta2theta(eta[,2], .lmu, earg= .emu )
prob0 = (1-mubin)^w # Actually q^w
tmp8 = phi + (1-phi)*prob0
index = (y==0)
@@ -948,13 +967,13 @@ dposnegbin = function(x, munb, k, log=FALSE) {
dl.dmubin[!index] = w[!index] * (y[!index]/mubin[!index] -
(1-y[!index]) / (1-mubin[!index]))
dphi.deta = dtheta.deta(phi, .lphi, earg= .ephi )
- dmubin.deta = dtheta.deta(mubin, .link.mu, earg= .emu )
+ dmubin.deta = dtheta.deta(mubin, .lmu, earg= .emu )
ans = cbind(dl.dphi * dphi.deta, dl.dmubin * dmubin.deta)
- if(.link.mu == "logit") {
+ if(.lmu == "logit") {
ans[!index,2] = w[!index] * (y[!index] - mubin[!index])
}
ans
- }), list( .lphi=lphi, .link.mu=link.mu,
+ }), list( .lphi=lphi, .lmu=lmu,
.ephi=ephi, .emu=emu ))),
weight=eval(substitute(expression({
wz = matrix(as.numeric(NA), nrow=n, ncol=dimm(M))
@@ -971,20 +990,27 @@ dposnegbin = function(x, munb, k, log=FALSE) {
wz[ind6,iam(2,2,M)] = .Machine$double.eps
}
wz
- }), list( .lphi=lphi, .link.mu=link.mu,
+ }), list( .lphi=lphi, .lmu=lmu,
.ephi=ephi, .emu=emu ))))
}
dzibinom = function(x, size, prob, log = FALSE, phi=0) {
+ if(!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
+ rm(log)
+
L = max(length(x), length(size), length(prob), length(phi))
x = rep(x, len=L); size = rep(size, len=L);
prob = rep(prob, len=L); phi = rep(phi, len=L);
- ans = dbinom(x, size, prob, log=log)
+ ans = dbinom(x=x, size=size, prob=prob, log=TRUE)
if(!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
stop("phi must be between 0 and 1 inclusive")
- ifelse(x==0, phi + (1-phi) * ans, (1-phi) * ans)
+ if(log.arg) {
+ ifelse(x==0, log(phi + (1-phi) * exp(ans)), log1p(-phi) + ans)
+ } else {
+ ifelse(x==0, phi + (1-phi) * exp(ans), (1-phi) * exp(ans))
+ }
}
pzibinom = function(q, size, prob, lower.tail = TRUE, log.p = FALSE, phi=0) {
@@ -1035,7 +1061,7 @@ dzinegbin = function(x, phi, size, prob=NULL, munb=NULL, log=FALSE) {
stop("'prob' and 'munb' both specified")
prob <- size/(size + munb)
}
- log.arg = log
+ if(!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
rm(log)
if(!is.logical(log.arg) || length(log.arg) != 1)
stop("bad input for 'log.arg'")
@@ -1117,13 +1143,13 @@ zinegbinomial.control <- function(save.weight=TRUE, ...)
if(length(ik) && !is.Numeric(ik, positiv=TRUE))
stop("'ik' must contain positive values only")
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 3) stop("argument \"method.init\" must be 1, 2 or 3")
+ method.init > 3) stop("argument 'method.init' must be 1, 2 or 3")
if(!is.Numeric(nsimEIM, allow=1, integ=TRUE))
stop("'nsimEIM' must be a positive integer")
if(nsimEIM <= 10)
warning("'nsimEIM' should be greater than 10, say")
if(!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
- shrinkage.init > 1) stop("bad input for argument \"shrinkage.init\"")
+ shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
if(mode(lmunb) != "character" && mode(lmunb) != "name")
lmunb = as.character(substitute(lmunb))
@@ -1263,7 +1289,9 @@ zinegbinomial.control <- function(save.weight=TRUE, ...)
phi = eta2theta(eta[,3*(1:NOS)-2,drop=FALSE], .lphi, earg= .ephi )
munb = eta2theta(eta[,3*(1:NOS)-1,drop=FALSE], .lmunb, earg= .emunb )
kmat = eta2theta(eta[,3*(1:NOS), drop=FALSE], .lk, earg= .ek )
- sum(w*dzinegbin(x=y, phi=phi, munb=munb, size=kmat, log=TRUE))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w*dzinegbin(x=y, phi=phi, munb=munb, size=kmat, log=TRUE))
+ }
}, list( .lphi=lphi, .lmunb=lmunb, .lk=lk,
.ephi=ephi, .emunb=emunb, .ek=ek ))),
vfamily=c("zinegbinomial"),
@@ -1343,7 +1371,7 @@ zinegbinomial.control <- function(save.weight=TRUE, ...)
temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
}
wz1 = if(intercept.only)
- matrix(apply(run.varcov, 2, mean),
+ matrix(colMeans(run.varcov),
nr=n, nc=ncol(run.varcov), byrow=TRUE) else run.varcov
wz1 = wz1 * dthetas.detas[,3*(spp. -1) + ind1$row] *
diff --git a/R/formula.vlm.q b/R/formula.vlm.q
new file mode 100644
index 0000000..26bd76b
--- /dev/null
+++ b/R/formula.vlm.q
@@ -0,0 +1,185 @@
+# These functions are
+# Copyright (C) 1998-2009 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+
+formulavlm = function(x, fnumber=1, ...) {
+    # Extract a formula from a fitted VGAM-family model object.
+    # fnumber = 1 returns the main formula (x@misc$formula);
+    # fnumber = 2 returns the secondary formula (x@misc$form2).
+    # Fix: the mailing-list archiver had mangled the S4 slot operator
+    # '@' into ' at ', which is not valid R; restored below.
+    if(!is.Numeric(fnumber, integ=TRUE, allow=1, posit=TRUE) ||
+       fnumber > 2)
+        stop("argument 'fnumber' must be 1 or 2")
+
+    if(!any(slotNames(x) == "misc"))
+        stop("cannot find slot 'misc'")
+
+    if(fnumber == 1) x@misc$formula else x@misc$form2
+}
+
+
+
+formulaNA.VGAM = function(x, ...) {
+ # Fallback 'formula' method: always errors, for classes that do not
+ # carry a model formula.
+ stop("a formula does not make sense for object 'x'")
+}
+
+
+
+
+
+# Register the S4 'formula' generic for each VGAM model class; all of
+# them delegate to formulavlm(), which reads the formula from the
+# object's 'misc' slot.
+setMethod("formula", "vlm",
+ function(x, ...)
+ formulavlm(x=x, ...))
+
+setMethod("formula", "vglm",
+ function(x, ...)
+ formulavlm(x=x, ...))
+
+
+
+setMethod("formula", "vgam",
+ function(x, ...)
+ formulavlm(x=x, ...))
+
+setMethod("formula", "rrvglm",
+ function(x, ...)
+ formulavlm(x=x, ...))
+
+setMethod("formula", "qrrvglm",
+ function(x, ...)
+ formulavlm(x=x, ...))
+
+setMethod("formula", "grc",
+ function(x, ...)
+ formulavlm(x=x, ...))
+
+
+
+
+
+
+
+
+
+
+variable.namesvlm <- function(object, full = FALSE, ...) {
+    # Column (variable) names of the model matrix of a 'vlm' object.
+    # Prefers the QR decomposition stored in object@qr; falls back to the
+    # model matrix in object@x.  With full = TRUE all columns are
+    # returned; otherwise only the first object@rank columns.
+    # Fix: restored the S4 '@' operator, mangled to ' at ' by the
+    # mailing-list archiver.
+    qrslot <- object@qr
+    if(!length(qrslot$qr)) {
+        use.this <- object@x
+        if(!length(use.this))
+            stop("argument 'object' has empty 'qr' and 'x' slots.")
+    } else {
+        use.this = qrslot$qr
+    }
+    if(full) dimnames(use.this)[[2]] else
+    if(object@rank) dimnames(use.this)[[2]][seq_len(object@rank)] else
+    character(0)
+}
+
+
+
+
+variable.namesrrvglm <- function(object, ...) {
+    # All column (variable) names of the model matrix of an 'rrvglm'
+    # object, taken from the stored QR decomposition when available and
+    # otherwise from the 'x' slot.  Unlike variable.namesvlm(), no
+    # 'full'/rank subsetting is done: every column is reported.
+    # Fix: restored the S4 '@' operator, mangled to ' at ' by the
+    # mailing-list archiver.
+    qrslot <- object@qr
+    if(!length(qrslot$qr)) {
+        use.this <- object@x
+        if(!length(use.this))
+            stop("argument 'object' has empty 'qr' and 'x' slots.")
+    } else {
+        use.this = qrslot$qr
+    }
+    dimnames(use.this)[[2]]
+}
+
+
+
+
+
+
+
+case.namesvlm <- function(object, full = FALSE, ...) {
+    # Row (case) names for a fitted 'vlm' object, taken from the first
+    # non-empty of: working residuals, the 'x' slot, the 'y' slot.
+    # Unless full = TRUE, rows with zero prior weight are dropped (only
+    # when the prior weights form a single column).
+    # Fix: restored the S4 '@' operator, mangled to ' at ' by the
+    # mailing-list archiver.
+    w <- weights(object, type="prior")
+    use.this <- residuals(object, type="working")
+    if(!length(use.this))
+        use.this <- object@x
+    if(!length(use.this))
+        use.this <- object@y
+    if(!length(use.this))
+        stop("argument 'object' has empty 'x' and 'y' slots.")
+    dn <- dimnames(use.this)[[1]]
+    if(full || is.null(w) || ncol(cbind(w)) != 1) dn else dn[w!=0]
+}
+
+
+# Register S4 'variable.names' methods: most classes delegate to
+# variable.namesvlm(); rrvglm uses its own variant that returns all
+# columns.
+setMethod("variable.names", "vlm",
+ function(object, ...)
+ variable.namesvlm(object=object, ...))
+
+setMethod("variable.names", "vglm",
+ function(object, ...)
+ variable.namesvlm(object=object, ...))
+
+setMethod("variable.names", "vgam",
+ function(object, ...)
+ variable.namesvlm(object=object, ...))
+
+setMethod("variable.names", "rrvglm",
+ function(object, ...)
+ variable.namesrrvglm(object=object, ...))
+
+setMethod("variable.names", "qrrvglm",
+ function(object, ...)
+ variable.namesvlm(object=object, ...))
+
+setMethod("variable.names", "grc",
+ function(object, ...)
+ variable.namesvlm(object=object, ...))
+
+
+
+
+
+
+# Register S4 'case.names' methods: every class delegates to
+# case.namesvlm().
+setMethod("case.names", "vlm",
+ function(object, ...)
+ case.namesvlm(object=object, ...))
+
+setMethod("case.names", "vglm",
+ function(object, ...)
+ case.namesvlm(object=object, ...))
+
+setMethod("case.names", "vgam",
+ function(object, ...)
+ case.namesvlm(object=object, ...))
+
+setMethod("case.names", "rrvglm",
+ function(object, ...)
+ case.namesvlm(object=object, ...))
+
+setMethod("case.names", "qrrvglm",
+ function(object, ...)
+ case.namesvlm(object=object, ...))
+
+setMethod("case.names", "grc",
+ function(object, ...)
+ case.namesvlm(object=object, ...))
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/R/links.q b/R/links.q
index 692d1c1..4336647 100644
--- a/R/links.q
+++ b/R/links.q
@@ -18,7 +18,7 @@
TypicalVGAMfamilyFunction <- function(lsigma="loge", esigma=list(),
isigma=NULL, parallel=TRUE,
- method.init=1,
+ shrinkage.init = 0.95, method.init=1,
nsimEIM=100, zero=NULL) {
NULL
}
@@ -656,8 +656,10 @@ elogit <- function(theta, earg=list(min=0, max=1), inverse=FALSE, deriv=0,
-logit <- function(theta, earg=list(), inverse=FALSE, deriv=0,
- short=TRUE, tag=FALSE)
+
+
+ logit <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
{
if(is.character(theta)) {
string <- if(short)
@@ -680,9 +682,13 @@ logit <- function(theta, earg=list(), inverse=FALSE, deriv=0,
}
} else {
switch(deriv+1, {
- log(theta) - log1p(-theta)},
+ temp2 = log(theta) - log1p(-theta)
+ if(any(near0.5 <- (abs(theta - 0.5) < 0.000125)))
+ temp2[near0.5] = log(theta[near0.5] / (1-theta[near0.5]))
+ temp2
+ },
exp(log(theta) + log1p(-theta)),
- theta * (1 - theta) * (1 - 2 * theta))
+ exp(log(theta) + log1p(-theta)) * (1 - 2 * theta))
}
}
@@ -1229,7 +1235,7 @@ checkCut = function(y) {
oklevels = 1:L
if(L == 1) stop("only one unique value")
for(ii in oklevels) {
- if(all(ii != uy)) stop(paste("there is no", ii, "value"))
+ if(all(ii != uy)) stop("there is no ", ii, " value")
}
TRUE
}
diff --git a/R/model.matrix.vglm.q b/R/model.matrix.vglm.q
index a17be0a..f9fba4d 100644
--- a/R/model.matrix.vglm.q
+++ b/R/model.matrix.vglm.q
@@ -5,30 +5,55 @@
-vlabel <- function(xn, ncolBlist, M, separator=":") {
+
+
+
+
+
+
+ attrassigndefault = function(mmat, tt) {
+ if(!inherits(tt, "terms"))
+ stop("need terms object")
+ aa = attr(mmat, "assign")
+ if(is.null(aa))
+ stop("argument is not really a model matrix")
+ ll = attr(tt, "term.labels")
+ if(attr(tt, "intercept") > 0)
+ ll = c("(Intercept)", ll)
+ aaa = factor(aa, labels = ll)
+ split(order(aa), aaa)
+}
+
+
+ attrassignlm = function(object, ...)
+ attrassigndefault(model.matrix(object), object at terms)
+
+
+
+ vlabel = function(xn, ncolBlist, M, separator=":") {
if(length(xn) != length(ncolBlist))
stop("length of first two arguments not equal")
- n1 <- rep(xn, ncolBlist)
- if(M==1)
+ n1 = rep(xn, ncolBlist)
+ if(M == 1)
return(n1)
- n2 <- as.list(ncolBlist)
- n2 <- lapply(n2, seq)
- n2 <- unlist(n2)
- n2 <- as.character(n2)
- n2 <- paste(separator, n2, sep="")
- n3 <- rep(ncolBlist, ncolBlist)
- n2[n3==1] <- ""
- n1n2 <- paste(n1, n2, sep="")
+ n2 = as.list(ncolBlist)
+ n2 = lapply(n2, seq)
+ n2 = unlist(n2)
+ n2 = as.character(n2)
+ n2 = paste(separator, n2, sep="")
+ n3 = rep(ncolBlist, ncolBlist)
+ n2[n3==1] = ""
+ n1n2 = paste(n1, n2, sep="")
n1n2
}
-lm2vlm.model.matrix <- function(x, Blist=NULL, assign.attributes=TRUE,
- M=NULL, xij=NULL, Aarray=NULL, Aindex=NULL)
-{
-
+ lm2vlm.model.matrix = function(x, Blist=NULL, assign.attributes=TRUE,
+ M=NULL, xij=NULL, Xm2=NULL) {
+
+
if(length(Blist) != ncol(x))
@@ -38,239 +63,182 @@ lm2vlm.model.matrix <- function(x, Blist=NULL, assign.attributes=TRUE,
if(inherits(xij, "formula"))
xij = list(xij)
if(!is.list(xij))
- stop("xij is not a list of formulae")
+ stop("'xij' is not a list of formulae")
}
if(!is.numeric(M))
- M <- nrow(Blist[[1]])
+ M = nrow(Blist[[1]])
- if(length(xij)) {
- Blist.NAed = Blist
- atx = attr(x, "assign")
- for(i in 1:length(xij)) {
- form = xij[[i]]
- if(length(form) != 3)
- stop(paste("xij[[", i, "]] is not a formula with a response"))
- tform = terms(form)
- atform = attr(tform, "term.labels") # doesn't include response
- if(length(atform) != M) {
- stop(paste("xij[[", i, "]] does not contain", M, " terms"))
- }
- for(k in 1:length(atform)) {
- for(s in atx[[(atform[[k]])]]) {
- if(length(Blist[[s]])) {
- Blist[[s]] = ei(k, M) # Easy for later
- Blist.NAed[[s]] = Blist[[s]] * NA # NA'ed
- }
- }
- }
- }
- }
- n <- nrow(x)
- if(all(trivial.constraints(Blist) == 1) && !length(Aarray)) {
- xbig <- if(M > 1) kronecker(x, diag(M)) else x
- ncolBlist <- rep(M, ncol(x))
+ nrow_X_lm = nrow(x)
+ if(all(trivial.constraints(Blist) == 1)) {
+ X_vlm = if(M > 1) kronecker(x, diag(M)) else x
+ ncolBlist = rep(M, ncol(x))
} else {
- allB <- matrix(unlist(Blist), nrow=M)
- ncolBlist <- unlist(lapply(Blist, ncol))
- R <- sum(ncolBlist)
-
- X1 <- rep(c(t(x)), rep(ncolBlist,n))
- dim(X1) <- c(R, n)
- BB <- kronecker(matrix(1,n,1), allB)
- if(length(Aarray)) {
- tmp34 = aperm(Aarray, c(1,3,2)) # c(M,n,r)
- for(ii in 1:length(Aindex))
- BB[,Aindex[[ii]]] = c(tmp34)
- }
- xbig <- kronecker(t(X1), matrix(1,M,1)) * BB
+ allB = matrix(unlist(Blist), nrow=M)
+ ncolBlist = unlist(lapply(Blist, ncol))
+ Rsum = sum(ncolBlist)
+
+ X1 = rep(c(t(x)), rep(ncolBlist,nrow_X_lm))
+ dim(X1) = c(Rsum, nrow_X_lm)
+ X_vlm = kronecker(t(X1), matrix(1,M,1)) *
+ kronecker(matrix(1,nrow_X_lm,1), allB)
+ rm(X1)
}
- dn <- labels(x)
- yn <- dn[[1]]
- xn <- dn[[2]]
- dimnames(xbig) <- list(vlabel(yn, rep(M, n), M),
+ dn = labels(x)
+ yn = dn[[1]]
+ xn = dn[[2]]
+ dimnames(X_vlm) = list(vlabel(yn, rep(M, nrow_X_lm), M),
vlabel(xn, ncolBlist, M))
if(assign.attributes) {
+ attr(X_vlm, "contrasts") = attr(x, "contrasts")
+ attr(X_vlm, "factors") = attr(x, "factors")
+ attr(X_vlm, "formula") = attr(x, "formula")
+ attr(X_vlm, "class") = attr(x, "class")
+ attr(X_vlm, "order") = attr(x, "order")
+ attr(X_vlm, "term.labels") = attr(x, "term.labels")
- attr(xbig, "contrasts") <- attr(x, "contrasts")
- attr(xbig, "factors") <- attr(x, "factors")
- attr(xbig, "formula") <- attr(x, "formula")
- attr(xbig, "class") <- attr(x, "class")
- attr(xbig, "order") <- attr(x, "order")
- attr(xbig, "term.labels") <- attr(x, "term.labels")
-
-
- nasgn <- oasgn <- attr(x, "assign")
- low <- 0
- for(i in 1:length(oasgn)) {
- len <- length(oasgn[[i]]) * ncolBlist[oasgn[[i]][1]]
- nasgn[[i]] <- (low+1):(low+len)
- low = low + len
- }
- if(low != ncol(xbig))
+ nasgn = oasgn = attr(x, "assign")
+ lowind = 0
+ for(ii in 1:length(oasgn)) {
+ mylen = length(oasgn[[ii]]) * ncolBlist[oasgn[[ii]][1]]
+ nasgn[[ii]] = (lowind+1):(lowind+mylen)
+ lowind = lowind + mylen
+ } # End of ii
+ if(lowind != ncol(X_vlm))
stop("something gone wrong")
- attr(xbig, "assign") <- nasgn
+ attr(X_vlm, "assign") = nasgn
- fred <- unlist(lapply(nasgn, length)) / unlist(lapply(oasgn, length))
- vasgn <- vector("list", sum(fred))
- k <- 0
- for(i in 1:length(oasgn)) {
- temp <- matrix(nasgn[[i]], ncol=length(oasgn[[i]]))
- for(j in 1:nrow(temp)) {
- k <- k + 1
- vasgn[[k]] <- temp[j,]
+ fred = unlist(lapply(nasgn, length)) / unlist(lapply(oasgn, length))
+ vasgn = vector("list", sum(fred))
+ kk = 0
+ for(ii in 1:length(oasgn)) {
+ temp = matrix(nasgn[[ii]], ncol=length(oasgn[[ii]]))
+ for(jloc in 1:nrow(temp)) {
+ kk = kk + 1
+ vasgn[[kk]] = temp[jloc,]
}
}
- names(vasgn) <- vlabel(names(oasgn), fred, M)
- attr(xbig, "vassign") <- vasgn
+ names(vasgn) = vlabel(names(oasgn), fred, M)
+ attr(X_vlm, "vassign") = vasgn
+ attr(X_vlm, "constraints") = Blist
+ } # End of if(assign.attributes)
- attr(xbig, "constraints") <- Blist
- }
+ if(!length(xij)) return(X_vlm)
- xasgn <- attr(x, "assign")
- if(length(xij)) {
- rm.col.index = NULL # Remove these columns from xbig
- for(i in 1:length(xij)) {
- form = xij[[i]] # deparse(form1[[3]])
- tform = terms(form)
- atform = attr(tform, "term.labels") # doesn't include response
- response.name = (dimnames(attr(tform, "factors"))[[1]])[1]
-
- ptr0 = NULL
- for(s in 1:M)
- if(length(nasgn[[atform[s]]])) {
- ptr0 = s
- break
- }
- if(!is.numeric(ptr0)) stop("no destination column indices")
- dest.col.index = nasgn[[atform[ptr0]]]
- if(M > 1)
- for(k in ((1:M)[-ptr0])) {
- from.col.index = nasgn[[atform[k]]] # May be NULL
- if(length(from.col.index)) {
- xbig[,dest.col.index] = xbig[,dest.col.index] +
- xbig[,from.col.index]
- rm.col.index = c(rm.col.index, from.col.index)
- vasgn[[atform[k]]] = NULL # Delete it
- }
- }
- d2 = dimnames(xbig)[[2]]
- d2[dest.col.index] = vlabel(response.name,
- length(dest.col.index), M=M, separator="")
- dimnames(xbig) = list(dimnames(xbig)[[1]], d2)
- ptr = (1:length(names(vasgn)))[(names(vasgn)==atform[[ptr0]])]
- names(vasgn)[ptr] = response.name
+ at.x = attr(x, "assign")
+ at.vlmx = attr(X_vlm, "assign")
+ at.Xm2 = attr(Xm2, "assign")
- }
+ for(ii in 1:length(xij)) {
+ form.xij = xij[[ii]]
+ if(length(form.xij) != 3)
+ stop("xij[[", ii, "]] is not a formula with a response")
+ tform.xij = terms(form.xij)
+ aterm.form = attr(tform.xij, "term.labels") # Does not include response
+ if(length(aterm.form) != M)
+ stop("xij[[", ii, "]] does not contain ", M, " terms")
- if(length(rm.col.index))
- xbig = xbig[,-rm.col.index,drop=FALSE] # Delete the columns in 1 go
+ name.term.y = as.character(form.xij)[2]
+ cols.X_vlm = at.vlmx[[name.term.y]] # May be > 1 in length.
- if(assign.attributes) {
- attr(xbig, "constraints") <- Blist.NAed # Not quite right
- attr(xbig, "vassign") <- vasgn
- attr(xbig, "assign") <- nasgn
- attr(xbig, "xij") <- xij
- }
+ x.name.term.2 = aterm.form[1] # Choose the first one
+ One.such.term = at.Xm2[[x.name.term.2]]
+ for(bbb in 1:length(One.such.term)) {
+ use.cols.Xm2 = NULL
+ for(sss in 1:M) {
+ x.name.term.2 = aterm.form[sss]
+ one.such.term = at.Xm2[[x.name.term.2]]
+ use.cols.Xm2 = c(use.cols.Xm2, one.such.term[bbb])
+ } # End of sss
- }
+ allXk = Xm2[,use.cols.Xm2,drop=FALSE]
+ cmat.no = (at.x[[name.term.y]])[1] # First one will do (all the same).
+ cmat = Blist[[cmat.no]]
+ Rsum.k = ncol(cmat)
+ tmp44 = kronecker(matrix(1, nrow_X_lm, 1), t(cmat)) *
+ kronecker(allXk, matrix(1,ncol(cmat), 1)) # n*Rsum.k x M
+ tmp44 = array(t(tmp44), c(M, Rsum.k, nrow_X_lm))
+ tmp44 = aperm(tmp44, c(1,3,2)) # c(M, n, Rsum.k)
+ rep.index = cols.X_vlm[((bbb-1)*Rsum.k+1):(bbb*Rsum.k)]
+ X_vlm[,rep.index] = c(tmp44)
+ } # End of bbb
+ } # End of for(ii in 1:length(xij))
- xbig
+ if(assign.attributes) {
+ attr(X_vlm, "vassign") = vasgn
+ attr(X_vlm, "assign") = nasgn
+ attr(X_vlm, "xij") = xij
+ }
+ X_vlm
}
-model.matrixvlm = function(object, type=c("vlm","lm","lm2"), ...) {
+
+
+
+
+ model.matrixvlm = function(object, type=c("vlm","lm","lm2","bothlmlm2"),
+ ...) {
+
if(mode(type) != "character" && mode(type) != "name")
- type <- as.character(substitute(type))
- type <- match.arg(type, c("vlm","lm","lm2"))[1]
+ type = as.character(substitute(type))
+ type = match.arg(type, c("vlm","lm","lm2","bothlmlm2"))[1]
+
+
- x <- slot(object, "x")
- Xm2 <- slot(object, "Xm2")
+ x = slot(object, "x")
+ Xm2 = slot(object, "Xm2")
if(!length(x)) {
data = model.frame(object, xlev=object at xlevels, ...)
kill.con = if(length(object at contrasts)) object at contrasts else NULL
- x <- vmodel.matrix.default(object, data=data,
- contrasts.arg = kill.con)
- if(is.R()) {
-
- if(TRUE) {
- attrassigndefault <- function(mmat, tt) {
- if (!inherits(tt, "terms"))
- stop("need terms object")
- aa <- attr(mmat, "assign")
- if (is.null(aa))
- stop("argument is not really a model matrix")
- ll <- attr(tt, "term.labels")
- if (attr(tt, "intercept") > 0)
- ll <- c("(Intercept)", ll)
- aaa <- factor(aa, labels = ll)
- split(order(aa), aaa)
- }
- }
- tt = terms(object)
- attr(x, "assign") <- attrassigndefault(x, tt)
- }
+ x = vmodel.matrix.default(object, data=data,
+ contrasts.arg = kill.con)
+ tt = terms(object)
+ attr(x, "assign") = attrassigndefault(x, tt)
}
- if(type == "lm2" && !length(Xm2)) {
+ if((type == "lm2" || type == "bothlmlm2") && !length(Xm2)) {
object.copy2 = object
- object.copy2 at call = object.copy2 at callXm2
data = model.frame(object.copy2, xlev=object.copy2 at xlevels, ...)
kill.con = if(length(object.copy2 at contrasts))
object.copy2 at contrasts else NULL
- Xm2 <- vmodel.matrix.default(object.copy2, data=data,
- contrasts.arg = kill.con)
- if(is.R()) {
-
- if(TRUE) {
- attrassigndefault <- function(mmat, tt) {
- if (!inherits(tt, "terms"))
- stop("need terms object")
- aa <- attr(mmat, "assign")
- if (is.null(aa))
- stop("argument is not really a model matrix")
- ll <- attr(tt, "term.labels")
- if (attr(tt, "intercept") > 0)
- ll <- c("(Intercept)", ll)
- aaa <- factor(aa, labels = ll)
- split(order(aa), aaa)
- }
- }
- ttXm2 = terms(object.copy2 at misc$form2)
- attr(Xm2, "assign") <- attrassigndefault(Xm2, ttXm2)
- }
+ Xm2 = vmodel.matrix.default(object.copy2, data=data,
+ contrasts.arg = kill.con)
+ ttXm2 = terms(object.copy2 at misc$form2)
+ attr(Xm2, "assign") = attrassigndefault(Xm2, ttXm2)
}
if(type == "lm") {
return(x)
- } else
- if(type == "lm2") {
+ } else if(type == "lm2") {
return(Xm2)
+ } else if(type == "bothlmlm2") {
+ return(list(X=x, Xm2=Xm2))
} else {
- M <- object at misc$M
- Blist <- object at constraints # Is NULL if there were no constraints?
- lm2vlm.model.matrix(x=x, Blist=Blist, xij=object at control$xij)
+ M = object at misc$M
+ Blist = object at constraints # Is NULL if there were no constraints?
+ lm2vlm.model.matrix(x=x, Blist=Blist, xij=object at control$xij, Xm2=Xm2)
}
}
@@ -285,32 +253,31 @@ setMethod("model.matrix", "vlm", function(object, ...)
- if(is.R()) {
-model.framevlm = function(object, ...) {
+ model.framevlm = function(object,
+ setupsmart=TRUE, wrapupsmart=TRUE, ...) {
- dots <- list(...)
- nargs <- dots[match(c("data", "na.action", "subset"), names(dots), 0)]
+ dots = list(...)
+ nargs = dots[match(c("data", "na.action", "subset"), names(dots), 0)]
if(length(nargs) || !length(object at model)) {
- fcall <- object at call
- fcall$method <- "model.frame"
- fcall[[1]] <- as.name("vlm")
+ fcall = object at call
+ fcall$method = "model.frame"
+ fcall[[1]] = as.name("vlm")
- fcall$smart <- FALSE
- if(length(object at smart.prediction)) {
+ fcall$smart = FALSE
+ if(setupsmart && length(object at smart.prediction)) {
setup.smart("read", smart.prediction=object at smart.prediction)
}
- fcall[names(nargs)] <- nargs
- env <- environment(object at terms$terms) # @terms or @terms$terms ??
+ fcall[names(nargs)] = nargs
+ env = environment(object at terms$terms) # @terms or @terms$terms ??
if (is.null(env))
- env <- parent.frame()
+ env = parent.frame()
ans = eval(fcall, env, parent.frame())
- if(length(object at smart.prediction)) {
+ if(wrapupsmart && length(object at smart.prediction)) {
wrapup.smart()
}
-
ans
} else object at model
}
@@ -323,42 +290,46 @@ if(!isGeneric("model.frame"))
setMethod("model.frame", "vlm", function(formula, ...)
model.framevlm(object=formula, ...))
-}
-
-vmodel.matrix.default = function (object, data = environment(object),
- contrasts.arg = NULL, xlev = NULL, ...) {
- t <- terms(object)
- if (is.null(attr(data, "terms")))
- data <- model.frame(object, data, xlev = xlev) else
- {
- reorder <- match(sapply(attr(t, "variables"), deparse,
+ vmodel.matrix.default = function(object, data = environment(object),
+ contrasts.arg = NULL, xlev = NULL, ...) {
+ t <- if (missing(data)) terms(object) else terms(object, data = data)
+ if (is.null(attr(data, "terms")))
+ data <- model.frame(object, data, xlev = xlev) else {
+ reorder <- match(sapply(attr(t, "variables"), deparse,
width.cutoff = 500)[-1], names(data))
- if (any(is.na(reorder)))
+ if (any(is.na(reorder)))
stop("model frame and formula mismatch in model.matrix()")
- data <- data[, reorder, drop = FALSE]
+ if (!identical(reorder, seq_len(ncol(data))))
+ data <- data[, reorder, drop = FALSE]
}
int <- attr(t, "response")
if (length(data)) {
contr.funs <- as.character(getOption("contrasts"))
+ namD <- names(data)
+ for (i in namD) if (is.character(data[[i]])) {
+ data[[i]] <- factor(data[[i]])
+ warning(gettextf("variable '%s' converted to a factor",
+ i), domain = NA)
+ }
isF <- sapply(data, function(x) is.factor(x) || is.logical(x))
isF[int] <- FALSE
isOF <- sapply(data, is.ordered)
- namD <- names(data)
- for (nn in namD[isF]) if (is.null(attr(data[[nn]], "contrasts")))
+ for (nn in namD[isF]) if (is.null(attr(data[[nn]], "contrasts")))
contrasts(data[[nn]]) <- contr.funs[1 + isOF[nn]]
if (!is.null(contrasts.arg) && is.list(contrasts.arg)) {
- if (is.null(namC <- names(contrasts.arg)))
- stop("invalid contrasts argument")
+ if (is.null(namC <- names(contrasts.arg)))
+ stop("invalid 'contrasts.arg' argument")
for (nn in namC) {
- if (is.na(ni <- match(nn, namD)))
- warning(paste("Variable", nn,
- "absent, contrast ignored")) else {
+ if (is.na(ni <- match(nn, namD)))
+ warning(gettextf(
+ "variable '%s' is absent, its contrast will be ignored",
+ nn), domain = NA) else {
ca <- contrasts.arg[[nn]]
- if (is.matrix(ca))
+ if (is.matrix(ca))
contrasts(data[[ni]], ncol(ca)) <- ca else
contrasts(data[[ni]]) <- contrasts.arg[[nn]]
}
@@ -369,7 +340,7 @@ vmodel.matrix.default = function (object, data = environment(object),
data <- list(x = rep(0, nrow(data)))
}
ans <- .Internal(model.matrix(t, data))
- cons <- if (any(isF))
+ cons <- if (any(isF))
lapply(data[isF], function(x) attr(x, "contrasts")) else NULL
attr(ans, "contrasts") <- cons
ans
@@ -380,5 +351,3 @@ vmodel.matrix.default = function (object, data = environment(object),
-
-
diff --git a/R/plot.vglm.q b/R/plot.vglm.q
index 0561677..fedfa2c 100644
--- a/R/plot.vglm.q
+++ b/R/plot.vglm.q
@@ -8,18 +8,18 @@
-if(!exists("is.R")) is.R <- function()
- exists("version") && !is.null(version$language) && version$language=="R"
-plotvgam <- function(x, newdata=NULL, y=NULL, residuals=NULL, rugplot=TRUE,
- se= FALSE, scale=0,
- raw= TRUE, offset.arg=0, deriv.arg=0, overlay= FALSE,
- type.residuals=c("deviance","working","pearson","response"),
- plot.arg= TRUE, which.term=NULL, which.cf=NULL,
- control=plotvgam.control(...),
- ...)
+
+
+plotvgam = function(x, newdata=NULL, y=NULL, residuals=NULL, rugplot=TRUE,
+ se=FALSE, scale=0,
+ raw=TRUE, offset.arg=0, deriv.arg=0, overlay=FALSE,
+ type.residuals=c("deviance","working","pearson","response"),
+ plot.arg=TRUE, which.term=NULL, which.cf=NULL,
+ control=plotvgam.control(...),
+ varxij = 1, ...)
{
missing.control = missing(control)
@@ -27,6 +27,13 @@ plotvgam <- function(x, newdata=NULL, y=NULL, residuals=NULL, rugplot=TRUE,
na.act = x at na.action
x at na.action = list() # Don't want NAs returned from predict() or resid()
+ if(!is.Numeric(varxij, integ=TRUE, allow=1, posit=TRUE))
+ stop("bad input for the 'varxij' argument")
+ if(any(slotNames(x) == "control")) {
+ x at control$varxij = varxij
+ }
+
+
missing.type.residuals = missing(type.residuals)
if(mode(type.residuals) != "character" && mode(type.residuals) != "name")
type.residuals <- as.character(substitute(type.residuals))
@@ -35,23 +42,20 @@ plotvgam <- function(x, newdata=NULL, y=NULL, residuals=NULL, rugplot=TRUE,
c("deviance","working","pearson","response"))[1]
- if(!is.numeric(deriv.arg) || deriv.arg<0 ||
- deriv.arg!=round(deriv.arg) || length(deriv.arg)>1)
- stop("bad input for the deriv argument")
+ if(!is.Numeric(deriv.arg, integ=TRUE, allow=1) || deriv.arg<0)
+ stop("bad input for the 'deriv' argument")
if(se && deriv.arg>0) {
- warning("standard errors not available with derivatives. Setting se=FALSE")
+ warning("standard errors not available with derivatives. ",
+ "Setting 'se=FALSE'")
se = FALSE
}
preplot.object <- x at preplot
if(!length(preplot.object)) {
- if(is.R()) {
- preplot.object <- preplotvgam(x, newdata=newdata,
- raw=raw, deriv=deriv.arg, se=se)
- } else {
- preplot.object <- preplotvgam(x, raw=raw, deriv=deriv.arg, se=se)
- }
+ preplot.object <- preplotvgam(x, newdata=newdata,
+ raw=raw, deriv=deriv.arg, se=se,
+ varxij=varxij)
}
x at preplot = preplot.object
@@ -65,7 +69,7 @@ plotvgam <- function(x, newdata=NULL, y=NULL, residuals=NULL, rugplot=TRUE,
} else {
residuals=resid(x,typ=type.residuals) #Get the prespecified type
if(!length(residuals))
- warning("residuals are NULL. Ignoring residuals=T")
+ warning("residuals are NULL. Ignoring 'residuals=TRUE'")
}
} else {
residuals <- NULL
@@ -73,12 +77,8 @@ plotvgam <- function(x, newdata=NULL, y=NULL, residuals=NULL, rugplot=TRUE,
}
if(!missing.control) {
- control = if(is.R()) c(plotvgam.control(.include.dots= FALSE, ...),
- control,
- plotvgam.control(...)) else
- c(plotvgam.control(.include.dots= FALSE, ...),
- control,
- plotvgam.control(...))
+ control = c(plotvgam.control(.include.dots= FALSE, ...),
+ control, plotvgam.control(...))
}
x at post$plotvgam.control = control # Add it to the object
@@ -100,7 +100,7 @@ plotvgam <- function(x, newdata=NULL, y=NULL, residuals=NULL, rugplot=TRUE,
ylim.scale <- function(ylim, scale=0) {
if(length(ylim) != 2 || ylim[2] < ylim[1])
- stop("error in ylim")
+ stop("error in 'ylim'")
try <- ylim[2] - ylim[1]
if(try > scale) ylim else
c(ylim[1]+ylim[2]-scale, ylim[1]+ylim[2]+scale) / 2
@@ -109,149 +109,140 @@ ylim.scale <- function(ylim, scale=0) {
+getallresponses = function(xij) {
+ if(!is.list(xij)) return("")
+ allterms = lapply(xij, terms)
+ allres = NULL
+ for(ii in 1:length(xij))
+ allres = c(allres, as.character(attr(allterms[[ii]],"variables"))[2])
+ allres
+}
-preplotvgam = function(object, newdata=NULL,
- terms=if(is.R()) attr((object at terms)$terms, "term.labels") else
- v.labels.lm(object),
- raw= TRUE, deriv.arg=deriv.arg, se= FALSE)
-{
+
+
+headpreplotvgam = function(object, newdata=NULL,
+ terms=attr((object at terms)$terms, "term.labels"),
+ raw=TRUE, deriv.arg=deriv.arg, se=FALSE,
+ varxij = 1) {
Terms <- terms(object) # 11/8/03; object at terms$terms
aa <- attributes(Terms)
- Call <- object at call
all.terms <- labels(Terms)
- xvars <- if(is.R()) parse(text=all.terms) else as.vector(Terms)
+ xvars <- parse(text=all.terms)
+
+
- if(is.R()) {
- names(xvars) <- all.terms
- terms <- sapply(terms, match.arg, all.terms)
- } else {
- names(xvars) <- all.terms
- terms <- match.arg(terms, all.terms)
- }
+ names(xvars) <- all.terms
+ terms <- sapply(terms, match.arg, all.terms)
Interactions <- aa$order > 1
if(any(Interactions)) {
- if(is.R())
- stop("can't handle interactions")
- all.terms <- all.terms[!Interactions]
- TM <- match(terms, all.terms, 0)
- if(!all(TM)) {
- terms <- terms[TM > 0]
- warning("No terms saved for \"a:b\" style interaction terms")
- }
+ stop("cannot handle interactions")
}
- if(is.R()) {
- xvars <- xvars[terms]
- xnames <- as.list(terms)
- names(xnames) <- terms
- modes <- sapply(xvars, mode)
- for(term in terms[modes != "name"]) {
- evars <- all.names(xvars[term], functions= FALSE, unique= TRUE)
- if(!length(evars))
- next
- xnames[[term]] <- evars
- evars <- parse(text=evars)
- if(length(evars) == 1)
- evars <- evars[[1]]
- else {
- evars <- c(as.name("list"), evars)
- mode(evars) <- "call"
- }
- xvars[[term]] <- evars
- }
-
-
- xvars <- c(as.name("list"), xvars)
- mode(xvars) <- "call"
- if(length(newdata)) {
- xvars <- eval(xvars, newdata)
+ xvars <- xvars[terms]
+ xnames <- as.list(terms)
+ names(xnames) <- terms
+ modes <- sapply(xvars, mode)
+ for(term in terms[modes != "name"]) {
+ evars <- all.names(xvars[term], functions= FALSE, unique= TRUE)
+ if(!length(evars))
+ next
+ xnames[[term]] <- evars
+ evars <- parse(text=evars)
+ if(length(evars) == 1) {
+ evars <- evars[[1]]
+ } else if( length(evars) > 1 &&
+ any(getallresponses(object at control$xij) == names(xnames)) ) {
+
+
+
+
+ evars <- evars[[varxij]]
} else {
- if(!is.null(Call$subset) | !is.null(Call$na.action) |
- !is.null(options("na.action")[[1]])) {
- Rownames <- names(fitted(object))
- if(!(Rl <- length(Rownames)))
- Rownames <- dimnames(fitted(object))[[1]]
-
- if(length(object at x) && !(Rl <- length(Rownames)))
- Rownames <- (dimnames(object at x))[[1]]
- if(length(object at y) && !(Rl <- length(Rownames)))
- Rownames <- (dimnames(object at y))[[1]]
-
- if(!(Rl <- length(Rownames)))
- stop(paste("need to have names for fitted.values",
- "when call has a subset or na.action argument"))
-
- form <- paste("~", unlist(xnames), collapse="+")
- Mcall <- c(as.name("model.frame"), list(formula =
- terms(as.formula(form)),
- subset = Rownames, na.action = function(x) x))
- mode(Mcall) <- "call"
- Mcall$data <- Call$data
- xvars <- eval(xvars, eval(Mcall))
- } else {
- ecall <- substitute(eval(expression(xvars)))
- ecall$local <- Call$data
- xvars <- eval(ecall)
- }
- }
- } else {
- xvars <- xvars[terms]
- xnames <- as.list(terms)
- names(xnames) <- terms
- modes <- sapply(xvars, mode)
- for(term in terms[modes != "name"]) {
- evars <- all.names(xvars[term], functions= FALSE, unique= TRUE)
- if(!length(evars))
- next
- xnames[[term]] <- evars
- evars <- parse(text=evars)
- if(length(evars) == 1)
- evars <- evars[[1]]
- else {
- evars <- c(as.name("list"), evars)
- mode(evars) <- "call"
- }
- xvars[[term]] <- evars
+ evars <- c(as.name("list"), evars)
+ mode(evars) <- "call"
}
+ xvars[[term]] <- evars
+ }
- act.vars <- as.character(xvars)
- xvars <- c(as.name("list"), xvars)
- mode(xvars) <- "call"
- if(length(newdata)) {
- xvars <- eval(xvars, newdata)
+ xvars <- c(as.name("list"), xvars)
+ mode(xvars) <- "call"
+ if(length(newdata)) {
+ xvars <- eval(xvars, newdata)
+ } else {
+ Call <- object at call
+ if(!is.null(Call$subset) | !is.null(Call$na.action) |
+ !is.null(options("na.action")[[1]])) {
+ Rownames <- names(fitted(object))
+ if(!(Rl <- length(Rownames)))
+ Rownames <- dimnames(fitted(object))[[1]]
+
+ if(length(object at x) && !(Rl <- length(Rownames)))
+ Rownames <- (dimnames(object at x))[[1]]
+ if(length(object at y) && !(Rl <- length(Rownames)))
+ Rownames <- (dimnames(object at y))[[1]]
+
+ if(!(Rl <- length(Rownames)))
+ stop("need to have names for fitted.values ",
+ "when call has a 'subset' or 'na.action' argument")
+
+ form <- paste("~", unlist(xnames), collapse="+")
+ Mcall <- c(as.name("model.frame"), list(formula =
+ terms(as.formula(form)),
+ subset = Rownames, na.action = function(x) x))
+ mode(Mcall) <- "call"
+ Mcall$data <- Call$data
+ xvars <- eval(xvars, eval(Mcall))
} else {
- if(!is.null(Call$subset) | !is.null(Call$na.action) |
- !is.null(options("na.action")[[1]])) {
- Rownames <- names(fitted(object))
- if(!(Rl <- length(Rownames)))
- Rownames <- dimnames(fitted(object))[[1]]
- if(!(Rl <- length(Rownames)))
- stop(paste("need to have names for fitted.values",
- "when call has a subset or na.action argument"))
- Mcall <- c(as.name("model.frame"), list(formula=
- terms.inner(parse(text=unlist(xnames))),
- subset=Rownames, na.action=function(x) x))
- mode(Mcall) <- "call"
- Mcall$data <- Call$data
- xvars <- eval(xvars, eval(Mcall))
- } else {
- ecall <- substitute(eval(expression(xvars)))
- ecall$local <- Call$data
- xvars <- eval(ecall)
- }
+ ecall <- substitute(eval(expression(xvars)))
+ ecall$local <- Call$data
+ xvars <- eval(ecall)
}
}
+ list(xnames=xnames, xvars=xvars)
+}
- if(length(newdata)) {
- pred <- predict(object, newdata, type="terms",
- raw=raw, se.fit=se, deriv.arg=deriv.arg)
+
+
+
+
+
+
+
+preplotvgam = function(object, newdata=NULL,
+ terms=attr((object at terms)$terms, "term.labels"),
+ raw=TRUE, deriv.arg=deriv.arg, se=FALSE,
+ varxij=1) {
+
+ result1 = headpreplotvgam(object, newdata=newdata, terms=terms,
+ raw=raw, deriv.arg=deriv.arg, se=se,
+ varxij=varxij)
+
+ xvars = result1$xvars
+ xnames = result1$xnames
+
+
+
+ if(FALSE && !is.null(object at control$jix)) {
+
+
+
+
+ myxij = object at control$xij
+ if(length(myxij)) {
+ }
+
+ }
+
+ pred <- if(length(newdata)) {
+ predict(object, newdata, type="terms",
+ raw=raw, se.fit=se, deriv.arg=deriv.arg)
} else {
- pred <- predict(object, type="terms",
- raw=raw, se.fit=se, deriv.arg=deriv.arg)
+ predict(object, type="terms",
+ raw=raw, se.fit=se, deriv.arg=deriv.arg)
}
fits <- if(is.atomic(pred)) NULL else pred$fit
@@ -260,127 +251,45 @@ preplotvgam = function(object, newdata=NULL,
fits <- pred
fred <- attr(fits, "vterm.assign") # NULL for M==1
- if(FALSE && is.R()) {
- xnames <- vector("list", length(fred))
- names(xnames) <- names(fred)
- }
-
gamplot <- xnames
- if(FALSE && is.R()) {
- s.x = if(any(slotNames(object)=="s.xargument")) object at s.xargument else
- NULL
- n.s.x = names(s.x)
- }
-
- loop.var = if(is.R()) names(fred) else terms
+ loop.var = names(fred)
for(term in loop.var) {
- if(FALSE && is.R()) {
- useterm <- term
- if(length(n.s.x) && any(n.s.x == useterm))
- useterm <- s.x[useterm]
- innerx <- parse(text=useterm)
- innerx <- all.vars(innerx)
- if(length(innerx) == 0)
- warning(paste("couldn't extract variable from", useterm, "\n"))
- if(length(innerx) > 1) {
- warning(paste("using the first of \"", innerx,
- "\" terms\n", sep=""))
- innerx <- innerx[1]
- }
- }
-
- if(FALSE && is.R()) {
- .VGAM.x <- if(length(newdata)) newdata[[innerx]] else {
- if(( is.R() && object at misc$dataname != "list") ||
- (!is.R() && object at misc$dataname != "sys.parent")) {
- mytext <- paste(object at misc$dataname,
- "[['", innerx, "']]", sep="")
- } else {
- mytext <- innerx
- }
- getx <- parse(text=mytext)
- .VGAM.ans = if(exists(x=mytext, envir = .GlobalEnv))
- eval(getx, envir = .GlobalEnv) else eval(getx)
- .VGAM.ans
- }
- } # else {
-
.VGAM.x <- xvars[[term]]
-
- if(FALSE && is.R()) {
- class(.VGAM.x)=unique(c(class(.VGAM.x),data.class(unclass(.VGAM.x))))
- }
-
myylab = if(all(substring(term, 1:nchar(term), 1:nchar(term)) != "("))
paste("partial for", term) else term
TT <- list(x = .VGAM.x,
- y = fits[, if(is.null(fred)) term else fred[[term]]],
+ y = fits[,(if(is.null(fred)) term else fred[[term]])],
se.y = if(is.null(se.fit)) NULL else
- se.fit[, if(is.null(fred)) term else fred[[term]]],
+ se.fit[,(if(is.null(fred)) term else fred[[term]])],
xlab = xnames[[term]],
ylab = myylab)
class(TT) <- "preplotvgam"
gamplot[[term]] <- TT
}
- if(!is.R())
- class(gamplot) <- "preplotvgam" # Commented out 8/6/02
invisible(gamplot)
}
-if(!is.R())
-v.labels.lm <- function(object, ...)
-{
- TL <- terms(object) # 11/8/03; object at terms$terms
- if(!is.null(TL)) {
- TL <- attr(TL, "term.labels")
- TA <- object at assign
- if(!is.null(TA)) {
- TA <- names(TA)
- TL <- TL[match(TA, TL, 0.)]
- }
- }
- TL
-}
-
-
-plotvlm <- function(object, residuals=NULL, rugplot= FALSE, ...)
-{
+plotvlm <- function(object, residuals=NULL, rugplot= FALSE, ...) {
stop("sorry, this function hasn't been written yet")
}
plotvglm <- function(x, residuals=NULL, smooths= FALSE,
- rugplot= FALSE, id.n= FALSE, ...)
-{
+ rugplot= FALSE, id.n= FALSE, ...) {
stop("this function hasn't been written yet")
-
-
-
- invisible(x)
}
-if(!is.R()) jitter <- function(x, factor=1)
-{
-
- z <- diff(range(x[!is.na(x)]))
- if(all(z==0))
- return(x)
- z <- factor * (z/50)
- x + runif(length(x), - z, z)
-}
-
-
plotpreplotvgam <- function(x, y=NULL, residuals=NULL,
rugplot= TRUE, se= FALSE, scale=0,
- offset.arg=0, deriv.arg=0, overlay= FALSE,
- which.term=NULL, which.cf=NULL,
+ offset.arg=0, deriv.arg=0, overlay= FALSE,
+ which.term=NULL, which.cf=NULL,
control=NULL)
{
listof <- inherits(x[[1]], "preplotvgam")
@@ -389,14 +298,14 @@ plotpreplotvgam <- function(x, y=NULL, residuals=NULL,
if(is.null(which.term))
which.term = TT # Plot them all
plot.no = 0
- for(i in TT) {
- plot.no = plot.no + 1
- if((is.character(which.term) && any(which.term==i)) ||
- (is.numeric(which.term) && any(which.term==plot.no)))
- plotpreplotvgam(x[[i]], y=NULL,
- residuals, rugplot, se, scale,
+ for(ii in TT) {
+ plot.no = plot.no + 1
+ if((is.character(which.term) && any(which.term == ii)) ||
+ (is.numeric(which.term) && any(which.term == plot.no)))
+ plotpreplotvgam(x[[ii]], y=NULL,
+ residuals, rugplot=rugplot, se=se, scale=scale,
offset.arg=offset.arg,
- deriv.arg=deriv.arg, overlay=overlay,
+ deriv.arg=deriv.arg, overlay=overlay,
which.cf=which.cf,
control=control)
}
@@ -436,10 +345,8 @@ vplot.default <- function(x, y, se.y=NULL, xlab="", ylab="",
residuals, rugplot, scale, se,
offset.arg=offset.arg, overlay=overlay, ...)
} else {
- warning(paste("The \"x\" component of \"", ylab,
- "\" has class \"",
- paste(class(x), collapse="\", \""),
- "\"; no vplot() methods available", sep=""))
+ warning("The \"x\" component of \"", ylab, "\" has class \"",
+ class(x), "\"; no vplot() methods available")
}
)
}
@@ -466,7 +373,7 @@ plotvgam.control = function(
which.cf=NULL,
xlim=NULL, ylim=NULL,
llty=par()$lty,
- slty=if(is.R()) "dashed" else 3,
+ slty="dashed",
pcex=par()$cex,
pch=par()$pch,
pcol=par()$col,
@@ -498,38 +405,34 @@ plotvgam.control = function(
} else {
default.vals = plotvgam.control()
return.list = list()
- for(i in names(default.vals)) {
- replace.val =
- if(is.R())
- !((length(ans[[i]]) == length(default.vals[[i]])) &&
- (length(default.vals[[i]]) > 0) &&
- (is.logical(all.equal(ans[[i]], default.vals[[i]]))) &&
- all.equal(ans[[i]], default.vals[[i]])) else
- !((length(ans[[i]]) == length(default.vals[[i]])) &&
- (length(default.vals[[i]]) > 0) &&
- all(ans[[i]] == default.vals[[i]]))
-
- if(FALSE && replace.val) {
- }
+ for(ii in names(default.vals)) {
+ replace.val = !((length(ans[[ii]]) == length(default.vals[[ii]])) &&
+ (length(default.vals[[ii]]) > 0) &&
+ (is.logical(all.equal(ans[[ii]], default.vals[[ii]]))) &&
+ all.equal(ans[[ii]], default.vals[[ii]]))
+
if(replace.val)
- return.list[[i]] = ans[[i]]
+ return.list[[ii]] = ans[[ii]]
}
if(length(return.list)) {
names(return.list) = names(return.list)
return.list
- } else NULL
+ } else NULL
}
}
+
+
+
+
vplot.numeric <- function(x, y, se.y=NULL, xlab, ylab,
residuals=NULL, rugplot= FALSE, se= FALSE, scale=0,
offset.arg=0, deriv.arg=0, overlay= FALSE,
which.cf=NULL,
xlim=NULL, ylim=NULL,
-
llty=par()$lty,
- slty=if(is.R()) "dashed" else 3,
+ slty="dashed",
pcex=par()$cex,
pch=par()$pch,
pcol=par()$col,
@@ -542,7 +445,6 @@ vplot.numeric <- function(x, y, se.y=NULL, xlab, ylab,
one.at.a.time= FALSE,
noxmean = FALSE,
separator = ":",
-
...)
{
@@ -552,7 +454,7 @@ vplot.numeric <- function(x, y, se.y=NULL, xlab, ylab,
ylim0 <- ylim
if(length(y)/length(x) != round(length(y)/length(x)))
- stop("length of x and y do not seem to match")
+ stop("length of 'x' and 'y' do not seem to match")
y <- as.matrix(y)
if(!length(which.cf))
which.cf = 1:ncol(y) # Added 7/8/04
@@ -608,8 +510,8 @@ vplot.numeric <- function(x, y, se.y=NULL, xlab, ylab,
ylim <- range(c(ylim, residuals[,which.cf]), na.rm= TRUE)
} else {
residuals <- NULL
- warning(paste("Residuals do not match x in \"", ylab,
- "\" preplot object", sep=""))
+ warning("Residuals do not match 'x' in \"", ylab,
+ "\" preplot object")
}
}
@@ -624,27 +526,21 @@ vplot.numeric <- function(x, y, se.y=NULL, xlab, ylab,
if(overlay) {
if(!length(which.cf)) which.cf = 1:ncol(uy) # Added 7/8/04
if(!add.arg) {
- if(is.R()) {
- matplot(ux, uy[,which.cf], type="n",
- xlim=xlim, ylim=ylim,
- xlab=xlab, ylab=ylab, ...)
- } else {
- matplot(ux, uy[,which.cf], type="n",
- xlim=xlim, ylim=ylim,
- xlab=xlab, ylab=ylab, ...)
- }
+ matplot(ux, uy[,which.cf], type="n",
+ xlim=xlim, ylim=ylim,
+ xlab=xlab, ylab=ylab, ...)
}
matlines(ux, uy[,which.cf],
lwd=llwd, col=lcol, lty=llty)
if(!is.null(residuals))
- if(ncol(y)==1) {
+ if(ncol(y) == 1) {
points(x, residuals, pch=pch, col=pcol, cex=pcex)
} else {
matpoints(x, residuals[,which.cf],
pch=pch, col=pcol, cex=pcex) # add.arg=TRUE,
}
if(rugplot)
- if(is.R()) rug(jx, col=rcol) else rug(jx)
+ rug(jx, col=rcol)
if(se && !is.null(se.y)) {
matlines(ux, se.upper[,which.cf], lty= slty, lwd=slwd, col=scol)
matlines(ux, se.lower[,which.cf], lty= slty, lwd=slwd, col=scol)
@@ -684,15 +580,9 @@ vplot.numeric <- function(x, y, se.y=NULL, xlab, ylab,
if(one.at.a.time) {
readline("Hit return for the next plot ")
}
- if(is.R()) {
- plot(ux, uy[,ii], type="n",
- xlim=xlim, ylim=ylim,
- xlab=xlab, ylab=YLAB, ...)
- } else {
- plot(ux, uy[,ii], type="n",
- xlim=xlim, ylim=ylim,
- xlab=xlab, ylab=YLAB, ...)
- }
+ plot(ux, uy[,ii], type="n",
+ xlim=xlim, ylim=ylim,
+ xlab=xlab, ylab=YLAB, ...)
}
lines(ux, uy[,ii],
lwd=llwd[ii], col=lcol[ii], lty=llty[ii])
@@ -700,7 +590,7 @@ vplot.numeric <- function(x, y, se.y=NULL, xlab, ylab,
points(x, residuals[,ii], pch=pch[ii],
col=pcol[ii], cex=pcex[ii])
if(rugplot)
- if(is.R()) rug(jx, col=rcol[ii]) else rug(jx)
+ rug(jx, col=rcol[ii])
if(se && !is.null(se.y)) {
lines(ux, se.upper[,ii], lty=slty[ii], lwd=slwd[ii],
@@ -718,26 +608,24 @@ vplot.numeric <- function(x, y, se.y=NULL, xlab, ylab,
vplot.matrix <- function(x, y, se.y=NULL, xlab, ylab,
residuals=NULL, rugplot= FALSE, scale=0, se= FALSE,
offset.arg=0, deriv.arg=0, overlay= FALSE,
- which.cf=NULL, ...)
-{
+ which.cf=NULL, ...) {
stop("You shouldn't ever call this function!")
}
add.hookey <- function(ch, deriv.arg=0) {
- if(!is.numeric(deriv.arg) || deriv.arg<0 ||
- deriv.arg!=round(deriv.arg) || length(deriv.arg)>1)
- stop("bad input for the deriv argument")
+ if(!is.Numeric(deriv.arg, integ=TRUE, allow=1) || deriv.arg<0)
+ stop("bad input for the 'deriv' argument")
- if(deriv.arg==0)
+ if(deriv.arg == 0)
return(ch)
hookey <- switch(deriv.arg, "'", "''", "'''", "''''",
"'''''", stop("too high a derivative"))
nc <- nchar(ch)
sub <- substring(ch, 1:nc, 1:nc)
- if(nc >= 2 && sub[1]=="s" && sub[2]=="(") {
+ if(nc >= 2 && sub[1] == "s" && sub[2] == "(") {
paste("s", hookey, substring(ch, 2, nc), sep="", coll="")
} else {
paste(ch, hookey, sep="", collapse="")
@@ -756,7 +644,7 @@ vplot.factor <- function(x, y, se.y=NULL, xlab, ylab,
return(NULL)
if(length(y)/length(x) != round(length(y)/length(x)))
- stop("length of x and y do not seem to match")
+ stop("length of 'x' and 'y' do not seem to match")
y <- as.matrix(y)
if(!is.null(se.y))
@@ -773,23 +661,23 @@ vplot.factor <- function(x, y, se.y=NULL, xlab, ylab,
}
if(overlay) {
vvplot.factor(x, y,
- se.y=if(is.null(se.y)) NULL else se.y,
- xlab=xlab, ylab=ylab,
- residuals=residuals,
- rugplot=rugplot, scale=scale,
- se=se, xlim=xlim, ylim=ylim, ...)
+ se.y=if(is.null(se.y)) NULL else se.y,
+ xlab=xlab, ylab=ylab,
+ residuals=residuals,
+ rugplot=rugplot, scale=scale,
+ se=se, xlim=xlim, ylim=ylim, ...)
} else {
- for(i in 1:ncol(y)) {
+ for(ii in 1:ncol(y)) {
ylab <- rep(ylab, len=ncol(y))
- if(ncol(y)>1)
+ if(ncol(y) > 1)
ylab <- dimnames(y)[[2]]
- vvplot.factor(x, y[,i,drop= FALSE],
- se.y=if(is.null(se.y)) NULL else se.y[,i,drop= FALSE],
- xlab=xlab, ylab=ylab[i],
- residuals= if(is.null(residuals))
- NULL else residuals[,i,drop= FALSE],
- rugplot=rugplot, scale=scale,
- se=se, xlim=xlim, ylim=ylim, ...)
+ vvplot.factor(x, y[,ii,drop= FALSE],
+ se.y=if(is.null(se.y)) NULL else se.y[,ii,drop= FALSE],
+ xlab=xlab, ylab=ylab[ii],
+ residuals= if(is.null(residuals))
+ NULL else residuals[,ii,drop= FALSE],
+ rugplot=rugplot, scale=scale,
+ se=se, xlim=xlim, ylim=ylim, ...)
}
}
@@ -836,8 +724,8 @@ vvplot.factor <- function(x, y, se.y=NULL, xlab, ylab,
ylim <- range(c(ylim, residuals))
} else {
residuals <- NULL
- warning(paste("Residuals do not match x in \"", ylab,
- "\" preplot object", sep=""))
+ warning("Residuals do not match 'x' in \"", ylab,
+ "\" preplot object")
}
}
ylim <- ylim.scale(ylim, scale)
@@ -854,11 +742,11 @@ vvplot.factor <- function(x, y, se.y=NULL, xlab, ylab,
about <- function(ux, M, Delta=1/M) {
- if(M==1) return(cbind(ux))
+ if(M == 1) return(cbind(ux))
ans <- matrix(as.numeric(NA), length(ux), M)
grid <- seq(-Delta, Delta, len=M)
- for(i in 1:M) {
- ans[,i] <- ux + grid[i]
+ for(ii in 1:M) {
+ ans[,ii] <- ux + grid[ii]
}
ans
}
@@ -866,42 +754,34 @@ vvplot.factor <- function(x, y, se.y=NULL, xlab, ylab,
uxx <- about(ux, M, Delta=min(delta))
xlim <- range(c(xlim, uxx))
- if(is.R()) {
- matplot(ux, uy, ylim=ylim, xlim=xlim, xlab="", type="n",
- ylab=ylab, axes= FALSE, frame.plot=TRUE, ...)
- mtext(xlab, 1, 2, adj=0.5)
- axis(side=2)
+ matplot(ux, uy, ylim=ylim, xlim=xlim, xlab="", type="n",
+ ylab=ylab, axes= FALSE, frame.plot=TRUE, ...)
+ mtext(xlab, 1, 2, adj=0.5)
+ axis(side=2)
+ lpos <- par("mar")[3]
+ mtext(Levels, side=3, line=lpos/2, at=ux, adj=0.5, srt=45)
- lpos <- par("mar")[3]
- mtext(Levels, side=3, line=lpos/2, at=ux, adj=0.5, srt=45)
- } else {
- matplot(ux, uy, ylim=ylim, xlim=xlim, xlab="", type="n",
- ylab=ylab, xaxt="c", ...) # xaxt="c", xaxt="n",
- mtext(xlab, 1, 2, adj=0.5)
- axis(side=3, at=ux, labels=Levels, srt=45, ticks= FALSE, adj=0)
- }
-
- for(i in 1:M)
- segments(uxx[,i] - 1.0 * delta, uy[,i],
- uxx[,i] + 1.0 * delta, uy[,i])
+ for(ii in 1:M)
+ segments(uxx[,ii] - 1.0 * delta, uy[,ii],
+ uxx[,ii] + 1.0 * delta, uy[,ii])
if(!is.null(residuals)) {
- for(i in 1:M) {
- jux <- uxx[,i]
+ for(ii in 1:M) {
+ jux <- uxx[,ii]
jux <- jux[codex]
jux <- jux + runif(length(jux), -0.7*min(delta), 0.7*min(delta))
- if(M==1) points(jux, residuals[,i]) else
- points(jux, residuals[,i], pch=as.character(i))
+ if(M == 1) points(jux, residuals[,ii]) else
+ points(jux, residuals[,ii], pch=as.character(ii))
}
}
if(rugplot)
rug(nnajx)
if(se) {
- for(i in 1:M) {
- segments(uxx[,i]+0.5*delta, se.upper[,i],
- uxx[,i]-0.5*delta, se.upper[,i])
- segments(uxx[,i]+0.5*delta, se.lower[,i],
- uxx[,i]-0.5*delta, se.lower[,i])
- segments(uxx[,i], se.lower[,i], uxx[,i], se.upper[,i], lty=2)
+ for(ii in 1:M) {
+ segments(uxx[,ii]+0.5*delta, se.upper[,ii],
+ uxx[,ii]-0.5*delta, se.upper[,ii])
+ segments(uxx[,ii]+0.5*delta, se.lower[,ii],
+ uxx[,ii]-0.5*delta, se.lower[,ii])
+ segments(uxx[,ii], se.lower[,ii], uxx[,ii], se.upper[,ii], lty=2)
}
}
invisible(diff(ylim))
@@ -923,40 +803,20 @@ setMethod("vplot", "numeric", function(x, ...)
setMethod("plot", "vlm",
function(x, y, ...) {
- if(!missing(y)) stop("can't process the \"y\" argument")
+ if(!missing(y)) stop("cannot process the 'y' argument")
invisible(plotvlm(x, y, ...))})
setMethod("plot", "vglm",
function(x, y, ...) {
- if(!missing(y)) stop("can't process the \"y\" argument")
+ if(!missing(y)) stop("cannot process the 'y' argument")
invisible(plotvglm(x, y, ...))})
setMethod("plot", "vgam",
function(x, y, ...) {
- if(!missing(y)) stop("can't process the \"y\" argument")
+ if(!missing(y)) stop("cannot process the 'y' argument")
invisible(plotvgam(x, ...))})
-if(FALSE)
-vmerge.list = function(list1, list2) {
-
-
-
-
- if(!is.list(list1) || !is.list(list1))
- stop("list1 and list2 must be lists")
-
- n1 = names(list1)
- n2 = names(list2)
- un12 = unique(c(n1, n2))
- ans = vector("list", length(un12))
- names(ans) = un12
- for(i in un12) {
- ans[[i]] = if(length(list1[[i]])) list1[[i]] else list2[[i]]
- }
- ans
-}
-
plotqrrvglm = function(object,
@@ -979,11 +839,11 @@ plotqrrvglm = function(object,
deviance="Deviance", working="Working")
done = 0
- for(r in 1:Rank)
- for(i in 1:M) {
- plot(Coef.object at lv[,r], res[,i],
- xlab=paste(xlab, if(Rank==1) "" else r, sep=""),
- ylab=my.ylab[i],
+ for(rr in 1:Rank)
+ for(ii in 1:M) {
+ plot(Coef.object at lv[,rr], res[,ii],
+ xlab=paste(xlab, if(Rank == 1) "" else rr, sep=""),
+ ylab=my.ylab[ii],
main = main, ...)
done = done + 1
if(done >= prod(par()$mfrow) && ask && done != Rank*M) {
diff --git a/R/predict.vgam.q b/R/predict.vgam.q
index 517d0a0..d95cf9a 100644
--- a/R/predict.vgam.q
+++ b/R/predict.vgam.q
@@ -27,24 +27,24 @@ predict.vgam <- function(object, newdata=NULL,
if(untransform && (type!="link" || se.fit || deriv.arg != 0 || offset != 0))
- stop(paste("argument \"untransform\"=TRUE only if type=\"link\",",
- "se.fit=FALSE, deriv=0"))
+ stop("argument 'untransform=TRUE' only if type='link', ",
+ "se.fit=FALSE, deriv=0")
if(raw && type!="terms")
- stop("raw=TRUE only works when type=\"terms\"")
+ stop("'raw=TRUE' only works when 'type=\"terms\"'")
if(!is.numeric(deriv.arg) || deriv.arg<0 ||
deriv.arg!=round(deriv.arg) || length(deriv.arg)>1)
- stop("bad input for the deriv argument")
+ stop("bad input for the 'deriv' argument")
if(deriv.arg>0 && type!="terms")
- stop("deriv>0 can only be specified if type=\"terms\"")
+ stop("'deriv>0' can only be specified if 'type=\"terms\"'")
if(deriv.arg != 0 && !(type!="response" && !se.fit))
- stop(paste("deriv= only works with type!=\"response\"and se.fit=FALSE"))
+ stop("'deriv=' only works with type!='response' and se.fit=FALSE")
if(se.fit && length(newdata))
- stop("can't specify se.fit=TRUE when there is newdata")
+ stop("cannot specify 'se.fit=TRUE' when 'newdata' is assigned")
tt <- terms(object) # 11/8/03; object at terms$terms
@@ -168,14 +168,13 @@ predict.vgam <- function(object, newdata=NULL,
xx <- newdata[,fred] # [,s.xargument[i]] # [,nindex[i]]
ox <- order(xx)
- raw.mat <- predictvsmooth.spline.fit(
+ rawMat <- predictvsmooth.spline.fit(
object at Bspline[[i]],
x=xx,
deriv=deriv.arg)$y
- eta.mat <- if(raw) raw.mat else
- raw.mat %*% t(Blist[[i]])
+ eta.mat <- if(raw) rawMat else (rawMat %*% t(Blist[[i]]))
if(type=="terms") {
ii <- tmp6[[i]]
@@ -190,7 +189,7 @@ predict.vgam <- function(object, newdata=NULL,
iii <- iii[[i]]
object at var[,iii,drop=FALSE]
} else
- stop("can't handle se's with raw=FALSE")
+ stop("cannot handle se's with raw=FALSE")
predictor$se.fit[,ii] <- (predictor$se.fit[,ii]^2 +
TS * temp.var)^0.5
diff --git a/R/predict.vglm.q b/R/predict.vglm.q
index 1acfa3d..4c4e144 100644
--- a/R/predict.vglm.q
+++ b/R/predict.vglm.q
@@ -3,136 +3,113 @@
-untransformVGAM = function(object, pred) {
- M = object at misc$M
- Links = object at misc$link
- if(length(Links) != M && length(Links) != 1)
- stop("cannot obtain the link functions to untransform the object")
- upred = pred
- earg = object at misc$earg
- for(ii in 1:M) {
- TTheta = pred[,ii] # Transformed theta
- newcall = paste(Links[ii], "(theta=TTheta, earg=earg, inverse=TRUE)", sep="")
- newcall = parse(text=newcall)[[1]]
- Theta = eval(newcall) # Theta, the untransformed parameter
- upred[,ii] = Theta
- }
- dmn2 = if(length(names(object at misc$link))) names(object at misc$link) else {
- if(length(object at misc$parameters)) object at misc$parameters else NULL}
- dimnames(upred) = list(dimnames(upred)[[1]], dmn2)
- upred
-}
-
-
-
-predict.vglm <- function(object,
- newdata=NULL,
- type=c("link", "response", "terms"),
- se.fit=FALSE,
- deriv=0,
- dispersion=NULL,
- untransform=FALSE,
- extra=object at extra, ...)
-{
+predict.vglm = function(object,
+ newdata=NULL,
+ type=c("link", "response", "terms"),
+ se.fit=FALSE,
+ deriv=0,
+ dispersion=NULL,
+ untransform=FALSE,
+ extra=object at extra, ...) {
na.act = object at na.action
object at na.action = list()
- mextra <- missing(extra)
- if(mextra) {
+ if(missing(extra)) {
}
- if(deriv!=0)
- stop("deriv must be 0 for predict.vglm")
+ if(deriv != 0)
+ stop("'deriv' must be 0 for predict.vglm()")
if(mode(type) != "character" && mode(type) != "name")
- type <- as.character(substitute(type))
- type <- match.arg(type, c("link", "response", "terms"))[1]
+ type = as.character(substitute(type))
+ type = match.arg(type, c("link", "response", "terms"))[1]
if(untransform && (type!="link" || se.fit || deriv != 0))
- stop(paste("argument \"untransform\"=TRUE only if type=\"link\",",
- "se.fit=FALSE, deriv=0"))
+ stop("argument 'untransform=TRUE' only if 'type=\"link\", ",
+ "se.fit=FALSE, deriv=0'")
-
- if(length(object at misc$form2)) {
- Xm2 = predict.vlm(object, newdata=newdata, type="lm2", ...)
- object at extra$Xm2 = Xm2
- }
-
-
- pred =
- if(se.fit)
+ pred = if(se.fit) {
switch(type,
response = {
- warning(paste("type=\"response\" and se.fit=TRUE not valid",
- "together; setting se.fit=FALSE"))
- se.fit <- FALSE
- predictor <- predict.vlm(object, newdata=newdata,
- type=type, se.fit=se.fit,
- deriv=deriv,
- dispersion=dispersion, ...)
- fv <- object at family@inverse(predictor, extra)
- dimnames(fv) <- list(dimnames(fv)[[1]],
+ warning("'type=\"response\"' and 'se.fit=TRUE' not valid ",
+ "together; setting 'se.fit=FALSE'")
+ se.fit = FALSE
+ predictor = predict.vlm(object, newdata=newdata,
+ type=type, se.fit=se.fit,
+ deriv=deriv,
+ dispersion=dispersion, ...)
+ fv = object at family@inverse(predictor, extra)
+ dimnames(fv) = list(dimnames(fv)[[1]],
dimnames(object at fitted.values)[[2]])
- fv
+ fv
},
link = {
- type <- "response"
- predict.vlm(object, newdata=newdata,
- type=type, se.fit=se.fit,
- deriv=deriv, dispersion=dispersion, ...)
+ predict.vlm(object, newdata=newdata,
+ type="response", se.fit=se.fit,
+ deriv=deriv, dispersion=dispersion, ...)
},
terms={
- predict.vlm(object, newdata=newdata,
- type=type, se.fit=se.fit,
- deriv=deriv, dispersion=dispersion, ...)
- }) else {
+ predict.vlm(object, newdata=newdata,
+ type=type, se.fit=se.fit,
+ deriv=deriv, dispersion=dispersion, ...)
+ }) # End of switch
+ } else {
if(is.null(newdata)) {
switch(type,
- link=object at predictors,
- response=object at fitted.values,
+ link = object at predictors,
+ response = object at fitted.values,
terms={
predict.vlm(object, newdata=newdata,
type=type, se.fit=se.fit,
deriv=deriv, dispersion=dispersion, ...)
})
} else {
- if(!(length(object at offset)==1 && object at offset==0))
+ if(!(length(object at offset) == 1 && object at offset == 0))
warning("zero offset used")
switch(type,
- response={
- predictor <- predict.vlm(object, newdata=newdata,
- type=type, se.fit=se.fit,
- deriv=deriv,
- dispersion=dispersion, ...)
- M <- object at misc$M
+ response={
- fv <- object at family@inverse(predictor, extra)
- if(M > 1 && is.matrix(fv)) {
- dimnames(fv) <- list(dimnames(fv)[[1]],
- dimnames(object at fitted.values)[[2]])
- } else {
- }
- fv
- },
- link = {
- type <- "response"
- predict.vlm(object, newdata=newdata,
- type=type, se.fit=se.fit,
- deriv=deriv, dispersion=dispersion, ...)
- },
- terms={
- predict.vlm(object, newdata=newdata,
- type=type, se.fit=se.fit,
- deriv=deriv, dispersion=dispersion, ...)
- }
- )
+
+ predictor = predict.vlm(object, newdata=newdata,
+ type=type, se.fit=se.fit,
+ deriv=deriv,
+ dispersion=dispersion, ...)
+
+
+
+ M = object at misc$M
+
+ fv = object at family@inverse(predictor, extra)
+ if(M > 1 && is.matrix(fv)) {
+ dimnames(fv) = list(dimnames(fv)[[1]],
+ dimnames(object at fitted.values)[[2]])
+ } else {
+ }
+ fv
+ },
+ link = {
+
+
+
+ predict.vlm(object, newdata=newdata,
+ type="response", se.fit=se.fit,
+ deriv=deriv, dispersion=dispersion, ...)
+
+
+
+ },
+ terms = {
+ predict.vlm(object, newdata=newdata,
+ type=type, se.fit=se.fit,
+ deriv=deriv, dispersion=dispersion, ...)
+ }) # End of switch
}
}
@@ -155,34 +132,35 @@ setMethod("predict", "vglm", function(object, ...)
+
+
predict.rrvglm = function(object,
- newdata=NULL,
- type=c("link", "response", "terms"),
- se.fit=FALSE,
- deriv=0,
- dispersion=NULL,
- extra=object at extra, ...)
-{
+ newdata=NULL,
+ type=c("link", "response", "terms"),
+ se.fit=FALSE,
+ deriv=0,
+ dispersion=NULL,
+ extra=object at extra, ...) {
if(se.fit) {
stop("11/8/03; predict.rrvglm(..., se.fit=TRUE) not complete yet")
pred =
switch(type,
response = {
- warning(paste("type=\"response\" and se.fit=TRUE not valid",
- "together; setting se.fit=FALSE"))
- se.fit <- FALSE
- predictor <- predict.vlm(object, newdata=newdata,
+ warning("'type=\"response\"' and 'se.fit=TRUE' not valid ",
+ "together; setting 'se.fit=FALSE'")
+ se.fit = FALSE
+ predictor = predict.vlm(object, newdata=newdata,
type=type, se.fit=se.fit,
deriv=deriv,
dispersion=dispersion, ...)
- fv <- object at family@inverse(predictor, extra)
- dimnames(fv) <- list(dimnames(fv)[[1]],
+ fv = object at family@inverse(predictor, extra)
+ dimnames(fv) = list(dimnames(fv)[[1]],
dimnames(object at fitted.values)[[2]])
fv
},
link = {
- type <- "response"
+ type = "response"
predict.vlm(object, newdata=newdata,
type=type, se.fit=se.fit,
deriv=deriv, dispersion=dispersion, ...)
@@ -220,22 +198,29 @@ setMethod("predict", "rrvglm", function(object, ...)
-if(FALSE) {
-predict.rrvlm = function(object,
- newdata=NULL,
- type=c("link", "response", "terms"),
- se.fit=FALSE,
- deriv=0,
- dispersion=NULL,
- extra=object at extra, ...)
-{
- stop("this function hasn't been written yet")
+untransformVGAM = function(object, pred) {
+ M = object at misc$M
+ Links = object at misc$link
+ if(length(Links) != M && length(Links) != 1)
+ stop("cannot obtain the link functions to untransform the object")
+ upred = pred
+ earg = object at misc$earg
+ for(ii in 1:M) {
+ TTheta = pred[,ii] # Transformed theta
+ newcall = paste(Links[ii], "(theta=TTheta, earg=earg, inverse=TRUE)", sep="")
+ newcall = parse(text=newcall)[[1]]
+ Theta = eval(newcall) # Theta, the untransformed parameter
+ upred[,ii] = Theta
+ }
+ dmn2 = if(length(names(object at misc$link))) names(object at misc$link) else {
+ if(length(object at misc$parameters)) object at misc$parameters else NULL
+ }
+ dimnames(upred) = list(dimnames(upred)[[1]], dmn2)
+ upred
}
-setMethod("predict", "rrvlm", function(object, ...)
- predict.rrvlm(object, ...))
-}
+
diff --git a/R/predict.vlm.q b/R/predict.vlm.q
index 34f2763..3a101aa 100644
--- a/R/predict.vlm.q
+++ b/R/predict.vlm.q
@@ -5,33 +5,30 @@
-if(!exists("is.R")) is.R <- function()
- exists("version") && !is.null(version$language) && version$language=="R"
-
-
-predict.vlm <- function(object,
- newdata=NULL,
- type=c("response","terms","lm2"),
- se.fit = FALSE, scale = NULL,
- terms.arg=NULL,
- raw=FALSE,
- dispersion = NULL, ...)
+predict.vlm = function(object,
+ newdata=NULL,
+ type=c("response","terms"),
+ se.fit = FALSE, scale = NULL,
+ terms.arg=NULL,
+ raw=FALSE,
+ dispersion = NULL, ...)
{
+ Xm2 = NULL
+ xij.used = length(form2 <- object at misc$form2) || length(object at control$xij)
if(mode(type) != "character" && mode(type) != "name")
- type <- as.character(substitute(type))
- type <- match.arg(type, c("response","terms","lm2"))[1]
+ type = as.character(substitute(type))
+ type = match.arg(type, c("response","terms"))[1]
na.act = object at na.action
object at na.action = list()
if(raw && type != "terms")
- stop("sorry, raw=TRUE only works when type=\"terms\"")
-
+ stop("sorry, 'raw=TRUE' only works when 'type=\"terms\"'")
- if(!length(newdata) && type=="response" && !se.fit &&
- length(object at fitted.values)) {
+ if(!length(newdata) && type == "response" && !se.fit &&
+ length(object at fitted.values)) {
if(length(na.act)) {
return(napredict(na.act[[1]], object at fitted.values))
} else {
@@ -39,221 +36,225 @@ predict.vlm <- function(object,
}
}
+ ttob = terms(object) # 11/8/03; object at terms$terms
- tt <- terms(object) # 11/8/03; object at terms$terms
- if(type == "lm2")
- ttXm2 = terms(object at misc$form2)
if(!length(newdata)) {
- offset <- object at offset
- X <- model.matrix(object, type="lm")
- if(type == "lm2")
- Xm2 <- model.matrix(object, type="lm2")
+ offset = object at offset
+
+ if(xij.used) {
+ bothList = model.matrix(object, type="bothlmlm2")
+ X = bothList$X
+ Xm2 = bothList$Xm2
+ } else {
+ X = model.matrix(object, type="lm")
+ }
} else {
if(is.smart(object) && length(object at smart.prediction)) {
setup.smart("read", smart.prediction=object at smart.prediction)
-
}
-
-
- X = model.matrix(delete.response(tt), newdata,
- contrasts=if(length(object at contrasts)) object at contrasts else NULL,
- xlev = object at xlevels)
- if(type == "lm2")
+ X = model.matrix(delete.response(ttob), newdata,
+ contrasts=if(length(object at contrasts)) object at contrasts
+ else NULL,
+ xlev = object at xlevels)
+ if(xij.used) {
+ ttXm2 = terms(form2)
Xm2 = model.matrix(delete.response(ttXm2), newdata,
- contrasts=if(length(object at contrasts)) object at contrasts else
- NULL,
- xlev = object at xlevels)
-
+ contrasts = if(length(object at contrasts))
+ object at contrasts else NULL,
+ xlev = object at xlevels)
+ }
- if(is.R() && object at misc$intercept.only && nrow(X)!=nrow(newdata)) {
+ if(object at misc$intercept.only && nrow(X) != nrow(newdata)) {
as.save = attr(X, "assign")
X = X[rep(1, nrow(newdata)),,drop=FALSE] # =matrix(1,nrow(newdata),1)
dimnames(X) = list(dimnames(newdata)[[1]], "(Intercept)")
attr(X, "assign") = as.save # Restored
}
- offset <- if (!is.null(off.num<-attr(tt,"offset"))) {
- eval(attr(tt,"variables")[[off.num+1]], newdata)
- } else if (!is.null(object at offset))
+ offset = if(!is.null(off.num <- attr(ttob, "offset"))) {
+ eval(attr(ttob, "variables")[[off.num+1]], newdata)
+ } else if(!is.null(object at offset))
eval(object at call$offset, newdata)
if(is.smart(object) && length(object at smart.prediction)) {
wrapup.smart()
}
- if(is.R()) {
- attr(X, "assign") <- attrassigndefault(X, tt)
- if(type == "lm2" && length(Xm2))
- attr(Xm2, "assign") <- attrassigndefault(Xm2, ttXm2)
- }
-
+ attr(X, "assign") = attrassigndefault(X, ttob)
+ if(length(Xm2))
+ attr(Xm2, "assign") = attrassigndefault(Xm2, ttXm2)
}
- if(type == "lm2")
- return(Xm2)
- hasintercept <- attr(tt, "intercept")
+ hasintercept = attr(ttob, "intercept")
- dx1 <- dimnames(X)[[1]]
- M <- object at misc$M
- Blist <- object at constraints
- ncolBlist <- unlist(lapply(Blist, ncol))
+ dx1 = dimnames(X)[[1]]
+ M = object at misc$M
+ Blist = object at constraints
+ ncolBlist = unlist(lapply(Blist, ncol))
if(hasintercept)
- ncolBlist <- ncolBlist[-1]
+ ncolBlist = ncolBlist[-1]
- xbar <- NULL
+ xbar = x2bar = NULL
if(type == "terms" && hasintercept) {
- if(is.null(newdata)) {
- xbar <- apply(X, 2, mean)
- X <- sweep(X, 2, xbar)
- } else {
- xbar <- apply(model.matrixvlm(object, type="lm"), 2, mean)
- xbar <- apply(X, 2, mean)
- X <- sweep(X, 2, xbar)
+ if(length(object at control$xij)) {
+ x2bar = colMeans(Xm2)
+ Xm2 = sweep(Xm2, 2, x2bar)
}
- nac <- is.na(object at coefficients)
+ xbar = colMeans(X)
+ X = sweep(X, 2, xbar)
+ nac = is.na(object at coefficients)
if(any(nac)) {
- X <- X[, !nac, drop=FALSE]
- xbar <- xbar[!nac]
+ if(length(object at control$xij))
+ stop("cannot handle 'xij' argument when ",
+ "there are NAs in the coefficients")
+ X = X[, !nac, drop=FALSE]
+ xbar = xbar[!nac]
}
}
if(!is.null(newdata) && !is.data.frame(newdata))
- newdata <- as.data.frame(newdata)
+ newdata = as.data.frame(newdata)
- nn <- if(!is.null(newdata)) nrow(newdata) else object at misc$n
+ nn = if(!is.null(newdata)) nrow(newdata) else object at misc$n
if(raw) {
- Blist <- canonical.Blist(Blist)
- object at constraints <- Blist
+ Blist = canonical.Blist(Blist)
+ object at constraints = Blist
}
- X <- lm2vlm.model.matrix(X, Blist=Blist, M=M, xij=object at control$xij)
- attr(X, "constant") <- xbar
- coefs <- coefvlm(object)
- vasgn <- attr(X, "vassign")
+ X_vlm = lm2vlm.model.matrix(X, Blist=Blist, M=M,
+ xij=object at control$xij, Xm2=Xm2)
+
+
+ attr(X_vlm, "constant") = xbar
+ attr(X_vlm, "constant2") = x2bar
+
+
+
+
+
+ coefs = coefvlm(object)
+ vasgn = attr(X_vlm, "vassign")
if(type == "terms") {
- nv <- names(vasgn)
+ nv = names(vasgn)
if(hasintercept)
- nv <- nv[-(1:ncol(object at constraints[["(Intercept)"]]))]
- terms.arg <- if(is.null(terms.arg)) nv else terms.arg
+ nv = nv[-(1:ncol(object at constraints[["(Intercept)"]]))]
+ terms.arg = if(is.null(terms.arg)) nv else terms.arg
- index <- if(is.R()) charmatch(terms.arg, nv) else
- match.arg(terms.arg, nv)
- if(all(index==0)) {
+ index = charmatch(terms.arg, nv)
+ if(all(index == 0)) {
warning("no match found; returning all terms")
- index <- 1:length(nv)
- }
- vasgn <- if(is.R()) vasgn[nv[index]] else {
- ans <- vector("list", length(index))
- for(loop in 1:length(index))
- ans[[loop]] <- vasgn[[index[loop]]]
- names(ans) <- index
- ans
+ index = 1:length(nv)
}
+ vasgn = vasgn[nv[index]]
}
if(any(is.na(object at coefficients)))
- stop("can't handle NA's in object at coefficients")
+ stop("cannot handle NAs in 'object at coefficients'")
- dname2 <- object at misc$predictors.names
+ dname2 = object at misc$predictors.names
if(se.fit) {
- class(object) <- "vlm" # coerce
- fit.summary <- summaryvlm(object, dispersion=dispersion)
- sigma <- if(!is.null(fit.summary at sigma)) fit.summary at sigma else
- sqrt(deviance(object) / object at df.resid) # was @rss
- pred <- Build.terms.vlm(X, coefs,
- cov=sigma^2 * fit.summary at cov.unscaled,
- vasgn,
- collapse=type!="terms", M=M,
- dimname=list(dx1, dname2),
- coefmat=coefvlm(object, matrix=TRUE))
- pred$df <- object at df.residual
- pred$sigma <- sigma
- } else
- pred <- Build.terms.vlm(X, coefs, cov=NULL, assign = vasgn,
- collapse=type!="terms", M=M,
- dimname=list(dx1, dname2),
- coefmat=coefvlm(object, matrix=TRUE))
- constant <- attr(pred, "constant")
+ object = as(object, "vlm") # Coerce
+ fit.summary = summaryvlm(object, dispersion=dispersion)
+ sigma = if(is.numeric(fit.summary at sigma)) fit.summary at sigma else
+ sqrt(deviance(object) / object at df.residual) # was @rss
+ pred = Build.terms.vlm(x=X_vlm, coefs=coefs,
+ cov=sigma^2 * fit.summary at cov.unscaled,
+ assign=vasgn,
+ collapse=type!="terms", M=M,
+ dimname=list(dx1, dname2),
+ coefmat=coefvlm(object, matrix=TRUE))
+ pred$df = object at df.residual
+ pred$sigma = sigma
+ } else {
+ pred = Build.terms.vlm(x=X_vlm, coefs=coefs,
+ cov=NULL,
+ assign=vasgn,
+ collapse=type!="terms", M=M,
+ dimname=list(dx1, dname2),
+ coefmat=coefvlm(object, matrix=TRUE))
+ }
+ constant = attr(pred, "constant")
if(type != "terms" && length(offset) && any(offset != 0)) {
if(se.fit) {
- pred$fitted.values <- pred$fitted.values + offset
+ pred$fitted.values = pred$fitted.values + offset
} else {
- pred <- pred + offset
+ pred = pred + offset
}
}
+
+
if(type == "terms") {
- Blist <- subconstraints(object at misc$orig.assign, object at constraints)
- ncolBlist <- unlist(lapply(Blist, ncol))
+ Blist = subconstraints(object at misc$orig.assign, object at constraints)
+ ncolBlist = unlist(lapply(Blist, ncol))
if(hasintercept)
- ncolBlist <- ncolBlist[-1]
+ ncolBlist = ncolBlist[-1]
- cs <- cumsum(c(1, ncolBlist)) # Like a pointer
- for(i in 1:(length(cs)-1))
- if(cs[i+1]-cs[i]>1)
- for(k in (cs[i]+1):(cs[i+1]-1))
+ cs = cumsum(c(1, ncolBlist)) # Like a pointer
+ for(ii in 1:(length(cs)-1))
+ if(cs[ii+1]-cs[ii] > 1)
+ for(kk in (cs[ii]+1):(cs[ii+1]-1))
if(se.fit) {
- pred$fitted.values[,cs[i]]=pred$fitted.values[,cs[i]] +
- pred$fitted.values[,k]
- pred$se.fit[,cs[i]] = pred$se.fit[,cs[i]] +
- pred$se.fit[,k]
+ pred$fitted.values[,cs[ii]]= pred$fitted.values[,cs[ii]] +
+ pred$fitted.values[,kk]
+ pred$se.fit[,cs[ii]] = pred$se.fit[,cs[ii]] +
+ pred$se.fit[,kk]
} else {
- pred[,cs[i]] <- pred[,cs[i]] + pred[,k]
+ pred[,cs[ii]] = pred[,cs[ii]] + pred[,kk]
}
-
if(se.fit) {
- pred$fitted.values=pred$fitted.values[,cs[-length(cs)],drop=FALSE]
- pred$se.fit <- pred$se.fit[, cs[-length(cs)], drop=FALSE]
+ pred$fitted.values = pred$fitted.values[,cs[-length(cs)],drop=FALSE]
+ pred$se.fit = pred$se.fit[, cs[-length(cs)], drop=FALSE]
} else {
- pred <- pred[, cs[-length(cs)], drop=FALSE]
+ pred = pred[, cs[-length(cs)], drop=FALSE]
}
- pp <- if(se.fit) ncol(pred$fitted.values) else ncol(pred)
+ pp = if(se.fit) ncol(pred$fitted.values) else ncol(pred)
if(se.fit) {
- dimnames(pred$fitted.values) <- dimnames(pred$se.fit) <- NULL
- dim(pred$fitted.values) <- dim(pred$se.fit) <- c(M, nn, pp)
- pred$fitted.values <- aperm(pred$fitted.values, c(2,1,3))
- pred$se.fit <- aperm(pred$se.fit, c(2,1,3))
- dim(pred$fitted.values) <- dim(pred$se.fit) <- c(nn, M*pp)
+ dimnames(pred$fitted.values) = dimnames(pred$se.fit) = NULL
+ dim(pred$fitted.values) = dim(pred$se.fit) = c(M, nn, pp)
+ pred$fitted.values = aperm(pred$fitted.values, c(2,1,3))
+ pred$se.fit = aperm(pred$se.fit, c(2,1,3))
+ dim(pred$fitted.values) = dim(pred$se.fit) = c(nn, M*pp)
} else {
- dimnames(pred) <- NULL # Saves a warning
- dim(pred) <- c(M, nn, pp)
- pred <- aperm(pred, c(2,1,3))
- dim(pred) <- c(nn, M*pp)
+ dimnames(pred) = NULL # Saves a warning
+ dim(pred) = c(M, nn, pp)
+ pred = aperm(pred, c(2,1,3))
+ dim(pred) = c(nn, M*pp)
}
if(raw) {
- kindex <- NULL
- for(i in 1:pp)
- kindex <- c(kindex, (i-1)*M + (1:ncolBlist[i]))
+ kindex = NULL
+ for(ii in 1:pp)
+ kindex = c(kindex, (ii-1)*M + (1:ncolBlist[ii]))
if(se.fit) {
- pred$fitted.values <- pred$fitted.values[,kindex,drop=FALSE]
- pred$se.fit <- pred$se.fit[,kindex,drop=FALSE]
+ pred$fitted.values = pred$fitted.values[,kindex,drop=FALSE]
+ pred$se.fit = pred$se.fit[,kindex,drop=FALSE]
} else {
- pred <- pred[,kindex,drop=FALSE]
+ pred = pred[,kindex,drop=FALSE]
}
}
- temp <- if(raw) ncolBlist else rep(M, length(ncolBlist))
- dd <- vlabel(names(ncolBlist), temp, M)
+ temp = if(raw) ncolBlist else rep(M, length(ncolBlist))
+ dd = vlabel(names(ncolBlist), temp, M)
if(se.fit) {
- dimnames(pred$fitted.values) <-
- dimnames(pred$se.fit) <- list(if(length(newdata))
- dimnames(newdata)[[1]] else dx1, dd)
+ dimnames(pred$fitted.values) =
+ dimnames(pred$se.fit) = list(if(length(newdata))
+ dimnames(newdata)[[1]] else dx1, dd)
} else {
- dimnames(pred) <- list(if(length(newdata))
- dimnames(newdata)[[1]] else dx1, dd)
+ dimnames(pred) = list(if(length(newdata))
+ dimnames(newdata)[[1]] else dx1, dd)
}
if(!length(newdata) && length(na.act)) {
@@ -265,77 +266,74 @@ predict.vlm <- function(object,
}
}
- if(!raw) {
- cs <- cumsum(c(1, M + 0*ncolBlist))
- }
- fred <- vector("list", length(ncolBlist))
- for(i in 1:length(fred))
- fred[[i]] <- cs[i]:(cs[i+1]-1)
- names(fred) <- names(ncolBlist)
+ if(!raw)
+ cs = cumsum(c(1, M + 0*ncolBlist))
+ fred = vector("list", length(ncolBlist))
+ for(ii in 1:length(fred))
+ fred[[ii]] = cs[ii]:(cs[ii+1]-1)
+ names(fred) = names(ncolBlist)
if(se.fit) {
- attr(pred$fitted.values, "vterm.assign") <- fred
- attr(pred$se.fit, "vterm.assign") <- fred
+ attr(pred$fitted.values, "vterm.assign") = fred
+ attr(pred$se.fit, "vterm.assign") = fred
} else {
- attr(pred, "vterm.assign") <- fred
+ attr(pred, "vterm.assign") = fred
}
- }
+ } # End of if(type == "terms")
- if(!is.null(xbar))
+ if(!is.null(xbar)) {
if(se.fit) {
- attr(pred$fitted.values, "constant") <- constant
+ attr(pred$fitted.values, "constant") = constant
} else {
- attr(pred, "constant") <- constant
+ attr(pred, "constant") = constant
}
+ }
pred
}
-subconstraints <- function(assign, constraints) {
- ans <- vector("list", length(assign))
+setMethod("predict", "vlm",
+ function(object, ...)
+ predict.vlm(object, ...))
+
+
+
+subconstraints = function(assign, constraints) {
+
+
+ ans = vector("list", length(assign))
if(!length(assign) || !length(constraints))
stop("assign and/or constraints is empty")
- for(i in 1:length(assign))
- ans[[i]] <- constraints[[assign[[i]][1]]]
- names(ans) <- names(assign)
+ for(ii in 1:length(assign))
+ ans[[ii]] = constraints[[assign[[ii]][1]]]
+ names(ans) = names(assign)
ans
}
-is.linear.term <- function(ch) {
- lch <- length(ch)
- ans <- rep(FALSE, len=lch)
- for(i in 1:lch) {
- nc <- nchar(ch[i])
- x <- substring(ch[i], 1:nc, 1:nc)
- ans[i] <- all(x!="(" & x!="+" & x!="-" & x!="/" & x!="*" &
- x!="^")
+is.linear.term = function(ch) {
+ lch = length(ch)
+ ans = rep(FALSE, len=lch)
+ for(ii in 1:lch) {
+ nc = nchar(ch[ii])
+ x = substring(ch[ii], 1:nc, 1:nc)
+ ans[ii] = all(x!="(" & x!="+" & x!="-" & x!="/" & x!="*" & x!="^")
}
- names(ans) <- ch
+ names(ans) = ch
ans
}
-
-
-canonical.Blist <- function(Blist) {
- for(i in 1:length(Blist)) {
- temp <- Blist[[i]] * 0
- r <- ncol(temp)
- temp[cbind(1:r,1:r)] <- 1
- Blist[[i]] <- temp
+canonical.Blist = function(Blist) {
+ for(ii in 1:length(Blist)) {
+ temp = Blist[[ii]] * 0
+ temp[cbind(1:ncol(temp),1:ncol(temp))] = 1
+ Blist[[ii]] = temp
}
Blist
}
-setMethod("predict", "vlm",
- function(object, ...)
- predict.vlm(object, ...))
-
-
-
-
diff --git a/R/print.summary.others.q b/R/print.summary.others.q
deleted file mode 100644
index c511146..0000000
--- a/R/print.summary.others.q
+++ /dev/null
@@ -1,29 +0,0 @@
-# These functions are
-# Copyright (C) 1998-2009 T.W. Yee, University of Auckland. All rights reserved.
-
-
-
-
-if(FALSE)
-printsummary.lms <- function(x, digits = NULL, quote = TRUE, prefix = "") {
-
- printsummary.vglm(x, digits = NULL, quote = TRUE, prefix = "")
-
-
-
- invisible(NULL)
-}
-
-
-
-printsummary.rc.exponential <- function(x, digits = NULL, quote = TRUE,
- prefix = "") {
-
- printsummary.vglm(x, digits = NULL, quote = TRUE, prefix = "")
-
-
- cat("\nNumber of censored observations: ", x$num.censored, "\n")
-
- invisible(NULL)
-}
-
diff --git a/R/qrrvglm.control.q b/R/qrrvglm.control.q
index 94bbb15..b5c8746 100644
--- a/R/qrrvglm.control.q
+++ b/R/qrrvglm.control.q
@@ -34,39 +34,39 @@ qrrvglm.control = function(Rank=1,
- if(!is.Numeric(iShape, posit=TRUE)) stop("bad input for \"iShape\"")
- if(!is.Numeric(iKvector, posit=TRUE)) stop("bad input for \"iKvector\"")
- if(!is.Numeric(isdlv, posit=TRUE)) stop("bad input for \"isdlv\"")
+ if(!is.Numeric(iShape, posit=TRUE)) stop("bad input for 'iShape'")
+ if(!is.Numeric(iKvector, posit=TRUE)) stop("bad input for 'iKvector'")
+ if(!is.Numeric(isdlv, posit=TRUE)) stop("bad input for 'isdlv'")
if(any(isdlv < 0.2 | isdlv > 10))
stop("isdlv values must lie between 0.2 and 10")
if(length(isdlv) > 1 && any(diff(isdlv) > 0))
stop("successive isdlv values must not increase")
if(!is.Numeric(epsilon, posit=TRUE, allow=1))
- stop("bad input for \"epsilon\"")
+ stop("bad input for 'epsilon'")
if(!is.Numeric(Etamat.colmax, posit=TRUE, allow=1) || Etamat.colmax < Rank)
- stop("bad input for \"Etamat.colmax\"")
+ stop("bad input for 'Etamat.colmax'")
if(!is.Numeric(Hstep, posit=TRUE, allow=1))
- stop("bad input for \"Hstep\"")
+ stop("bad input for 'Hstep'")
if(!is.Numeric(maxitl, posit=TRUE, allow=1, integer=TRUE))
- stop("bad input for \"maxitl\"")
+ stop("bad input for 'maxitl'")
if(!is.Numeric(method.init, posit=TRUE, allow=1, integer=TRUE))
- stop("bad input for \"method.init\"")
+ stop("bad input for 'method.init'")
if(!is.Numeric(Maxit.optim, integ=TRUE, posit=TRUE))
- stop("Bad input for \"Maxit.optim\"")
+ stop("Bad input for 'Maxit.optim'")
if(!is.Numeric(MUXfactor, posit=TRUE))
- stop("bad input for \"MUXfactor\"")
+ stop("bad input for 'MUXfactor'")
if(any(MUXfactor < 1 | MUXfactor > 10))
stop("MUXfactor values must lie between 1 and 10")
if(!is.Numeric(optim.maxit, allow=1, integ=TRUE, posit=TRUE))
- stop("Bad input for \"optim.maxit\"")
+ stop("Bad input for 'optim.maxit'")
if(!is.Numeric(Rank, posit=TRUE, allow=1, integer=TRUE))
- stop("bad input for \"Rank\"")
+ stop("bad input for 'Rank'")
if(!is.Numeric(SD.Cinit, posit=TRUE, allow=1))
- stop("bad input for \"SD.Cinit\"")
+ stop("bad input for 'SD.Cinit'")
if(ITolerances && !EqualTolerances)
stop("EqualTolerances must be TRUE if ITolerances is TRUE")
if(!is.Numeric(Bestof, posit=TRUE, allow=1, integer=TRUE))
- stop("bad input for \"Bestof\"")
+ stop("bad input for 'Bestof'")
FastAlgorithm = as.logical(FastAlgorithm)[1]
@@ -79,9 +79,9 @@ qrrvglm.control = function(Rank=1,
stop("Parscale must contain positive numbers only")
if(!is.logical(checkwz) || length(checkwz) != 1)
- stop("bad input for \"checkwz\"")
+ stop("bad input for 'checkwz'")
if(!is.Numeric(wzepsilon, allow=1, positive=TRUE))
- stop("bad input for \"wzepsilon\"")
+ stop("bad input for 'wzepsilon'")
ans = list(
Bestof = Bestof,
diff --git a/R/qtplot.q b/R/qtplot.q
index d3944ed..894e126 100644
--- a/R/qtplot.q
+++ b/R/qtplot.q
@@ -103,7 +103,7 @@ qtplot.lmscreg <- function(object,
fitted.values <- as.matrix(fitted.values)
} else {
if(!is.numeric(percentiles))
- stop("\"percentiles\" must be specified")
+ stop("'percentiles' must be specified")
eta <- if(length(newdata)) predict(object, newdata=newdata, type="link") else
object at predictors
@@ -275,10 +275,10 @@ qtplot.gumbel <-
tcol.arg=par()$col, tadj=1, ...)
{
if(!is.logical(mpv) || length(mpv) != 1)
- stop("bad input for \"mpv\"")
+ stop("bad input for 'mpv'")
if(!length(percentiles) ||
(!is.Numeric(percentiles, posit=TRUE) || max(percentiles) >= 100))
- stop("bad input for \"percentiles\"")
+ stop("bad input for 'percentiles'")
@@ -289,7 +289,7 @@ qtplot.gumbel <-
R <- rep(R, length=nrow(eta))
if(!is.Numeric(percentiles))
- stop("the \"percentiles\" argument needs to be assigned a value")
+ stop("the 'percentiles' argument needs to be assigned a value")
extra = object at extra
@@ -670,14 +670,14 @@ rlplot.gev <-
...)
{
if(!is.Numeric(epsilon, allow=1) || abs(epsilon) > 0.10)
- stop("bad input for \"epsilon\"")
+ stop("bad input for 'epsilon'")
if(!is.Numeric(probability, posit=TRUE) || max(probability) >= 1 ||
length(probability) < 5)
- stop("bad input for \"probability\"")
+ stop("bad input for 'probability'")
if(!is.logical(Log) || length(Log) != 1)
- stop("bad input for argument \"Log\"")
+ stop("bad input for argument 'Log'")
if(!is.logical(CI) || length(CI) != 1)
- stop("bad input for argument \"CI\"")
+ stop("bad input for argument 'CI'")
if(!object at misc$intercept.only)
stop("object must be an intercept-only fit, i.e., y ~ 1 is the response")
@@ -764,3 +764,5 @@ setMethod("rlplot", "vglm", function(object, ...)
+
+
diff --git a/R/residuals.vlm.q b/R/residuals.vlm.q
index 4ce28b8..e525318 100644
--- a/R/residuals.vlm.q
+++ b/R/residuals.vlm.q
@@ -186,6 +186,75 @@ residualsvglm <- function(object,
}
}
+
+
+
+
+residualsqrrvglm <- function(object,
+ type = c("response"),
+ matrix.arg=TRUE)
+{
+
+
+ if(mode(type) != "character" && mode(type) != "name")
+ type <- as.character(substitute(type))
+ type <- match.arg(type,
+ c("response"))[1]
+
+ na.act = object at na.action
+ object at na.action = list()
+
+ pooled.weight <- object at misc$pooled.weight
+ if(is.null(pooled.weight))
+ pooled.weight <- FALSE
+
+ answer =
+ switch(type,
+ working = if(pooled.weight) NULL else object at residuals,
+ pearson = {
+ stop("have not programmed pearson resids yet")
+ },
+ deviance = {
+ stop("have not programmed deviance resids yet")
+ },
+ ldot = {
+ stop("have not programmed ldot resids yet")
+ },
+ response = {
+ y <- object at y
+ mu <- fitted(object)
+
+ true.mu <- object at misc$true.mu
+ if(is.null(true.mu))
+ true.mu <- TRUE
+
+ ans <- if(true.mu) y - mu else NULL
+
+
+ if(!matrix.arg && length(ans)) {
+ if(ncol(ans) == 1) {
+ names.ans = dimnames(ans)[[1]]
+ ans = c(ans)
+ names(ans) = names.ans
+ ans
+ } else {
+ warning("ncol(ans) is not 1")
+ ans
+ }
+ } else ans
+ })
+
+ if(length(answer) && length(na.act)) {
+ napredict(na.act[[1]], answer)
+ } else {
+ answer
+ }
+}
+
+
+
+
+
setMethod("residuals", "vlm",
function(object, ...)
residualsvlm(object, ...))
@@ -195,6 +264,9 @@ residualsvglm <- function(object,
setMethod("residuals", "vgam",
function(object, ...)
residualsvglm(object, ...))
+ setMethod("residuals", "qrrvglm",
+ function(object, ...)
+ residualsqrrvglm(object, ...))
setMethod("resid", "vlm",
function(object, ...)
@@ -205,6 +277,9 @@ residualsvglm <- function(object,
setMethod("resid", "vgam",
function(object, ...)
residualsvglm(object, ...))
+ setMethod("resid", "qrrvglm",
+ function(object, ...)
+ residualsqrrvglm(object, ...))
diff --git a/R/rrvglm.R b/R/rrvglm.R
index 37c05de..4d8c951 100644
--- a/R/rrvglm.R
+++ b/R/rrvglm.R
@@ -45,14 +45,17 @@ rrvglm <- function(formula,
xvars <- as.character(attr(mt, "variables"))[-1]
if ((yvar <- attr(mt, "response")) > 0)
xvars <- xvars[-yvar]
- xlev <- if (length(xvars) > 0) {
- xlev <- lapply(mf[xvars], levels)
- xlev[!sapply(xlev, is.null)]
- }
- y <- model.response(mf, "numeric") # model.extract(mf, "response")
- x <- model.matrix(mt, mf, contrasts)
- attr(x, "assign") <- attrassigndefault(x, mt) # So as to make it like Splus
+
+
+ xlev = .getXlevels(mt, mf)
+ y <- model.response(mf, "any")
+ x <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) else
+ matrix(, NROW(y), 0)
+ attr(x, "assign") = attrassigndefault(x, mt)
+
+
+
offset <- model.offset(mf)
if(is.null(offset))
offset <- 0 # yyy ???
@@ -67,7 +70,7 @@ rrvglm <- function(formula,
if(is.function(family))
family <- family()
if(!inherits(family, "vglmff")) {
- stop(paste("family=", family, "is not a VGAM family function"))
+ stop("'family=", family, "' is not a VGAM family function")
}
eval(vcontrol.expression)
@@ -78,8 +81,8 @@ rrvglm <- function(formula,
# 10/12/04: testing for an empty (function) slot not elegant:
if(control$Quadratic && control$FastAlgorithm &&
length(as.list(family at deviance)) <= 1)
- stop(paste("The fast algorithm requires the family",
- "function to have a deviance slot"))
+ stop("The fast algorithm requires the family ",
+ "function to have a deviance slot")
rrvglm.fitter <- get(method)
@@ -165,6 +168,7 @@ rrvglm <- function(formula,
slot(answer, "xlevels") = xlev
if(y.arg)
slot(answer, "y") = as.matrix(fit$y)
+ answer at misc$formula = formula
slot(answer, "control") = fit$control
diff --git a/R/rrvglm.control.q b/R/rrvglm.control.q
index a5c6b69..7663cba 100644
--- a/R/rrvglm.control.q
+++ b/R/rrvglm.control.q
@@ -11,7 +11,7 @@ rrvglm.control = function(Rank=1,
Wmat=NULL,
Svd.arg=FALSE,
Index.corner=if(length(Structural.zero))
- ((1:1000)[-Structural.zero])[1:Rank] else 1:Rank,
+ head((1:1000)[-Structural.zero], Rank) else 1:Rank,
Alpha=0.5,
Bestof = 1,
Cinit=NULL,
@@ -36,19 +36,19 @@ rrvglm.control = function(Rank=1,
if(Svd.arg) Corner = FALSE
if(!is.Numeric(Rank, posit=TRUE, allow=1, integer=TRUE))
- stop("bad input for \"Rank\"")
+ stop("bad input for 'Rank'")
if(!is.Numeric(Alpha, posit=TRUE, allow=1) || Alpha > 1)
- stop("bad input for \"Alpha\"")
+ stop("bad input for 'Alpha'")
if(!is.Numeric(Bestof, posit=TRUE, allow=1, integer=TRUE))
- stop("bad input for \"Bestof\"")
+ stop("bad input for 'Bestof'")
if(!is.Numeric(SD.Cinit, posit=TRUE, allow=1))
- stop("bad input for \"SD.Cinit\"")
+ stop("bad input for 'SD.Cinit'")
if(!is.Numeric(Etamat.colmax, posit=TRUE, allow=1) || Etamat.colmax < Rank)
- stop("bad input for \"Etamat.colmax\"")
+ stop("bad input for 'Etamat.colmax'")
if(length(Structural.zero) && (any(round(Structural.zero) != Structural.zero)
|| any(Structural.zero<1)))
- stop("bad input for the argument \"Structural.zero\"")
+ stop("bad input for the argument 'Structural.zero'")
Quadratic = FALSE
@@ -63,18 +63,18 @@ rrvglm.control = function(Rank=1,
stop("Quadratic model can only be fitted using the derivative algorithm")
if(Corner && (Svd.arg || Uncor || length(Wmat)))
- stop("can't have Corner=TRUE and either Svd=TRUE or Uncor=TRUE or Wmat")
+ stop("cannot have Corner=TRUE and either Svd=TRUE or Uncor=TRUE or Wmat")
if(Corner && length(intersect(Structural.zero, Index.corner)))
- stop("can't have Structural.zero and Index.corner having common values")
+ stop("cannot have Structural.zero and Index.corner having common values")
if(length(Index.corner) != Rank)
stop("length(Index.corner) != Rank")
if(!is.logical(checkwz) || length(checkwz) != 1)
- stop("bad input for \"checkwz\"")
+ stop("bad input for 'checkwz'")
if(!is.Numeric(wzepsilon, allow=1, positive=TRUE))
- stop("bad input for \"wzepsilon\"")
+ stop("bad input for 'wzepsilon'")
ans =
c(vglm.control(trace = trace, ...),
diff --git a/R/rrvglm.fit.q b/R/rrvglm.fit.q
index 85acaca..ba82a72 100644
--- a/R/rrvglm.fit.q
+++ b/R/rrvglm.fit.q
@@ -6,7 +6,6 @@
-
rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
etastart=NULL, mustart=NULL, coefstart=NULL,
offset=0, family,
@@ -23,7 +22,6 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
nonparametric <- FALSE
epsilon <- control$epsilon
maxit <- control$maxit
- backchat <- control$backchat && !control$Quadratic # rrr;
save.weight <- control$save.weight
trace <- control$trace
orig.stepsize <- control$stepsize
@@ -35,9 +33,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
new.s.call <- expression({
if(c.list$one.more)
{
- fv <- if(backchat) {
- if(M>1) matrix(c.list$fit,n,M,byrow=TRUE) else c.list$fit
- } else c.list$fit
+ fv <- c.list$fit
new.coeffs <- c.list$coeff
if(length(family at middle))
@@ -61,12 +57,9 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
if(trace && orig.stepsize==1) {
cat(if(control$Quadratic) "QRR-VGLM" else "RR-VGLM",
" linear loop ", iter, ": ", criterion, "= ")
- uuuu =
- switch(criterion,
- coefficients= if(is.R())
- format(new.crit, dig=round(2-log10(epsilon))) else
- format(round(new.crit, round(2-log10(epsilon)))),
- format(round(new.crit, 4)))
+ uuuu = switch(criterion, coefficients=
+ format(new.crit, dig=round(2-log10(epsilon))),
+ format(round(new.crit, 4)))
switch(criterion,
coefficients={if(length(new.crit) > 2) cat("\n");
cat(uuuu, fill=TRUE, sep=", ")},
@@ -90,8 +83,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
repeat {
if(trace) {
cat(".")
- if(exists("flush.console"))
- flush.console()
+ flush.console()
}
stepsize <- stepsize / 2
if(too.small <- stepsize < 0.001)
@@ -102,7 +94,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
if(length(family at middle))
eval(family at middle)
- fv <- xbig.save %*% new.coeffs
+ fv <- X_vlm_save %*% new.coeffs
if(M > 1)
fv <- matrix(fv, n, M, byrow=TRUE)
@@ -128,19 +120,16 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
if(trace)
cat("\n")
if(too.small) {
- warning(paste("iterations terminated because",
- "half-step sizes are very small"))
+ warning("iterations terminated because ",
+ "half-step sizes are very small")
one.more <- FALSE
} else {
if(trace) {
cat(if(control$Quadratic) "QRR-VGLM" else "RR-VGLM",
" linear loop ", iter, ": ", criterion, "= ")
- uuuu =
- switch(criterion,
- coefficients=if(is.R())
- format(new.crit, dig=round(2-log10(epsilon))) else
- format(round(new.crit, round(2-log10(epsilon)))),
- format(round(new.crit, 4)))
+ uuuu = switch(criterion, coefficients=
+ format(new.crit, dig=round(2-log10(epsilon))),
+ format(round(new.crit, 4)))
switch(criterion,
coefficients={if(length(new.crit) > 2) cat("\n");
@@ -154,8 +143,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
one.more <- eval(control$convergence)
}
}
- if(exists("flush.console"))
- flush.console()
+ flush.console()
if(one.more) {
iter <- iter + 1
@@ -177,7 +165,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
c.list$z <- z # contains \bI_{Rank} \bnu
c.list$U <- U
- if(copyxbig) c.list$xbig <- xbig.save
+ if(copy_X_vlm) c.list$X_vlm <- X_vlm_save
}
c.list$one.more <- one.more
@@ -191,7 +179,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
- copyxbig <- FALSE # May be overwritten in @initialize
+ copy_X_vlm <- FALSE # May be overwritten in @initialize
stepsize <- orig.stepsize
old.coeffs <- coefstart
@@ -229,8 +217,8 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
if(any(is.na(index)))
stop("Dzero argument didn't fully match y-names")
if(length(index) == M)
- stop(paste("all linear predictors are linear in the",
- "latent variable(s); so set Quadratic=FALSE"))
+ stop("all linear predictors are linear in the ",
+ "latent variable(s); so set 'Quadratic=FALSE'")
rrcontrol$Dzero = control$Dzero = index
}
@@ -250,8 +238,8 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
if(!control$Quadratic && sum(!tc1)) {
for(ii in names(tc1))
if(!tc1[ii] && !any(ii == names(findex)[findex==1]))
- warning(paste("\"", ii, "\"", " is a non-trivial constraint",
- " that will be overwritten by reduced-rank regression", sep=""))
+ warning("'", ii, "' is a non-trivial constraint that ",
+ "will be overwritten by reduced-rank regression")
}
if(all(findex == 1))
@@ -317,7 +305,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
dimB <- sum(ncolBlist)
- xbig.save <- if(control$Quadratic) {
+ X_vlm_save <- if(control$Quadratic) {
tmp500 = lm2qrrvlm.model.matrix(x=x, Blist=Blist,
C=Cmat, control=control)
xsmall.qrr = tmp500$new.lv.model.matrix
@@ -341,8 +329,8 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
if(length(coefstart)) {
- eta <- if(ncol(xbig.save)>1) xbig.save %*% coefstart +
- offset else xbig.save * coefstart + offset
+ eta <- if(ncol(X_vlm_save)>1) X_vlm_save %*% coefstart +
+ offset else X_vlm_save * coefstart + offset
eta <- if(M > 1) matrix(eta, ncol=M, byrow=TRUE) else c(eta)
@@ -371,50 +359,25 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
z <- eta + vbacksub(U, tvfor, M=M, n=n) - offset
c.list <- list(z=as.double(z), fit=as.double(t(eta)), one.more=TRUE,
- coeff=as.double(rep(1,ncol(xbig.save))), U=as.double(U),
- copyxbig=copyxbig,
- xbig=if(copyxbig) as.double(xbig.save) else double(3))
+ coeff=as.double(rep(1,ncol(X_vlm_save))), U=as.double(U),
+ copy_X_vlm=copy_X_vlm,
+ X_vlm=if(copy_X_vlm) as.double(X_vlm_save) else double(3))
- dxbig <- as.integer(dim(xbig.save))
- n.big <- dxbig[[1]]
- p.big <- dxbig[[2]]
+ dX_vlm <- as.integer(dim(X_vlm_save))
+ nrow_X_vlm <- dX_vlm[[1]]
+ ncol_X_vlm <- dX_vlm[[2]]
- if(n.big < p.big)
- stop(paste(p.big, "parameters but only", n.big, "observations"))
-
- if(backchat) {
- nframe <- sys.nframe()
- dotC(name="v_init_call", as.integer(nframe), new.s.call)
- }
+ if(nrow_X_vlm < ncol_X_vlm)
+ stop(ncol_X_vlm, " parameters but only ", nrow_X_vlm, " observations")
-
- if(backchat) {
- if(control$Quadratic)
- stop("29/11/02; can't handle zedd")
- index.vglm <- iam(NA, NA, M, both=TRUE)
- tfit <- dotFortran(name="vglmf", as.double(xbig.save), n.big, p.big,
- backchat=as.integer(TRUE),
- as.integer(n), as.double(z), # not zedd
- coefficients=double(p.big),
- predictors=double(n.big), effects=double(n.big),
- qr=as.double(xbig.save), qraux=double(p.big),
- rank=as.integer(0), pivot=as.integer(seq(p.big)),
- work=double(max(n.big, 2 * p.big)),
- wkmm=double(M*M*5 + M*p.big),
- as.double(U), as.integer(M),
- dimu=as.integer(if(is.matrix(U)) nrow(U) else 1),
- dimm=as.integer(if(is.matrix(wz)) ncol(wz) else 1),
- as.integer(index.vglm$row), as.integer(index.vglm$col),
- copyxbig=as.integer(copyxbig),
- rss=double(1))
- } else {
- bf.call = expression(vlm.wfit(xbig.save, zedd,
+ {
+ bf.call = expression(vlm.wfit(xmat=X_vlm_save, zedd,
Blist=if(control$Quadratic) B.list else Blist,
ncolx=ncol(x), U=U,
Eta.range = control$Eta.range,
- matrix.out=if(control$Quadratic) FALSE else TRUE, XBIG=TRUE,
+ matrix.out=if(control$Quadratic) FALSE else TRUE, is.vlmX=TRUE,
qr=qr.arg, xij=control$xij))
while(c.list$one.more) {
@@ -453,54 +416,49 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
}
if(maxit>1 && iter>=maxit)
- warning(paste("convergence not obtained in", maxit, "iterations."))
+ warning("convergence not obtained in ", maxit, " iterations")
- dn.big <- labels(xbig.save)
- xn.big <- dn.big[[2]]
- yn.big <- dn.big[[1]]
+ dnrow_X_vlm <- labels(X_vlm_save)
+ xnrow_X_vlm <- dnrow_X_vlm[[2]]
+ ynrow_X_vlm <- dnrow_X_vlm[[1]]
if(length(family at fini))
eval(family at fini)
if(M>1 && !nice31)
- tfit$predictors <- matrix(tfit$predictors, n, M, byrow=backchat)
+ tfit$predictors <- matrix(tfit$predictors, n, M)
- asgn <- attr(xbig.save, "assign")
+ asgn <- attr(X_vlm_save, "assign")
if(nice31) {
- coefs <- rep(0, len=length(xn.big))
- rank <- p.big
+ coefs <- rep(0, len=length(xnrow_X_vlm))
+ rank <- ncol_X_vlm
} else {
coefs <- tfit$coefficients
- names(coefs) <- xn.big
+ names(coefs) <- xnrow_X_vlm
rank <- tfit$rank
}
- cnames <- xn.big
+ cnames <- xnrow_X_vlm
- if(check.rank && rank < p.big)
+ if(check.rank && rank < ncol_X_vlm)
stop("rrvglm only handles full-rank models (currently)")
if(nice31) {
R <- matrix(as.numeric(NA), 5, 5)
} else {
- R <- if(is.R()) tfit$qr$qr[1:p.big, 1:p.big, drop=FALSE] else {
- if(backchat) tfit$qr[1:p.big, 1:p.big, drop=FALSE] else
- tfit$qr$qr[1:p.big, 1:p.big, drop=FALSE]
- }
+ R <- tfit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop=FALSE]
R[lower.tri(R)] <- 0
- attributes(R) <- if(is.R()) list(dim=c(p.big, p.big),
- dimnames=list(cnames, cnames), rank=rank) else
- list(dim=c(p.big, p.big),
- dimnames=list(cnames, cnames), rank=rank, class="upper")
+ attributes(R) <- list(dim=c(ncol_X_vlm, ncol_X_vlm),
+ dimnames=list(cnames, cnames), rank=rank)
}
if(nice31) {
effects <- rep(0, len=77)
} else {
effects <- tfit$effects
- neff <- rep("", n.big)
- neff[seq(p.big)] <- cnames
+ neff <- rep("", nrow_X_vlm)
+ neff[seq(ncol_X_vlm)] <- cnames
names(effects) <- neff
dim(tfit$predictors) <- c(n, M)
@@ -542,7 +500,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
- df.residual <- n.big - rank - (if(control$Quadratic) Rank*p2 else 0)
+ df.residual <- nrow_X_vlm - rank - (if(control$Quadratic) Rank*p2 else 0)
fit <- list(assign=asgn,
coefficients=coefs,
constraints=if(control$Quadratic) B.list else Blist,
@@ -557,12 +515,8 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
terms=Terms) # terms: This used to be done in vglm()
if(qr.arg && !nice31) {
- fit$qr <- if(is.R())
- fit$qr <- tfit$qr else {
- if(backchat) tfit[c("qr", "rank", "pivot", "qraux")] else
- tfit$qr
- }
- dimnames(fit$qr$qr) <- dn.big
+ fit$qr <- tfit$qr
+ dimnames(fit$qr$qr) <- dnrow_X_vlm
}
if(M==1) {
@@ -573,7 +527,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
misc <- list(
colnames.x = xn,
- colnames.xbig = xn.big,
+ colnames.X_vlm = xnrow_X_vlm,
criterion = criterion,
function.name = function.name,
intercept.only=intercept.only,
@@ -581,10 +535,10 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
M = M,
n = n,
nonparametric = nonparametric,
- n.big = n.big,
+ nrow_X_vlm = nrow_X_vlm,
orig.assign = attr(x, "assign"),
p = ncol(x),
- p.big = p.big,
+ ncol_X_vlm = ncol_X_vlm,
ynames = dimnames(y)[[2]])
if(one.more)
@@ -595,18 +549,14 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
if(criterion != "coefficients")
crit.list[[criterion]] <- fit[[criterion]] <- new.crit
- for(i in names(.min.criterion.VGAM)) {
- if(i != criterion &&
- any(slotNames(family) == i) &&
- (( is.R() && length(body(slot(family, i)))) ||
- ((!is.R() && length(slot(family, i)) > 1)))) {
- fit[[i]] <- crit.list[[i]] <-
- (slot(family, i))(mu=mu, y=y, w=w, res=FALSE, eta=eta, extra)
+ for(ii in names(.min.criterion.VGAM)) {
+ if(ii != criterion &&
+ any(slotNames(family) == ii) && length(body(slot(family, ii)))) {
+ fit[[ii]] <- crit.list[[ii]] <-
+ (slot(family, ii))(mu=mu, y=y, w=w, res=FALSE, eta=eta, extra)
}
}
-
-
if(w[1] != 1 || any(w != w[1]))
diff --git a/R/s.vam.q b/R/s.vam.q
index 168ba16..6a4a7e2 100644
--- a/R/s.vam.q
+++ b/R/s.vam.q
@@ -7,16 +7,15 @@
s.vam <- function(x, z, wz, s, which, smooth.frame, bf.maxit=10,
bf.epsilon=0.001, trace=FALSE, se.fit=TRUE,
- xbig.save, Blist, ncolBlist, M, qbig, U,
- backchat=if(is.R()) FALSE else TRUE,
+ X_vlm_save, Blist, ncolBlist, M, qbig, U,
all.knots=FALSE, nk=NULL,
sf.only=FALSE)
{
nwhich <- names(which)
- dxbig <- as.integer(dim(xbig.save))
- pbig <- dxbig[2]
+ dX_vlm <- as.integer(dim(X_vlm_save))
+ pbig <- dX_vlm[2]
if(!length(smooth.frame$first)) {
@@ -40,25 +39,22 @@ s.vam <- function(x, z, wz, s, which, smooth.frame, bf.maxit=10,
if(!is.numeric(temp) || any(temp<0))
stop("spar cannot be negative or non-numeric")
if(length(temp) > ncolBlist[i])
- warning(paste("only the first", ncolBlist[i],
- "values of spar used for variable \"",
- s.xargument, "\""))
+ warning("only the first ", ncolBlist[i], " values of 'spar' ",
+ " are used for variable '", s.xargument, "'")
spar[[i]] <- rep(temp, length=ncolBlist[i]) # recycle
temp <- df[[i]]
if(!is.numeric(temp) || any(temp<1))
stop("df is non-numeric or less than 1")
if(length(temp) > ncolBlist[i])
- warning(paste("only the first", ncolBlist[i],
- "values of df used for variable \"",
- s.xargument, "\""))
+ warning("only the first", ncolBlist[i], "values of 'df' ",
+ "are used for variable '", s.xargument, "'")
df[[i]] <- rep(temp, length=ncolBlist[i]) # recycle
if(max(temp) > smooth.frame$nef[k]-1)
- stop(paste("df value too high for variable \"",
- s.xargument, "\""))
+ stop("'df' value too high for variable '", s.xargument, "'")
if(any(spar[[i]]!=0) && any(df[[i]]!=4))
- stop("can't specify both spar and df")
+ stop("cannot specify both 'spar' and 'df'")
}
spar <- unlist(spar)
@@ -70,14 +66,14 @@ s.vam <- function(x, z, wz, s, which, smooth.frame, bf.maxit=10,
smooth.frame$n * sum(ncolBlist[nwhich]))
stop("too many parameters/dof for data on hand")
- xn.big <- labels(xbig.save)[[2]]
- asgn <- attr(xbig.save, "assign")
+ xnrow_X_vlm <- labels(X_vlm_save)[[2]]
+ asgn <- attr(X_vlm_save, "assign")
aa <- NULL
for(i in nwhich) {
- aa <- c(aa, xn.big[asgn[[i]]])
+ aa <- c(aa, xnrow_X_vlm[asgn[[i]]])
}
smooth.frame$ndfspar <- aa # Stored here
- smooth.frame$xn.big <- xn.big # Stored here
+ smooth.frame$xnrow_X_vlm <- xnrow_X_vlm # Stored here
smooth.frame$s.xargument <- s.xargument # Stored here
smooth.frame$smap=as.vector(cumsum(
@@ -125,7 +121,7 @@ s.vam <- function(x, z, wz, s, which, smooth.frame, bf.maxit=10,
size.twk <- max(size.twk, M*smooth.frame$n)
fit <- dotFortran(name="vbfa",
- as.integer(backchat), n = as.integer(n), M = as.integer(M),
+ n = as.integer(n), M = as.integer(M),
npetc = as.integer(c(n, p, length(which), se.fit, 0,
bf.maxit, 0, M, n*M, pbig,
qbig, dimw, dimu, ier=0, ldk=ldk)),
@@ -138,9 +134,9 @@ s.vam <- function(x, z, wz, s, which, smooth.frame, bf.maxit=10,
s0 = double((2*M)*(2*M)*2),
beta = double(pbig), var = if(se.fit) as.double(s) else double(1),
as.double(bf.epsilon),
- qr = as.double(xbig.save), qraux = double(pbig),
+ qr = as.double(X_vlm_save), qraux = double(pbig),
qpivot = as.integer(1:pbig),
- xbig = if(backchat) as.double(xbig.save) else double(1),
+ X_vlm = double(1),
U = as.double(U),
as.double(unlist(Blist)),
as.integer(ncbvec), as.integer(smooth.frame$smap),
@@ -164,8 +160,8 @@ s.vam <- function(x, z, wz, s, which, smooth.frame, bf.maxit=10,
itwk = integer(2*M),
kindex = as.integer(smooth.frame$kindex))
- dim(fit$qr) = dim(xbig.save)
- dimnames(fit$qr) = dimnames(xbig.save)
+ dim(fit$qr) = dim(X_vlm_save)
+ dimnames(fit$qr) = dimnames(X_vlm_save)
dim(fit$y) = dim(z)
dimnames(fit$y) = dimnames(z)
dim(fit$smooth) = dim(s)
@@ -196,8 +192,7 @@ s.vam <- function(x, z, wz, s, which, smooth.frame, bf.maxit=10,
smooth.frame$prev.dof <- fit$df
if((nit == bf.maxit) & bf.maxit > 1)
- warning(paste("s.vam convergence not obtained in", bf.maxit,
- "iterations"))
+ warning("'s.vam' convergence not obtained in ", bf.maxit, " iterations")
R <- fit$qr[1:pbig, 1:pbig]
R[lower.tri(R)] <- 0
@@ -233,7 +228,7 @@ s.vam <- function(x, z, wz, s, which, smooth.frame, bf.maxit=10,
s.xargument = unlist(smooth.frame$s.xargument))
- names(rl$coefficients) <- smooth.frame$xn.big
+ names(rl$coefficients) <- smooth.frame$xnrow_X_vlm
names(rl$spar) <- smooth.frame$ndfspar
names(rl$nl.df) <- smooth.frame$ndfspar
diff --git a/R/summary.others.q b/R/summary.others.q
deleted file mode 100644
index 336f7ae..0000000
--- a/R/summary.others.q
+++ /dev/null
@@ -1,35 +0,0 @@
-# These functions are
-# Copyright (C) 1998-2009 T.W. Yee, University of Auckland. All rights reserved.
-
-
-
-
-
-summary.lms <- function(object, ...)
-{
- ans <- NextMethod("summary")
-
- ans$testing <- object$lin[1:4,]
-
- class(ans) <- c("summary.lms", class(ans))
-
-
- ans
-}
-
-
-
-summary.rc.exponential <- function(object, ...)
-{
-
- ans <- NextMethod("summary")
-
- ans$num.censored <- attr(object$terms, "num.censored")
-
- class(ans) <- c("summary.rc.exponential", class(ans))
-
-
- ans
-}
-
-
diff --git a/R/summary.vgam.q b/R/summary.vgam.q
index 163c8fa..0a637db 100644
--- a/R/summary.vgam.q
+++ b/R/summary.vgam.q
@@ -11,8 +11,8 @@ summaryvgam <- function(object, dispersion=NULL, digits=options()$digits-2)
if(length(dispersion) && dispersion == 0 &&
length(object at family@summary.dispersion) &&
!object at family@summary.dispersion) {
- stop(paste("Can't use the general VGLM formula (based on a residual",
- "sum of squares) for computing the dispersion parameter."))
+ stop("cannot use the general VGLM formula (based on a residual ",
+ "sum of squares) for computing the dispersion parameter")
}
newobject <- object
@@ -21,7 +21,7 @@ summaryvgam <- function(object, dispersion=NULL, digits=options()$digits-2)
rdf <- stuff at df[2] <- object at df.residual # NA
M <- object at misc$M
- n.big <- object at misc$n.big
+ nrow_X_vlm <- object at misc$nrow_X_vlm
rank <- if(is.null(object at qr$rank)) length(object at coefficients) else
object at qr$rank
diff --git a/R/summary.vglm.q b/R/summary.vglm.q
index 44344db..6b2617a 100644
--- a/R/summary.vglm.q
+++ b/R/summary.vglm.q
@@ -22,8 +22,8 @@ summaryvglm <- function(object, correlation=FALSE, dispersion=NULL, digits=NULL)
if(length(dispersion) && dispersion == 0 &&
length(object at family@summary.dispersion) &&
!object at family@summary.dispersion) {
- stop(paste("Can't use the general VGLM formula (based on a residual",
- "sum of squares) for computing the dispersion parameter."))
+ stop("cannot use the general VGLM formula (based on a residual ",
+ "sum of squares) for computing the dispersion parameter")
}
stuff <- summaryvlm(as(object, "vlm"),
@@ -149,15 +149,15 @@ printsummary.vglm <- function(x, digits = NULL, quote = TRUE, prefix = "",
if(!is.null(correl))
{
- p.big <- dim(correl)[2]
- if(p.big > 1)
+ ncol_X_vlm <- dim(correl)[2]
+ if(ncol_X_vlm > 1)
{
cat("\nCorrelation of Coefficients:\n")
ll <- lower.tri(correl)
correl[ll] <- format(round(correl[ll], digits))
correl[!ll] <- ""
- print(correl[-1, -p.big, drop = FALSE], quote = FALSE, digits =
- digits)
+ print(correl[-1, -ncol_X_vlm, drop = FALSE], quote = FALSE,
+ digits = digits)
}
}
invisible(NULL)
@@ -196,16 +196,17 @@ vcovvlm <- function(object, dispersion=NULL, untransform=FALSE) {
answer = d * so at cov.unscaled
if(is.logical(OKRC <- object at misc$RegCondOK) && !OKRC)
- warning(paste("MLE regularity conditions were violated",
- "at the final iteration of the fitted object"))
+ warning("MLE regularity conditions were violated ",
+ "at the final iteration of the fitted object")
if(!untransform) return(answer)
if(!is.logical(object at misc$intercept.only))
- stop(paste("cannot determine whether the object is",
- "an intercept-only fit, i.e., y ~ 1 is the response"))
+ stop("cannot determine whether the object is",
+ "an intercept-only fit, i.e., 'y ~ 1' is the response")
if(!object at misc$intercept.only)
- stop("object must be an intercept-only fit, i.e., y ~ 1 is the response")
+ stop("object must be an intercept-only fit, i.e., ",
+ "y ~ 1 is the response")
if(!all(trivial.constraints(constraints(object)) == 1))
stop("object must have trivial constraints")
@@ -220,8 +221,7 @@ vcovvlm <- function(object, dispersion=NULL, untransform=FALSE) {
etavector = predict(object)[1,] # Contains transformed parameters
earg = object at misc$earg # This could be a NULL
if(!is.null(earg) && M > 1 && (!is.list(earg) || length(earg) != M))
- stop(paste("the earg component of object at misc should be of length ",
- M, sep=""))
+ stop("the 'earg' component of 'object at misc' should be of length ", M)
for(ii in 1:M) {
TTheta = etavector[ii] # Transformed theta
use.earg = if(M == 1 || is.null(earg)) earg else earg[[ii]]
diff --git a/R/summary.vlm.q b/R/summary.vlm.q
index 033491c..563144c 100644
--- a/R/summary.vlm.q
+++ b/R/summary.vlm.q
@@ -17,8 +17,8 @@ summaryvlm <- function(object, correlation=FALSE, dispersion=NULL)
M <- object at misc$M
n <- object at misc$n
- n.big <- object at misc$n.big
- p.big <- object at misc$p.big # May be NULL for CQO objects
+ nrow_X_vlm <- object at misc$nrow_X_vlm
+ ncol_X_vlm <- object at misc$ncol_X_vlm # May be NULL for CQO objects
coef <- object at coefficients
cnames <- names(coef)
@@ -55,21 +55,21 @@ summaryvlm <- function(object, correlation=FALSE, dispersion=NULL)
}
sigma <- dispersion^0.5 # Can be a vector
- if(is.Numeric(p.big)) {
+ if(is.Numeric(ncol_X_vlm)) {
R <- object at R
- if(p.big < max(dim(R)))
+ if(ncol_X_vlm < max(dim(R)))
stop("R is rank deficient")
- rinv = diag(p.big)
+ rinv = diag(ncol_X_vlm)
rinv = backsolve(R, rinv)
- rowlen = drop(((rinv^2) %*% rep(1, p.big))^0.5)
+ rowlen = drop(((rinv^2) %*% rep(1, ncol_X_vlm))^0.5)
covun = rinv %*% t(rinv)
dimnames(covun) <- list(cnames, cnames)
}
coef <- matrix(rep(coef, 3), ncol=3)
dimnames(coef) <- list(cnames, c("Value", "Std. Error", "t value"))
- if(length(sigma) == 1 && is.Numeric(p.big)) {
+ if(length(sigma) == 1 && is.Numeric(ncol_X_vlm)) {
coef[, 2] <- rowlen %o% sigma # Fails here when sigma is a vector
coef[, 3] <- coef[, 1]/coef[, 2]
} else {
@@ -90,10 +90,10 @@ summaryvlm <- function(object, correlation=FALSE, dispersion=NULL)
object,
coef3=coef,
correlation=correl,
- df=c(p.big, rdf),
+ df=c(ncol_X_vlm, rdf),
sigma=sigma)
- if(is.Numeric(p.big)) answer at cov.unscaled = covun
+ if(is.Numeric(ncol_X_vlm)) answer at cov.unscaled = covun
answer at dispersion = dispersion # Overwrite this
if(length(presid))
@@ -163,13 +163,13 @@ printsummary.vlm <- function(x, digits=NULL, quote=TRUE, prefix="")
"on", round(rdf, digits), "degrees of freedom\n")
if(length(correl)) {
- p.big <- dim(correl)[2]
- if(p.big > 1) {
+ ncol_X_vlm <- dim(correl)[2]
+ if(ncol_X_vlm > 1) {
cat("\nCorrelation of Coefficients:\n")
ll <- lower.tri(correl)
correl[ll] <- format(round(correl[ll], digits))
correl[!ll] <- ""
- print(correl[-1, -p.big, drop=FALSE], quote=FALSE, digits=digits)
+ print(correl[-1, -ncol_X_vlm, drop=FALSE], quote=FALSE, digits=digits)
}
}
invisible(NULL)
diff --git a/R/uqo.R b/R/uqo.R
index 7b2f0e7..6c6d2aa 100644
--- a/R/uqo.R
+++ b/R/uqo.R
@@ -146,7 +146,7 @@ uqo <- function(formula,
y <- model.response(mf, "numeric") # model.extract(mf, "response")
x <- model.matrix(mt, mf, contrasts)
- attr(x, "assign") <- attrassigndefault(x, mt) # So as to make it like Splus
+ attr(x, "assign") = attrassigndefault(x, mt)
offset <- model.offset(mf)
if(is.null(offset))
offset <- 0 # yyy ???
@@ -161,7 +161,7 @@ uqo <- function(formula,
if(is.function(family))
family <- family()
if(!inherits(family, "vglmff")) {
- stop(paste("family=", family, "is not a VGAM family function"))
+ stop("'family=", family, "' is not a VGAM family function")
}
if(!is.null(family at first))
@@ -173,8 +173,8 @@ uqo <- function(formula,
if(control$FastAlgorithm &&
length(as.list(family at deviance)) <= 1)
- stop(paste("The fast algorithm requires the family",
- "function to have a deviance slot"))
+ stop("The fast algorithm requires the family ",
+ "function to have a deviance slot")
deviance.Bestof = rep(as.numeric(NA), len=control$Bestof)
for(tries in 1:control$Bestof) {
if(control$trace && (control$Bestof>1))
@@ -221,8 +221,8 @@ uqo <- function(formula,
slot(answer, "y") = as.matrix(fit$y)
slot(answer, "extra") = if(length(fit$extra)) {
if(is.list(fit$extra)) fit$extra else {
- warning(paste("\"extra\" is not a list, therefore",
- "placing \"extra\" into a list"))
+ warning("'extra' is not a list, therefore ",
+ "placing 'extra' into a list")
list(fit$extra)
}
} else list() # R-1.5.0
@@ -403,7 +403,7 @@ uqo.fit <- function(x, y, w=rep(1, len=nrow(x)),
n <- dim(x)[1]
- copyxbig <- FALSE # May be overwritten in @initialize
+ copy_X_vlm <- FALSE # May be overwritten in @initialize
stepsize <- orig.stepsize
old.coeffs <- coefstart
@@ -416,8 +416,6 @@ uqo.fit <- function(x, y, w=rep(1, len=nrow(x)),
Rank <- control$Rank
rrcontrol <- control #
- backchat = FALSE
-
if(length(family at initialize))
eval(family at initialize) # Initialize mu and M (and optionally w)
n <- n.save
@@ -512,14 +510,14 @@ uqo.fit <- function(x, y, w=rep(1, len=nrow(x)),
all(trivial.constraints(Blist))
- xbig.save1 <- if(nice31) {
+ X_vlm_1save <- if(nice31) {
NULL
} else {
lm2vlm.model.matrix(x, Blist, xij=control$xij)
}
NOS = ifelse(modelno==3 || modelno==5, M/2, M)
- p1star = if(nice31) p1*ifelse(modelno==3 || modelno==5,2,1) else ncol(xbig.save1)
+ p1star = if(nice31) p1*ifelse(modelno==3 || modelno==5,2,1) else ncol(X_vlm_1save)
p2star = if(nice31)
ifelse(control$IToleran, Rank, Rank+0.5*Rank*(Rank+1)) else
(NOS*Rank + Rank*(Rank+1)/2 * ifelse(control$EqualTol,1,NOS))
@@ -539,7 +537,7 @@ uqo.fit <- function(x, y, w=rep(1, len=nrow(x)),
kinit=rep(control$Kinit, len=NOS),
shapeinit=rep(control$shapeinit, len=NOS))
bnumat = if(nice31) matrix(0,nstar,pstar) else
- cbind(matrix(0,nstar,p2star), xbig.save1)
+ cbind(matrix(0,nstar,p2star), X_vlm_1save)
rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance", "fv",
"cmatrix", "ocmatrix"), prefix=".VGAM.UQO.")
@@ -570,7 +568,7 @@ uqo.fit <- function(x, y, w=rep(1, len=nrow(x)),
}
if(!converged && optim.maxit>1)
- warning(paste("convergence not obtained"))
+ warning("convergence not obtained")
temp9 =
diff --git a/R/vgam.R b/R/vgam.R
index faf9a5e..92628db 100644
--- a/R/vgam.R
+++ b/R/vgam.R
@@ -3,6 +3,10 @@
+
+
+
+
vgam <- function(formula,
family,
data=list(),
@@ -47,8 +51,8 @@ vgam <- function(formula,
xlev = .getXlevels(mt, mf)
y <- model.response(mf, "any") # model.extract(mf, "response")
x <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) else
- matrix(, NROW(Y), 0)
- attr(x, "assign") <- attrassigndefault(x, mt) # So as to make it like Splus
+ matrix(, NROW(y), 0)
+ attr(x, "assign") = attrassigndefault(x, mt)
offset <- model.offset(mf)
if(is.null(offset))
@@ -90,7 +94,7 @@ vgam <- function(formula,
if (is.function(family))
family <- family()
if(!inherits(family, "vglmff")) {
- stop(paste("family=", family, "is not a VGAM family function"))
+ stop("'family=", family, "' is not a VGAM family function")
}
eval(vcontrol.expression)
@@ -214,7 +218,7 @@ vgam <- function(formula,
slot(answer, "xlevels") = xlev
if(y.arg)
slot(answer, "y") = as.matrix(fit$y)
-
+ answer at misc$formula = formula
slot(answer, "control") = fit$control
diff --git a/R/vgam.control.q b/R/vgam.control.q
index 6212769..a4efd46 100644
--- a/R/vgam.control.q
+++ b/R/vgam.control.q
@@ -4,7 +4,6 @@
vgam.control <- function(all.knots=FALSE,
- backchat=if(is.R()) FALSE else TRUE,
bf.epsilon=1e-7,
bf.maxit=30,
checkwz=TRUE,
@@ -17,7 +16,7 @@ vgam.control <- function(all.knots=FALSE,
se.fit=TRUE,
trace=FALSE,
wzepsilon = .Machine$double.eps^0.75,
- xij=NULL, ...)
+ ...)
{
@@ -30,26 +29,26 @@ vgam.control <- function(all.knots=FALSE,
criterion <- names(.min.criterion.VGAM)[criterion]
if(!is.logical(checkwz) || length(checkwz) != 1)
- stop("bad input for \"checkwz\"")
+ stop("bad input for 'checkwz'")
if(!is.Numeric(wzepsilon, allow=1, positive=TRUE))
- stop("bad input for \"wzepsilon\"")
+ stop("bad input for 'wzepsilon'")
if(length(all.knots) > 1)
warning("all.knots should be of length 1; using first value only")
if(!is.Numeric(bf.epsilon, allow=1, posit=TRUE)) {
- warning("bad input for \"bf.epsilon\"; using 0.00001 instead")
+ warning("bad input for 'bf.epsilon'; using 0.00001 instead")
bf.epsilon <- 0.00001
}
if(!is.Numeric(bf.maxit, allow=1, posit=TRUE, integ=TRUE)) {
- warning("bad input for \"bf.maxit\"; using 20 instead")
+ warning("bad input for 'bf.maxit'; using 20 instead")
bf.maxit <- 20
}
if(!is.Numeric(epsilon, allow=1, posit=TRUE)) {
- warning("bad input for \"epsilon\"; using 0.0001 instead")
+ warning("bad input for 'epsilon'; using 0.0001 instead")
epsilon <- 0.0001
}
if(!is.Numeric(maxit, allow=1, posit=TRUE, integ=TRUE)) {
- warning("bad input for \"maxit\"; using 20 instead")
+ warning("bad input for 'maxit'; using 20 instead")
maxit <- 20
}
@@ -61,7 +60,6 @@ vgam.control <- function(all.knots=FALSE,
})
list(all.knots=as.logical(all.knots)[1],
- backchat=as.logical(backchat)[1],
bf.epsilon=bf.epsilon,
bf.maxit=bf.maxit,
checkwz=checkwz,
@@ -74,8 +72,7 @@ vgam.control <- function(all.knots=FALSE,
save.weight=as.logical(save.weight)[1],
se.fit=as.logical(se.fit)[1],
trace=as.logical(trace)[1],
- wzepsilon = wzepsilon,
- xij=xij)
+ wzepsilon = wzepsilon)
}
diff --git a/R/vgam.fit.q b/R/vgam.fit.q
index 32a5550..2448cb9 100644
--- a/R/vgam.fit.q
+++ b/R/vgam.fit.q
@@ -3,7 +3,6 @@
-
vgam.fit <- function(x, y, w, mf,
etastart, mustart, coefstart,
offset, family, control, criterion="coefficients",
@@ -33,8 +32,7 @@ vgam.fit <- function(x, y, w, mf,
# --------------------------------------------------------------
new.s.call <- expression({
if(c.list$one.more) {
- fv <- if(backchat && M>1 && nonparametric)
- matrix(c.list$fit,n,M,byrow=TRUE) else c.list$fit
+ fv <- c.list$fit
new.coeffs <- c.list$coeff
if(length(family at middle))
@@ -54,10 +52,8 @@ vgam.fit <- function(x, y, w, mf,
if(trace) {
cat("VGAM ", bf, " loop ", iter, ": ", criterion, "= ")
- uuuu = switch(criterion,
- coefficients=if(is.R())
- format(new.crit, dig=round(2-log10(epsilon))) else
- format(round(new.crit, round(2-log10(epsilon)))),
+ uuuu = switch(criterion, coefficients=
+ format(new.crit, dig=round(2-log10(epsilon))),
format(round(new.crit, 4)))
switch(criterion,
@@ -68,8 +64,7 @@ vgam.fit <- function(x, y, w, mf,
one.more <- eval(control$convergence)
- if(exists("flush.console"))
- flush.console()
+ flush.console()
if(!is.finite(one.more) || !is.logical(one.more)) one.more = FALSE
if(one.more) {
@@ -99,7 +94,6 @@ vgam.fit <- function(x, y, w, mf,
- backchat <- control$backchat # if(is.R()) FALSE else TRUE
old.coeffs <- coefstart
intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
@@ -141,8 +135,8 @@ vgam.fit <- function(x, y, w, mf,
bf <- "s.vam"
bf.call <- parse(text=paste(
"s.vam(x, z, wz, tfit$smooth, which, tfit$smooth.frame,",
- "bf.maxit, bf.epsilon, trace, se=se.fit, xbig.save, ",
- "Blist, ncolBlist, M, qbig, U, backchat, ",
+ "bf.maxit, bf.epsilon, trace, se=se.fit, X_vlm_save, ",
+ "Blist, ncolBlist, M, qbig, U, ",
"all.knots=control$all.knots, nk=control$nk)",
sep=""))[[1]]
@@ -157,20 +151,18 @@ vgam.fit <- function(x, y, w, mf,
tfit <- list(smooth=s, smooth.frame=smooth.frame)
} else {
- bf.call <- if(is.R()) expression(vlm.wfit(xbig.save, z, Blist=NULL,
- U=U, matrix.out=FALSE, XBIG=TRUE, qr=qr.arg, xij=NULL)) else
- expression(vlm.wfit(xbig.save, z, Blist=NULL,
- U=U, matrix.out=FALSE, XBIG=TRUE, singular.ok=TRUE, qr=qr.arg,
- xij=NULL))
+ bf.call <- expression(vlm.wfit(xmat=X_vlm_save, z, Blist=NULL, U=U,
+ matrix.out=FALSE, is.vlmX=TRUE,
+ qr=qr.arg, xij=NULL))
bf <- "vlm.wfit"
}
- xbig.save <- lm2vlm.model.matrix(x, Blist, xij=control$xij)
+ X_vlm_save <- lm2vlm.model.matrix(x, Blist, xij=control$xij)
if(length(coefstart)) {
- eta <- if(ncol(xbig.save)>1) xbig.save %*% coefstart +
- offset else xbig.save * coefstart + offset
+ eta <- if(ncol(X_vlm_save)>1) X_vlm_save %*% coefstart +
+ offset else X_vlm_save * coefstart + offset
eta <- if(M > 1) matrix(eta, ncol=M, byrow=TRUE) else c(eta)
mu <- family at inverse(eta, extra)
}
@@ -198,20 +190,14 @@ vgam.fit <- function(x, y, w, mf,
c.list <- list(wz=as.double(wz), z=as.double(z),
fit=as.double(t(eta)),
one.more=TRUE, U=as.double(U),
- coeff=as.double(rep(1,ncol(xbig.save))))
-
+ coeff=as.double(rep(1,ncol(X_vlm_save))))
- dxbig <- as.integer(dim(xbig.save))
- n.big <- dxbig[[1]]
- p.big <- dxbig[[2]]
- if(n.big < p.big)
- stop(paste(p.big, "parameters but only", n.big,
- "observations"))
- if(backchat) {
- nframe <- sys.nframe()
- dotC(name="v_init_call", as.integer(nframe), new.s.call)
- }
+ dX_vlm <- as.integer(dim(X_vlm_save))
+ nrow_X_vlm <- dX_vlm[[1]]
+ ncol_X_vlm <- dX_vlm[[2]]
+ if(nrow_X_vlm < ncol_X_vlm)
+ stop(ncol_X_vlm, " parameters but only ", nrow_X_vlm, " observations")
while(c.list$one.more) {
tfit <- eval(bf.call) # fit$smooth.frame is new
@@ -226,21 +212,21 @@ vgam.fit <- function(x, y, w, mf,
}
if(maxit>1 && iter>=maxit)
- warning(paste("convergence not obtained in ", maxit, " iterations."))
+ warning("convergence not obtained in ", maxit, " iterations")
- dn.big <- labels(xbig.save)
- xn.big <- dn.big[[2]]
- yn.big <- dn.big[[1]]
+ dnrow_X_vlm <- labels(X_vlm_save)
+ xnrow_X_vlm <- dnrow_X_vlm[[2]]
+ ynrow_X_vlm <- dnrow_X_vlm[[1]]
if(length(family at fini))
eval(family at fini)
coefs <- tfit$coefficients
- asgn <- attr(xbig.save, "assign") # 29/11/01 was x
+ asgn <- attr(X_vlm_save, "assign") # 29/11/01 was x
- names(coefs) <- xn.big
- cnames <- xn.big
+ names(coefs) <- xnrow_X_vlm
+ cnames <- xnrow_X_vlm
if(!is.null(tfit$rank)) {
rank <- tfit$rank
@@ -248,16 +234,10 @@ vgam.fit <- function(x, y, w, mf,
stop("rank < ncol(x) is bad")
} else rank <- ncol(x)
- R <- if(is.R()) tfit$qr$qr[1:p.big, 1:p.big, drop=FALSE] else {
- if(backchat) tfit$qr[1:p.big, 1:p.big, drop=FALSE] else
- tfit$qr$qr[1:p.big, 1:p.big, drop=FALSE]
- }
+ R <- tfit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop=FALSE]
R[lower.tri(R)] <- 0
- attributes(R) <- if(is.R()) list(dim=c(p.big, p.big),
- dimnames=list(cnames, cnames), rank=rank) else
- list(dim=c(p.big, p.big),
- dimnames=list(cnames, cnames), rank=rank, class="upper")
-
+ attributes(R) <- list(dim=c(ncol_X_vlm, ncol_X_vlm),
+ dimnames=list(cnames, cnames), rank=rank)
dn <- labels(x)
@@ -291,11 +271,7 @@ vgam.fit <- function(x, y, w, mf,
R=R,
terms=Terms)))
- df.residual <- n.big - rank
- if(backchat) {
- fit$coefficients <- coefs
- fit$df.residual <- df.residual
- }
+ df.residual <- nrow_X_vlm - rank
if(!se.fit) {
fit$var <- NULL
@@ -321,7 +297,7 @@ vgam.fit <- function(x, y, w, mf,
misc <- list(
colnames.x = xn,
- colnames.xbig = xn.big,
+ colnames.X_vlm = xnrow_X_vlm,
criterion = criterion,
function.name = function.name,
intercept.only=intercept.only,
@@ -330,10 +306,10 @@ vgam.fit <- function(x, y, w, mf,
n = n,
new.assign = new.assign(x, NewBlist),
nonparametric = nonparametric,
- n.big = n.big,
+ nrow_X_vlm = nrow_X_vlm,
orig.assign = attr(x, "assign"),
p = ncol(x),
- p.big = p.big,
+ ncol_X_vlm = ncol_X_vlm,
ynames = dimnames(y)[[2]])
@@ -357,13 +333,11 @@ vgam.fit <- function(x, y, w, mf,
crit.list <- list()
if(criterion != "coefficients")
crit.list[[criterion]] <- fit[[criterion]] <- new.crit
- for(i in names(.min.criterion.VGAM)) {
- if(i != criterion &&
- any(slotNames(family) == i) &&
- (( is.R() && length(body(slot(family, i)))) ||
- ((!is.R() && length(slot(family, i)) > 1)))) {
- fit[[i]] <- crit.list[[i]] <-
- (slot(family, i))(mu=mu, y=y, w=w, res=FALSE, eta=eta, extra)
+ for(ii in names(.min.criterion.VGAM)) {
+ if(ii != criterion &&
+ any(slotNames(family) == ii) && length(body(slot(family, ii)))) {
+ fit[[ii]] <- crit.list[[ii]] <-
+ (slot(family, ii))(mu=mu, y=y, w=w, res=FALSE, eta=eta, extra)
}
}
diff --git a/R/vgam.match.q b/R/vgam.match.q
index b8bbae4..cb3c6d7 100644
--- a/R/vgam.match.q
+++ b/R/vgam.match.q
@@ -38,7 +38,7 @@ vgam.match <- function(x, all.knots=FALSE, nk=NULL) {
}
if(!is.null(attributes(x)$NAs) || any(is.na(x)))
- stop("can't smooth on variables with NAs")
+ stop("cannot smooth on variables with NAs")
sx <- unique(sort(as.vector(x))) # "as.vector()" strips off attributes
o <- match(x, sx) # as.integer(match(x, sx)) # sx[o]==x
diff --git a/R/vglm.R b/R/vglm.R
index 0ccba55..0ceb6fb 100644
--- a/R/vglm.R
+++ b/R/vglm.R
@@ -44,8 +44,8 @@ vglm <- function(formula,
xlev = .getXlevels(mt, mf)
y <- model.response(mf, "any") # model.extract(mf, "response")
x <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) else
- matrix(, NROW(Y), 0)
- attr(x, "assign") <- attrassigndefault(x, mt) # So as to make it like Splus
+ matrix(, NROW(y), 0)
+ attr(x, "assign") = attrassigndefault(x, mt)
@@ -96,7 +96,7 @@ if(!is.null(form2)) {
if(is.function(family))
family <- family()
if(!inherits(family, "vglmff")) {
- stop(paste("family=", family, "is not a VGAM family function"))
+ stop("'family=", family, "' is not a VGAM family function")
}
eval(vcontrol.expression)
@@ -245,8 +245,8 @@ shadowvglm <-
xlev = .getXlevels(mt, mf)
y <- model.response(mf, "any") # model.extract(mf, "response")
x <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) else
- matrix(, NROW(Y), 0)
- attr(x, "assign") <- attrassigndefault(x, mt) # So as to make it like Splus
+ matrix(, NROW(y), 0)
+ attr(x, "assign") = attrassigndefault(x, mt)
list(Xm2=x, Ym2=y, call=ocall)
}
diff --git a/R/vglm.control.q b/R/vglm.control.q
index 5d0f100..692ec82 100644
--- a/R/vglm.control.q
+++ b/R/vglm.control.q
@@ -10,16 +10,15 @@
vlm.control <- function(save.weight=TRUE, tol=1e-7, method="qr",
checkwz=TRUE, wzepsilon = .Machine$double.eps^0.75,
- ...)
-{
+ ...) {
if(tol <= 0) {
warning("tol not positive; using 1e-7 instead")
tol <- 1e-7
}
if(!is.logical(checkwz) || length(checkwz) != 1)
- stop("bad input for \"checkwz\"")
+ stop("bad input for 'checkwz'")
if(!is.Numeric(wzepsilon, allow=1, positive=TRUE))
- stop("bad input for \"wzepsilon\"")
+ stop("bad input for 'wzepsilon'")
list(save.weight=save.weight, tol=tol, method=method,
checkwz=checkwz,
@@ -27,8 +26,7 @@ vlm.control <- function(save.weight=TRUE, tol=1e-7, method="qr",
}
-vglm.control <- function(backchat=if(is.R()) FALSE else TRUE,
- checkwz=TRUE,
+vglm.control <- function(checkwz=TRUE,
criterion = names(.min.criterion.VGAM),
epsilon=1e-7,
half.stepsizing=TRUE,
@@ -37,7 +35,8 @@ vglm.control <- function(backchat=if(is.R()) FALSE else TRUE,
save.weight=FALSE,
trace=FALSE,
wzepsilon = .Machine$double.eps^0.75,
- xij=NULL, ...)
+ xij=NULL,
+ ...)
{
@@ -50,9 +49,9 @@ vglm.control <- function(backchat=if(is.R()) FALSE else TRUE,
if(!is.logical(checkwz) || length(checkwz) != 1)
- stop("bad input for \"checkwz\"")
+ stop("bad input for 'checkwz'")
if(!is.Numeric(wzepsilon, allow=1, positive=TRUE))
- stop("bad input for \"wzepsilon\"")
+ stop("bad input for 'wzepsilon'")
convergence <- expression({
@@ -64,20 +63,19 @@ vglm.control <- function(backchat=if(is.R()) FALSE else TRUE,
})
if(!is.Numeric(epsilon, allow=1, posit=TRUE)) {
- warning("bad input for \"epsilon\"; using 0.00001 instead")
+ warning("bad input for 'epsilon'; using 0.00001 instead")
epsilon <- 0.00001
}
if(!is.Numeric(maxit, allow=1, posit=TRUE, integ=TRUE)) {
- warning("bad input for \"maxit\"; using 20 instead")
+ warning("bad input for 'maxit'; using 20 instead")
maxit <- 20
}
if(!is.Numeric(stepsize, allow=1, posit=TRUE)) {
- warning("bad input for \"stepsize\"; using 1 instead")
+ warning("bad input for 'stepsize'; using 1 instead")
stepsize <- 1
}
- list(backchat=as.logical(backchat)[1],
- checkwz=checkwz,
+ list(checkwz=checkwz,
convergence=convergence,
criterion=criterion,
epsilon=epsilon,
@@ -88,7 +86,7 @@ vglm.control <- function(backchat=if(is.R()) FALSE else TRUE,
stepsize=stepsize,
trace=as.logical(trace)[1],
wzepsilon = wzepsilon,
- xij=xij)
+ xij=if(is(xij, "formula")) list(xij) else xij)
}
diff --git a/R/vglm.fit.q b/R/vglm.fit.q
index 666992c..fa55bc5 100644
--- a/R/vglm.fit.q
+++ b/R/vglm.fit.q
@@ -3,7 +3,6 @@
-
vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
Xm2=NULL, Ym2=NULL,
etastart=NULL, mustart=NULL, coefstart=NULL,
@@ -21,7 +20,6 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
nonparametric <- FALSE
epsilon <- control$epsilon
maxit <- control$maxit
- backchat <- control$backchat # FALSE
save.weight <- control$save.weight
trace <- control$trace
orig.stepsize <- control$stepsize
@@ -33,9 +31,7 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
new.s.call <- expression({
if(c.list$one.more) {
- fv <- if(backchat) {
- if(M>1) matrix(c.list$fit,n,M,byrow=TRUE) else c.list$fit
- } else c.list$fit
+ fv <- c.list$fit
new.coeffs <- c.list$coeff
if(length(slot(family, "middle")))
@@ -58,9 +54,7 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
cat("VGLM linear loop ", iter, ": ", criterion, "= ")
uuuu =
switch(criterion,
- coefficients=if(is.R())
- format(new.crit, dig=round(2-log10(epsilon))) else
- format(round(new.crit, round(2-log10(epsilon)))),
+ coefficients=format(new.crit, dig=round(2-log10(epsilon))),
format(round(new.crit, 4)))
switch(criterion,
@@ -86,8 +80,7 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
repeat {
if(trace) {
cat(".")
- if(exists("flush.console"))
- flush.console()
+ flush.console()
}
stepsize <- stepsize / 2
if(too.small <- stepsize < 0.001)
@@ -98,7 +91,7 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
if(length(slot(family, "middle")))
eval(slot(family, "middle"))
- fv <- xbig.save %*% new.coeffs
+ fv <- X_vlm_save %*% new.coeffs
if(M > 1)
fv <- matrix(fv, n, M, byrow=TRUE)
@@ -123,8 +116,8 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
if(trace)
cat("\n")
if(too.small) {
- warning(paste("iterations terminated because",
- "half-step sizes are very small"))
+ warning("iterations terminated because ",
+ "half-step sizes are very small")
one.more <- FALSE
} else {
if(trace) {
@@ -132,9 +125,7 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
iter, ": ", criterion, "= ")
uuuu = switch(criterion,
- coefficients=if(is.R())
- format(new.crit, dig=round(2-log10(epsilon))) else
- format(round(new.crit, round(2-log10(epsilon)))),
+ coefficients=format(new.crit, dig=round(2-log10(epsilon))),
format(round(new.crit, 4)))
switch(criterion,
@@ -149,8 +140,7 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
one.more <- eval(control$convergence)
}
}
- if(exists("flush.console"))
- flush.console()
+ flush.console()
if(!is.logical(one.more)) one.more = FALSE
if(one.more) {
@@ -166,7 +156,7 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
c.list$z <- z
c.list$U <- U
- if(copyxbig) c.list$xbig <- xbig.save
+ if(copy_X_vlm) c.list$X_vlm <- X_vlm_save
}
c.list$one.more <- one.more
@@ -180,7 +170,7 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
- copyxbig <- FALSE # May be overwritten in @initialize
+ copy_X_vlm <- FALSE # May be overwritten in @initialize
stepsize <- orig.stepsize
old.coeffs <- coefstart
@@ -220,12 +210,12 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
dimB <- sum(ncolBlist)
- xbig.save <- lm2vlm.model.matrix(x, Blist, xij=control$xij)
+ X_vlm_save = lm2vlm.model.matrix(x, Blist, xij=control$xij, Xm2=Xm2)
if(length(coefstart)) {
- eta <- if(ncol(xbig.save)>1) xbig.save %*% coefstart +
- offset else xbig.save * coefstart + offset
+ eta <- if(ncol(X_vlm_save)>1) X_vlm_save %*% coefstart +
+ offset else X_vlm_save * coefstart + offset
eta <- if(M > 1) matrix(eta, ncol=M, byrow=TRUE) else c(eta)
mu <- slot(family, "inverse")(eta, extra)
}
@@ -251,101 +241,70 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
z <- eta + vbacksub(U, tvfor, M=M, n=n) - offset
c.list <- list(z=as.double(z), fit=as.double(t(eta)), one.more=TRUE,
- coeff=as.double(rep(1,ncol(xbig.save))), U=as.double(U),
- copyxbig=copyxbig,
- xbig=if(copyxbig) as.double(xbig.save) else double(3))
-
+ coeff=as.double(rep(1,ncol(X_vlm_save))), U=as.double(U),
+ copy_X_vlm=copy_X_vlm,
+ X_vlm=if(copy_X_vlm) as.double(X_vlm_save) else double(3))
- dxbig <- as.integer(dim(xbig.save))
- n.big <- dxbig[[1]]
- p.big <- dxbig[[2]]
- if(n.big < p.big)
- stop(paste(p.big, "parameters but only", n.big, "observations"))
+ dX_vlm <- as.integer(dim(X_vlm_save))
+ nrow_X_vlm <- dX_vlm[[1]]
+ ncol_X_vlm <- dX_vlm[[2]]
- if(backchat) {
- nframe <- sys.nframe()
- dotC(name="v_init_call", as.integer(nframe), new.s.call)
- }
+ if(nrow_X_vlm < ncol_X_vlm)
+ stop(ncol_X_vlm, "parameters but only ", nrow_X_vlm, " observations")
- if(backchat) {
- index.vglm <- iam(NA, NA, M, both=TRUE)
- tfit <- dotFortran(name="vglmf", as.double(xbig.save), n.big, p.big,
- backchat=as.integer(T),
- as.integer(n), as.double(z),
- coefficients=double(p.big),
- predictors=double(n.big), effects=double(n.big),
- qr=as.double(xbig.save), qraux=double(p.big),
- rank=as.integer(0), pivot=as.integer(seq(p.big)),
- work=double(max(n.big, 2 * p.big)),
- wkmm=double(M*M*5 + M*p.big),
- as.double(U), as.integer(M),
- dimu=as.integer(if(is.matrix(U)) nrow(U) else 1),
- dimm=as.integer(if(is.matrix(wz)) ncol(wz) else 1),
- as.integer(index.vglm$row), as.integer(index.vglm$col),
- copyxbig=as.integer(copyxbig),
- rss=double(1))
- } else {
- bf.call <- if(is.R()) expression(vlm.wfit(xbig.save, z, Blist=NULL,
- U=U, matrix.out=FALSE, XBIG=TRUE, qr=qr.arg, xij=NULL)) else
- expression(vlm.wfit(xbig.save, z, Blist=NULL,
- U=U, matrix.out=FALSE, XBIG=TRUE, qr=qr.arg, xij=NULL))
+ bf.call <- expression(vlm.wfit(xmat=X_vlm_save, z, Blist=NULL, U=U,
+ matrix.out=FALSE, is.vlmX=TRUE,
+ qr=qr.arg, xij=NULL))
- while(c.list$one.more) {
- tfit <- eval(bf.call) # fit$smooth.frame is new
+ while(c.list$one.more) {
+ tfit <- eval(bf.call) # fit$smooth.frame is new
- c.list$coeff <- tfit$coefficients
+ c.list$coeff <- tfit$coefficients
- tfit$predictors <- tfit$fitted.values
+ tfit$predictors <- tfit$fitted.values
- c.list$fit <- tfit$fitted.values
- c.list <- eval(new.s.call)
- NULL
- }
+ c.list$fit <- tfit$fitted.values
+ c.list <- eval(new.s.call)
+ NULL
}
if(maxit>1 && iter>=maxit)
- warning(paste("convergence not obtained in", maxit, "iterations."))
+ warning("convergence not obtained in ", maxit, " iterations")
- dn.big <- labels(xbig.save)
- xn.big <- dn.big[[2]]
- yn.big <- dn.big[[1]]
+ dnrow_X_vlm <- labels(X_vlm_save)
+ xnrow_X_vlm <- dnrow_X_vlm[[2]]
+ ynrow_X_vlm <- dnrow_X_vlm[[1]]
if(length(slot(family, "fini")))
eval(slot(family, "fini"))
if(M>1)
- tfit$predictors <- matrix(tfit$predictors, n, M,
- byrow=backchat)
+ tfit$predictors <- matrix(tfit$predictors, n, M)
coefs <- tfit$coefficients
- asgn <- attr(xbig.save, "assign")
+ asgn <- attr(X_vlm_save, "assign")
- names(coefs) <- xn.big
+ names(coefs) <- xnrow_X_vlm
rank <- tfit$rank
- cnames <- xn.big
+ cnames <- xnrow_X_vlm
- if(check.rank && rank < p.big)
+ if(check.rank && rank < ncol_X_vlm)
stop("vglm only handles full-rank models (currently)")
- R <- if(is.R()) tfit$qr$qr[1:p.big, 1:p.big, drop=FALSE] else {
- if(backchat) tfit$qr[1:p.big, 1:p.big, drop=FALSE] else
- tfit$qr$qr[1:p.big, 1:p.big, drop=FALSE]
- }
+ R <- tfit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop=FALSE]
R[lower.tri(R)] <- 0
- attributes(R) <- if(is.R()) list(dim=c(p.big, p.big),
- dimnames=list(cnames, cnames), rank=rank) else
- list(dim=c(p.big, p.big),
- dimnames=list(cnames, cnames), rank=rank, class="upper")
+ attributes(R) <- list(dim=c(ncol_X_vlm, ncol_X_vlm),
+ dimnames=list(cnames, cnames), rank=rank)
effects <- tfit$effects
- neff <- rep("", n.big)
- neff[seq(p.big)] <- cnames
+ neff <- rep("", nrow_X_vlm)
+ neff[seq(ncol_X_vlm)] <- cnames
names(effects) <- neff
dim(tfit$predictors) <- c(n, M)
@@ -377,7 +336,7 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
}
- df.residual <- n.big - rank
+ df.residual <- nrow_X_vlm - rank
fit <- list(assign=asgn,
coefficients=coefs,
constraints=Blist,
@@ -392,12 +351,8 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
terms=Terms) # terms: This used to be done in vglm()
if(qr.arg) {
- fit$qr <- if(is.R()) {
- fit$qr <- tfit$qr
- } else {
- if(backchat) tfit[c("qr", "rank", "pivot", "qraux")] else tfit$qr
- }
- dimnames(fit$qr$qr) <- dn.big
+ fit$qr <- tfit$qr
+ dimnames(fit$qr$qr) <- dnrow_X_vlm
}
if(M==1) {
@@ -408,7 +363,7 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
misc <- list(
colnames.x = xn,
- colnames.xbig = xn.big,
+ colnames.X_vlm = xnrow_X_vlm,
criterion = criterion,
function.name = function.name,
intercept.only=intercept.only,
@@ -416,10 +371,10 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
M = M,
n = n,
nonparametric = nonparametric,
- n.big = n.big,
+ nrow_X_vlm = nrow_X_vlm,
orig.assign = attr(x, "assign"),
p = ncol(x),
- p.big = p.big,
+ ncol_X_vlm = ncol_X_vlm,
ynames = dimnames(y)[[2]])
@@ -427,16 +382,13 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
if(criterion != "coefficients")
crit.list[[criterion]] <- fit[[criterion]] <- new.crit
- for(i in names(.min.criterion.VGAM)) {
- if(i != criterion &&
- any(slotNames(family) == i) &&
- (( is.R() && length(body(slot(family, i)))) ||
- ((!is.R() && length(slot(family, i)) > 1)))) {
- fit[[i]] <- crit.list[[i]] <-
- (slot(family, i))(mu=mu, y=y, w=w, res=FALSE, eta=eta, extra)
+ for(ii in names(.min.criterion.VGAM)) {
+ if(ii != criterion &&
+ any(slotNames(family) == ii) && length(body(slot(family, ii)))) {
+ fit[[ii]] <- crit.list[[ii]] <-
+ (slot(family, ii))(mu=mu, y=y, w=w, res=FALSE, eta=eta, extra)
}
}
-
diff --git a/R/vlm.R b/R/vlm.R
index 27e5361..23905e6 100644
--- a/R/vlm.R
+++ b/R/vlm.R
@@ -82,8 +82,8 @@ vlm <- function(formula,
identity.wts <- FALSE
temp = ncol(as.matrix(wz))
if(temp < M || temp > M*(M+1)/2)
- stop(paste("input w must have at between", M, "and",
- M*(M+1)/2, "columns"))
+ stop("input 'w' must have between ", M, " and ", M*(M+1)/2,
+ " columns")
wz <- prior.weights * wz
}
@@ -91,12 +91,12 @@ vlm <- function(formula,
Blist <- process.constraints(constraints, x, M)
intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
- fit <- vlm.wfit(x=x, z=y, Blist=Blist, wz=wz, U=NULL,
- matrix.out=FALSE, XBIG=FALSE, rss=TRUE, qr=qr.arg,
- x.ret=TRUE, offset = offset)
+ fit = vlm.wfit(xmat=x, z=y, Blist=Blist, wz=wz, U=NULL,
+ matrix.out=FALSE, is.vlmX=FALSE, rss=TRUE, qr=qr.arg,
+ x.ret=TRUE, offset = offset)
- p.big <- fit$rank
- fit$R <- fit$qr$qr[1:p.big, 1:p.big, drop=FALSE]
+ ncol_X_vlm <- fit$rank
+ fit$R <- fit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop=FALSE]
fit$R[lower.tri(fit$R)] <- 0
@@ -104,26 +104,26 @@ vlm <- function(formula,
fit$constraints <- Blist
- dn.big <- labels(fit$xbig)
- xn.big <- dn.big[[2]]
+ dnrow_X_vlm <- labels(fit$X_vlm)
+ xnrow_X_vlm <- dnrow_X_vlm[[2]]
dn <- labels(x)
xn <- dn[[2]]
- dxbig <- as.integer(dim(fit$xbig))
- n.big <- dxbig[[1]]
- p.big <- dxbig[[2]]
+ dX_vlm <- as.integer(dim(fit$X_vlm))
+ nrow_X_vlm <- dX_vlm[[1]]
+ ncol_X_vlm <- dX_vlm[[2]]
misc <- list(
colnames.x = xn,
- colnames.xbig = xn.big,
+ colnames.X_vlm = xnrow_X_vlm,
function.name = function.name,
intercept.only=intercept.only,
predictors.names = predictors.names,
M = M,
n = nrow(x),
- n.big = n.big,
+ nrow_X_vlm = nrow_X_vlm,
orig.assign = attr(x, "assign"),
p = ncol(x),
- p.big = p.big,
+ ncol_X_vlm = ncol_X_vlm,
ynames = dimnames(y)[[2]])
fit$misc <- misc
diff --git a/R/vlm.wfit.q b/R/vlm.wfit.q
index 5c38a32..12304fc 100644
--- a/R/vlm.wfit.q
+++ b/R/vlm.wfit.q
@@ -6,28 +6,24 @@
-
-
-vlm.wfit <- function(x, z, Blist, wz=NULL, U=NULL,
- matrix.out=FALSE, XBIG=FALSE, rss=TRUE, qr=FALSE, x.ret=FALSE,
- offset=NULL,
- omit.these=NULL, only.rss=FALSE,
- ncolx=if(matrix.out && XBIG) stop("need this argument") else ncol(x),
- xij=NULL, Aarray=NULL, Aindex=NULL, lp.names=NULL,
- Eta.range=NULL, ...)
-{
-
+vlm.wfit <- function(xmat, zmat, Blist, wz=NULL, U=NULL,
+ matrix.out=FALSE, is.vlmX=FALSE, rss=TRUE, qr=FALSE, x.ret=FALSE,
+ offset=NULL,
+ omit.these=NULL, only.rss=FALSE,
+ ncolx=if(matrix.out && is.vlmX) stop("need argument 'ncolx'") else ncol(xmat),
+ xij=NULL,
+ lp.names=NULL, Eta.range=NULL, Xm2=NULL, ...) {
missing.Blist <- missing(Blist)
- z = as.matrix(z)
- n <- nrow(z)
- M <- ncol(z)
+ zmat = as.matrix(zmat)
+ n <- nrow(zmat)
+ M <- ncol(zmat)
if(!only.rss) {
- contrast.save <- attr(x, "contrasts")
- znames <- dimnames(z)[[2]]
+ contrast.save <- attr(xmat, "contrasts")
+ znames <- dimnames(zmat)[[2]]
}
if(length(offset))
- z <- z - offset
+ zmat <- zmat - offset
if(missing(U) || !length(U)) {
U <- vchol(wz, M=M, n=n, silent=FALSE)
}
@@ -35,35 +31,34 @@ vlm.wfit <- function(x, z, Blist, wz=NULL, U=NULL,
if(dU[2] != n)
stop("input unconformable")
- xbig.save <- if(XBIG) {
- x
+ X_vlm_save <- if(is.vlmX) {
+ xmat
} else {
if(missing.Blist || !length(Blist))
- Blist = replace.constraints(vector("list", ncol(x)),
- diag(M), 1:ncol(x)) # NULL
- lm2vlm.model.matrix(x=x, Blist=Blist, M=M, assign.attributes=FALSE,
- xij = xij, Aarray=Aarray, Aindex=Aindex)
+ Blist = replace.constraints(vector("list", ncol(xmat)),
+ diag(M), 1:ncol(xmat)) # NULL
+ lm2vlm.model.matrix(x=xmat, Blist=Blist, M=M, assign.attributes=FALSE,
+ xij = xij,
+ Xm2=Xm2)
}
- xbig <- mux111(U, xbig.save, M=M)
- z.big <- mux22(U, z, M=M, upper=TRUE, as.mat=FALSE)
+ X_vlm <- mux111(U, X_vlm_save, M=M)
+ z_vlm <- mux22(U, zmat, M=M, upper=TRUE, as.mat=FALSE)
if(length(omit.these)) {
- xbig = xbig[!omit.these,,drop=FALSE]
- z.big = z.big[!omit.these]
+ X_vlm = X_vlm[!omit.these,,drop=FALSE]
+ z_vlm = z_vlm[!omit.these]
}
- ans <- if(!is.R()) lm.fit.qr(x=xbig, y=z.big, qr=qr, ...) else
- lm.fit(xbig, z.big, ...)
+ ans <- lm.fit(X_vlm, z_vlm, ...)
if(rss) {
ans$rss <- sum(ans$resid^2)
if(only.rss) return(list(rss=ans$rss))
}
-
if(length(omit.these) && any(omit.these))
- stop("code beyond here can't handle omitted observations")
+ stop("code beyond here cannot handle omitted observations")
fv <- ans$fitted.values
@@ -80,8 +75,8 @@ vlm.wfit <- function(x, z, Blist, wz=NULL, U=NULL,
ans$fitted.values <- if(M==1) c(fv) else fv
if(M > 1)
- dimnames(ans$fitted.values) <- list(dimnames(z)[[1]], znames)
- ans$residuals <- if(M==1) c(z-fv) else z-fv
+ dimnames(ans$fitted.values) <- list(dimnames(zmat)[[1]], znames)
+ ans$residuals <- if(M==1) c(zmat-fv) else zmat-fv
if(M > 1)
dimnames(ans$residuals) <- list(dimnames(ans$residuals)[[1]], znames)
ans$misc <- list(M=M, n=n)
@@ -90,7 +85,7 @@ vlm.wfit <- function(x, z, Blist, wz=NULL, U=NULL,
ans$constraints <- Blist
ans$contrasts <- contrast.save
if(x.ret)
- ans$xbig <- xbig.save
+ ans$X_vlm <- X_vlm_save
if(!is.null(offset))
ans$fitted.values <- ans$fitted.values + offset
@@ -102,25 +97,24 @@ vlm.wfit <- function(x, z, Blist, wz=NULL, U=NULL,
return(ans)
- dx2 = if(XBIG) NULL else dimnames(x)[[2]]
+ dx2 = if(is.vlmX) NULL else dimnames(xmat)[[2]]
B <- matrix(as.numeric(NA), nrow=M, ncol=ncolx, dimnames=list(lp.names, dx2))
if(is.null(Blist)) {
Blist = replace.constraints(vector("list", ncolx), diag(M), 1:ncolx)
}
ncolBlist <- unlist(lapply(Blist, ncol))
temp <- c(0, cumsum(ncolBlist))
- for(i in 1:ncolx) {
- index <- (temp[i]+1):temp[i+1]
- cm <- Blist[[i]]
- B[,i] <- cm %*% ans$coef[index]
+ for(ii in 1:ncolx) {
+ index <- (temp[ii]+1):temp[ii+1]
+ cm <- Blist[[ii]]
+ B[,ii] <- cm %*% ans$coef[index]
}
ans$mat.coefficients <- t(B)
ans
}
-print.vlm.wfit <- function(x, ...)
-{
+print.vlm.wfit <- function(x, ...) {
if(!is.null(cl <- x$call)) {
cat("Call:\n")
dput(cl)
diff --git a/R/vsmooth.spline.q b/R/vsmooth.spline.q
index c66da8d..fa0d987 100644
--- a/R/vsmooth.spline.q
+++ b/R/vsmooth.spline.q
@@ -68,7 +68,7 @@ setMethod("show", "vsmooth.spline",
printvsmooth.spline(object))
setMethod("plot", "vsmooth.spline",
function(x, y, ...) {
- if(!missing(y)) stop("can't process the \"y\" argument")
+ if(!missing(y)) stop("cannot process the 'y' argument")
invisible(plotvsmooth.spline(x, ...))})
setMethod("predict", "vsmooth.spline.fit",
function(object, ...)
@@ -93,14 +93,14 @@ vsmooth.spline <- function(x, y, w, df=rep(5,M), spar=NULL, # rep(0,M),
missing.constraints <- missing(constraints)
if(!(missing.spar <- missing(spar)) && !missing(df))
- stop("can't specify both spar and df")
+ stop("cannot specify both 'spar' and 'df'")
my.call <- match.call()
if(missing(y)) {
if(is.list(x)) {
if(any(is.na(match(c("x", "y"), names(x)))))
- stop("cannot find x and y in list")
+ stop("cannot find 'x' and 'y' in list")
y <- x$y
x <- x$x
} else if(is.complex(x)) {
@@ -120,21 +120,21 @@ vsmooth.spline <- function(x, y, w, df=rep(5,M), spar=NULL, # rep(0,M),
ny2 <- dimnames(y)[[2]] # NULL if vector
M <- ncol(y)
if(n != nrow(y))
- stop("lengths of x and y must match")
+ stop("lengths of 'x' and 'y' must match")
if(any(is.na(x)) || any(is.na(y)))
- stop("NAs not allowed in x or y")
+ stop("NAs not allowed in 'x' or 'y'")
if(missing(w)) {
w <- matrix(1, n, M)
} else {
if(any(is.na(w)))
- stop("NAs not allowed in w")
+ stop("NAs not allowed in 'w'")
w <- as.matrix(w)
if(nrow(y) != nrow(w) || ncol(w)>M*(M+1)/2)
- stop("w and y don't match")
+ stop("'w' and 'y' don't match")
if(scale.w)
w <- w / mean(w[,1:M]) # 'Average' value is 1
@@ -148,11 +148,11 @@ vsmooth.spline <- function(x, y, w, df=rep(5,M), spar=NULL, # rep(0,M),
if(is.matrix(constraints))
constraints <- list("(Intercepts)"=constraints, x=constraints)
if(!is.list(constraints) || length(constraints)!=2)
- stop("constraints must equal a list (of length 2) or a matrix")
+ stop("'constraints' must equal a list (of length 2) or a matrix")
for(i in 1:2)
if(!is.numeric(constraints[[i]]) || !is.matrix(constraints[[i]]) ||
nrow(constraints[[i]])!=M || ncol(constraints[[i]])>M)
- stop("something wrong with the constraints")
+ stop("something wrong with 'constraints'")
names(constraints) <- c("(Intercepts)", "x")
@@ -160,7 +160,7 @@ vsmooth.spline <- function(x, y, w, df=rep(5,M), spar=NULL, # rep(0,M),
o <- match(x, sx) # sx[o]==x
nef <- length(sx)
if(nef < 7)
- stop("not enough unique x values (need 7 or more)")
+ stop("not enough unique 'x' values (need 7 or more)")
index <- iam(NA, NA, M, both=TRUE, diagonal=TRUE)
@@ -181,7 +181,7 @@ vsmooth.spline <- function(x, y, w, df=rep(5,M), spar=NULL, # rep(0,M),
if(collaps$ok != 1)
- stop("some non-positive-definite weight matrices detected in \"vsuff9\"")
+ stop("some non-positive-definite weight matrices detected in 'vsuff9'")
dim(collaps$ybar) <- dim(collaps$wz) <- c(nef, M)
@@ -212,8 +212,8 @@ vsmooth.spline <- function(x, y, w, df=rep(5,M), spar=NULL, # rep(0,M),
} else {
ispar <- 0
if(!is.numeric(df) || any(df < 2 | df > nef))
- stop(paste("you must supply 2 <= df <=", nef))
- if(tol.nl <= 0) stop("bad value for tol.nl")
+ stop("you must supply '2 <= df <= ", nef, "'")
+ if(tol.nl <= 0) stop("bad value for 'tol.nl'")
nonlin <- abs(df-2) > tol.nl
}
@@ -252,12 +252,12 @@ vsmooth.spline <- function(x, y, w, df=rep(5,M), spar=NULL, # rep(0,M),
} else {
knot <- c(rep(xbar[1], 3), xbar, rep(xbar[nef], 3))
}
- if(length(nk)) warning("overriding nk by all.knots=TRUE")
+ if(length(nk)) warning("overriding 'nk' by 'all.knots=TRUE'")
nk <- length(knot) - 4 # No longer nef + 2
} else {
chosen = length(nk)
if(chosen && (nk > nef+2 || nk <= 5))
- stop("bad value for nk")
+ stop("bad value for 'nk'")
if(!chosen) nk = 0
knot.list <- dotFortran(name="vknotl2", as.double(xbar), as.integer(nef),
knot=double(nef+6), k=as.integer(nk+4),
@@ -294,7 +294,7 @@ vsmooth.spline <- function(x, y, w, df=rep(5,M), spar=NULL, # rep(0,M),
as.double(conmat), as.integer(ncb),
as.integer(trivc), wuwbar=as.integer(0), ok=as.integer(0))
if(collaps$ok != 1)
- stop("some non-positive-definite weight matrices detected in \"vsuff9\"")
+ stop("some non-positive-definite weight matrices detected in 'vsuff9'")
dim(collaps$ybar) <- dim(collaps$wz) <- c(nef, M)
collaps$ybar = collaps$ybar[,1:ncb,drop=FALSE]
@@ -330,12 +330,11 @@ vsmooth.spline <- function(x, y, w, df=rep(5,M), spar=NULL, # rep(0,M),
if(vsplin$ier != 0) {
- cat("vsplin$ier ==", vsplin$ier, "\n")
- stop("something gone wrong in \"vsplin\"")
+ stop("vsplin$ier == ", vsplin$ier, ". Something gone wrong in 'vsplin'")
}
if(vsplin$info != 0)
- stop(paste("leading minor of order", vsplin$info,
- "is not positive definite"))
+ stop("leading minor of order ", vsplin$info,
+ " is not positive-definite")
dim(vsplin$lev) <- c(nef, ncb) # A matrix even when ncb==1
if(ncb > 1) {
@@ -344,7 +343,7 @@ vsmooth.spline <- function(x, y, w, df=rep(5,M), spar=NULL, # rep(0,M),
dim(vsplin$var) <- c(nef, ncb)
}
- df.nl <- apply(vsplin$lev, 2, sum) # Actual EDF used
+ df.nl <- colSums(vsplin$lev) # Actual EDF used
fv <- lfit at fitted.values + vsplin$fv %*% t(conmat)
@@ -457,7 +456,8 @@ plotvsmooth.spline <- function(x, xlab="x", ylab="", points=TRUE,
predictvsmooth.spline <- function(object, x, deriv=0, se.fit=FALSE) {
if(se.fit)
- warning("se.fit=TRUE is not currently implemented. Using se.fit=FALSE")
+ warning("'se.fit=TRUE' is not currently implemented. ",
+ "Using 'se.fit=FALSE'")
lfit <- object at lfit # Linear part of the vector spline
nlfit <- object at nlfit # Nonlinear part of the vector spline
diff --git a/data/olympic.txt b/data/olympic.txt
new file mode 100644
index 0000000..e8db440
--- /dev/null
+++ b/data/olympic.txt
@@ -0,0 +1,88 @@
+rank country gold silver bronze totalmedal
+1 China 51 21 28 100
+2 UnitedStates 36 38 36 110
+3 Russia 23 21 28 72
+4 GreatBritain 19 13 15 47
+5 Germany 16 10 15 41
+6 Australia 14 15 17 46
+7 SouthKorea 13 10 8 31
+8 Japan 9 6 10 25
+9 Italy 8 10 10 28
+10 France 7 16 17 40
+11 Ukraine 7 5 15 27
+12 Netherlands 7 5 4 16
+13 Jamaica 6 3 2 11
+14 Spain 5 10 3 18
+15 Kenya 5 5 4 14
+16 Belarus 4 5 10 19
+17 Romania 4 1 3 8
+18 Ethiopia 4 1 2 7
+19 Canada 3 9 6 18
+20 Poland 3 6 1 10
+21 Hungary 3 5 2 10
+21 Norway 3 5 2 10
+23 Brazil 3 4 8 15
+24 CzechRepublic 3 3 0 6
+25 Slovakia 3 2 1 6
+26 NewZealand 3 1 5 9
+27 Georgia 3 0 3 6
+28 Cuba 2 11 11 24
+29 Kazakhstan 2 4 7 13
+30 Denmark 2 2 3 7
+31 Mongolia 2 2 0 4
+31 Thailand 2 2 0 4
+33 NorthKorea 2 1 3 6
+34 Argentina 2 0 4 6
+34 Switzerland 2 0 4 6
+36 Mexico 2 0 1 3
+37 Turkey 1 4 3 8
+38 Zimbabwe 1 3 0 4
+39 Azerbaijan 1 2 4 7
+40 Uzbekistan 1 2 3 6
+41 Slovenia 1 2 2 5
+42 Bulgaria 1 1 3 5
+42 Indonesia 1 1 3 5
+44 Finland 1 1 2 4
+45 Latvia 1 1 1 3
+46 Belgium 1 1 0 2
+46 DominicanRepublic 1 1 0 2
+46 Estonia 1 1 0 2
+46 Portugal 1 1 0 2
+50 India 1 0 2 3
+51 Iran 1 0 1 2
+52 Bahrain 1 0 0 1
+52 Cameroon 1 0 0 1
+52 Panama 1 0 0 1
+52 Tunisia 1 0 0 1
+56 Sweden 0 4 1 5
+57 Croatia 0 2 3 5
+57 Lithuania 0 2 3 5
+59 Greece 0 2 2 4
+60 TrinidadandTobago 0 2 0 2
+61 Nigeria 0 1 3 4
+62 Austria 0 1 2 3
+62 Ireland 0 1 2 3
+62 Serbia 0 1 2 3
+65 Algeria 0 1 1 2
+65 Bahamas 0 1 1 2
+65 Columbia 0 1 1 2
+65 Kyrgyzstan 0 1 1 2
+65 Morocco 0 1 1 2
+65 Tajikistan 0 1 1 2
+71 Chile 0 1 0 1
+71 Ecuador 0 1 0 1
+71 Iceland 0 1 0 1
+71 Malaysia 0 1 0 1
+71 Singapore 0 1 0 1
+71 SouthAfrica 0 1 0 1
+71 Sudan 0 1 0 1
+71 Vietnam 0 1 0 1
+79 Armenia 0 0 6 6
+80 Taiwan 0 0 4 4
+81 Afghanistan 0 0 1 1
+81 Egypt 0 0 1 1
+81 Israel 0 0 1 1
+81 Mauritius 0 0 1 1
+81 Moldova 0 0 1 1
+81 Togo 0 0 1 1
+81 Venezuela 0 0 1 1
diff --git a/man/AA.Aa.aa.Rd b/man/AA.Aa.aa.Rd
index cfc72ce..181b62f 100644
--- a/man/AA.Aa.aa.Rd
+++ b/man/AA.Aa.aa.Rd
@@ -41,12 +41,14 @@ Sunderland, MA: Sinauer Associates, Inc.
}
\author{ T. W. Yee }
\note{
-The input can be a 3-column matrix of counts, where the columns
-are AA, Ab and aa
-(in order).
-Alternatively, the input can be a 3-column matrix of
-proportions (so each row adds to 1) and the \code{weights}
-argument is used to specify the total number of counts for each row.
+ The input can be a 3-column matrix of counts, where the columns
+ are AA, Ab and aa
+ (in order).
+ Alternatively, the input can be a 3-column matrix of
+ proportions (so each row adds to 1) and the \code{weights}
+ argument is used to specify the total number of counts for each row.
+
+
}
\seealso{
\code{\link{AB.Ab.aB.ab}},
@@ -54,6 +56,7 @@ argument is used to specify the total number of counts for each row.
\code{\link{ABO}},
\code{\link{G1G2G3}},
\code{\link{MNSs}}.
+
}
\examples{
y = cbind(53, 95, 38)
diff --git a/man/AB.Ab.aB.ab.Rd b/man/AB.Ab.aB.ab.Rd
index d38d59d..68db8ce 100644
--- a/man/AB.Ab.aB.ab.Rd
+++ b/man/AB.Ab.aB.ab.Rd
@@ -5,6 +5,7 @@
\description{
Estimates the parameter of the
AB-Ab-aB-ab blood group system.
+
}
\usage{
AB.Ab.aB.ab(link = "logit", earg=list(), init.p = NULL)
@@ -45,6 +46,7 @@ are AB, Ab, aB and ab
Alternatively, the input can be a 4-column matrix of
proportions (so each row adds to 1) and the \code{weights}
argument is used to specify the total number of counts for each row.
+
}
\seealso{
@@ -53,10 +55,11 @@ argument is used to specify the total number of counts for each row.
\code{\link{ABO}},
\code{\link{G1G2G3}},
\code{\link{MNSs}}.
+
}
\examples{
-y = cbind(1997, 906, 904, 32) # Data from Fisher (1925)
+y = cbind(AB=1997, Ab=906, aB=904, ab=32) # Data from Fisher (1925)
fit = vglm(y ~ 1, AB.Ab.aB.ab(link="identity", init.p=0.9), trace=TRUE)
fit = vglm(y ~ 1, AB.Ab.aB.ab, trace=TRUE)
rbind(y, sum(y)*fitted(fit))
diff --git a/man/AB.Ab.aB.ab2.Rd b/man/AB.Ab.aB.ab2.Rd
index fe63061..3e0f129 100644
--- a/man/AB.Ab.aB.ab2.Rd
+++ b/man/AB.Ab.aB.ab2.Rd
@@ -3,8 +3,9 @@
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ The AB-Ab-aB-ab2 Blood Group System }
\description{
- Estimates the parameter of the
+ Estimates the parameter of the
the AB-Ab-aB-ab2 blood group system.
+
}
\usage{
AB.Ab.aB.ab2(link = "logit", earg=list(), init.p = NULL)
@@ -25,11 +26,13 @@ AB.Ab.aB.ab2(link = "logit", earg=list(), init.p = NULL)
}
\details{
This one parameter model involves a probability called \code{p}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
Elandt-Johnson, R. C. (1971)
@@ -66,7 +69,7 @@ fit = vglm(y ~ 1, AB.Ab.aB.ab2(link=logit), trace=TRUE, crit="coef")
fit = vglm(y ~ 1, AB.Ab.aB.ab2(link=cloglog), trace=TRUE, crit="coef")
Coef(fit) # Estimated p
rbind(y, sum(y)*fitted(fit))
-diag(vcov(fit))^0.5
+sqrt(diag(vcov(fit)))
}
\keyword{models}
\keyword{regression}
diff --git a/man/ABO.Rd b/man/ABO.Rd
index af8f545..a7f6931 100644
--- a/man/ABO.Rd
+++ b/man/ABO.Rd
@@ -5,14 +5,15 @@
\description{
Estimates the two independent parameters of the
the ABO blood group system.
+
}
\usage{
-ABO(link = "logit", earg=list(), ir = NULL, ip = NULL)
+ABO(link = "logit", earg=list(), ipA = NULL, ipO = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{link}{
- Link function applied to \code{p} and \code{q}.
+ Link function applied to \code{pA} and \code{pB}.
See \code{\link{Links}} for more choices.
}
@@ -21,21 +22,29 @@ ABO(link = "logit", earg=list(), ir = NULL, ip = NULL)
See \code{earg} in \code{\link{Links}} for general information.
}
- \item{ir, ip}{ Optional initial value for \code{r} and \code{p}.
- A \code{NULL} value means values are computed internally. }
+ \item{ipA, ipO}{
+ Optional initial value for \code{pA} and \code{pO}.
+ A \code{NULL} value means values are computed internally.
+ }
+
}
\details{
- The parameters \code{p} and \code{q} are probabilities, so that
- \code{r=1-p-q} is the third probability.
- The probabilities \code{p} and \code{r} correspond to
- A and O respectively, so that \code{q} is the probability for B.
- It is easier to make use of initial values for \code{r} than for
- \code{q}.
+ The parameters \code{pA} and \code{pB} are probabilities, so that
+ \code{pO=1-pA-pB} is the third probability.
+ The probabilities \code{pA} and \code{pB} correspond to A and B respectively,
+ so that \code{pO} is the probability for O.
+ It is easier to make use of initial values for \code{pO} than for \code{pB}.
+ In documentation elsewhere I sometimes use
+ \code{pA=p},
+ \code{pB=q},
+ \code{pO=r}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
Lange, K. (2002)
@@ -59,13 +68,14 @@ ABO(link = "logit", earg=list(), ir = NULL, ip = NULL)
\code{\link{AB.Ab.aB.ab2}},
\code{\link{G1G2G3}},
\code{\link{MNSs}}.
+
}
\examples{
y = cbind(A=725, B=258, AB=72, O=1073) # Order matters, not the name
fit = vglm(y ~ 1, ABO(link=logit), trace=TRUE, cri="coef")
fit = vglm(y ~ 1, ABO(link=identity), trace=TRUE, cri="coef")
coef(fit, matrix=TRUE)
-Coef(fit) # Estimated p and q
+Coef(fit) # Estimated pA and pB
rbind(y, sum(y)*fitted(fit))
sqrt(diag(vcov(fit)))
}
diff --git a/man/AICvlm.Rd b/man/AICvlm.Rd
index f2bf1fd..ef83504 100644
--- a/man/AICvlm.Rd
+++ b/man/AICvlm.Rd
@@ -3,9 +3,9 @@
%\alias{AICvglm}
\alias{AICvgam}
\alias{AICrrvglm}
-%\alias{AICqrrvglm}
+\alias{AICqrrvglm}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Akaike's An Information Criterion }
+\title{ Akaike's Information Criterion }
\description{
Calculates the Akaike information criterion for a fitted model object
for which a log-likelihood value has been obtained.
@@ -34,7 +34,7 @@ AICvlm(object, \dots, k = 2)
\eqn{-2 \mbox{log-likelihood} + k n_{par}}{-2*log-likelihood
+ k*npar}, where \eqn{n_{par}}{npar} represents the number of parameters
in the fitted model, and \eqn{k = 2} for the usual AIC.
- One can assign \eqn{k = \log(n)} (\eqn{n} the number of observations)
+ One could assign \eqn{k = \log(n)} (\eqn{n} the number of observations)
for the so-called BIC or SBC (Schwarz's Bayesian criterion).
This code relies on the log-likelihood being defined, and computed,
@@ -47,7 +47,7 @@ AICvlm(object, \dots, k = 2)
parameter.
For VGAMs the nonlinear effective degrees of freedom for each
- smoothed component is used. This is heuristic.
+ smoothed component is used. This formula is heuristic.
}
\value{
@@ -55,7 +55,7 @@ AICvlm(object, \dots, k = 2)
depending on \code{k}).
}
-\author{T. W. Yee }
+\author{T. W. Yee. }
\note{
AIC has not been defined for QRR-VGLMs yet.
@@ -78,13 +78,12 @@ AICvlm(object, \dots, k = 2)
\seealso{
VGLMs are described in \code{\link{vglm-class}};
+ VGAMs are described in \code{\link{vgam-class}};
RR-VGLMs are described in \code{\link{rrvglm-class}};
\code{\link[stats]{AIC}}.
}
\examples{
-# Fit a proportional odds model
-data(pneumo)
pneumo = transform(pneumo, let=log(exposure.time))
(fit1 = vglm(cbind(normal, mild, severe) ~ let,
cumulative(parallel=TRUE, reverse=TRUE), pneumo))
diff --git a/man/BratUC.Rd b/man/BratUC.Rd
index 7d1dad4..4e24f2b 100644
--- a/man/BratUC.Rd
+++ b/man/BratUC.Rd
@@ -67,7 +67,7 @@ Yet to do: merge \code{InverseBrat} into \code{brat}.
\examples{
journal = c("Biometrika", "Comm Statist", "JASA", "JRSS-B")
m = matrix(c( NA, 33, 320, 284, 730, NA, 813, 276,
- 498, 68, NA, 325, 221, 17, 142, NA), 4,4)
+ 498, 68, NA, 325, 221, 17, 142, NA), 4, 4)
dimnames(m) = list(winner = journal, loser = journal)
Brat(m)
vglm(Brat(m) ~ 1, brat, trace=TRUE)
diff --git a/man/Coef.rrvglm-class.Rd b/man/Coef.rrvglm-class.Rd
index 513f4fc..2aaac11 100644
--- a/man/Coef.rrvglm-class.Rd
+++ b/man/Coef.rrvglm-class.Rd
@@ -59,7 +59,6 @@ Reduced-rank vector generalized linear models.
\examples{
# Rank-1 stereotype model of Anderson (1984)
-data(pneumo)
n = nrow(pneumo)
pneumo = transform(pneumo, let=log(exposure.time), x1=runif(n), x2=runif(n))
fit = rrvglm(cbind(normal,mild,severe) ~ let + x1 + x2, multinomial, pneumo)
diff --git a/man/Coef.rrvglm.Rd b/man/Coef.rrvglm.Rd
index 43aed7c..e3dbf1b 100644
--- a/man/Coef.rrvglm.Rd
+++ b/man/Coef.rrvglm.Rd
@@ -41,7 +41,6 @@ Reduced-rank vector generalized linear models.
\examples{
# Rank-1 stereotype model of Anderson (1984)
-data(pneumo)
n = nrow(pneumo)
pneumo = transform(pneumo, let=log(exposure.time), x1=runif(n), x2=runif(n))
fit = rrvglm(cbind(normal,mild,severe) ~ let + x1 + x2, multinomial, pneumo)
diff --git a/man/CommonVGAMffArguments.Rd b/man/CommonVGAMffArguments.Rd
index 9fd0cc1..3b17b3b 100644
--- a/man/CommonVGAMffArguments.Rd
+++ b/man/CommonVGAMffArguments.Rd
@@ -12,6 +12,7 @@
\usage{
TypicalVGAMfamilyFunction(lsigma="loge", esigma=list(),
isigma=NULL, parallel = TRUE,
+ shrinkage.init = 0.95,
method.init=1, nsimEIM=100, zero=NULL)
}
\arguments{
@@ -78,13 +79,22 @@ TypicalVGAMfamilyFunction(lsigma="loge", esigma=list(),
}
\item{method.init}{
- An integer with value \code{1} or \code{2} or or \code{3} ... which
+ An integer with value \code{1} or \code{2} or \code{3} or ... which
specifies the initialization method for some parameters or a specific
- parameter. If failure to converge occurs try the next higher value.
+ parameter.
+ If failure to converge occurs try the next higher value, and
+ continue until success.
For example, \code{method.init=1} might be the method of moments,
and \code{method.init=2} might be another method.
If no value of \code{method.init} works then it will be necessary
to use arguments such as \code{isigma}.
+ For many \pkg{VGAM} family functions it is advisable to try
+ this argument with all possible values to safeguard against
+ problems such as converging to a local solution.
+ \pkg{VGAM} family functions with this argument usually correspond
+ to a model or distribution that is relatively hard to fit successfully,
+ therefore care is needed to ensure the global solution is obtained.
+ So using all possible values that this argument supplies is a good idea.
}
\item{zero}{
@@ -106,6 +116,22 @@ TypicalVGAMfamilyFunction(lsigma="loge", esigma=list(),
a matrix of responses.
}
+ \item{shrinkage.init}{
+ Shrinkage factor \eqn{s} used for obtaining initial values.
+ Numeric, between 0 and 1.
+ In general, the formula used is something like
+ \eqn{s \mu + (1-s) y}{s*mu + (1-s)*y}
+ where \eqn{\mu}{mu} is a measure of central tendency such as a weighted
+ mean or median, and \eqn{y} is the response vector.
+ The the initial values are slight perturbations of the mean towards the
+ actual data.
+ For many type of models this method seems to work well and is often
+ reasonably robust to outliers in the response.
+ Often this argument is only used if
+ the argument \code{method.init} is assigned a certain value.
+
+ }
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
diff --git a/man/DagumUC.Rd b/man/DagumUC.Rd
index 2cf2ba8..5b19c12 100644
--- a/man/DagumUC.Rd
+++ b/man/DagumUC.Rd
@@ -11,7 +11,7 @@
and \code{p}, and scale parameter \code{scale}.
}
\usage{
-ddagum(x, a, scale, p.arg)
+ddagum(x, a, scale, p.arg, log=FALSE)
pdagum(q, a, scale, p.arg)
qdagum(p, a, scale, p.arg)
rdagum(n, a, scale, p.arg)
@@ -23,6 +23,12 @@ rdagum(n, a, scale, p.arg)
is taken to be the number required.}
\item{a, p.arg}{shape parameters.}
\item{scale}{scale parameter.}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{ddagum} gives the density,
diff --git a/man/DeLury.Rd b/man/DeLury.Rd
index 3715c35..cd81005 100644
--- a/man/DeLury.Rd
+++ b/man/DeLury.Rd
@@ -131,6 +131,10 @@ some plots of his are reproduced.
Note that he used log to base 10 whereas natural logs are used here.
His plots had some observations obscured by the y-axis!
+The DeLury method is not applicable to the data frame
+\code{\link{wffc.nc}} since the 2008 World Fly Fishing Competition was
+strictly catch-and-release.
+
}
\seealso{ \code{\link{wffc.nc}}. }
\examples{
diff --git a/man/FiskUC.Rd b/man/FiskUC.Rd
index 1804833..429507a 100644
--- a/man/FiskUC.Rd
+++ b/man/FiskUC.Rd
@@ -11,18 +11,24 @@
and scale parameter \code{scale}.
}
\usage{
-dfisk(x, a, scale)
-pfisk(q, a, scale)
-qfisk(p, a, scale)
-rfisk(n, a, scale)
+dfisk(x, a, scale=1, log=FALSE)
+pfisk(q, a, scale=1)
+qfisk(p, a, scale=1)
+rfisk(n, a, scale=1)
}
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
- \item{n}{number of observations. If \code{length(n) > 1}, the length
- is taken to be the number required.}
+ \item{n}{number of observations.
+ If \code{length(n) > 1} then the length is taken to be the number required.}
\item{a}{shape parameter.}
\item{scale}{scale parameter.}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dfisk} gives the density,
diff --git a/man/G1G2G3.Rd b/man/G1G2G3.Rd
index 1245765..e81a7ec 100644
--- a/man/G1G2G3.Rd
+++ b/man/G1G2G3.Rd
@@ -49,12 +49,12 @@ Lange, K. (2002)
\note{
The input can be a 6-column matrix of counts,
with columns corresponding to
-\code{G_1/G_1},
-\code{G_1/G_2},
-\code{G_1/G_3},
-\code{G_2/G_2},
-\code{G_2/G_3},
-\code{G_3/G_3} (in order).
+\code{G1G1},
+\code{G1G2},
+\code{G1G3},
+\code{G2G2},
+\code{G2G3},
+\code{G3G3} (in order).
Alternatively, the input can be a 6-column matrix of
proportions (so each row adds to 1) and the \code{weights}
argument is used to specify the total number of counts for each row.
diff --git a/man/Inv.gaussian.Rd b/man/Inv.gaussian.Rd
index b42988e..a0054ba 100644
--- a/man/Inv.gaussian.Rd
+++ b/man/Inv.gaussian.Rd
@@ -10,16 +10,23 @@
}
\usage{
-dinv.gaussian(x, mu, lambda)
+dinv.gaussian(x, mu, lambda, log=FALSE)
pinv.gaussian(q, mu, lambda)
rinv.gaussian(n, mu, lambda)
}
\arguments{
\item{x, q}{vector of quantiles.}
%%\item{p}{vector of probabilities.}
- \item{n}{number of observations. Must be a single positive integer. }
+ \item{n}{number of observations. If \code{length(n) > 1} then the length
+ is taken to be the number required. }
\item{mu}{the mean parameter.}
\item{lambda}{the \eqn{\lambda}{lambda} parameter.}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dinv.gaussian} gives the density,
diff --git a/man/InvlomaxUC.Rd b/man/InvlomaxUC.Rd
index 9866379..839228e 100644
--- a/man/InvlomaxUC.Rd
+++ b/man/InvlomaxUC.Rd
@@ -12,10 +12,10 @@
}
\usage{
-dinvlomax(x, scale, p.arg)
-pinvlomax(q, scale, p.arg)
-qinvlomax(p, scale, p.arg)
-rinvlomax(n, scale, p.arg)
+dinvlomax(x, scale=1, p.arg, log = FALSE)
+pinvlomax(q, scale=1, p.arg)
+qinvlomax(p, scale=1, p.arg)
+rinvlomax(n, scale=1, p.arg)
}
\arguments{
\item{x, q}{vector of quantiles.}
@@ -24,6 +24,12 @@ rinvlomax(n, scale, p.arg)
is taken to be the number required.}
\item{p.arg}{shape parameter.}
\item{scale}{scale parameter.}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dinvlomax} gives the density,
diff --git a/man/InvparalogisticUC.Rd b/man/InvparalogisticUC.Rd
index 67c3167..9ec9ada 100644
--- a/man/InvparalogisticUC.Rd
+++ b/man/InvparalogisticUC.Rd
@@ -12,10 +12,10 @@
}
\usage{
-dinvparalogistic(x, a, scale)
-pinvparalogistic(q, a, scale)
-qinvparalogistic(p, a, scale)
-rinvparalogistic(n, a, scale)
+dinvparalogistic(x, a, scale=1, log=FALSE)
+pinvparalogistic(q, a, scale=1)
+qinvparalogistic(p, a, scale=1)
+rinvparalogistic(n, a, scale=1)
}
\arguments{
\item{x, q}{vector of quantiles.}
@@ -24,6 +24,12 @@ rinvparalogistic(n, a, scale)
is taken to be the number required.}
\item{a}{shape parameter.}
\item{scale}{scale parameter.}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dinvparalogistic} gives the density,
diff --git a/man/Links.Rd b/man/Links.Rd
index 00fa390..01918cb 100644
--- a/man/Links.Rd
+++ b/man/Links.Rd
@@ -176,7 +176,6 @@ logit("a", short=FALSE, tag=TRUE)
logoff(1:5, earg=list(offset=1)) # Same as log(1:5 + 1)
powl(1:5, earg=list(power=2)) # Same as (1:5)^2
-data(hunua)
fit1 = vgam(agaaus ~ altitude, binomialff(link=cloglog), hunua) # ok
fit2 = vgam(agaaus ~ altitude, binomialff(link="cloglog"), hunua) # ok
diff --git a/man/LomaxUC.Rd b/man/LomaxUC.Rd
index a1f11f6..a37c4b7 100644
--- a/man/LomaxUC.Rd
+++ b/man/LomaxUC.Rd
@@ -11,10 +11,10 @@
and shape parameter \code{q}.
}
\usage{
-dlomax(x, scale, q.arg)
-plomax(q, scale, q.arg)
-qlomax(p, scale, q.arg)
-rlomax(n, scale, q.arg)
+dlomax(x, scale=1, q.arg, log=FALSE)
+plomax(q, scale=1, q.arg)
+qlomax(p, scale=1, q.arg)
+rlomax(n, scale=1, q.arg)
}
\arguments{
\item{x, q}{vector of quantiles.}
@@ -23,6 +23,12 @@ rlomax(n, scale, q.arg)
is taken to be the number required.}
\item{q.arg}{shape parameter.}
\item{scale}{scale parameter.}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dlomax} gives the density,
diff --git a/man/Max.Rd b/man/Max.Rd
index b26a993..70fda24 100644
--- a/man/Max.Rd
+++ b/man/Max.Rd
@@ -65,7 +65,6 @@ Constrained additive ordination.
\examples{
\dontrun{
-data(hspider)
set.seed(111) # This leads to the global solution
hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
diff --git a/man/MaxwellUC.Rd b/man/MaxwellUC.Rd
index 16aa4ba..54eb230 100644
--- a/man/MaxwellUC.Rd
+++ b/man/MaxwellUC.Rd
@@ -11,7 +11,7 @@
}
\usage{
-dmaxwell(x, a)
+dmaxwell(x, a, log=FALSE)
pmaxwell(q, a)
qmaxwell(p, a)
rmaxwell(n, a)
@@ -21,6 +21,12 @@ rmaxwell(n, a)
\item{p}{vector of probabilities.}
\item{n}{number of observations. A single positive integer. }
\item{a}{the parameter.}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dmaxwell} gives the density,
diff --git a/man/Opt.Rd b/man/Opt.Rd
index 26376bf..0a72369 100644
--- a/man/Opt.Rd
+++ b/man/Opt.Rd
@@ -68,7 +68,6 @@ called the \emph{species score}.
}
\examples{
-data(hspider)
set.seed(111) # This leads to the global solution
hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
diff --git a/man/ParalogisticUC.Rd b/man/ParalogisticUC.Rd
index 75ea652..341e8e4 100644
--- a/man/ParalogisticUC.Rd
+++ b/man/ParalogisticUC.Rd
@@ -11,10 +11,10 @@
and scale parameter \code{scale}.
}
\usage{
-dparalogistic(x, a, scale)
-pparalogistic(q, a, scale)
-qparalogistic(p, a, scale)
-rparalogistic(n, a, scale)
+dparalogistic(x, a, scale=1, log=FALSE)
+pparalogistic(q, a, scale=1)
+qparalogistic(p, a, scale=1)
+rparalogistic(n, a, scale=1)
}
\arguments{
\item{x, q}{vector of quantiles.}
@@ -23,6 +23,12 @@ rparalogistic(n, a, scale)
is taken to be the number required.}
\item{a}{shape parameter.}
\item{scale}{scale parameter.}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dparalogistic} gives the density,
diff --git a/man/Pareto.Rd b/man/Pareto.Rd
index 642b0ad..012ee8e 100644
--- a/man/Pareto.Rd
+++ b/man/Pareto.Rd
@@ -12,7 +12,7 @@
}
\usage{
-dpareto(x, location, shape)
+dpareto(x, location, shape, log=FALSE)
ppareto(q, location, shape)
qpareto(p, location, shape)
rpareto(n, location, shape)
@@ -22,6 +22,12 @@ rpareto(n, location, shape)
\item{p}{vector of probabilities.}
\item{n}{number of observations. Must be a single positive integer. }
\item{location, shape}{the \eqn{\alpha}{alpha} and \eqn{k} parameters.}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dpareto} gives the density,
diff --git a/man/ParetoIVUC.Rd b/man/ParetoIVUC.Rd
index 7a1c04d..20cab72 100644
--- a/man/ParetoIVUC.Rd
+++ b/man/ParetoIVUC.Rd
@@ -26,15 +26,15 @@
}
\usage{
-dparetoIV(x, location=0, scale=1, inequality=1, shape=1)
+dparetoIV(x, location=0, scale=1, inequality=1, shape=1, log=FALSE)
pparetoIV(q, location=0, scale=1, inequality=1, shape=1)
qparetoIV(p, location=0, scale=1, inequality=1, shape=1)
rparetoIV(n, location=0, scale=1, inequality=1, shape=1)
-dparetoIII(x, location=0, scale=1, inequality=1)
+dparetoIII(x, location=0, scale=1, inequality=1, log=FALSE)
pparetoIII(q, location=0, scale=1, inequality=1)
qparetoIII(p, location=0, scale=1, inequality=1)
rparetoIII(n, location=0, scale=1, inequality=1)
-dparetoII(x, location=0, scale=1, shape=1)
+dparetoII(x, location=0, scale=1, shape=1, log=FALSE)
pparetoII(q, location=0, scale=1, shape=1)
qparetoII(p, location=0, scale=1, shape=1)
rparetoII(n, location=0, scale=1, shape=1)
@@ -50,6 +50,12 @@ rparetoI(n, scale=1, shape=1)
\item{location}{the location parameter. }
\item{scale, shape, inequality}{the (positive) scale,
inequality and shape parameters. }
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
Functions beginning with the letter \code{d} give the density,
diff --git a/man/RayleighUC.Rd b/man/RayleighUC.Rd
index b151a48..ec69856 100644
--- a/man/RayleighUC.Rd
+++ b/man/RayleighUC.Rd
@@ -11,7 +11,7 @@
\code{a}.
}
\usage{
-drayleigh(x, a)
+drayleigh(x, a, log=FALSE)
prayleigh(q, a)
qrayleigh(p, a)
rrayleigh(n, a)
@@ -22,6 +22,12 @@ rrayleigh(n, a)
\item{n}{number of observations.
Must be a positive integer of length 1.}
\item{a}{the parameter \eqn{a}.}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{drayleigh} gives the density,
diff --git a/man/SinmadUC.Rd b/man/SinmadUC.Rd
index c105a9a..317b2f3 100644
--- a/man/SinmadUC.Rd
+++ b/man/SinmadUC.Rd
@@ -11,10 +11,10 @@
and \code{q}, and scale parameter \code{scale}.
}
\usage{
-dsinmad(x, a, scale, q.arg)
-psinmad(q, a, scale, q.arg)
-qsinmad(p, a, scale, q.arg)
-rsinmad(n, a, scale, q.arg)
+dsinmad(x, a, scale=1, q.arg, log=FALSE)
+psinmad(q, a, scale=1, q.arg)
+qsinmad(p, a, scale=1, q.arg)
+rsinmad(n, a, scale=1, q.arg)
}
\arguments{
\item{x, q}{vector of quantiles.}
@@ -23,6 +23,12 @@ rsinmad(n, a, scale, q.arg)
is taken to be the number required.}
\item{a, q.arg}{shape parameters.}
\item{scale}{scale parameter.}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dsinmad} gives the density,
diff --git a/man/Tol.Rd b/man/Tol.Rd
index 830181e..380adeb 100644
--- a/man/Tol.Rd
+++ b/man/Tol.Rd
@@ -79,7 +79,6 @@ Constrained additive ordination.
}
\examples{
-data(hspider)
set.seed(111) # This leads to the global solution
hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
diff --git a/man/VGAM-package.Rd b/man/VGAM-package.Rd
index 0441dca..aae4b45 100644
--- a/man/VGAM-package.Rd
+++ b/man/VGAM-package.Rd
@@ -55,7 +55,7 @@ For a complete list of this package, use \code{library(help="VGAM")}.
New \pkg{VGAM} family functions are continually being written and
added to the package.
A monograph about VGLM and VGAMs etc. is in the making but unfortunately
-won't be finished for a while.
+will not be finished for a while.
%~~ An overview of how to use the package, including the most important ~~
@@ -118,7 +118,6 @@ contains further information and examples.
\examples{
# Example 1
# Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
-data(pneumo)
pneumo = transform(pneumo, let=log(exposure.time))
(fit = vglm(cbind(normal, mild, severe) ~ let,
cumulative(parallel=TRUE, reverse=TRUE), pneumo))
@@ -141,7 +140,6 @@ coef(fit, matrix=TRUE) # These should agree with the above values
# Example 3
# Fit a two species GAM simultaneously
-data(hunua)
fit2 = vgam(cbind(agaaus, kniexc) ~ s(altitude, df=c(2,3)),
binomialff(mv=TRUE), hunua)
coef(fit2, mat=TRUE) # Not really interpretable
@@ -158,13 +156,11 @@ with(hunua, rug(altitude))
# Example 4
# LMS quantile regression
-data(bminz)
-fit = vgam(BMI ~ s(age, df=c(4,2)), lms.bcn(zero=1), dat=bminz, tr=TRUE)
-predict(fit)[1:3,]
-fitted(fit)[1:3,]
-bminz[1:3,]
-# Person 1 is near the lower quartile of BMI amongst people his age
-cdf(fit)[1:3]
+fit = vgam(BMI ~ s(age, df=c(4,2)), lms.bcn(zero=1), dat=bminz, trac=TRUE)
+head(predict(fit))
+head(fitted(fit))
+head(bminz) # Person 1 is near the lower quartile among people his age
+head(cdf(fit))
\dontrun{
# Quantile plot
@@ -186,9 +182,8 @@ a at post$deplot # Contains density function values
# Example 5
# GEV distribution for extremes
-data(oxtemp)
(fit = vglm(maxtemp ~ 1, egev, data=oxtemp, trace=TRUE))
-fitted(fit)[1:3,]
+head(fitted(fit))
coef(fit, mat=TRUE)
Coef(fit)
vcov(fit)
diff --git a/man/acat.Rd b/man/acat.Rd
index 32a2edf..0780a25 100644
--- a/man/acat.Rd
+++ b/man/acat.Rd
@@ -106,7 +106,6 @@ contains further information and examples.
\code{\link{pneumo}}.
}
\examples{
-data(pneumo)
pneumo = transform(pneumo, let=log(exposure.time))
(fit = vglm(cbind(normal,mild,severe) ~ let, acat, pneumo))
coef(fit, matrix=TRUE)
diff --git a/man/alaplace3.Rd b/man/alaplace3.Rd
index ed51205..a5ef059 100644
--- a/man/alaplace3.Rd
+++ b/man/alaplace3.Rd
@@ -41,12 +41,14 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
\item{llocation, lscale, lkappa}{ Character.
Parameter link functions for
location parameter \eqn{\xi}{xi},
- scale parameter \eqn{b},
+ scale parameter \eqn{\sigma}{sigma},
asymmetry parameter \eqn{\kappa}{kappa}.
See \code{\link{Links}} for more choices.
For example, the argument \code{llocation} can help handle
count data by restricting the quantiles to be positive
(use \code{llocation="loge"}).
+ However, \code{llocation} is best left alone since the theory
+ only works properly with the identity link.
}
\item{elocation, escale, ekappa}{
@@ -61,8 +63,8 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
The default is to choose the value internally.
}
\item{parallelLocation}{ Logical.
- Should the quantiles be parallel on the transformed scale (argument
- \code{llocation})?
+ Should the quantiles be parallel on the transformed scale
+ (argument \code{llocation})?
Assigning this argument to \code{TRUE} circumvents the
seriously embarrassing quantile crossing problem.
@@ -95,7 +97,7 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
}
\item{Scale.arg}{
- The value of the scale parameter \eqn{b}.
+ The value of the scale parameter \eqn{\sigma}{sigma}.
This argument may be used to compute quantiles at different
\eqn{\tau}{tau} values from an existing fitted \code{alaplace2()} model
(practical only if it has a single value).
@@ -116,7 +118,7 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
\item{zero}{
See \code{\link{CommonVGAMffArguments}} for more information.
Where possible,
- the default is to model all the \eqn{b} and \eqn{\kappa}{kappa}
+ the default is to model all the \eqn{\sigma}{sigma} and \eqn{\kappa}{kappa}
as an intercept-only term.
}
@@ -126,29 +128,29 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
Laplace distributions (ALDs) suitable for quantile regression.
Kotz et al. (2001) call it \emph{the} ALD.
Its density function is
- \deqn{f(y;\xi,b,\kappa) = \frac{\sqrt{2}}{b} \,
+ \deqn{f(y;\xi,\sigma,\kappa) = \frac{\sqrt{2}}{\sigma} \,
\frac{\kappa}{1 + \kappa^2} \,
- \exp \left( - \frac{\sqrt{2}}{b \, \kappa} |y - \xi |
+ \exp \left( - \frac{\sqrt{2}}{\sigma \, \kappa} |y - \xi |
\right) }{%
- f(y) = (sqrt(2)/b) * (kappa/(1+ \kappa^2)) * exp( -
- (sqrt(2) / (b * kappa)) * |y-xi| ) }
+ f(y;xi,sigma,kappa) = (sqrt(2)/sigma) * (kappa/(1+ \kappa^2)) *
+ exp( -(sqrt(2) / (sigma * kappa)) * |y-xi| ) }
for \eqn{y \leq \xi}{y <= xi}, and
- \deqn{f(y;\xi,b,\kappa) = \frac{\sqrt{2}}{b} \,
+ \deqn{f(y;\xi,\sigma,\kappa) = \frac{\sqrt{2}}{\sigma} \,
\frac{\kappa}{1 + \kappa^2} \,
- \exp \left( - \frac{\sqrt{2} \, \kappa}{b} |y - \xi |
+ \exp \left( - \frac{\sqrt{2} \, \kappa}{\sigma} |y - \xi |
\right) }{%
- f(y) = (sqrt(2)/b) * (kappa/(1+ \kappa^2)) * exp( -
- (sqrt(2) * kappa / b) * |y-xi| ) }
+ f(y;xi,sigma,kappa) = (sqrt(2)/sigma) * (kappa/(1+ \kappa^2)) * exp( -
+ (sqrt(2) * kappa / sigma) * |y-xi| ) }
for \eqn{y > \xi}{y > xi}.
Here, the ranges are
- for all real \eqn{y} and \eqn{\xi}{xi}, positive \eqn{b} and
+ for all real \eqn{y} and \eqn{\xi}{xi}, positive \eqn{\sigma}{sigma} and
positive \eqn{\kappa}{kappa}.
The special case \eqn{\kappa=1}{kappa=1} corresponds to the
(symmetric) Laplace distribution of Kotz et al. (2001).
- The mean is \eqn{\xi + b (1/\kappa - \kappa) / \sqrt{2}}{xi +
- b * (1/kappa - kappa) / sqrt(2)}
+ The mean is \eqn{\xi + \sigma (1/\kappa - \kappa) / \sqrt{2}}{xi +
+ sigma * (1/kappa - kappa) / sqrt(2)}
and the variance is
- \eqn{b^2 (1 + \kappa^4) / (2 \kappa^2)}{b^2 * (1 +
+ \eqn{\sigma^2 (1 + \kappa^4) / (2 \kappa^2)}{sigma^2 * (1 +
kappa^4) / (2 * kappa^2)}.
The enumeration of the linear/additive predictors used here is to
first have all the location parameters, followed by all the
@@ -157,9 +159,8 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
It is known that the maximum likelihood estimate of the
location parameter \eqn{\xi}{xi} corresponds to the regression
- quantile estimate
- of the classical quantile regression approach of Koenker and Bassett
- (1978). An important property of the ALD is that
+ quantile estimate of the classical quantile regression approach of
+ Koenker and Bassett (1978). An important property of the ALD is that
\eqn{P(Y \leq \xi) = \tau}{P(Y <= xi) = tau} where
\eqn{\tau = \kappa^2 / (1 + \kappa^2)}{tau = kappa^2 / (1 + kappa^2)}
so that
@@ -195,8 +196,8 @@ a revisit with applications to communications,
economics, engineering, and finance},
Boston: Birkhauser.
- Yee, T. W. (2008)
- Quantile regression for counts and binomial proportions.
+ Yee, T. W. (2009)
+ Quantile regression for counts and proportions.
In preparation.
}
@@ -212,6 +213,13 @@ Boston: Birkhauser.
}
\note{
+% Commented out 20090326
+% The function \code{alaplace2()} is recommended over \code{alaplace1()}
+% for quantile regression because the solution is
+% invariant to location and scale,
+% i.e., linear transformation of the response produces the
+% same linear transformation of the fitted quantiles.
+
These \pkg{VGAM} family functions use Fisher scoring.
Convergence may be slow and
half-stepping is usual (although one can use \code{trace=TRUE} to see
@@ -230,7 +238,7 @@ Boston: Birkhauser.
A second method for solving the noncrossing quantile problem is
illustrated below in Example 3.
This is called the \emph{accumulative quantile method} (AQM)
- and details are in Yee (2008).
+ and details are in Yee (2009).
It does not make the strong parallelism assumption.
The functions \code{alaplace2()} and \code{\link{laplace}}
@@ -239,10 +247,10 @@ Boston: Birkhauser.
}
\seealso{
- \code{\link{ralaplace}},
+ \code{\link{ralap}},
\code{\link{laplace}},
\code{\link{lms.bcn}},
- \code{\link{alsqreg}}.
+ \code{\link{amlnormal}}.
}
diff --git a/man/alaplaceUC.Rd b/man/alaplaceUC.Rd
index 77bfcb2..63e902e 100644
--- a/man/alaplaceUC.Rd
+++ b/man/alaplaceUC.Rd
@@ -1,8 +1,8 @@
\name{alaplaceUC}
-\alias{dalaplace}
-\alias{palaplace}
-\alias{qalaplace}
-\alias{ralaplace}
+\alias{dalap}
+\alias{palap}
+\alias{qalap}
+\alias{ralap}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ The Laplace Distribution }
\description{
@@ -13,21 +13,25 @@
}
\usage{
-dalaplace(x, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau)))
-palaplace(q, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau)))
-qalaplace(p, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau)))
-ralaplace(n, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau)))
+dalap(x, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau)),
+ log=FALSE)
+palap(q, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau)))
+qalap(p, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau)))
+ralap(n, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau)))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
- \item{n}{number of observations. Positive integer of length 1.}
+ \item{n}{
+ number of observations.
+ If \code{length(n) > 1} then the length is taken to be the number required.
+ }
\item{location}{
the location parameter \eqn{\xi}{xi}.
}
\item{scale}{
- the scale parameter \eqn{b}.
+ the scale parameter \eqn{\sigma}{sigma}.
Must consist of positive values.
}
\item{tau}{
@@ -41,22 +45,24 @@ ralaplace(n, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau)))
the asymmetry parameter \eqn{\kappa}{kappa}.
Must consist of positive values.
}
+ \item{log}{
+ if \code{TRUE}, probabilities \code{p} are given as \code{log(p)}.
+ }
+
}
\details{
- There are several variants of asymmetric Laplace distributions (ALDs) and
+ There are many variants of asymmetric Laplace distributions (ALDs) and
this one is known as \emph{the} ALD by Kotz et al. (2001).
See \code{\link{alaplace3}}, the \pkg{VGAM} family function
for estimating the three parameters by maximum likelihood estimation,
for formulae and details.
- Apart from \code{n}, all the above arguments may be vectors and
- are recyled to the appropriate length if necessary.
}
\value{
- \code{dalaplace} gives the density,
- \code{palaplace} gives the distribution function,
- \code{qalaplace} gives the quantile function, and
- \code{ralaplace} generates random deviates.
+ \code{dalap} gives the density,
+ \code{palap} gives the distribution function,
+ \code{qalap} gives the quantile function, and
+ \code{ralap} generates random deviates.
}
\references{
Kotz, S., Kozubowski, T. J. and Podgorski, K. (2001)
@@ -74,23 +80,26 @@ Boston: Birkhauser.
\seealso{
\code{\link{alaplace3}}.
+% \code{\link{dloglap}}.
+
}
\examples{
x = seq(-5, 5, by=0.01)
-loc = 0; b = 1.5; kappa = 2
+loc = 0; sigma = 1.5; kappa = 2
\dontrun{
-plot(x, dalaplace(x, loc, b, kappa=kappa), type="l", col="blue",
+plot(x, dalap(x, loc, sigma, kappa=kappa), type="l", col="blue",
main="Blue is density, red is cumulative distribution function",
- ylim=c(0,1), sub="Purple are 5,10,...,95 percentiles", las=1, ylab="")
+ ylim=c(0,1), sub="Purple are 5,10,...,95 percentiles",
+ las=1, ylab="", cex.main=0.5)
abline(h=0, col="blue", lty=2)
-lines(qalaplace(seq(0.05,0.95,by=0.05), loc, b, kappa=kappa),
- dalaplace(qalaplace(seq(0.05,0.95,by=0.05), loc, b, kappa=kappa),
- loc, b, kappa=kappa), col="purple", lty=3, type="h")
-lines(x, palaplace(x, loc, b, kappa=kappa), type="l", col="red")
+lines(qalap(seq(0.05,0.95,by=0.05), loc, sigma, kappa=kappa),
+ dalap(qalap(seq(0.05,0.95,by=0.05), loc, sigma, kappa=kappa),
+ loc, sigma, kappa=kappa), col="purple", lty=3, type="h")
+lines(x, palap(x, loc, sigma, kappa=kappa), type="l", col="red")
abline(h=0, lty=2)
}
-palaplace(qalaplace(seq(0.05,0.95,by=0.05), loc, b, kappa=kappa),
- loc, b, kappa=kappa)
+palap(qalap(seq(0.05,0.95,by=0.05), loc, sigma, kappa=kappa),
+ loc, sigma, kappa=kappa)
}
\keyword{distribution}
diff --git a/man/amlbinomial.Rd b/man/amlbinomial.Rd
index 89832b3..f162ddf 100644
--- a/man/amlbinomial.Rd
+++ b/man/amlbinomial.Rd
@@ -12,6 +12,10 @@ amlbinomial(w.aml=1, parallel=FALSE, digw=4, link="logit", earg=list())
}
%- maybe also 'usage' for other objects documented here.
\arguments{
+ In this documentation the word \emph{quantile} can often be
+ interchangeably replaced by \emph{expectile}
+ (things are informal here).
+
\item{w.aml}{
Numeric, a vector of positive constants controlling the percentiles.
The larger the value the larger the fitted percentile value
@@ -95,7 +99,7 @@ amlbinomial(w.aml=1, parallel=FALSE, digw=4, link="logit", earg=list())
\seealso{
\code{\link{amlpoisson}},
\code{\link{amlexponential}},
- \code{\link{alsqreg}},
+ \code{\link{amlnormal}},
\code{\link{alaplace1}}.
}
diff --git a/man/amlexponential.Rd b/man/amlexponential.Rd
index be7c1f6..6705167 100644
--- a/man/amlexponential.Rd
+++ b/man/amlexponential.Rd
@@ -13,6 +13,10 @@ amlexponential(w.aml=1, parallel=FALSE, method.init=1, digw=4,
}
%- maybe also 'usage' for other objects documented here.
\arguments{
+ In this documentation the word \emph{quantile} can often be
+ interchangeably replaced by \emph{expectile}
+ (things are informal here).
+
\item{w.aml}{
Numeric, a vector of positive constants controlling the expectiles.
The larger the value the larger the fitted expectile value
@@ -106,7 +110,7 @@ amlexponential(w.aml=1, parallel=FALSE, method.init=1, digw=4,
\code{\link{exponential}},
\code{\link{amlbinomial}},
\code{\link{amlpoisson}},
- \code{\link{alsqreg}},
+ \code{\link{amlnormal}},
\code{\link{alaplace1}}.
}
diff --git a/man/alsqreg.Rd b/man/amlnormal.Rd
similarity index 84%
rename from man/alsqreg.Rd
rename to man/amlnormal.Rd
index ef638eb..239b2d7 100644
--- a/man/alsqreg.Rd
+++ b/man/amlnormal.Rd
@@ -1,18 +1,26 @@
-\name{alsqreg}
-\alias{alsqreg}
+\name{amlnormal}
+\alias{amlnormal}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Asymmetric Least Squares Quantile Regression }
\description{
- Quantile regression using asymmetric least squares error loss.
+ Asymmetric least squares,
+ a special case of maximizing an asymmetric
+ likelihood function of a normal distribution.
+ This allows for expectile/quantile regression using asymmetric least
+ squares error loss.
}
\usage{
-alsqreg(w.als=1, parallel=FALSE, lexpectile = "identity",
- eexpectile = list(), iexpectile = NULL,
- method.init=1, digw=4)
+amlnormal(w.als=1, parallel=FALSE, lexpectile = "identity",
+ eexpectile = list(), iexpectile = NULL,
+ method.init=1, digw=4)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
+ In this documentation the word \emph{quantile} can often be
+ interchangeably replaced by \emph{expectile}
+ (things are informal here).
+
\item{w.als}{
Numeric, a vector of positive constants controlling the percentiles.
The larger the value the larger the fitted percentile value
@@ -47,7 +55,7 @@ alsqreg(w.als=1, parallel=FALSE, lexpectile = "identity",
}
}
\details{
- This method was proposed by Efron (1991) and full details can
+ This is an implementation of Efron (1991) and full details can
be obtained there.
Equation numbers below refer to that article.
The model is essentially a linear model
@@ -85,7 +93,7 @@ alsqreg(w.als=1, parallel=FALSE, lexpectile = "identity",
percentile. One solution is to fit the model within a root finding
function such as \code{\link[stats]{uniroot}}; see the example below.
- For \code{alsqreg} objects, methods functions for the generic functions
+ For \code{amlnormal} objects, methods functions for the generic functions
\code{qtplot} and \code{cdf} have not been written yet.
See the note in \code{\link{amlpoisson}} on the jargon, including
@@ -123,10 +131,9 @@ alsqreg(w.als=1, parallel=FALSE, lexpectile = "identity",
\examples{
# Example 1
-data(bminz)
o = with(bminz, order(age))
bminz = bminz[o,] # Sort by age
-(fit = vglm(BMI ~ bs(age), fam=alsqreg(w.als=0.1), data=bminz))
+(fit = vglm(BMI ~ bs(age), fam=amlnormal(w.als=0.1), data=bminz))
fit at extra # Gives the w value and the percentile
coef(fit)
coef(fit, matrix=TRUE)
@@ -144,7 +151,7 @@ with(bminz, lines(age, c(fitted(fit)), col="black"))
# Example 2
# Find the w values that give the 25, 50 and 75 percentiles
findw = function(w, percentile=50) {
- fit2 = vglm(BMI ~ bs(age), fam=alsqreg(w=w), data=bminz)
+ fit2 = vglm(BMI ~ bs(age), fam=amlnormal(w=w), data=bminz)
fit2 at extra$percentile - percentile
}
\dontrun{
@@ -155,7 +162,7 @@ with(bminz, plot(age, BMI, col="blue", las=1, main=
for(myp in c(25,50,75)) {
# Note: uniroot() can only find one root at a time
bestw = uniroot(f=findw, interval=c(1/10^4, 10^4), percentile=myp)
- fit2 = vglm(BMI ~ bs(age), fam=alsqreg(w=bestw$root), data=bminz)
+ fit2 = vglm(BMI ~ bs(age), fam=amlnormal(w=bestw$root), data=bminz)
\dontrun{
with(bminz, lines(age, c(fitted(fit2)), col="red"))
}
@@ -165,10 +172,9 @@ for(myp in c(25,50,75)) {
# Example 3; this is Example 1 but with smoothing splines and
# a vector w and a parallelism assumption.
-data(bminz)
o = with(bminz, order(age))
bminz = bminz[o,] # Sort by age
-fit3 = vgam(BMI ~ s(age, df=4), fam=alsqreg(w=c(.1,1,10), parallel=TRUE),
+fit3 = vgam(BMI ~ s(age, df=4), fam=amlnormal(w=c(.1,1,10), parallel=TRUE),
data=bminz, trac=TRUE)
fit3 at extra # The w values, percentiles and weighted deviances
diff --git a/man/amlpoisson.Rd b/man/amlpoisson.Rd
index 3f8cce4..f99af01 100644
--- a/man/amlpoisson.Rd
+++ b/man/amlpoisson.Rd
@@ -13,6 +13,10 @@ amlpoisson(w.aml=1, parallel=FALSE, method.init=1, digw=4,
}
%- maybe also 'usage' for other objects documented here.
\arguments{
+ In this documentation the word \emph{quantile} can often be
+ interchangeably replaced by \emph{expectile}
+ (things are informal here).
+
\item{w.aml}{
Numeric, a vector of positive constants controlling the percentiles.
The larger the value the larger the fitted percentile value
@@ -124,7 +128,7 @@ amlpoisson(w.aml=1, parallel=FALSE, method.init=1, digw=4,
}
\seealso{
- \code{\link{alsqreg}},
+ \code{\link{amlnormal}},
\code{\link{amlbinomial}},
\code{\link{alaplace1}}.
diff --git a/man/auuc.Rd b/man/auuc.Rd
index 2c1a7f5..88a27eb 100644
--- a/man/auuc.Rd
+++ b/man/auuc.Rd
@@ -1,7 +1,7 @@
\name{auuc}
\alias{auuc}
\docType{data}
-\title{ Auckland University Undergraduate Counts}
+\title{ Auckland University Undergraduate Counts Data}
\description{
Undergraduate student enrolments
at the University of Auckland in 1990.
@@ -32,7 +32,6 @@ down to 4 = lowest).
New York: Wiley.
}
\examples{
-data(auuc)
round(fitted(grc(auuc)))
round(fitted(grc(auuc, Rank=2)))
}
diff --git a/man/benini.Rd b/man/benini.Rd
index a17b5cb..e9e6d9d 100644
--- a/man/benini.Rd
+++ b/man/benini.Rd
@@ -8,7 +8,7 @@
}
\usage{
-benini(y0=stop("argument \"y0\" must be specified"),
+benini(y0=stop("argument 'y0' must be specified"),
lshape="loge", earg=list(), ishape=NULL, method.init=1)
}
%- maybe also 'usage' for other objects documented here.
@@ -46,7 +46,7 @@ benini(y0=stop("argument \"y0\" must be specified"),
has a probability density function that can be written
\deqn{f(y) = 2 b \exp(-b[(\log(y/y_0))^2]) \log(y/y_0) / y }{%
f(y) = 2*b*exp(-b * [(log(y/y0))^2]) * log(y/y0) / y}
- for \eqn{y_0>0}, \eqn{y_0<y}{y0<y}, and \eqn{b>0}.
+ for \eqn{0 < y_0 < y}{0<y0<y}, and \eqn{b>0}.
The cumulative distribution function for \eqn{Y} is
\deqn{F(y) = 1 - \exp(-b[(\log(y/y_0))^2]).}{%
F(y) = 1 - exp(-b * [(log(y/y0))^2]). }
@@ -92,7 +92,7 @@ Coef(fit)
fit at extra$y0
# Apparent discrepancy:
-fitted(fit)[1:5]
+head(fitted(fit))
mean(y)
}
\keyword{models}
diff --git a/man/beniniUC.Rd b/man/beniniUC.Rd
index 00d607d..57eab08 100644
--- a/man/beniniUC.Rd
+++ b/man/beniniUC.Rd
@@ -11,7 +11,7 @@
\code{shape}.
}
\usage{
-dbenini(x, shape, y0)
+dbenini(x, shape, y0, log=FALSE)
pbenini(q, shape, y0)
qbenini(p, shape, y0)
rbenini(n, shape, y0)
@@ -23,6 +23,12 @@ rbenini(n, shape, y0)
Must be a positive integer of length 1.}
\item{shape}{the shape parameter \eqn{b}.}
\item{y0}{the scale parameter \eqn{y_0}{y0}.}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dbenini} gives the density,
diff --git a/man/beta.ab.Rd b/man/beta.ab.Rd
index 11e6db7..6b907d9 100644
--- a/man/beta.ab.Rd
+++ b/man/beta.ab.Rd
@@ -138,7 +138,7 @@ Coef(fit) # Useful for intercept-only models
Y = 5 + 8 * y # From 5 to 13, not 0 to 1
fit = vglm(Y ~ 1, beta.ab(A=5, B=13), trace = TRUE)
Coef(fit)
-c(mean(Y), fitted(fit)[1:2,])
+c(mean(Y), head(fitted(fit)))
}
\keyword{models}
\keyword{regression}
diff --git a/man/betabin.ab.Rd b/man/betabin.ab.Rd
index 1df56b1..dcb4b93 100644
--- a/man/betabin.ab.Rd
+++ b/man/betabin.ab.Rd
@@ -9,12 +9,13 @@
}
\usage{
-betabin.ab(link.shape12 = "loge", earg = list(),
- i1 = 1, i2 = NULL, zero = NULL)
+betabin.ab(lshape12 = "loge", earg = list(),
+ i1 = 1, i2 = NULL, method.init=1,
+ shrinkage.init=0.95, nsimEIM=NULL, zero=NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link.shape12}{
+ \item{lshape12}{
Link function applied to both (positive) shape parameters
of the beta distribution.
See \code{\link{Links}} for more choices.
@@ -41,6 +42,13 @@ betabin.ab(link.shape12 = "loge", earg = list(),
try \code{zero=2}.
}
+ \item{shrinkage.init, nsimEIM, method.init}{
+ See \code{\link{CommonVGAMffArguments}} for more information.
+ The argument \code{shrinkage.init} is used only if \code{method.init=2}.
+ Using the argument \code{nsimEIM} may offer large advantages for large
+ values of \eqn{N} and/or large data sets.
+
+ }
}
\details{
There are several parameterizations of the beta-binomial distribution.
@@ -149,6 +157,7 @@ betabin.ab(link.shape12 = "loge", earg = list(),
positive value, using \code{i2} and/or setting \code{zero=2}.
This family function may be renamed in the future.
+ See the warnings in \code{\link{betabinomial}}.
}
\seealso{
@@ -167,12 +176,11 @@ y = rbetabin.ab(n=100, size=N, shape1=s1, shape2=s2)
fit = vglm(cbind(y,N-y) ~ 1, betabin.ab, trace=TRUE)
coef(fit, matrix=TRUE)
Coef(fit)
-fit at misc$rho[1:4] # The correlation parameter
-cbind(fit at y, weights(fit, type="prior"))[1:5,]
+head(fit at misc$rho) # The correlation parameter
+head(cbind(fit at y, weights(fit, type="prior")))
# Example 2
-data(lirat)
fit = vglm(cbind(R,N-R) ~ 1, betabin.ab, data=lirat, tra=TRUE, subset=N>1)
coef(fit, matrix=TRUE)
Coef(fit)
@@ -180,8 +188,7 @@ fit at misc$rho # The correlation parameter
t(fitted(fit))
t(fit at y)
t(weights(fit, type="prior"))
-# A loge link for the 2 shape parameters results in
-# a logistic regression:
+# A "loge" link for the 2 shape parameters is a logistic regression:
all.equal(c(fitted(fit)),
c(logit(predict(fit)[,1] - predict(fit)[,2], inverse=TRUE)))
@@ -200,7 +207,6 @@ with(lirat, plot(hb[N>1], fit2 at misc$rho,
pch=as.character(grp[N>1]), col=grp[N>1]))
}
\dontrun{
-data(lirat)
# cf. Figure 3 of Moore and Tsiatis (1991)
with(lirat, plot(hb, R/N, pch=as.character(grp), col=grp, las=1,
xlab="Hemoglobin level", ylab="Proportion Dead",
diff --git a/man/betabinUC.Rd b/man/betabinUC.Rd
index 3640be6..f498b19 100644
--- a/man/betabinUC.Rd
+++ b/man/betabinUC.Rd
@@ -93,11 +93,11 @@ rbetabin.ab(n, size, shape1, shape2)
N = 9; x = 0:N; s1=2; s2=3
dy = dbetabin.ab(x, size=N, shape1=s1, shape2=s2)
barplot(rbind(dy, dbinom(x, size=N, prob=s1/(s1+s2))),
- beside = TRUE, col = c("blue","green"),
+ beside = TRUE, col = c("blue","green"), las=1,
main=paste("Beta-binomial (size=",N,", shape1=",s1,
", shape2=",s2,") (blue) vs\n",
" Binomial(size=", N, ", prob=", s1/(s1+s2), ") (green)", sep=""),
- names.arg = as.character(x))
+ names.arg = as.character(x), cex.main=0.8)
sum(dy*x) # Check expected values are equal
sum(dbinom(x, size=N, prob=s1/(s1+s2))*x)
cumsum(dy) - pbetabin.ab(x, N, shape1=s1, shape2=s2)
@@ -105,11 +105,11 @@ cumsum(dy) - pbetabin.ab(x, N, shape1=s1, shape2=s2)
y = rbetabin.ab(n=10000, size=N, shape1=s1, shape2=s2)
ty = table(y)
barplot(rbind(dy, ty/sum(ty)),
- beside = TRUE, col = c("blue","red"),
+ beside = TRUE, col = c("blue","red"), las=1,
main=paste("Beta-binomial (size=",N,", shape1=",s1,
", shape2=",s2,") (blue) vs\n",
" Random generated beta-binomial(size=", N, ", prob=", s1/(s1+s2),
- ") (red)", sep=""),
+ ") (red)", sep=""), cex.main=0.8,
names.arg = as.character(x))
}
}
diff --git a/man/betabinomial.Rd b/man/betabinomial.Rd
index 29cb0c8..deb2836 100644
--- a/man/betabinomial.Rd
+++ b/man/betabinomial.Rd
@@ -9,14 +9,16 @@
}
\usage{
betabinomial(lmu="logit", lrho="logit", emu=list(), erho=list(),
- irho=NULL, method.init=1, zero=2)
+ irho=NULL, method.init=1, shrinkage.init=0.95,
+ nsimEIM=NULL, zero=2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{lmu, lrho}{
Link functions applied to the two parameters.
See \code{\link{Links}} for more choices.
- The defaults ensure the parameters remain in \eqn{(0,1)}.
+ The defaults ensure the parameters remain in \eqn{(0,1)},
+ however, see the warning below.
}
\item{emu, erho}{
@@ -33,9 +35,9 @@ betabinomial(lmu="logit", lrho="logit", emu=list(), erho=list(),
}
\item{method.init}{
- An integer with value \code{1} or \code{2} which
- specifies the initialization method for \eqn{\mu}{mu}.
- If failure to converge occurs try the other value
+ An integer with value \code{1} or \code{2} or \ldots,
+ which specifies the initialization method for \eqn{\mu}{mu}.
+ If failure to converge occurs try the another value
and/or else specify a value for \code{irho}.
}
@@ -46,6 +48,14 @@ betabinomial(lmu="logit", lrho="logit", emu=list(), erho=list(),
The default is to have a single correlation parameter.
To model both parameters as functions of the covariates assign
\code{zero=NULL}.
+ See \code{\link{CommonVGAMffArguments}} for more information.
+
+ }
+ \item{shrinkage.init, nsimEIM}{
+ See \code{\link{CommonVGAMffArguments}} for more information.
+ The argument \code{shrinkage.init} is used only if \code{method.init=2}.
+ Using the argument \code{nsimEIM} may offer large advantages for large
+ values of \eqn{N} and/or large data sets.
}
}
@@ -72,7 +82,8 @@ betabinomial(lmu="logit", lrho="logit", emu=list(), erho=list(),
\deqn{P(T=t) = {N \choose t} \frac{B(\alpha+t, \beta+N-t)}
{B(\alpha, \beta)}}{%
P(T=t) = choose(N,t) B(alpha+t, beta+N-t) / B(alpha, beta)}
- where \eqn{t=0,1,\ldots,N}, and \eqn{B} is the beta function
+ where \eqn{t=0,1,\ldots,N}, and \eqn{B} is the
+ \code{\link[base:Special]{beta}} function
with shape parameters \eqn{\alpha}{alpha} and \eqn{\beta}{beta}.
Recall \eqn{Y = T/N} is the real response being modelled.
@@ -138,14 +149,20 @@ betabinomial(lmu="logit", lrho="logit", emu=list(), erho=list(),
is currently not implemented in the \pkg{VGAM} package as it has
range-restrictions for the correlation parameter that are currently
too difficult to handle in this package.
-
+ However, try \code{lrho="rhobit"}.
+
}
\section{Warning }{
+
+ If the estimated rho parameter is close to zero then it pays to try
+ \code{lrho="rhobit"}. One day this may become the default link function.
+
This family function is prone to numerical difficulties
due to the expected information matrices not being positive-definite
or ill-conditioned over some regions of the parameter space.
- If problems occur try setting \code{irho} to some numerical
- value, or else use \code{etastart} argument of
+ If problems occur try setting \code{irho} to some numerical value,
+ \code{nsimEIM=100}, say,
+ or else use \code{etastart} argument of
\code{\link{vglm}}, etc.
}
@@ -165,11 +182,10 @@ y = rbetabin(n=100, size=N, prob=mu, rho=rho)
fit = vglm(cbind(y,N-y) ~ 1, betabinomial, trace=TRUE)
coef(fit, matrix=TRUE)
Coef(fit)
-cbind(fit at y, weights(fit, type="prior"))[1:5,]
+head(cbind(fit at y, weights(fit, type="prior")))
# Example 2
-data(lirat)
fit = vglm(cbind(R,N-R) ~ 1, betabinomial, data=lirat,
trace=TRUE, subset=N>1)
coef(fit, matrix=TRUE)
@@ -191,7 +207,6 @@ with(lirat, plot(hb[N>1], fit2 at misc$rho,
pch=as.character(grp[N>1]), col=grp[N>1]))
}
\dontrun{
-data(lirat)
# cf. Figure 3 of Moore and Tsiatis (1991)
with(lirat, plot(hb, R/N, pch=as.character(grp), col=grp, las=1,
xlab="Hemoglobin level", ylab="Proportion Dead",
diff --git a/man/betaprime.Rd b/man/betaprime.Rd
index 954fc52..799e8b4 100644
--- a/man/betaprime.Rd
+++ b/man/betaprime.Rd
@@ -112,7 +112,7 @@ coef(fit3, matrix=TRUE)
# Compare the fitted values
mean(y3)
-fitted(fit3)[1:5]
+head(fitted(fit3))
Coef(fit3) # Useful for intercept-only models
}
\keyword{models}
diff --git a/man/bilogis4UC.Rd b/man/bilogis4UC.Rd
index 1b2b905..cbdcb5e 100644
--- a/man/bilogis4UC.Rd
+++ b/man/bilogis4UC.Rd
@@ -10,7 +10,7 @@
}
\usage{
-dbilogis4(x1, x2, loc1=0, scale1=1, loc2=0, scale2=1)
+dbilogis4(x1, x2, loc1=0, scale1=1, loc2=0, scale2=1, log=FALSE)
pbilogis4(q1, q2, loc1=0, scale1=1, loc2=0, scale2=1)
rbilogis4(n, loc1=0, scale1=1, loc2=0, scale2=1)
}
@@ -20,6 +20,12 @@ rbilogis4(n, loc1=0, scale1=1, loc2=0, scale2=1)
Must be a positive integer of length 1.}
\item{loc1, loc2}{the location parameters \eqn{l_1}{l1} and \eqn{l_2}{l2}.}
\item{scale1, scale2}{the scale parameters \eqn{s_1}{s1} and \eqn{s_2}{s2}.}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dbilogis4} gives the density,
diff --git a/man/bilogistic4.Rd b/man/bilogistic4.Rd
index 204d83e..0b0f767 100644
--- a/man/bilogistic4.Rd
+++ b/man/bilogistic4.Rd
@@ -118,9 +118,9 @@ ymat = rbilogis4(n <- 1000, loc1=5, loc2=7, scale2=exp(1))
fit = vglm(ymat ~ 1, fam=bilogistic4, trace=TRUE)
coef(fit, matrix=TRUE)
Coef(fit)
-fitted(fit)[1:4,]
+head(fitted(fit))
vcov(fit)
-weights(fit, type="w")[1:4,]
+head(weights(fit, type="w"))
summary(fit)
}
\keyword{models}
diff --git a/man/binom2.or.Rd b/man/binom2.or.Rd
index 435105c..e9e57f5 100644
--- a/man/binom2.or.Rd
+++ b/man/binom2.or.Rd
@@ -131,6 +131,11 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
Technical Report no. 101, Department of Biostatistics,
University of Washington, Seattle.
+ Yee, T. W. and Dirnbock, T. (2009)
+ Models for analysing species' presence/absence data
+ at two time points.
+ Journal of Theoretical Biology, \bold{259}. In press.
+
Documentation accompanying the \pkg{VGAM} package at
\url{http://www.stat.auckland.ac.nz/~yee}
contains further information and examples.
@@ -158,6 +163,12 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
to specify \code{exchangeable=TRUE} and have different link functions
for the two marginal probabilities.
+ Regarding Yee and Dirnbock (2009),
+ the \code{xij} (see \code{\link{vglm.control}}) argument enables
+ environmental variables with different values at the two time points
+ to be entered into an exchangeable \code{\link{binom2.or}} model.
+ See the author's webpage for sample code.
+
}
\seealso{
\code{\link{rbinom2.or}},
@@ -173,7 +184,6 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
}
\examples{
# Fit the model in Table 6.7 in McCullagh and Nelder (1989)
-data(coalminers)
coalminers = transform(coalminers, Age = (age - 42) / 5)
fit = vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.or(zero=NULL), coalminers)
fitted(fit)
diff --git a/man/binom2.rho.Rd b/man/binom2.rho.Rd
index c6fbae7..4edb6b2 100644
--- a/man/binom2.rho.Rd
+++ b/man/binom2.rho.Rd
@@ -130,7 +130,6 @@ contains further information and examples.
}
\examples{
-data(coalminers)
coalminers = transform(coalminers, Age = (age - 42) / 5)
fit = vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.rho,
data=coalminers, trace=TRUE)
diff --git a/man/binomialff.Rd b/man/binomialff.Rd
index fc27285..a1ab703 100644
--- a/man/binomialff.Rd
+++ b/man/binomialff.Rd
@@ -100,6 +100,10 @@ binomialff(link = "logit", earg = list(),
McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+ Altman, M. and Gill, J. and McDonald, M. P. (2004)
+ \emph{Numerical Issues in Statistical Computing for the Social
+ Scientist}, Hoboken, NJ: Wiley-Interscience.
+
}
\author{ Thomas W. Yee }
@@ -134,6 +138,7 @@ binomialff(link = "logit", earg = list(),
\code{\link{rrvglm}},
\code{\link{cqo}},
\code{\link{cao}},
+ \code{\link{betabinomial}},
\code{\link{zibinomial}},
\code{\link{dexpbinomial}},
\code{\link{mbinomial}},
@@ -144,8 +149,14 @@ binomialff(link = "logit", earg = list(),
}
\section{Warning }{
With a multivariate response, assigning a known dispersion parameter
- for \emph{each} response is not handled well yet. Currently, only
- a single known dispersion parameter is handled well.
+ for \emph{each} response is not handled well yet.
+ Currently, only a single known dispersion parameter is handled well.
+
+ The maximum likelihood estimate will not exist if the data is
+ \emph{completely separable} or \emph{quasi-completely separable}.
+ See Chapter 10 of Altman et al. (2004) for more details.
+ Yet to do: add a \code{sepcheck=TRUE}, say, argument to detect this
+ problem and give an appropriate warning.
}
@@ -153,7 +164,6 @@ binomialff(link = "logit", earg = list(),
quasibinomialff()
quasibinomialff(link="probit")
-data(hunua)
fit = vgam(agaaus ~ poly(altitude, 2), binomialff(link=cloglog), hunua)
\dontrun{
with(hunua, plot(altitude, agaaus, col="blue", ylab="P(agaaus=1)",
diff --git a/man/bisa.Rd b/man/bisa.Rd
index f8ea790..51ea7b7 100644
--- a/man/bisa.Rd
+++ b/man/bisa.Rd
@@ -129,7 +129,7 @@ fit = vglm(y ~ 1, bisa, trace=TRUE)
hist(y, prob=TRUE, ylim=c(0,0.5), col="lightblue")
coef(fit, matrix=TRUE)
mean(y)
-fitted(fit)[1:4]
+head(fitted(fit))
x = seq(0, max(y), len=200)
lines(x, dbisa(x, Coef(fit)[1], Coef(fit)[2]), col="red", lwd=2)
}
diff --git a/man/bisaUC.Rd b/man/bisaUC.Rd
index 85ca26b..4819546 100644
--- a/man/bisaUC.Rd
+++ b/man/bisaUC.Rd
@@ -20,7 +20,7 @@ rbisa(n, shape, scale=1)
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
\item{n}{number of observations.
- Must be a positive integer of length 1.}
+ If \code{length(n) > 1} then the length is taken to be the number required. }
\item{shape, scale}{
the (positive) shape and scale parameters.
diff --git a/man/bminz.Rd b/man/bminz.Rd
index 6eb1a3c..efc165f 100644
--- a/man/bminz.Rd
+++ b/man/bminz.Rd
@@ -1,7 +1,7 @@
\name{bminz}
\alias{bminz}
\docType{data}
-\title{ Body Mass Index of New Zealand Adults }
+\title{ Body Mass Index of New Zealand Adults Data}
\description{
The body mass indexes and ages from an approximate random
sample of 700 New Zealand adults.
@@ -12,8 +12,8 @@
\describe{
\item{age}{a numeric vector; their age (years). }
\item{BMI}{a numeric vector; their body mass indexes, which is
- their weight divided by the square of their height (kg /
- \eqn{m^2}{m^2} ).}
+ their weight divided by the square of their height
+ (kg / \eqn{m^2}{m^2}).}
}
}
\details{
@@ -38,7 +38,6 @@ Health Study: design and baseline findings.
}
\examples{
\dontrun{
-data(bminz)
with(bminz, plot(age, BMI, col="blue"))
fit = vgam(BMI ~ s(age, df=c(2,4,2)), fam=lms.yjn, bminz, tr=TRUE)
qtplot(fit, pcol="blue", tcol="brown", lcol="brown")
diff --git a/man/bortUC.Rd b/man/bortUC.Rd
index ff2caa1..2303540 100644
--- a/man/bortUC.Rd
+++ b/man/bortUC.Rd
@@ -12,7 +12,7 @@
}
\usage{
-dbort(x, Qsize=1, a=0.5)
+dbort(x, Qsize=1, a=0.5, log=FALSE)
%pbort(q, Qsize=1, a=0.5)
%qbort(p, Qsize=1, a=0.5)
rbort(n, Qsize=1, a=0.5)
@@ -22,8 +22,15 @@ rbort(n, Qsize=1, a=0.5)
% \item{p}{vector of probabilities.}
\item{n}{number of observations.
Must be a positive integer of length 1.}
- \item{Qsize, a}{ See \code{\link{borel.tanner}}.
- }
+ \item{Qsize, a}{
+ See \code{\link{borel.tanner}}.
+ }
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dbort} gives the density,
diff --git a/man/calibrate.Rd b/man/calibrate.Rd
index 1294758..c67a111 100644
--- a/man/calibrate.Rd
+++ b/man/calibrate.Rd
@@ -56,7 +56,6 @@ calibrate(object, ...)
\examples{
\dontrun{
-data(hspider)
hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
set.seed(123)
p1 = cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
diff --git a/man/calibrate.qrrvglm.Rd b/man/calibrate.qrrvglm.Rd
index 591007e..ff1449d 100644
--- a/man/calibrate.qrrvglm.Rd
+++ b/man/calibrate.qrrvglm.Rd
@@ -114,7 +114,6 @@ Cambridge.
\code{\link{cao}}.
}
\examples{
-data(hspider)
hspider[,1:6] = scale(hspider[,1:6]) # Standardize the environmental variables
set.seed(123)
p1 = cqo(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
diff --git a/man/calibrate.qrrvglm.control.Rd b/man/calibrate.qrrvglm.control.Rd
index 625fd5f..666b148 100644
--- a/man/calibrate.qrrvglm.control.Rd
+++ b/man/calibrate.qrrvglm.control.Rd
@@ -80,7 +80,6 @@ On constrained and unconstrained quadratic ordination.
\code{\link{Coef.qrrvglm}}.
}
\examples{
-data(hspider)
hspider[,1:6] = scale(hspider[,1:6]) # Needed when ITol = TRUE
set.seed(123)
p1 = cqo(cbind(Alopacce, Alopcune, Pardlugu, Pardnigr,
diff --git a/man/cao.Rd b/man/cao.Rd
index cf20149..2af4486 100644
--- a/man/cao.Rd
+++ b/man/cao.Rd
@@ -250,14 +250,16 @@ Constrained additive ordination.
a fast machine. I hope to look at ways of speeding things up in the
future.
- CAO models present a difficult optimization problem, partly because the
+ Use \code{\link[base:Random]{set.seed}} just prior to calling
+ \code{cao()} to make your results reproducible.
+ The reason for this is that finding the optimal
+ CAO model presents a difficult optimization problem, partly because the
log-likelihood function contains many local solutions. To obtain the
- (global) solution the user is advised to try many initial values. This
- can be done by setting \code{Bestof} some appropriate value (see
- \code{\link{cao.control}}). Trying many initial values becomes
+ (global) solution the user is advised to try \emph{many} initial values.
+ This can be done by setting \code{Bestof} to some appropriate value
+ (see \code{\link{cao.control}}). Trying many initial values becomes
progressively more important as the nonlinear degrees of freedom of
- the smooths increase. Use \code{\link[base:Random]{set.seed}} to make your
- results reproducible.
+ the smooths increase.
% The code is a little fragile at this stage, so the function might
% hang/lock up in the microsoft Windows version.
@@ -288,9 +290,8 @@ Constrained additive ordination.
\examples{
\dontrun{
-data(hspider)
hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
-set.seed(149)
+set.seed(149) # For reproducible results
ap1 = cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull) ~
WaterCon + BareSand + FallTwig +
CoveMoss + CoveHerb + ReflLux,
diff --git a/man/cao.control.Rd b/man/cao.control.Rd
index d88fe04..ee094e8 100644
--- a/man/cao.control.Rd
+++ b/man/cao.control.Rd
@@ -293,7 +293,6 @@ London: Chapman & Hall.
}
\examples{\dontrun{
-data(hspider)
hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
set.seed(123)
ap1 = cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
diff --git a/man/cardUC.Rd b/man/cardUC.Rd
index 31224f0..5f3c6d2 100644
--- a/man/cardUC.Rd
+++ b/man/cardUC.Rd
@@ -12,7 +12,7 @@
}
\usage{
-dcard(x, mu, rho)
+dcard(x, mu, rho, log=FALSE)
pcard(q, mu, rho)
qcard(p, mu, rho, tolerance = 1e-07, maxits = 500)
rcard(n, mu, rho, ...)
@@ -35,6 +35,12 @@ rcard(n, mu, rho, ...)
to vary the two arguments.
}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\details{
See \code{\link{cardioid}}, the \pkg{VGAM} family function
diff --git a/man/cauchy.Rd b/man/cauchy.Rd
index 9060fd4..7a5c1be 100644
--- a/man/cauchy.Rd
+++ b/man/cauchy.Rd
@@ -35,8 +35,11 @@ cauchy1(scale.arg=1, llocation="identity",
}
\item{method.init}{
- Integer, either 1 or 2 or 3. Initial method, three algorithms are
- implemented. Choose the another value if convergence fails, or use
+ Integer, either 1 or 2 or 3.
+ Initial method, three algorithms are implemented.
+ The user should try all possible values to help avoid converging
+ to a local solution.
+ Also, choose another value if convergence fails, or use
\code{ilocation} and/or \code{iscale}.
}
@@ -80,6 +83,14 @@ cauchy1(scale.arg=1, llocation="identity",
and \code{\link{vgam}}.
}
+\section{Warning }{
+ It is well-known that the Cauchy distribution may have local
+ maxima in its likelihood function;
+ make full use of \code{method.init}, \code{ilocation}, \code{iscale}
+ etc.
+
+}
+
\references{
Evans, M., Hastings, N. and Peacock, B. (2000)
@@ -118,6 +129,7 @@ Observed versus expected Fisher information.
\seealso{
\code{\link[stats:Cauchy]{Cauchy}},
\code{\link{cauchit}}.
+
}
\examples{
# Both location and scale parameters unknown
@@ -125,7 +137,7 @@ x = runif(n <- 1000)
y = rcauchy(n, loc=exp(1+0.5*x), scale=exp(1))
fit = vglm(y ~ x, cauchy(lloc="loge"), trace=TRUE)
coef(fit, matrix=TRUE)
-fitted(fit)[1:4] # location estimates
+head(fitted(fit)) # Location estimates
summary(fit)
# Location parameter unknown
diff --git a/man/ccoef.Rd b/man/ccoef.Rd
index 4a5df12..75d44a7 100644
--- a/man/ccoef.Rd
+++ b/man/ccoef.Rd
@@ -77,7 +77,6 @@ Constrained additive ordination.
}
\examples{
-data(hspider)
set.seed(111) # This leads to the global solution
hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
diff --git a/man/cdf.lmscreg.Rd b/man/cdf.lmscreg.Rd
index 9c21276..42de180 100644
--- a/man/cdf.lmscreg.Rd
+++ b/man/cdf.lmscreg.Rd
@@ -62,12 +62,11 @@ The CDF values of the model have been placed in
\code{\link{lms.yjn}}.
}
\examples{
-data(bminz)
fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bminz)
-fit at post$cdf[1:5]
-cdf(fit)[1:5] # Same
-fit at y[1:5]
-fitted(fit)[1:5,]
+head(fit at post$cdf)
+head(cdf(fit)) # Same
+head(fit at y)
+head(fitted(fit))
cdf(fit, data.frame(age=c(31.5,39), BMI=c(28.4,24)))
}
diff --git a/man/cenpoisson.Rd b/man/cenpoisson.Rd
index 06524eb..2ec0238 100644
--- a/man/cenpoisson.Rd
+++ b/man/cenpoisson.Rd
@@ -132,7 +132,7 @@ table(print(fit at y)) # Another check
# Example 4: Add in some uncensored observations
index = (1:n)[icensored]
-index = index[1:min(4,length(index))]
+index = head(index, 4)
status[index] = 1 # actual or uncensored value
Lvec[index] = y[index]
table(i <- print(Surv(Lvec, Uvec, status, type="interval"))) # Check
diff --git a/man/cgumbel.Rd b/man/cgumbel.Rd
index 7c4f645..7bc2cdc 100644
--- a/man/cgumbel.Rd
+++ b/man/cgumbel.Rd
@@ -105,7 +105,6 @@ London: Springer-Verlag.
\examples{
# Example 1
-data(venice)
ystar = venice[["r1"]] # Use the first order statistic as the response
n = length(ystar)
L = runif(n, 100, 104) # Lower censoring points
@@ -116,7 +115,7 @@ extra = list(leftcensored = ystar < L, rightcensored = ystar > U)
fit = vglm(y ~ scale(year), data=venice, trace=TRUE, extra=extra,
cgumbel(mean=FALSE, perc=c(5,25,50,75,95)))
coef(fit, matrix=TRUE)
-fitted(fit)[1:5,]
+head(fitted(fit))
fit at extra
# Example 2: simulated data
diff --git a/man/chest.Rd b/man/chest.Rd
index 552ad11..0aad154 100644
--- a/man/chest.Rd
+++ b/man/chest.Rd
@@ -1,7 +1,7 @@
\name{chest}
\alias{chest}
\docType{data}
-\title{ Chest Pain in NZ Adults }
+\title{ Chest Pain in NZ Adults Data}
\description{
Presence/absence of chest pain in 10186 New Zealand adults.
}
@@ -32,7 +32,6 @@
\bold{108}, 499--502.
}
\examples{
-data(chest)
fit = vgam(cbind(nolnor, nolr,lnor,lr) ~ s(age, c(4,3)),
binom2.or(exchan=TRUE, zero=NULL), data = chest)
coef(fit, matrix=TRUE)
diff --git a/man/coalminers.Rd b/man/coalminers.Rd
index 5092b6d..b3c789b 100644
--- a/man/coalminers.Rd
+++ b/man/coalminers.Rd
@@ -1,7 +1,7 @@
\name{coalminers}
\alias{coalminers}
\docType{data}
-\title{ Breathlessness and Wheeze Amongst Coalminers }
+\title{ Breathlessness and Wheeze Amongst Coalminers Data}
\description{
Coalminers who are smokers without radiological pneumoconiosis,
classified by age, breathlessness and wheeze.
@@ -33,7 +33,6 @@
}
\examples{
-data(coalminers)
str(coalminers)
}
\keyword{datasets}
diff --git a/man/constraints.Rd b/man/constraints.Rd
index 982d323..59ccd34 100644
--- a/man/constraints.Rd
+++ b/man/constraints.Rd
@@ -56,6 +56,11 @@ constraints(object, ...)
and \code{\link{vgam}} allows constraint matrices to
be inputted. If so, then \code{constraints(fit)} should
return the same as the input.
+
+The \code{xij} argument does not affect constraint matrices; rather,
+it allows each row of the constraint matrix to be multiplied by a
+specified vector.
+
}
\references{
@@ -73,9 +78,6 @@ Reduced-rank vector generalized linear models.
information.
}
-\section{Warning }{The \code{xij} argument changes things,
- and this has not been fully resolved yet.
-}
\seealso{
VGLMs are described in \code{\link{vglm-class}};
@@ -85,10 +87,11 @@ information.
found in many \pkg{VGAM}
family functions are a way of creating/modifying constraint
matrices conveniently, e.g., see \code{\link{zero}}.
+ See \code{\link{CommonVGAMffArguments}} for more information.
+
}
\examples{
# Fit the proportional odds model
-data(pneumo)
pneumo = transform(pneumo, let=log(exposure.time))
(fit = vglm(cbind(normal, mild, severe) ~ let,
cumulative(parallel=TRUE, reverse=TRUE), pneumo))
diff --git a/man/cqo.Rd b/man/cqo.Rd
index 2b7d240..b1ac355 100644
--- a/man/cqo.Rd
+++ b/man/cqo.Rd
@@ -326,6 +326,10 @@ Constrained additive ordination.
With multivariate binary responses, one must use \code{mv=TRUE} to
indicate that the response (matrix) is multivariate. Otherwise, it is
interpreted as a single binary response variable.
+ In Example 4 below, the deviance residuals are plotted for each species.
+ This is useful as a diagnostic plot.
+ This is done by (re)regressing each species separately against the latent
+ variable.
Sometime in the future, this function might handle input of the form
\code{cqo(x, y)}, where \code{x} and \code{y} are matrices containing
@@ -406,24 +410,24 @@ contains further information and examples.
}
\examples{
# Example 1; Fit an unequal tolerances model to the hunting spiders data
-data(hspider)
hspider[,1:6]=scale(hspider[,1:6]) # Standardize the environmental variables
set.seed(1234)
-p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
- Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
- WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- fam=poissonff, data=hspider, Crow1positive=FALSE, ITol=FALSE)
-sort(p1 at misc$deviance.Bestof) # A history of all the iterations
-if(deviance(p1) > 1177) stop("suboptimal fit obtained")
+p1ut = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ fam=poissonff, data=hspider, Crow1positive=FALSE, ITol=FALSE)
+sort(p1ut at misc$deviance.Bestof) # A history of all the iterations
+if(deviance(p1ut) > 1177) stop("suboptimal fit obtained")
\dontrun{
-S = ncol(p1 at y) # Number of species
+S = ncol(p1ut at y) # Number of species
clr = (1:(S+1))[-7] # omits yellow
-lvplot(p1, y=TRUE, lcol=clr, pch=1:S, pcol=clr, las=1) # ordination diagram
-legend("topright", leg=colnames(p1 at y), col=clr,
+lvplot(p1ut, y=TRUE, lcol=clr, pch=1:S, pcol=clr, las=1) # ordination diagram
+legend("topright", leg=colnames(p1ut at y), col=clr,
pch=1:S, merge=TRUE, bty="n", lty=1:S, lwd=2)
}
-(cp = Coef(p1))
+(cp = Coef(p1ut))
(a = cp at lv[cp at lvOrder]) # The ordered site scores along the gradient
# Names of the ordered sites along the gradient:
@@ -433,22 +437,23 @@ a = a[!is.na(a)] # Delete the species that is not unimodal
names(a) # Names of the ordered optima along the gradient
\dontrun{
-trplot(p1, whichSpecies=1:3, log="xy", type="b", lty=1, lwd=2,
+trplot(p1ut, whichSpecies=1:3, log="xy", type="b", lty=1, lwd=2,
col=c("blue","red","green"), label=TRUE) -> ii # trajectory plot
legend(0.00005, 0.3, paste(ii$species[,1], ii$species[,2], sep=" and "),
lwd=2, lty=1, col=c("blue","red","green"))
abline(a=0, b=1, lty="dashed")
-S = ncol(p1 at y) # Number of species
+S = ncol(p1ut at y) # Number of species
clr = (1:(S+1))[-7] # omits yellow
-persp(p1, col=clr, label=TRUE, las=1) # perspective plot
+persp(p1ut, col=clr, label=TRUE, las=1) # perspective plot
}
# Example 2: A rank-2 equal tolerances CQO model with Poisson data
# This example is numerically fraught.
set.seed(555)
-p2 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
- Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
+p2 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) ~
WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
fam=poissonff, data=hspider, Crow1positive=FALSE,
# ITol=FALSE, EqualTol=TRUE,
@@ -469,18 +474,53 @@ mydata = rcqo(n, p, S, fam="binomial", hiabundance=4,
EqualTol=TRUE, ESOpt=TRUE, EqualMax=TRUE)
myform = attr(mydata, "formula")
set.seed(1234)
-b1 = cqo(myform, fam=binomialff(mv=TRUE, link="cloglog"), data=mydata)
-sort(b1 at misc$deviance.Bestof) # A history of all the iterations
+b1et = cqo(myform, fam=binomialff(mv=TRUE, link="cloglog"), data=mydata)
+sort(b1et at misc$deviance.Bestof) # A history of all the iterations
\dontrun{
-lvplot(b1, y=TRUE, lcol=1:S, pch=1:S, pcol=1:S, las=1)
+lvplot(b1et, y=TRUE, lcol=1:S, pch=1:S, pcol=1:S, las=1)
}
-Coef(b1)
+Coef(b1et)
# Compare the fitted model with the 'truth'
-cbind(truth=attr(mydata, "ccoefficients"), fitted=ccoef(b1))
+cbind(truth=attr(mydata, "ccoefficients"), fitted=ccoef(b1et))
+
+
+# Example 4: Plot the deviance residuals for diagnostic purposes
+set.seed(1234)
+p1et = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ fam=poissonff, data=hspider, ITol=TRUE, trace=FALSE)
+sort(p1et at misc$deviance.Bestof) # A history of all the iterations
+if(deviance(p1et) > 1586) stop("suboptimal fit obtained")
+S = ncol(p1et at y)
+par(mfrow=c(3,4))
+for(ii in 1:S) {
+ tempdata = data.frame(lv1 = c(lv(p1et)), sppCounts = p1et at y[,ii])
+ tempdata = transform(tempdata, myOffset = -0.5 * lv1^2)
+
+# For species ii, refit the model to get the deviance residuals
+ fit1 = vglm(sppCounts ~ offset(myOffset) + lv1, fam=poissonff,
+ data=tempdata, trace=FALSE)
+
+# For checking: this should be 0
+ print("max(abs(c(Coef(p1et)@B1[1,ii], Coef(p1et)@A[ii,1]) - coef(fit1)))")
+ print( max(abs(c(Coef(p1et)@B1[1,ii], Coef(p1et)@A[ii,1]) - coef(fit1))) )
+
+# # Plot the deviance residuals
+ devresid = resid(fit1, type = "deviance")
+ predvalues = predict(fit1) + fit1 at offset
+ oo = with(tempdata, order(lv1))
+\dontrun{
+ with(tempdata, plot(lv1, predvalues + devresid, col="darkgreen",
+ xlab="lv1", ylab="", main=colnames(p1et at y)[ii]))
+ with(tempdata, lines(lv1[oo], predvalues[oo], col="blue"))
+}
+}
}
\keyword{models}
\keyword{regression}
-%legend("topright", x=1, y=135, leg=colnames(p1 at y), col=clr,
+%legend("topright", x=1, y=135, leg=colnames(p1ut at y), col=clr,
% pch=1:S, merge=TRUE, bty="n", lty=1:S, lwd=2)
diff --git a/man/cratio.Rd b/man/cratio.Rd
index 7ea2549..75dc87e 100644
--- a/man/cratio.Rd
+++ b/man/cratio.Rd
@@ -118,7 +118,6 @@ contains further information and examples.
}
\examples{
-data(pneumo)
pneumo = transform(pneumo, let=log(exposure.time))
(fit = vglm(cbind(normal,mild,severe) ~ let, cratio(parallel=TRUE), pneumo))
coef(fit, matrix=TRUE)
diff --git a/man/cumulative.Rd b/man/cumulative.Rd
index e531423..cd97b41 100644
--- a/man/cumulative.Rd
+++ b/man/cumulative.Rd
@@ -1,10 +1,10 @@
\name{cumulative}
\alias{cumulative}
-\alias{scumulative}
+%\alias{scumulative}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Ordinal Regression with Cumulative Probabilities }
\description{
- Fits a cumulative logit/probit/cloglog/cauchit/...
+ Fits a cumulative link
regression model to a (preferably ordered) factor response.
}
@@ -12,49 +12,52 @@
cumulative(link = "logit", earg = list(),
parallel = FALSE, reverse = FALSE,
mv = FALSE, intercept.apply = FALSE)
-scumulative(link="logit", earg = list(),
- lscale="loge", escale = list(),
- parallel=FALSE, sparallel=TRUE, reverse=FALSE, iscale = 1)
}
+%scumulative(link="logit", earg = list(),
+% lscale="loge", escale = list(),
+% parallel=FALSE, sparallel=TRUE, reverse=FALSE, iscale = 1)
%- maybe also 'usage' for other objects documented here.
\arguments{
In the following, the response \eqn{Y} is assumed to be a factor
with ordered values \eqn{1,2,\dots,J+1}.
- \eqn{M} is the number of linear/additive predictors
+ Hence \eqn{M} is the number of linear/additive predictors
\eqn{\eta_j}{eta_j};
- for \code{cumulative()} \eqn{M=J},
- and for \code{scumulative()} \eqn{M=2J}.
+ for \code{cumulative()} \eqn{M=J}.
+% and for \code{scumulative()} \eqn{M=2J}.
\item{link}{
Link function applied to the \eqn{J} cumulative probabilities.
- See \code{\link{Links}} for more choices.
+ See \code{\link{Links}} for more choices,
+ e.g., for the cumulative
+ \code{\link{probit}}/\code{\link{cloglog}}/\code{\link{cauchit}}/\ldots
+ models.
}
- \item{lscale}{
- Link function applied to the \eqn{J} scaling parameters.
- See \code{\link{Links}} for more choices.
-
- }
- \item{earg, escale}{
- List. Extra argument for the respective link functions.
+% \item{lscale}{
+% Link function applied to the \eqn{J} scaling parameters.
+% See \code{\link{Links}} for more choices.
+%
+% }
+ \item{earg}{
+ List. Extra argument for the link function.
See \code{earg} in \code{\link{Links}} for general information.
}
\item{parallel}{
- A logical or formula specifying which terms have
+ A logical or formula specifying which terms have
equal/unequal coefficients.
See below for more information about the parallelism assumption.
}
- \item{sparallel}{
- For the scaling parameters.
- A logical, or formula specifying which terms have
- equal/unequal coefficients.
- This argument is not applied to the intercept.
- The \code{scumulative()} function requires covariates; for
- intercept models use \code{cumulative()}.
-
- }
+% \item{sparallel}{
+% For the scaling parameters.
+% A logical, or formula specifying which terms have
+% equal/unequal coefficients.
+% This argument is not applied to the intercept.
+% The \code{scumulative()} function requires covariates; for
+% intercept models use \code{cumulative()}.
+
+% }
\item{reverse}{
Logical.
By default, the cumulative probabilities used are
@@ -90,12 +93,14 @@ scumulative(link="logit", earg = list(),
\code{\link{nbolf}}.
}
- \item{iscale}{
- Numeric. Initial values for the scale parameters.
+% \item{iscale}{
+% Numeric. Initial values for the scale parameters.
- }
+% }
}
\details{
+ This \pkg{VGAM} family function fits the class of
+ cumulative link models to (hopefully) an ordinal response.
By default, the non-parallel cumulative logit model is fitted, i.e.,
\deqn{\eta_j = logit(P[Y \leq j])}{%
eta_j = logit(P[Y<=j])}
@@ -117,11 +122,11 @@ scumulative(link="logit", earg = list(),
Currently, reduced-rank vector generalized additive models
(RR-VGAMs) have not been implemented here.
- The scaled version of \code{cumulative()}, called \code{scumulative()},
- has \eqn{J} positive scaling factors.
- They are described in pages 154 and 177 of McCullagh and Nelder (1989);
- see their equation (5.4) in particular,
- which they call the \emph{generalized rational model}.
+% The scaled version of \code{cumulative()}, called \code{scumulative()},
+% has \eqn{J} positive scaling factors.
+% They are described in pages 154 and 177 of McCullagh and Nelder (1989);
+% see their equation (5.4) in particular,
+% which they call the \emph{generalized rational model}.
}
\value{
@@ -161,7 +166,8 @@ contains further information and examples.
\note{
The response should be either a matrix of counts (with row sums that
are all positive), or a factor. In both cases, the \code{y} slot
- returned by \code{vglm}/\code{vgam}/\code{rrvglm} is the matrix
+ returned by \code{\link{vglm}}/\code{\link{vgam}}/\code{\link{rrvglm}}
+ is the matrix
of counts.
The formula must contain an intercept term.
Other \pkg{VGAM} family functions for an ordinal response include
@@ -181,19 +187,20 @@ contains further information and examples.
then numerical problems are less likely to occur during the fitting,
and there are less parameters. Numerical problems occur when
the linear/additive predictors cross, which results in probabilities
- outside of (0,1); setting \code{parallel=TRUE} will help avoid
+ outside of \eqn{(0,1)}; setting \code{parallel=TRUE} will help avoid
this problem.
Here is an example of the usage of the \code{parallel} argument.
- If there are covariates \code{x1}, \code{x2} and \code{x3}, then
- \code{parallel = TRUE ~ x1 + x2 -1} and
- \code{parallel = FALSE ~ x3} are equivalent. This would constrain
- the regression coefficients for \code{x1} and \code{x2} to be
- equal; those of the intercepts and \code{x3} would be different.
+ If there are covariates \code{x2}, \code{x3} and \code{x4}, then
+ \code{parallel = TRUE ~ x2 + x3 -1} and
+ \code{parallel = FALSE ~ x4} are equivalent. This would constrain
+ the regression coefficients for \code{x2} and \code{x3} to be
+ equal; those of the intercepts and \code{x4} would be different.
If the data is inputted in \emph{long} format
(not \emph{wide} format, as in \code{\link{pneumo}} below)
- then good initial values are needed; use
+ and the self-starting initial values are not good enough then
+ try using
\code{mustart},
\code{coefstart} and/or
\code{etatstart}.
@@ -214,6 +221,7 @@ contains further information and examples.
}
\seealso{
+ \code{\link{margeff}},
\code{\link{acat}},
\code{\link{cratio}},
\code{\link{sratio}},
@@ -226,12 +234,12 @@ contains further information and examples.
\code{\link{cauchit}},
\code{\link{golf}},
\code{\link{polf}},
- \code{\link{nbolf}}.
+ \code{\link{nbolf}},
+ \code{\link{logistic1}}.
}
\examples{
# Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
-data(pneumo)
pneumo = transform(pneumo, let=log(exposure.time))
(fit = vglm(cbind(normal, mild, severe) ~ let,
cumulative(parallel=TRUE, reverse=TRUE), pneumo))
@@ -254,33 +262,29 @@ plot(fit2, se=TRUE, overlay=TRUE, lcol=1:2, scol=1:2)
df=length(coef(fit3))-length(coef(fit)))
# A factor() version of fit ----------------------------------
+# This is in long format (cf. wide format above)
nobs = round(fit at y * c(weights(fit, type="prior")))
sumnobs = colSums(nobs) # apply(nobs, 2, sum)
-mydat = data.frame(
- response = ordered(c(rep("normal", times=sumnobs["normal"]),
- rep("mild", times=sumnobs["mild"]),
- rep("severe", times=sumnobs["severe"])),
- levels = c("normal","mild","severe")),
- LET = c(with(pneumo, rep(let, times=nobs[,"normal"])),
- with(pneumo, rep(let, times=nobs[,"mild"])),
- with(pneumo, rep(let, times=nobs[,"severe"]))))
-(fit4 = vglm(response ~ LET, data=mydat,
- cumulative(parallel=TRUE, reverse=TRUE), trace=TRUE))
+pneumo.long = data.frame(symptoms=ordered(rep(rep(colnames(nobs),
+ nrow(nobs)),
+ times=c(t(nobs))),
+ levels = colnames(nobs)),
+ let = rep(rep(with(pneumo, let), each=ncol(nobs)),
+ times=c(t(nobs))))
+with(pneumo.long, table(let, symptoms)) # check it; should be same as pneumo
-# Long format (cf. wide format above) ----------------------------------
-longdata = data.frame(symptoms=ordered(rep(rep(colnames(nobs), nrow(nobs)),
- times=c(t(nobs))),
- levels = colnames(nobs)),
- let = rep(rep(with(pneumo, let), each=ncol(nobs)),
- times=c(t(nobs))))
-with(longdata, table(let, symptoms)) # check it; should be same as pneumo
+
+(fit.long1 = vglm(symptoms ~ let, data=pneumo.long,
+ cumulative(parallel=TRUE, reverse=TRUE), trace=TRUE))
+coef(fit.long1, matrix=TRUE) # Should be same as coef(fit, matrix=TRUE)
+# Could try using mustart if fit.long1 failed to converge.
mymustart = matrix(sumnobs / sum(sumnobs),
- nrow(longdata), ncol(nobs), byrow=TRUE)
-fit.long = vglm(symptoms ~ let,
+ nrow(pneumo.long), ncol(nobs), byrow=TRUE)
+fit.long2 = vglm(symptoms ~ let,
fam = cumulative(parallel=TRUE, reverse=TRUE),
- mustart=mymustart, data = longdata, trace=TRUE)
-coef(fit.long, matrix=TRUE) # Should be same as coef(fit, matrix=TRUE)
+ mustart=mymustart, data = pneumo.long, trace=TRUE)
+coef(fit.long2, matrix=TRUE) # Should be same as coef(fit, matrix=TRUE)
}
\keyword{models}
\keyword{regression}
diff --git a/man/deplot.lmscreg.Rd b/man/deplot.lmscreg.Rd
index 2e00748..6c1bda4 100644
--- a/man/deplot.lmscreg.Rd
+++ b/man/deplot.lmscreg.Rd
@@ -70,7 +70,6 @@ contains further information and examples.
}
\examples{\dontrun{
-data(bminz)
fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bminz)
ygrid = seq(15, 43, by=0.25)
deplot(fit, x0=20, y=ygrid, xlab="BMI", col="green", llwd=2,
@@ -80,8 +79,8 @@ deplot(fit, x0=60, y=ygrid, add=TRUE, col="red", llwd=2) -> a
names(a at post$deplot)
a at post$deplot$newdata
-a at post$deplot$y[1:5]
-a at post$deplot$density[1:5]
+head(a at post$deplot$y)
+head(a at post$deplot$density)
}
}
\keyword{hplot}
diff --git a/man/dexpbinomial.Rd b/man/dexpbinomial.Rd
index 7c84dab..e5232f0 100644
--- a/man/dexpbinomial.Rd
+++ b/man/dexpbinomial.Rd
@@ -114,7 +114,6 @@ dexpbinomial(lmean="logit", ldispersion="logit",
\examples{
# This example mimics the example in Efron (1986). The results here
# differ slightly.
-data(toxop)
# Scale the variables
toxop = transform(toxop,
@@ -140,13 +139,13 @@ fit = vglm(phat ~ I(srainfall) + I(srainfall^2) + I(srainfall^3) +
# Now look at the results
coef(fit)
coef(fit, matrix=TRUE)
-fitted(fit)[1:4,]
+head(fitted(fit))
summary(fit)
vcov(fit)
sqrt(diag(vcov(fit))) # Standard errors
# Effective sample size (not quite the last column of Table 1)
-predict(fit)[1:4,]
+head(predict(fit))
Dispersion = elogit(predict(fit)[,2], earg=dlist, inverse=TRUE)
c(round(weights(fit, type="prior") * Dispersion, dig=1))
diff --git a/man/dirichlet.Rd b/man/dirichlet.Rd
index 53ec133..67322e8 100644
--- a/man/dirichlet.Rd
+++ b/man/dirichlet.Rd
@@ -7,7 +7,7 @@
}
\usage{
-dirichlet(link = "loge", earg=list(), zero=NULL)
+dirichlet(link = "loge", earg=list(), parallel = FALSE, zero=NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -28,11 +28,8 @@ dirichlet(link = "loge", earg=list(), zero=NULL)
See \code{earg} in \code{\link{Links}} for general information.
}
- \item{zero}{
- An integer-valued vector specifying which
- linear/additive predictors are modelled as intercepts only.
- The default is none of them.
- If used, choose values from the set \eqn{\{1,2,\ldots,M\}}.
+ \item{parallel, zero}{
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
}
@@ -117,7 +114,7 @@ y = rdiric(n=1000, shape=exp(c(-1,1,0)))
fit = vglm(y ~ 1, dirichlet, trace = TRUE, crit="c")
Coef(fit)
coef(fit, matrix=TRUE)
-fitted(fit)[1:2,]
+head(fitted(fit))
}
\keyword{models}
\keyword{regression}
diff --git a/man/dirmultinomial.Rd b/man/dirmultinomial.Rd
index 62ca52a..bb1d073 100644
--- a/man/dirmultinomial.Rd
+++ b/man/dirmultinomial.Rd
@@ -164,7 +164,7 @@ n = 10
M = 5
y = round(matrix(runif(n*M)*10, n, M)) # Integer counts
fit = vglm(y ~ 1, dirmultinomial, trace=TRUE)
-fitted(fit)[1:2,]
+head(fitted(fit))
fit at y # Sample proportions
weights(fit, type="prior", matrix=FALSE) # Total counts per row
diff --git a/man/enzyme.Rd b/man/enzyme.Rd
index 99ad822..a8743dc 100644
--- a/man/enzyme.Rd
+++ b/man/enzyme.Rd
@@ -31,7 +31,6 @@ Watts, D. G. (1981)
\code{\link{micmen}}.
}
\examples{
-data(enzyme)
fit = vglm(velocity ~ 1, micmen, data=enzyme, trace = TRUE,
form2 = ~ conc - 1, crit = "crit")
summary(fit)
diff --git a/man/felixUC.Rd b/man/felixUC.Rd
index 132b8b4..c5887cc 100644
--- a/man/felixUC.Rd
+++ b/man/felixUC.Rd
@@ -13,7 +13,7 @@
}
\usage{
-dfelix(x, a=0.25)
+dfelix(x, a=0.25, log=FALSE)
%pfelix(q, a=0.25)
%qfelix(p, a=0.25)
%rfelix(n, a=0.25)
@@ -25,6 +25,12 @@ dfelix(x, a=0.25)
% Must be a positive integer of length 1.}
\item{a}{ See \code{\link{felix}}.
}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dfelix} gives the density.
diff --git a/man/fgm.Rd b/man/fgm.Rd
index c68616a..fe98313 100644
--- a/man/fgm.Rd
+++ b/man/fgm.Rd
@@ -97,7 +97,7 @@ ymat = rfgm(n = 1000, alpha=rhobit(3, inverse=TRUE))
fit = vglm(ymat ~ 1, fam=fgm, trace=TRUE)
coef(fit, matrix=TRUE)
Coef(fit)
-fitted(fit)[1:5,]
+head(fitted(fit))
}
\keyword{models}
\keyword{regression}
diff --git a/man/fill.Rd b/man/fill.Rd
index b55248a..9cd086f 100644
--- a/man/fill.Rd
+++ b/man/fill.Rd
@@ -40,8 +40,10 @@ fill(x, values = 0, ncolx = ncol(x))
}
\item{values}{
- Numeric. The answer contains these values which are recycled if
- necessary.
+ Numeric.
+ The answer contains these values,
+ which are recycled \emph{columnwise} if necessary, i.e.,
+ as \code{matrix(values, ..., byrow=TRUE)}.
}
\item{ncolx}{
@@ -52,7 +54,8 @@ fill(x, values = 0, ncolx = ncol(x))
}
\details{
The \code{xij} argument for \code{\link{vglm}} allows the user to input
- variables specific to each linear predictor. For example, consider
+ variables specific to each linear/additive predictor.
+ For example, consider
the bivariate logit model where the first/second linear/additive
predictor is the logistic regression of the first/second binary response
respectively. The third linear/additive predictor is \code{log(OR) =
@@ -64,8 +67,9 @@ fill(x, values = 0, ncolx = ncol(x))
these data into \code{\link{vglm}} one often finds that functions
\code{fill}, \code{fill1}, etc. are useful.
- All terms in the \code{xij} argument must appear in the main
- \code{formula} argument in \code{\link{vglm}}.
+ All terms in the \code{xij}
+ and \code{formula} arguments in \code{\link{vglm}}
+ must appear in the \code{form2} argument too.
}
\value{
@@ -80,56 +84,65 @@ fill(x, values = 0, ncolx = ncol(x))
\url{http://www.stat.auckland.ac.nz/~yee}.
}
-\section{Warning }{
- The use of the \code{xij} argument overrides other arguments such as
- \code{exchangeable} and \code{zero}. Care is needed in such cases.
- See the examples below.
-}
+% \section{Warning }{
+% Care is needed in such cases.
+% See the examples below.
+%
+%}
\author{ T. W. Yee }
\note{
+ The effect of the \code{xij} argument is after other arguments such as
+ \code{exchangeable} and \code{zero}.
+ Hence \code{xij} does not affect constraint matrices.
+
Additionally, there are currently 3 other identical \code{fill}
- functions, called \code{fill1}, \code{fill2} and \code{fill3}; if you
- need more then assign \code{fill4 = fill5 = fill1} etc.
+ functions, called \code{fill1}, \code{fill2} and \code{fill3};
+ if you need more then assign \code{fill4 = fill5 = fill1} etc.
The reason for this is that if more than one \code{fill} function is
needed then they must be unique.
For example, if \eqn{M=4} then
\code{xij = op ~ lop + rop + fill(mop) + fill(mop)} would reduce to
\code{xij = op ~ lop + rop + fill(mop)}, whereas
\code{xij = op ~ lop + rop + fill1(mop) + fill2(mop)} would retain
- \eqn{M} terms, which is needed.
+ all \eqn{M} terms, which is needed.
- The constraint matrices, as returned by \code{constraints}, have a
- different meaning when \code{xij} is used.
+% The constraint matrices, as returned by \code{constraints}, do not
+% have a different meaning when \code{xij} is used.
In Examples 1 to 3 below, the \code{xij} argument illustrates covariates
that are specific to a linear predictor. Here, \code{lop}/\code{rop} are
the ocular pressures of the left/right eye in an artificial dataset,
- and \code{mop} is their mean. Variables \code{leye} and \code{reye}
+ and \code{mop} is their mean. Variables \code{leye} and \code{reye}
might be the presence/absence of a particular disease on the LHS/RHS
- eye respectively. Examples 1 and 2 are deliberately misspecified.
- The output from, e.g., \code{coef(fit, matrix=TRUE)}, looks wrong but
- is correct because the coefficients are multiplied by the zeros
- produced from \code{fill}.
-
- In Example 4,
- the \code{xij} argument illustrates fitting the model where there
- is a common smooth function of the ocular pressure. One should use
+ eye respectively.
+%
+% Examples 1 and 2 are deliberately misspecified.
+% The output from, e.g., \code{coef(fit, matrix=TRUE)}, looks wrong but
+% is correct because the coefficients are multiplied by the zeros
+% produced from \code{fill}.
+
+ In Example 3,
+ the \code{xij} argument illustrates fitting the (exchangeable) model
+ where there
+ is a common smooth function of the ocular pressure. One should use
regression splines since \code{\link{s}} in \code{\link{vgam}} does not
handle the \code{xij} argument. However, regression splines such as
\code{\link[splines]{bs}} and \code{\link[splines]{ns}} need to have
- the same knots here for both functions, and Example 4 illustrates
- a trick involving a function \code{BS} to obtain this. Although
- regression splines create more than a single column per term in the
- model matrix, \code{fill(BS(lop,rop,mop))} creates the required (same)
- number of columns.
+ the same basis functions here for both functions, and Example 3 illustrates
+ a trick involving a function \code{BS} to obtain this, e.g., same knots.
+ Although regression splines create more than a single column per term
+ in the model matrix, \code{fill(BS(lop,rop))} creates the required
+ (same) number of columns.
}
\seealso{
+ \code{\link{vglm.control}},
\code{\link{vglm}},
- \code{\link{vglm.control}}.
+ \code{\link{multinomial}}.
+
}
\examples{
fill(runif(5))
@@ -137,79 +150,83 @@ fill(runif(5), ncol=3)
fill(runif(5), val=1, ncol=3)
# Generate eyes data for the examples below. Eyes are independent (OR=1).
-set.seed(123)
-n = 2000 # Number of people
-eyes = data.frame(lop = round(runif(n), 2),
- rop = round(runif(n), 2),
- age = round(rnorm(n, 40, 10)))
-eyes = transform(eyes,
- mop = (lop + rop) / 2, # mean ocular pressure
+nn = 1000 # Number of people
+eyesdat = data.frame(lop = round(runif(nn), 2),
+ rop = round(runif(nn), 2),
+ age = round(rnorm(nn, 40, 10)))
+eyesdat = transform(eyesdat,
+ mop = (lop + rop) / 2, # Mean ocular pressure
+ op = (lop + rop) / 2, # Value unimportant unless plotting
+# op = lop, # Choose this if plotting
eta1 = 0 - 2*lop + 0.04*age, # Linear predictor for left eye
eta2 = 0 - 2*rop + 0.04*age) # Linear predictor for right eye
-eyes = transform(eyes,
- leye = rbinom(n, size=1, prob=exp(eta1)/(1+exp(eta1))),
- reye = rbinom(n, size=1, prob=exp(eta2)/(1+exp(eta2))))
+eyesdat = transform(eyesdat,
+ leye = rbinom(nn, size=1, prob=logit(eta1, inverse=TRUE)),
+ reye = rbinom(nn, size=1, prob=logit(eta2, inverse=TRUE)))
# Example 1
-# Non-exchangeable errors (misspecified model)
-fit1 = vglm(cbind(leye,reye) ~ lop + rop + fill(lop) + age,
- family = binom2.or(exchangeable=FALSE, zero=NULL),
- xij = op ~ lop + rop + fill(lop), data=eyes)
-model.matrix(fit1, type="lm")[1:7,] # LM model matrix
-model.matrix(fit1, type="vlm")[1:7,] # Big VLM model matrix
+# All effects are linear
+fit1 = vglm(cbind(leye,reye) ~ op + age,
+ family = binom2.or(exchangeable=TRUE, zero=3),
+ data=eyesdat, trace=TRUE,
+ xij = list(op ~ lop + rop + fill(lop)),
+ form2 = ~ op + lop + rop + fill(lop) + age)
+head(model.matrix(fit1, type="lm")) # LM model matrix
+head(model.matrix(fit1, type="vlm")) # Big VLM model matrix
coef(fit1)
-coef(fit1, matrix=TRUE) # Looks wrong but is correct
-coef(fit1, matrix=TRUE, compress=FALSE) # Looks wrong but is correct
+coef(fit1, matrix=TRUE) # Unchanged with 'xij'
constraints(fit1)
-max(abs(predict(fit1)-predict(fit1, new=eyes))) # Predicts correctly
+max(abs(predict(fit1)-predict(fit1, new=eyesdat))) # Predicts correctly
summary(fit1)
+\dontrun{
+plotvgam(fit1, se=TRUE) # Wrong, e.g., because it plots against op, not lop.
+# So set op=lop in the above for a correct plot.
+}
+
# Example 2
-# Nonexchangeable errors (misspecified model), OR is a function of mop
-fit2 = vglm(cbind(leye,reye) ~ lop + rop + mop + age,
- family = binom2.or(exchangeable=FALSE, zero=NULL),
- xij = op ~ lop + rop + mop, data=eyes)
-model.matrix(fit2, type="lm")[1:7,] # LM model matrix
-model.matrix(fit2, type="vlm")[1:7,] # Big VLM model matrix
+# Model OR as a linear function of mop
+fit2 = vglm(cbind(leye,reye) ~ op + age,
+ binom2.or(exchangeable=TRUE, zero=NULL),
+ data=eyesdat, trace=TRUE,
+ xij = list(op ~ lop + rop + mop),
+ form2 = ~ op + lop + rop + mop + age)
+head(model.matrix(fit2, type="lm")) # LM model matrix
+head(model.matrix(fit2, type="vlm")) # Big VLM model matrix
coef(fit2)
-coef(fit2, matrix=TRUE) # correct
-coef(fit2, matrix=TRUE, compress=FALSE) # correct
-max(abs(predict(fit2)-predict(fit2, new=eyes))) # Predicts correctly
+coef(fit2, matrix=TRUE) # Unchanged with 'xij'
+max(abs(predict(fit2)-predict(fit2, new=eyesdat))) # Predicts correctly
summary(fit2)
+\dontrun{
+plotvgam(fit2, se=TRUE) # Wrong because it plots against op, not lop.
+}
+
+# Example 3. This model uses regression splines on ocular pressure.
+# It uses a trick to ensure common basis functions.
+BS = function(x, ...) bs(c(x,...), df=3)[1:length(x),,drop=FALSE] # trick
-# Example 3. This model is correctly specified.
-# Exchangeable errors
-fit3 = vglm(cbind(leye,reye) ~ lop + rop + fill(lop) + age,
+fit3 = vglm(cbind(leye,reye) ~ BS(lop,rop) + age,
family = binom2.or(exchangeable=TRUE, zero=3),
- xij = op ~ lop + rop + fill(lop), data=eyes)
-model.matrix(fit3, type="lm")[1:7,] # LM model matrix
-model.matrix(fit3, type="vlm")[1:7,] # Big VLM model matrix
+ data=eyesdat, trace=TRUE,
+ xij = list(BS(lop,rop) ~ BS(lop,rop) +
+ BS(rop,lop) +
+ fill(BS(lop,rop))),
+ form2 = ~ BS(lop,rop) + BS(rop,lop) + fill(BS(lop,rop)) +
+ lop + rop + age)
+head(model.matrix(fit3, type="lm")) # LM model matrix
+head(model.matrix(fit3, type="vlm")) # Big VLM model matrix
coef(fit3)
-coef(fit3, matrix=TRUE) # Looks wrong but is correct
-coef(fit3, matrix=TRUE, compress=FALSE) # Looks wrong but is correct
-predict(fit3, new=eyes[1:4,]) # Note the 'scalar' OR, i.e., zero=3
-max(abs(predict(fit3)-predict(fit3, new=eyes))) # Predicts correctly
+coef(fit3, matrix=TRUE)
summary(fit3)
-
-
-# Example 4. This model uses regression splines on ocular pressure.
-# It assumes exchangeable errors.
-BS = function(x, ...) bs(c(x,...), df=3)[1:length(x),]
-fit4 = vglm(cbind(leye,reye) ~ BS(lop,rop,mop) + BS(rop,lop,mop) +
- fill(BS(lop,rop,mop)) + age,
- family = binom2.or(exchangeable=TRUE, zero=3),
- xij = BS(op) ~ BS(lop,rop,mop) + BS(rop,lop,mop) +
- fill(BS(lop,rop,mop)), data=eyes)
-model.matrix(fit4, type="lm")[1:7,] # LM model matrix
-model.matrix(fit4, type="vlm")[1:7,] # Big VLM model matrix
-coef(fit4)
-coef(fit4, matrix=TRUE) # Looks wrong but is correct
-coef(fit4, matrix=TRUE, compress=FALSE) # Looks wrong but is correct
-predict(fit4, new=eyes[1:4,]) # Note the 'scalar' OR, i.e., zero=3
-max(abs(predict(fit4)-predict(fit4, new=eyes))) # Predicts correctly
-summary(fit4)
+fit3@smart.prediction
+max(abs(predict(fit3)-predict(fit3, new=eyesdat))) # Predicts correctly
+predict(fit3, new=head(eyesdat)) # Note the 'scalar' OR, i.e., zero=3
+max(abs(head(predict(fit3))-predict(fit3, new=head(eyesdat)))) # Should be 0
+\dontrun{
+plotvgam(fit3, se=TRUE, xlab="lop") # Correct
+}
}
\keyword{models}
\keyword{regression}
@@ -220,13 +237,13 @@ summary(fit4)
%\code{fill1(x, value=0, ncolx=ncol(x))} and create .Rd file for
%\code{zero} argument.]
-%eyes$leye = ifelse(runif(n) < exp(eta1)/(1+exp(eta1)), 1, 0)
-%eyes$reye = ifelse(runif(n) < exp(eta2)/(1+exp(eta2)), 1, 0)
+%eyesdat$leye = ifelse(runif(n) < exp(eta1)/(1+exp(eta1)), 1, 0)
+%eyesdat$reye = ifelse(runif(n) < exp(eta2)/(1+exp(eta2)), 1, 0)
% \deqn{logit P(Y_k=1) = f_k(x_{ijk}) }{%
% logit P(Y_k=1) = f_k(x_{ijk}) }
% for \code{k=1,2}.
-% fill1(lop, ncol=ncol(BS(lop,rop,mop))), data=eyes)
+% fill1(lop, ncol=ncol(BS(lop,rop,mop))), data=eyesdat)
% Models using the \code{xij} argument may or may not predict correctly,
% and inference obtained using \code{summary} may be incorrect.
diff --git a/man/fitted.vlm.Rd b/man/fitted.vlm.Rd
index 0f134ba..cc480e2 100644
--- a/man/fitted.vlm.Rd
+++ b/man/fitted.vlm.Rd
@@ -59,7 +59,6 @@ Chambers, J. M. and T. J. Hastie (eds) (1992)
}
\examples{
# Categorical regression example 1
-data(pneumo)
pneumo = transform(pneumo, let=log(exposure.time))
fit = vglm(cbind(normal, mild, severe) ~ let,
cumulative(parallel=TRUE, reverse=TRUE), pneumo)
@@ -68,13 +67,12 @@ fitted(fit)
# LMS quantile regression example 2
-data(bminz)
fit = vgam(BMI ~ s(age, df=c(4,2)),
fam=lms.bcn(zero=1), data=bminz, trace=TRUE)
# The following are equal
-predict(fit, type="r")[1:3,]
-fitted(fit)[1:3,]
-predict(fit, type="r", newdata=bminz[1:3,])
+head(predict(fit, type="r"))
+head(fitted(fit))
+predict(fit, type="r", newdata=head(bminz))
}
\keyword{models}
\keyword{regression}
diff --git a/man/frank.Rd b/man/frank.Rd
index 1072898..52f2fd0 100644
--- a/man/frank.Rd
+++ b/man/frank.Rd
@@ -101,7 +101,7 @@ fit = vglm(ymat ~ 1, fam=frank, trace=TRUE)
coef(fit, matrix=TRUE)
Coef(fit)
vcov(fit)
-fitted(fit)[1:5,]
+head(fitted(fit))
summary(fit)
}
\keyword{models}
diff --git a/man/frankUC.Rd b/man/frankUC.Rd
index a9669e4..d91e3f2 100644
--- a/man/frankUC.Rd
+++ b/man/frankUC.Rd
@@ -10,7 +10,7 @@
}
\usage{
-dfrank(x1, x2, alpha)
+dfrank(x1, x2, alpha, log=FALSE)
pfrank(q1, q2, alpha)
rfrank(n, alpha)
}
@@ -19,6 +19,12 @@ rfrank(n, alpha)
\item{n}{number of observations.
Must be a positive integer of length 1.}
\item{alpha}{the positive association parameter \eqn{\alpha}{alpha}.}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dfrank} gives the density,
diff --git a/man/frechet.Rd b/man/frechet.Rd
index 3590c7b..6d56d57 100644
--- a/man/frechet.Rd
+++ b/man/frechet.Rd
@@ -148,12 +148,12 @@ fit2 = vglm(y ~ 1, frechet2, trace=TRUE, maxit=155)
fit3 = vglm(y ~ 1, frechet3(ilocation=0), trace=TRUE, maxit=155)
coef(fit3, matrix=TRUE)
Coef(fit3)
-fitted(fit3)[1:5,]
+head(fitted(fit3))
mean(y)
-weights(fit3, type="w")[1:5,]
+head(weights(fit3, type="w"))
vcov(fit3) # caution needed!
-fit3@extra$location[1:3] # Estimate of the location parameter
-fit3@extra$LHSanchor # Anchor point
+head(fit3@extra$location) # Estimate of the location parameter
+fit3@extra$LHSanchor # Anchor point
min(y)
}
\keyword{models}
diff --git a/man/frechetUC.Rd b/man/frechetUC.Rd
index e5cff0c..4be25c2 100644
--- a/man/frechetUC.Rd
+++ b/man/frechetUC.Rd
@@ -10,7 +10,7 @@
generation for the three parameter Frechet distribution.
}
\usage{
-dfrechet(x, location=0, scale=1, shape)
+dfrechet(x, location=0, scale=1, shape, log=FALSE)
pfrechet(q, location=0, scale=1, shape)
qfrechet(p, location=0, scale=1, shape)
rfrechet(n, location=0, scale=1, shape)
@@ -22,6 +22,12 @@ rfrechet(n, location=0, scale=1, shape)
Must be a positive integer of length 1.}
\item{location, scale, shape}{the location parameter \eqn{a},
scale parameter \eqn{b}, and shape parameter \eqn{s}.}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dfrechet} gives the density,
diff --git a/man/freund61.Rd b/man/freund61.Rd
index 92382ed..b8c1279 100644
--- a/man/freund61.Rd
+++ b/man/freund61.Rd
@@ -167,7 +167,7 @@ fit = vglm(ymat ~ 1, fam=freund61, trace=TRUE)
coef(fit, matrix=TRUE)
Coef(fit)
vcov(fit)
-fitted(fit)[1:5,]
+head(fitted(fit))
summary(fit)
# y1 and y2 are independent, so fit an independence model
diff --git a/man/fsqrt.Rd b/man/fsqrt.Rd
index a070ba7..afd22d1 100644
--- a/man/fsqrt.Rd
+++ b/man/fsqrt.Rd
@@ -140,20 +140,18 @@ for(d in 0) {
# This is lucky to converge
earg = list(min=0, max=1, mux=5)
-data(hunua)
fit.h = vglm(agaaus ~ bs(altitude),
fam= binomialff(link="fsqrt", earg=earg),
data=hunua, trace=TRUE, crit="d")
\dontrun{
plotvgam(fit.h, se=TRUE, lcol="red", scol="red",
- main="Red is Hunua, Blue is Waitakere")
+ main="Red is Hunua, Blue is Waitakere")
}
-predict(fit.h, hunua, type="response")[1:3]
+head(predict(fit.h, hunua, type="response"))
\dontrun{
# The following fails.
-data(pneumo)
pneumo = transform(pneumo, let=log(exposure.time))
earg = list(min=0, max=1, mux=10)
fit = vglm(cbind(normal, mild, severe) ~ let,
diff --git a/man/gammahyp.Rd b/man/gammahyp.Rd
index df0f7db..ebaed2c 100644
--- a/man/gammahyp.Rd
+++ b/man/gammahyp.Rd
@@ -77,7 +77,7 @@ fit = vglm(cbind(y1,y2) ~ x, fam=gammahyp(expected=TRUE), trace=TRUE)
fit = vglm(cbind(y1,y2) ~ x, fam=gammahyp, trace=TRUE, crit="coef")
coef(fit, matrix=TRUE)
Coef(fit)
-fitted(fit)[1:4,]
+head(fitted(fit))
summary(fit)
}
\keyword{models}
diff --git a/man/gaussianff.Rd b/man/gaussianff.Rd
index 2d456da..632ba7d 100644
--- a/man/gaussianff.Rd
+++ b/man/gaussianff.Rd
@@ -134,7 +134,7 @@ vcov(fit)
vcov(lmfit)
t(weights(fit, type="prior")) # Unweighted observations
-weights(fit, type="working")[1:4,] # Identity matrices
+head(weights(fit, type="working")) # Identity matrices
# Reduced-rank VLM (rank-1)
fit2 = rrvglm(cbind(y1,y2,y3) ~ x, gaussianff, data=d)
diff --git a/man/gev.Rd b/man/gev.Rd
index 11e5467..552ebc0 100644
--- a/man/gev.Rd
+++ b/man/gev.Rd
@@ -13,16 +13,16 @@ gev(llocation = "identity", lscale = "loge", lshape = "logoff",
elocation = list(), escale = list(),
eshape = if(lshape=="logoff") list(offset=0.5) else
if(lshape=="elogit") list(min=-0.5, max=0.5) else list(),
- percentiles = c(95, 99),
- iscale=NULL, ishape = NULL,
- method.init = 1, gshape=c(-0.45, 0.45), tshape0=0.001, zero = 3)
+ percentiles = c(95, 99), iscale=NULL, ishape = NULL,
+ method.init = 1, gshape=c(-0.45, 0.45), tolshape0=0.001,
+ giveWarning=TRUE, zero = 3)
egev(llocation = "identity", lscale = "loge", lshape = "logoff",
elocation = list(), escale = list(),
eshape = if(lshape=="logoff") list(offset=0.5) else
if(lshape=="elogit") list(min=-0.5, max=0.5) else list(),
- percentiles = c(95, 99),
- iscale=NULL, ishape = NULL,
- method.init=1, gshape=c(-0.45, 0.45), tshape0=0.001, zero = 3)
+ percentiles = c(95, 99), iscale=NULL, ishape = NULL,
+ method.init=1, gshape=c(-0.45, 0.45), tolshape0=0.001,
+ giveWarning=TRUE, zero = 3)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -103,12 +103,8 @@ egev(llocation = "identity", lscale = "loge", lshape = "logoff",
Used only if \code{method.init} equals 1.
}
- \item{tshape0}{
- Positive numeric.
- Threshold/tolerance value for resting whether \eqn{\xi}{xi} is zero.
- If the absolute value of the estimate of \eqn{\xi}{xi} is less than
- this value then it will be assumed zero and Gumbel derivatives etc. will
- be used.
+ \item{tolshape0, giveWarning}{
+ Passed into \code{\link{dgev}} when computing the log-likelihood.
}
\item{zero}{
@@ -246,25 +242,24 @@ egev(llocation = "identity", lscale = "loge", lshape = "logoff",
\examples{
# Multivariate example
-data(venice)
y = as.matrix(venice[,paste("r", 1:10, sep="")])
fit1 = vgam(y[,1:2] ~ s(year, df=3), gev(zero=2:3), venice, trace=TRUE)
coef(fit1, matrix=TRUE)
-fitted(fit1)[1:4,]
+head(fitted(fit1))
\dontrun{
par(mfrow=c(1,2), las=1)
plot(fit1, se=TRUE, lcol="blue", scol="forestgreen",
- main="Fitted mu(year) function (centered)")
+ main="Fitted mu(year) function (centered)", cex.main=0.8)
with(venice, matplot(year, y[,1:2], ylab="Sea level (cm)", col=1:2,
- main="Highest 2 annual sealevels + fitted 95 percentile"))
+ main="Highest 2 annual sea levels", cex.main=0.8))
with(venice, lines(year, fitted(fit1)[,1], lty="dashed", col="blue"))
+legend("topleft", lty="dashed", col="blue", "Fitted 95 percentile")
}
# Univariate example
-data(oxtemp)
(fit = vglm(maxtemp ~ 1, egev, data=oxtemp, trace=TRUE))
-fitted(fit)[1:3,]
+head(fitted(fit))
coef(fit, mat=TRUE)
Coef(fit)
vcov(fit)
diff --git a/man/gevUC.Rd b/man/gevUC.Rd
index 8cfaff3..33498d2 100644
--- a/man/gevUC.Rd
+++ b/man/gevUC.Rd
@@ -13,7 +13,8 @@
shape parameter \code{shape}.
}
\usage{
-dgev(x, location=0, scale=1, shape=0)
+dgev(x, location=0, scale=1, shape=0, log=FALSE, tolshape0 =
+ sqrt(.Machine$double.eps), oobounds.log = -Inf, giveWarning = FALSE)
pgev(q, location=0, scale=1, shape=0)
qgev(p, location=0, scale=1, shape=0)
rgev(n, location=0, scale=1, shape=0)
@@ -21,11 +22,39 @@ rgev(n, location=0, scale=1, shape=0)
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
- \item{n}{number of observations. Positive integer of length 1.}
+ \item{n}{number of observations.
+ If \code{length(n) > 1} then the length is taken to be the number required. }
\item{location}{the location parameter \eqn{\mu}{mu}.}
- \item{scale}{the scale parameter \eqn{\sigma}{sigma}.
+ \item{scale}{the (positive) scale parameter \eqn{\sigma}{sigma}.
Must consist of positive values. }
\item{shape}{the shape parameter \eqn{\xi}{xi}.}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+ \item{tolshape0}{
+ Positive numeric.
+ Threshold/tolerance value for testing whether \eqn{\xi}{xi} is zero.
+ If the absolute value of the estimate of \eqn{\xi}{xi} is less than
+ this value then it will be assumed zero and a Gumbel distribution will
+ be used.
+
+ }
+ \item{oobounds.log, giveWarning}{
+ Numeric and logical.
+ The GEV distribution has support in the region satisfying
+ \code{1+shape*(x-location)/scale > 0}. Outside that region, the
+ logarithm of the density is assigned \code{oobounds.log}, which
+ equates to a zero density.
+ It should not be assigned a positive number, and ideally is very negative.
+ Since \code{\link{egev}} uses this function it is necessary
+ to return a finite value outside this region so as to allow
+ for half-stepping. Both arguments are in support of this.
+ This argument and others match those of \code{\link{egev}}.
+
+ }
+
}
\value{
\code{dgev} gives the density,
@@ -55,7 +84,10 @@ London: Springer-Verlag.
}
\seealso{
- \code{\link{gev}}.
+ \code{\link{gev}},
+ \code{\link{egev}},
+ \code{\link{vglm.control}}.
+
}
\examples{
\dontrun{
diff --git a/man/gew.Rd b/man/gew.Rd
index ba593b3..0fc9d09 100644
--- a/man/gew.Rd
+++ b/man/gew.Rd
@@ -34,7 +34,6 @@ and tests for aggregation bias.
}
\examples{
-data(gew)
str(gew)
}
\keyword{datasets}
diff --git a/man/ggammaUC.Rd b/man/ggammaUC.Rd
index 9b44b7e..85e1307 100644
--- a/man/ggammaUC.Rd
+++ b/man/ggammaUC.Rd
@@ -12,7 +12,7 @@
and parameters \code{d} and \code{k}.
}
\usage{
-dggamma(x, scale=1, d=1, k=1)
+dggamma(x, scale=1, d=1, k=1, log=FALSE)
pggamma(q, scale=1, d=1, k=1)
qggamma(p, scale=1, d=1, k=1)
rggamma(n, scale=1, d=1, k=1)
@@ -23,6 +23,12 @@ rggamma(n, scale=1, d=1, k=1)
\item{n}{number of observations. Positive integer of length 1.}
\item{scale}{the (positive) scale parameter \eqn{b}.}
\item{d, k}{the (positive) parameters \eqn{d} and \eqn{k}.}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dggamma} gives the density,
diff --git a/man/golf.Rd b/man/golf.Rd
index c23242c..2d93dac 100644
--- a/man/golf.Rd
+++ b/man/golf.Rd
@@ -146,9 +146,9 @@ fit = vglm(cuty ~ x2 + x3, fam = cumulative(link="golf",
reverse=TRUE, parallel=TRUE, intercept.apply=TRUE,
mv=TRUE, earg=list(cutpoint=cutpoints[2:3], lambda=lambda)),
trace=TRUE)
-fit@y[1:5,]
-fitted(fit)[1:5,]
-predict(fit)[1:5,]
+head(fit@y)
+head(fitted(fit))
+head(predict(fit))
coef(fit)
coef(fit, matrix=TRUE)
constraints(fit)
diff --git a/man/gpd.Rd b/man/gpd.Rd
index c833ddd..38838df 100644
--- a/man/gpd.Rd
+++ b/man/gpd.Rd
@@ -13,7 +13,7 @@ gpd(threshold = 0, lscale = "loge", lshape = "logoff",
eshape = if(lshape=="logoff") list(offset=0.5) else
if(lshape=="elogit") list(min=-0.5, max=0.5) else NULL,
percentiles = c(90, 95), iscale = NULL, ishape = NULL,
- tshape0=0.001, method.init=1, zero=2)
+ tolshape0=0.001, giveWarning=TRUE, method.init=1, zero=2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -83,14 +83,19 @@ gpd(threshold = 0, lscale = "loge", lshape = "logoff",
% and the variance exists (\eqn{\xi < 0.5}{xi < 0.5}).
% }
- \item{tshape0}{
- Positive numeric.
- Threshold/tolerance value for resting whether \eqn{\xi}{xi} is zero.
- If the absolute value of the estimate of \eqn{\xi}{xi} is less than
- this value then it will be assumed zero and exponential distribution
- derivatives etc. will be used.
+ \item{tolshape0, giveWarning}{
+ Passed into \code{\link{dgpd}} when computing the log-likelihood.
}
+
+% \item{tolshape0}{
+% Positive numeric.
+% Threshold/tolerance value for testing whether \eqn{\xi}{xi} is zero.
+% If the absolute value of the estimate of \eqn{\xi}{xi} is less than
+% this value then it will be assumed zero and exponential distribution
+% derivatives etc. will be used.
+
+% }
\item{method.init}{
Method of initialization, either 1 or 2. The first is the method of
moments, and the second is a variant of this. If neither work, try
@@ -204,6 +209,7 @@ gpd(threshold = 0, lscale = "loge", lshape = "logoff",
\code{\link{vglm}},
\code{\link{vgam}},
\code{\link{s}}.
+
}
\examples{
@@ -211,7 +217,7 @@ gpd(threshold = 0, lscale = "loge", lshape = "logoff",
threshold = 0.5
y = threshold + rexp(n=3000, rate=2)
fit = vglm(y ~ 1, gpd(threshold=threshold), trace=TRUE)
-fitted(fit)[1:5,]
+head(fitted(fit))
coef(fit, matrix=TRUE) # xi should be close to 0
Coef(fit)
summary(fit)
diff --git a/man/gpdUC.Rd b/man/gpdUC.Rd
index 92c2c98..9a16f71 100644
--- a/man/gpdUC.Rd
+++ b/man/gpdUC.Rd
@@ -13,7 +13,9 @@
shape parameter \code{shape}.
}
\usage{
-dgpd(x, location=0, scale=1, shape=0)
+dgpd(x, location=0, scale=1, shape=0, log=FALSE,
+ tolshape0 = sqrt(.Machine$double.eps),
+ oobounds.log = -Inf, giveWarning=FALSE)
pgpd(q, location=0, scale=1, shape=0)
qgpd(p, location=0, scale=1, shape=0)
rgpd(n, location=0, scale=1, shape=0)
@@ -21,10 +23,41 @@ rgpd(n, location=0, scale=1, shape=0)
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
- \item{n}{number of observations. Positive integer of length 1.}
+ \item{n}{number of observations.
+ If \code{length(n) > 1} then the length is taken to be the number required.}
\item{location}{the location parameter \eqn{\mu}{mu}.}
- \item{scale}{the scale parameter \eqn{\sigma}{sigma}.}
+ \item{scale}{the (positive) scale parameter \eqn{\sigma}{sigma}.}
\item{shape}{the shape parameter \eqn{\xi}{xi}.}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+ \item{tolshape0}{
+ Positive numeric.
+ Threshold/tolerance value for testing whether \eqn{\xi}{xi} is zero.
+ If the absolute value of the estimate of \eqn{\xi}{xi} is less than
+ this value then it will be assumed zero and an exponential distribution will
+ be used.
+
+ }
+ \item{oobounds.log, giveWarning}{
+ Numeric and logical.
+ The GPD distribution has support in the region satisfying
+ \code{(x-location)/scale > 0}
+ and
+ \code{1+shape*(x-location)/scale > 0}.
+ Outside that region, the
+ logarithm of the density is assigned \code{oobounds.log}, which
+ equates to a zero density.
+ It should not be assigned a positive number, and ideally is very negative.
+ Since \code{\link{gpd}} uses this function it is necessary
+ to return a finite value outside this region so as to allow
+ for half-stepping. Both arguments are in support of this.
+ This argument and others match those of \code{\link{gpd}}.
+
+ }
+
}
\value{
\code{dgpd} gives the density,
@@ -54,6 +87,7 @@ London: Springer-Verlag.
}
\seealso{
\code{\link{gpd}}.
+
}
\examples{
\dontrun{
diff --git a/man/grc.Rd b/man/grc.Rd
index 13a81f4..4a41306 100644
--- a/man/grc.Rd
+++ b/man/grc.Rd
@@ -110,16 +110,16 @@ indicator variables.
}
\seealso{
-\code{\link{rrvglm}},
-\code{\link{rrvglm.control}},
-\code{\link{rrvglm-class}},
-\code{summary.grc},
-\code{\link{auuc}}.
+ \code{\link{rrvglm}},
+ \code{\link{rrvglm.control}},
+ \code{\link{rrvglm-class}},
+ \code{summary.grc},
+ \code{\link{auuc}},
+ \code{\link{olympic}}.
}
\examples{
# Some undergraduate student enrolments at the University of Auckland in 1990
-data(auuc)
g1 = grc(auuc, Rank=1)
fitted(g1)
summary(g1)
@@ -127,6 +127,23 @@ summary(g1)
g2 = grc(auuc, Rank=2, Index.corner=c(2,5))
fitted(g2)
summary(g2)
+
+
+# 2008 Summer Olympic Games in Beijing
+top10 = head(olympic, n=10)
+oly1 = with(top10, grc(cbind(gold,silver,bronze)))
+round(fitted(oly1))
+round(resid(oly1, type="response"), dig=1) # Response residuals
+summary(oly1)
+Coef(oly1)
}
\keyword{models}
\keyword{regression}
+% plot(oly1)
+% oly2 = with(top10, grc(cbind(gold,silver,bronze), Rank=2)) # Saturated model
+% round(fitted(oly2))
+% round(fitted(oly2)) - with(top10, cbind(gold,silver,bronze))
+% summary(oly2) # Saturated model
+
+
+
diff --git a/man/gumbel.Rd b/man/gumbel.Rd
index fcb9c32..9eff7c3 100644
--- a/man/gumbel.Rd
+++ b/man/gumbel.Rd
@@ -187,15 +187,14 @@ y = rgumbel(n=1000, loc = 100, scale=exp(1))
fit = vglm(y ~ 1, egumbel(perc=NULL), trace=TRUE)
coef(fit, matrix=TRUE)
Coef(fit)
-fitted(fit)[1:4,]
+head(fitted(fit))
mean(y)
# Example 2: Venice data
-data(venice)
(fit = vglm(cbind(r1,r2,r3,r4,r5) ~ year, data=venice,
gumbel(R=365, mpv=TRUE), trace=TRUE))
-fitted(fit)[1:5,]
+head(fitted(fit))
coef(fit, mat=TRUE)
vcov(summary(fit))
sqrt(diag(vcov(summary(fit)))) # Standard errors
diff --git a/man/gumbelIbiv.Rd b/man/gumbelIbiv.Rd
index c0be1fe..a1a6ff2 100644
--- a/man/gumbelIbiv.Rd
+++ b/man/gumbelIbiv.Rd
@@ -89,7 +89,7 @@ fit = vglm(ymat ~ 1, fam=gumbelIbiv, trace=TRUE)
fit = vglm(ymat ~ 1, fam=gumbelIbiv, trace=TRUE, crit="coef")
coef(fit, matrix=TRUE)
Coef(fit)
-fitted(fit)[1:5,]
+head(fitted(fit))
}
\keyword{models}
\keyword{regression}
diff --git a/man/gumbelUC.Rd b/man/gumbelUC.Rd
index 713c102..5371b11 100644
--- a/man/gumbelUC.Rd
+++ b/man/gumbelUC.Rd
@@ -12,7 +12,7 @@
scale parameter \code{scale}.
}
\usage{
-dgumbel(x, location=0, scale=1)
+dgumbel(x, location=0, scale=1, log=FALSE)
pgumbel(q, location=0, scale=1)
qgumbel(p, location=0, scale=1)
rgumbel(n, location=0, scale=1)
@@ -21,13 +21,19 @@ rgumbel(n, location=0, scale=1)
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
- \item{n}{number of observations. Positive integer of length 1.}
+ \item{n}{number of observations.
+ If \code{length(n) > 1} then the length is taken to be the number required.}
\item{location}{the location parameter \eqn{\mu}{mu}.
This is not the mean
of the Gumbel distribution (see \bold{Details} below). }
\item{scale}{the scale parameter \eqn{\sigma}{sigma}.
This is not the standard deviation
of the Gumbel distribution (see \bold{Details} below). }
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
}
\details{
diff --git a/man/hspider.Rd b/man/hspider.Rd
index 65b7704..00861d2 100644
--- a/man/hspider.Rd
+++ b/man/hspider.Rd
@@ -52,7 +52,6 @@ in a dune area.
\bold{25}, 1--45.
}
\examples{
-data(hspider)
str(hspider)
\dontrun{
diff --git a/man/hunua.Rd b/man/hunua.Rd
index b907257..89cbe8d 100644
--- a/man/hunua.Rd
+++ b/man/hunua.Rd
@@ -50,17 +50,15 @@
}
\examples{
# Fit a GAM using vgam() and compare it with the Waitakere Ranges one
-data(hunua)
fit.h = vgam(agaaus ~ s(altitude, df=2), binomialff, hunua)
\dontrun{
plot(fit.h, se=TRUE, lcol="red", scol="red",
main="Red is Hunua, Blue is Waitakere") }
-predict(fit.h, hunua, type="response")[1:3]
+head(predict(fit.h, hunua, type="response"))
-data(waitakere)
fit.w = vgam(agaaus ~ s(altitude, df=2), binomialff, waitakere)
\dontrun{
plot(fit.w, se=TRUE, lcol="blue", scol="blue", add=TRUE) }
-predict(fit.w, hunua, type="response")[1:3] # Same as above?
+head(predict(fit.w, hunua, type="response")) # Same as above?
}
\keyword{datasets}
diff --git a/man/hyperg.Rd b/man/hyperg.Rd
index b0ab0ad..f32dea4 100644
--- a/man/hyperg.Rd
+++ b/man/hyperg.Rd
@@ -125,7 +125,7 @@ coef(fit, matrix=TRUE)
Coef(fit) # Should be equal to the true population proportion
unique(m / (m+n)) # The true population proportion
fit at extra
-fitted(fit)[1:4]
+head(fitted(fit))
summary(fit)
}
\keyword{models}
diff --git a/man/hzetaUC.Rd b/man/hzetaUC.Rd
index 7de1172..3822d8c 100644
--- a/man/hzetaUC.Rd
+++ b/man/hzetaUC.Rd
@@ -11,7 +11,7 @@
}
\usage{
-dhzeta(x, alpha)
+dhzeta(x, alpha, log=FALSE)
phzeta(q, alpha)
qhzeta(p, alpha)
rhzeta(n, alpha)
@@ -30,6 +30,11 @@ rhzeta(n, alpha)
the length of \code{x} or \code{p} or \code{q} if necessary.
}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
}
\details{
The probability function is
diff --git a/man/invbinomial.Rd b/man/invbinomial.Rd
index 030979f..95af19f 100644
--- a/man/invbinomial.Rd
+++ b/man/invbinomial.Rd
@@ -102,7 +102,7 @@ results in a EIM that is diagonal.
\examples{
y <- rnbinom(n <- 1000, mu=exp(3), size=exp(1))
fit <- vglm(y ~ 1, invbinomial, trace=TRUE)
-c(mean(y), fitted(fit)[1:5])
+c(mean(y), head(fitted(fit)))
summary(fit)
coef(fit, matrix=TRUE)
Coef(fit)
diff --git a/man/kumarUC.Rd b/man/kumarUC.Rd
index 46211f5..d3abf91 100644
--- a/man/kumarUC.Rd
+++ b/man/kumarUC.Rd
@@ -11,7 +11,7 @@
}
\usage{
-dkumar(x, shape1, shape2)
+dkumar(x, shape1, shape2, log=FALSE)
pkumar(q, shape1, shape2)
qkumar(p, shape1, shape2)
rkumar(n, shape1, shape2)
@@ -20,8 +20,14 @@ rkumar(n, shape1, shape2)
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
\item{n}{number of observations.
- Must be a positive integer of length 1.}
+ If \code{length(n) > 1} then the length is taken to be the number required. }
\item{shape1, shape2}{ positive shape parameters. }
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dkumar} gives the density,
@@ -44,17 +50,15 @@ rkumar(n, shape1, shape2)
}
\examples{
\dontrun{
-# shape1 = shape2 = 0.5;
-shape1 = 2; shape2 = 2;
-nn = 201
-x = seq(0.01, 0.99, len=nn)
+shape1 = 2; shape2 = 2; nn = 201; # shape1 = shape2 = 0.5;
+x = seq(0.0 , 1.00, len=nn)
+x = seq(-0.05, 1.05, len=nn)
plot(x, dkumar(x, shape1, shape2), type="l", las=1, ylim=c(0,1.5),
ylab=paste("fkumar(shape1=", shape1, ", shape2=", shape2, ")"),
- col="blue",
+ col="blue", cex.main=0.8,
main="Blue is density, red is cumulative distribution function",
sub="Purple lines are the 10,20,...,90 percentiles")
lines(x, pkumar(x, shape1, shape2), col="red")
-abline(h=0)
probs = seq(0.1, 0.9, by=0.1)
Q = qkumar(probs, shape1, shape2)
lines(Q, dkumar(Q, shape1, shape2), col="purple", lty=3, type="h")
diff --git a/man/laplaceUC.Rd b/man/laplaceUC.Rd
index af5ddd8..ce038b5 100644
--- a/man/laplaceUC.Rd
+++ b/man/laplaceUC.Rd
@@ -12,7 +12,7 @@
}
\usage{
-dlaplace(x, location=0, scale=1)
+dlaplace(x, location=0, scale=1, log=FALSE)
plaplace(q, location=0, scale=1)
qlaplace(p, location=0, scale=1)
rlaplace(n, location=0, scale=1)
@@ -29,6 +29,12 @@ rlaplace(n, location=0, scale=1)
the scale parameter \eqn{b}.
Must consist of positive values.
}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\details{
The Laplace distribution is often known as the double-exponential
diff --git a/man/leipnik.Rd b/man/leipnik.Rd
index 38dcadb..f39fc6b 100644
--- a/man/leipnik.Rd
+++ b/man/leipnik.Rd
@@ -98,7 +98,7 @@ y = rnorm(n=2000, mean=0.5, sd=0.1) # Not good data
fit = vglm(y ~ 1, leipnik(ilambda=1), tr=TRUE, checkwz=FALSE)
fit = vglm(y ~ 1, leipnik(ilambda=1,llam=logoff, elam=list(offset=1)),
trace=TRUE, cri="coef")
-fitted(fit)[1:5]
+head(fitted(fit))
mean(y)
summary(fit)
coef(fit, matrix=TRUE)
diff --git a/man/levy.Rd b/man/levy.Rd
index d4821b3..e0cc917 100644
--- a/man/levy.Rd
+++ b/man/levy.Rd
@@ -99,7 +99,7 @@ fit = vglm(y ~ 1, levy(idelta=delta, igamma=mygamma),
coef(fit, matrix=TRUE)
Coef(fit)
summary(fit)
-weights(fit, type="w")[1:4,]
+head(weights(fit, type="w"))
}
\keyword{models}
\keyword{regression}
diff --git a/man/lgammaUC.Rd b/man/lgammaUC.Rd
index 68e91fb..8e5c6cd 100644
--- a/man/lgammaUC.Rd
+++ b/man/lgammaUC.Rd
@@ -14,7 +14,7 @@
}
\usage{
-dlgamma(x, location=0, scale=1, k=1)
+dlgamma(x, location=0, scale=1, k=1, log=FALSE)
plgamma(q, location=0, scale=1, k=1)
qlgamma(p, location=0, scale=1, k=1)
rlgamma(n, location=0, scale=1, k=1)
@@ -26,6 +26,12 @@ rlgamma(n, location=0, scale=1, k=1)
\item{location}{the location parameter \eqn{a}.}
\item{scale}{the (positive) scale parameter \eqn{b}.}
\item{k}{the (positive) shape parameter \eqn{k}.}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dlgamma} gives the density,
diff --git a/man/lino.Rd b/man/lino.Rd
index 3f81458..471994c 100644
--- a/man/lino.Rd
+++ b/man/lino.Rd
@@ -115,7 +115,7 @@ y = rbeta(n=1000, exp(0.5), exp(1)) # Standard beta distribution
fit = vglm(y ~ 1, lino, trace=TRUE)
coef(fit, mat=TRUE)
Coef(fit)
-fitted(fit)[1:4]
+head(fitted(fit))
summary(fit)
# Nonstandard beta distribution
diff --git a/man/linoUC.Rd b/man/linoUC.Rd
index 9fbdbf6..c697610 100644
--- a/man/linoUC.Rd
+++ b/man/linoUC.Rd
@@ -11,7 +11,7 @@
by Libby and Novick (1982).
}
\usage{
-dlino(x, shape1, shape2, lambda=1)
+dlino(x, shape1, shape2, lambda=1, log=FALSE)
plino(q, shape1, shape2, lambda=1)
qlino(p, shape1, shape2, lambda=1)
rlino(n, shape1, shape2, lambda=1)
@@ -22,6 +22,11 @@ rlino(n, shape1, shape2, lambda=1)
\item{n}{number of observations.
Must be a positive integer of length 1.}
\item{shape1, shape2, lambda}{ see \code{\link{lino}}. }
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
}
\value{
\code{dlino} gives the density,
diff --git a/man/lirat.Rd b/man/lirat.Rd
index c84925e..e248d4a 100644
--- a/man/lirat.Rd
+++ b/man/lirat.Rd
@@ -56,7 +56,6 @@ are accounted for.
}
\examples{
\dontrun{
-data(lirat)
# cf. Figure 3 of Moore and Tsiatis (1991)
with(lirat, plot(hb, R/N, pch=as.character(grp), col=grp, las=1,
xlab="Hemoglobin level", ylab="Proportion Dead"))
diff --git a/man/lms.bcg.Rd b/man/lms.bcg.Rd
index ebd1491..aa47dbb 100644
--- a/man/lms.bcg.Rd
+++ b/man/lms.bcg.Rd
@@ -151,13 +151,11 @@ Green manuscript.)
\code{\link{deplot.lmscreg}},
\code{\link{cdf.lmscreg}},
\code{\link{bminz}},
-\code{\link{alsqreg}}.
+\code{\link{amlnormal}}.
}
\examples{
-data(bminz)
-
-# This converges, but can't be deplot()'ed or qtplot()'ed
+# This converges, but deplot(fit) and qtplot(fit) does not work
fit = vglm(BMI ~ bs(age, df=4), fam=lms.bcg(zero=c(1,3)), data=bminz, tr=TRUE)
coef(fit, matrix=TRUE)
\dontrun{
@@ -170,11 +168,11 @@ plotvgam(fit, se=TRUE) # Plot mu function (only)
fit = vgam(BMI ~ s(age, df=c(4,2)), maxit=4,
fam=lms.bcg(zero=1, ilam=3), data=bminz, tr=TRUE)
summary(fit)
-predict(fit)[1:3,]
-fitted(fit)[1:3,]
-bminz[1:3,]
+head(predict(fit))
+head(fitted(fit))
+head(bminz)
# Person 1 is near the lower quartile of BMI amongst people his age
-cdf(fit)[1:3]
+head(cdf(fit))
\dontrun{
# Quantile plot
diff --git a/man/lms.bcn.Rd b/man/lms.bcn.Rd
index b5c48cc..4feb61a 100644
--- a/man/lms.bcn.Rd
+++ b/man/lms.bcn.Rd
@@ -149,17 +149,16 @@ number corresponding to the highest likelihood value.
\code{\link{cdf.lmscreg}},
\code{\link{bminz}},
\code{\link{alaplace1}},
-\code{\link{alsqreg}}.
+\code{\link{amlnormal}}.
}
\examples{
-data(bminz)
fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bminz, tr=TRUE)
-predict(fit)[1:3,]
-fitted(fit)[1:3,]
-bminz[1:3,]
+head(predict(fit))
+head(fitted(fit))
+head(bminz)
# Person 1 is near the lower quartile of BMI amongst people his age
-cdf(fit)[1:3]
+head(cdf(fit))
\dontrun{
# Quantile plot
diff --git a/man/lms.yjn.Rd b/man/lms.yjn.Rd
index ed63731..fad6935 100644
--- a/man/lms.yjn.Rd
+++ b/man/lms.yjn.Rd
@@ -186,18 +186,17 @@ The generic function \code{predict}, when applied to a
\code{\link{deplot.lmscreg}},
\code{\link{cdf.lmscreg}},
\code{\link{bminz}},
-\code{\link{alsqreg}}.
+\code{\link{amlnormal}}.
}
\examples{
-data(bminz)
fit = vgam(BMI ~ s(age, df=4), fam=lms.yjn(zero=c(1,3)),
data=bminz, trace=TRUE)
-predict(fit)[1:3,]
-fitted(fit)[1:3,]
-bminz[1:3,]
+head(predict(fit))
+head(fitted(fit))
+head(bminz)
# Person 1 is near the lower quartile of BMI amongst people his age
-cdf(fit)[1:3]
+head(cdf(fit))
\dontrun{
# Quantile plot
diff --git a/man/logff.Rd b/man/logff.Rd
index 79d74f7..f774086 100644
--- a/man/logff.Rd
+++ b/man/logff.Rd
@@ -85,9 +85,9 @@ y = rlog(n=1000, prob=logit(0.2, inverse=TRUE))
fit = vglm(y ~ 1, logff, trace=TRUE, crit="c")
coef(fit, matrix=TRUE)
Coef(fit)
-\dontrun{hist(y, prob=TRUE, breaks=seq(0.5, max(y)+0.5, by=1))
+\dontrun{hist(y, prob=TRUE, breaks=seq(0.5, max(y)+0.5, by=1), border="blue")
x = seq(1, max(y), by=1)
-lines(x, dlog(x, Coef(fit)[1]), col="red", type="h")
+lines(x, dlog(x, Coef(fit)[1]), col="red", type="h", lwd=2)
}
}
\keyword{models}
diff --git a/man/logistic.Rd b/man/logistic.Rd
index e47c826..dcf9286 100644
--- a/man/logistic.Rd
+++ b/man/logistic.Rd
@@ -120,7 +120,10 @@ A note on Deriving the Information Matrix for a Logistic Distribution,
\seealso{
\code{\link[stats:Logistic]{rlogis}},
+ \code{\link{logit}},
+ \code{\link{cumulative}},
\code{\link{bilogistic4}}.
+
}
\examples{
# location unknown, scale known
diff --git a/man/logit.Rd b/man/logit.Rd
index 5debe5a..cae427c 100644
--- a/man/logit.Rd
+++ b/man/logit.Rd
@@ -131,7 +131,9 @@ elogit(theta, earg = list(min=0, max=1), inverse = FALSE, deriv = 0,
\code{\link{probit}},
\code{\link{cloglog}},
\code{\link{cauchit}},
+ \code{\link{logistic1}},
\code{\link{loge}}.
+
}
\examples{
p = seq(0.01, 0.99, by=0.01)
diff --git a/man/loglapUC.Rd b/man/loglapUC.Rd
new file mode 100644
index 0000000..d65f2ac
--- /dev/null
+++ b/man/loglapUC.Rd
@@ -0,0 +1,106 @@
+\name{loglapUC}
+\alias{dloglap}
+\alias{ploglap}
+\alias{qloglap}
+\alias{rloglap}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ The Log-Laplace Distribution }
+\description{
+ Density, distribution function, quantile function and random generation
+ for the 3-parameter log-Laplace distribution with location
+ parameter \code{location.ald}, scale parameter \code{scale.ald}
+ (on the log scale),
+ and asymmetry parameter \code{kappa}.
+
+}
+\usage{
+dloglap(x, location.ald=0, scale.ald=1,
+ tau=0.5, kappa=sqrt(tau/(1-tau)), log=FALSE)
+ploglap(q, location.ald=0, scale.ald=1,
+ tau=0.5, kappa=sqrt(tau/(1-tau)))
+qloglap(p, location.ald=0, scale.ald=1,
+ tau=0.5, kappa=sqrt(tau/(1-tau)))
+rloglap(n, location.ald=0, scale.ald=1,
+ tau=0.5, kappa=sqrt(tau/(1-tau)))
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{
+ number of observations.
+ If \code{length(n) > 1} then the length is taken to be the number required.
+ }
+ \item{location.ald, scale.ald}{
+ the location parameter \eqn{\xi}{xi} and
+ the (positive) scale parameter \eqn{\sigma}{sigma},
+ on the log scale.
+ }
+ \item{tau}{
+ the quantile parameter \eqn{\tau}{tau}.
+ Must consist of values in \eqn{(0,1)}.
+ This argument is used to specify \code{kappa} and is ignored
+ if \code{kappa} is assigned.
+
+ }
+ \item{kappa}{
+ the asymmetry parameter \eqn{\kappa}{kappa}.
+ Must consist of positive values.
+ }
+ \item{log}{
+ if \code{TRUE}, probabilities \code{p} are given as \code{log(p)}.
+ }
+
+}
+\details{
+ A positive random variable \eqn{Y} is said to have a log-Laplace
+ distribution if \eqn{\log(Y)} has an asymmetric Laplace distribution
+ (ALD). There are many variants of ALDs and the one used here
+ is described in \code{\link{alaplace3}}.
+
+}
+\value{
+ \code{dloglap} gives the density,
+ \code{ploglap} gives the distribution function,
+ \code{qloglap} gives the quantile function, and
+ \code{rloglap} generates random deviates.
+}
+\references{
+Kozubowski, T. J. and Podgorski, K. (2003)
+Log-Laplace distributions.
+\emph{International Mathematical Journal},
+\bold{3}, 467--495.
+
+}
+\author{ T. W. Yee }
+%\note{
+% The \pkg{VGAM} family function \code{\link{loglaplace3}}
+% estimates the three parameters by maximum likelihood estimation.
+%}
+
+\seealso{
+ \code{\link{dalap}},
+ \code{\link{alaplace3}},
+% \code{\link{loglaplace3}}.
+ \code{\link{loglaplace1}}.
+
+}
+\examples{
+x = seq(-0.2, 5, by=0.01)
+loc = 0; sigma = exp(0.5); kappa = 1
+\dontrun{
+plot(x, dloglap(x, loc, sigma, kappa=kappa), type="l", col="blue",
+ main="Blue is density, red is cumulative distribution function",
+ ylim=c(0,1), sub="Purple are 5,10,...,95 percentiles", las=1, ylab="")
+abline(h=0, col="blue", lty=2)
+lines(qloglap(seq(0.05,0.95,by=0.05), loc, sigma, kappa=kappa),
+ dloglap(qloglap(seq(0.05,0.95,by=0.05), loc, sigma, kappa=kappa),
+ loc, sigma, kappa=kappa), col="purple", lty=3, type="h")
+lines(x, ploglap(x, loc, sigma, kappa=kappa), type="l", col="red")
+abline(h=0, lty=2)
+}
+ploglap(qloglap(seq(0.05,0.95,by=0.05), loc, sigma, kappa=kappa),
+ loc, sigma, kappa=kappa)
+}
+\keyword{distribution}
+
diff --git a/man/loglaplace.Rd b/man/loglaplace.Rd
new file mode 100644
index 0000000..add24cc
--- /dev/null
+++ b/man/loglaplace.Rd
@@ -0,0 +1,254 @@
+\name{loglaplace}
+\alias{loglaplace1}
+\alias{logitlaplace1}
+% \alias{alaplace3}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Log-Laplace and Logit-Laplace Distribution Family Functions }
+\description{
+ Maximum likelihood estimation of
+ the 1-parameter log-Laplace and
+ the 1-parameter logit-Laplace
+ distributions.
+ These may be used for quantile regression for counts and proportions
+ respectively.
+
+}
+\usage{
+loglaplace1(tau = NULL, llocation = "loge", elocation = list(),
+ ilocation = NULL, kappa = sqrt(tau/(1 - tau)), Scale.arg = 1,
+ shrinkage.init = 0.95, parallelLocation = FALSE, digt = 4,
+ dfmu.init = 3, rep0 = 0.5,
+ minquantile = 0, maxquantile = Inf,
+ method.init = 1, zero = NULL)
+logitlaplace1(tau = NULL, llocation = "logit", elocation = list(),
+ ilocation = NULL, kappa = sqrt(tau/(1 - tau)),
+ Scale.arg = 1, shrinkage.init = 0.95, parallelLocation = FALSE,
+ digt = 4, dfmu.init = 3, rep01 = 0.5, method.init = 1, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{tau, kappa}{
+ See \code{\link{alaplace1}}.
+
+ }
+ \item{llocation}{ Character.
+ Parameter link functions for
+ location parameter \eqn{\xi}{xi}.
+ See \code{\link{Links}} for more choices.
+ However, this argument should be left unchanged with
+ count data because it restricts the quantiles to be positive.
+ With proportions data \code{llocation} can be assigned a link such as
+ \code{\link{logit}},
+ \code{\link{probit}},
+ \code{\link{cloglog}},
+ etc.
+
+ }
+ \item{elocation}{
+ List. Extra argument for each of the links.
+ See \code{earg} in \code{\link{Links}} for general information.
+
+ }
+ \item{ilocation}{
+ Optional initial values.
+ If given, it must be numeric and values are recycled to the
+ appropriate length.
+ The default is to choose the value internally.
+
+ }
+ \item{parallelLocation}{ Logical.
+ Should the quantiles be parallel on the transformed scale
+ (argument \code{llocation})?
+ Assigning this argument to \code{TRUE} circumvents the
+ seriously embarrassing quantile crossing problem.
+
+ }
+% \item{sameScale}{ Logical.
+% Should the scale parameters be equal? It is advised to keep
+% \code{sameScale=TRUE} unchanged because it does not make sense to
+% have different values for each \code{tau} value.
+% }
+
+ \item{method.init}{
+ Initialization method.
+ Either the value 1, 2, or \ldots.
+
+ }
+ \item{dfmu.init, shrinkage.init, Scale.arg, digt, zero}{
+ See \code{\link{alaplace1}}.
+
+ }
+ \item{rep0, rep01}{
+ Numeric, positive.
+ Replacement values for 0s and 1s respectively.
+ For count data, values of the response whose value is 0 are replaced
+ by \code{rep0}; it avoids computing \code{log(0)}.
+ For proportions data values of the response whose value is 0 or 1
+ are replaced by
+ \code{min(rangey01[1]/2, rep01/w[y<=0])} and
+ \code{max((1 + rangey01[2])/2, 1-rep01/w[y >= 1])}
+ respectively; e.g., it avoids computing \code{logit(0)} or \code{logit(1)}.
+ Here, \code{rangey01} is the 2-vector \code{range(y[(y > 0) & (y < 1)])}
+ of the response.
+
+ }
+ \item{minquantile, maxquantile}{
+ Numeric.
+ The minimum and maximum values possible in the quantiles.
+ These argument are effectively ignored by default since
+ \code{\link{loge}} keeps all quantiles positive.
+ However, if
+ \code{llocation = "logoff", elocation = list(offset=1)}
+ then it is possible that the fitted quantiles have value 0
+ because \code{minquantile=0}.
+
+ }
+}
+\details{
+ These \pkg{VGAM} family functions implement translations of
+ the asymmetric Laplace distribution (ALD).
+ The resulting variants may be suitable for quantile regression
+ for count data or sample proportions.
+ For example, a log link applied to count data is assumed to follow an ALD.
+ Another example is a logit link applied to proportions data so as
+ to follow an ALD.
+ A positive random variable \eqn{Y} is said to have a log-Laplace
+ distribution if \eqn{Y = e^W}{Y = exp(W)} where \eqn{W} has an ALD.
+ There are many variants of ALDs and the one used here
+ is described in \code{\link{alaplace1}}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+
+ In the \code{extra} slot of the fitted object are some list
+ components which are useful.
+ For example, the sample proportion of
+ values which are less than the fitted quantile curves, which is
+ \code{sum(wprior[y <= location]) / sum(wprior)} internally.
+ Here, \code{wprior} are the prior weights (called \code{ssize} below),
+ \code{y} is the response
+ and \code{location} is a fitted quantile curve.
+ This definition comes about naturally from the transformed ALD data.
+
+}
+\references{
+
+Kotz, S., Kozubowski, T. J. and Podgorski, K. (2001)
+\emph{The Laplace distribution and generalizations:
+a revisit with applications to communications,
+economics, engineering, and finance},
+Boston: Birkhauser.
+
+Kozubowski, T. J. and Podgorski, K. (2003)
+Log-Laplace distributions.
+\emph{International Mathematical Journal},
+\bold{3}, 467--495.
+
+ Yee, T. W. (2009)
+ Quantile regression for counts and proportions.
+ In preparation.
+
+}
+\author{ Thomas W. Yee }
+\section{Warning}{
+ The \pkg{VGAM} family function \code{\link{logitlaplace1}} will
+ not handle a vector of just 0s and 1s as the response;
+ it will only work satisfactorily if the number of trials is large.
+
+ See \code{\link{alaplace1}} for other warnings.
+ Care is needed with \code{tau} values which are too small, e.g.,
+ for count data the sample
+ proportion of zeros must be less than all values in \code{tau}.
+ Similarly, this also holds with \code{\link{logitlaplace1}},
+ which also requires all \code{tau} values to be less than the
+ sample proportion of ones.
+
+}
+\note{
+ The form of input for \code{\link{logitlaplace1}} as response
+ is a vector of proportions (values in \eqn{[0,1]}) and the
+ number of trials is entered into the \code{weights} argument of
+ \code{\link{vglm}}/\code{\link{vgam}}.
+ See Example 2 below.
+ See \code{\link{alaplace1}} for other notes in general.
+
+}
+
+\seealso{
+ \code{\link{alaplace1}},
+ \code{\link{dloglap}}.
+
+}
+
+\examples{
+# Example 1: quantile regression of counts with regression splines
+set.seed(123); my.k = exp(0)
+alldat = data.frame(x2 = sort(runif(n <- 500)))
+mymu = function(x) exp( 1 + 3*sin(2*x) / (x+0.5)^2)
+alldat = transform(alldat, y = rnbinom(n, mu=mymu(x2), size=my.k))
+mytau = c(0.1, 0.25, 0.5, 0.75, 0.9); mydof = 3
+fitp = vglm(y ~ bs(x2, df=mydof), data=alldat, trace=TRUE,
+ loglaplace1(tau=mytau, parallelLoc=TRUE)) # halfstepping is usual
+
+\dontrun{
+par(las=1) # Plot on a log1p() scale
+mylwd = 1.5
+with(alldat, plot(x2, jitter(log1p(y), factor=1.5), col="red", pch="o",
+ main="Example 1; darkgreen=truth, blue=estimated", cex=0.75))
+with(alldat, matlines(x2, log1p(fitted(fitp)), col="blue", lty=1, lwd=mylwd))
+finexgrid = seq(0, 1, len=201)
+for(ii in 1:length(mytau))
+ lines(finexgrid, col="darkgreen", lwd=mylwd,
+ log1p(qnbinom(p=mytau[ii], mu=mymu(finexgrid), si=my.k)))
+}
+fitp at extra # Contains useful information
+
+
+# Example 2: sample proportions
+set.seed(123); nnn = 1000; ssize = 100 # ssize = 1 will not work!
+alldat = data.frame(x2 = sort(runif(nnn)))
+mymu = function(x) logit( 1.0 + 4*x, inv=TRUE)
+alldat = transform(alldat, ssize = ssize,
+ y2 = rbinom(nnn, size=ssize, prob=mymu(x2)) / ssize)
+
+mytau = c(0.25, 0.50, 0.75)
+fit1 = vglm(y2 ~ bs(x2, df=3), data=alldat, weights=ssize, trace=TRUE,
+ logitlaplace1(tau=mytau, lloc="cloglog", paral=TRUE))
+
+\dontrun{
+# Check the solution. Note: this may be like comparing apples with oranges.
+plotvgam(fit1, se=TRUE, scol="red", lcol="blue", main="Truth = 'darkgreen'")
+# Centered approximately !
+linkFunctionChar = as.character(fit1 at misc$link)
+alldat = transform(alldat, trueFunction=
+ theta2eta(theta=mymu(x2), link=linkFunctionChar))
+with(alldat, lines(x2, trueFunction - mean(trueFunction), col="darkgreen"))
+
+
+# Plot the data + fitted quantiles (on the original scale)
+myylim = with(alldat, range(y2))
+with(alldat, plot(x2, y2, col="blue", ylim=myylim, las=1, pch=".", cex=2.5))
+with(alldat, matplot(x2, fitted(fit1), add=TRUE, lwd=3, type="l"))
+truecol = rep(1:3, len=fit1 at misc$M) # Add the 'truth'
+smallxgrid = seq(0, 1, len=501)
+for(ii in 1:length(mytau))
+ lines(smallxgrid, col=truecol[ii], lwd=2,
+ qbinom(p=mytau[ii], prob=mymu(smallxgrid), size=ssize) / ssize)
+
+
+# Plot on the eta (== logit()/probit()/...) scale
+with(alldat, matplot(x2, predict(fit1), add=FALSE, lwd=3, type="l"))
+# Add the 'truth'
+for(ii in 1:length(mytau)) {
+ true.quant = qbinom(p=mytau[ii], pr=mymu(smallxgrid), si=ssize)/ssize
+ lines(smallxgrid, theta2eta(theta=true.quant, link=linkFunctionChar),
+ col=truecol[ii], lwd=2)
+}
+}
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/loglinb2.Rd b/man/loglinb2.Rd
index 28cc52d..5c64641 100644
--- a/man/loglinb2.Rd
+++ b/man/loglinb2.Rd
@@ -76,7 +76,6 @@ contains further information and examples.
\code{\link{loglinb3}}.
}
\examples{
-data(coalminers)
coalminers = transform(coalminers, age=(age-42)/5)
# Get the n x 4 matrix of counts
diff --git a/man/loglinb3.Rd b/man/loglinb3.Rd
index 473391c..7e424c8 100644
--- a/man/loglinb3.Rd
+++ b/man/loglinb3.Rd
@@ -79,10 +79,9 @@ contains further information and examples.
\code{\link{hunua}}.
}
\examples{
-data(hunua)
fit = vglm(cbind(cyadea,beitaw,kniexc) ~ altitude, loglinb3, data=hunua)
coef(fit, mat=TRUE)
-fitted(fit)[1:4,]
+head(fitted(fit))
summary(fit)
}
\keyword{models}
diff --git a/man/lqnorm.Rd b/man/lqnorm.Rd
index 90c3613..39ccf6d 100644
--- a/man/lqnorm.Rd
+++ b/man/lqnorm.Rd
@@ -112,7 +112,7 @@ d = transform(d, y = realfun(x) + rnorm(n, sd=exp(1)))
d = transform(d, y = c(4*y[1], y[-1]), x=c(-1, x[-1]))
fit = vglm(y ~ x, fam = lqnorm(qpower=1.2), data=d)
coef(fit, matrix=TRUE)
-fitted(fit)[1:4,]
+head(fitted(fit))
fit at misc$qpower
fit at misc$objectiveFunction
diff --git a/man/lv.Rd b/man/lv.Rd
index 01b279c..9ea197f 100644
--- a/man/lv.Rd
+++ b/man/lv.Rd
@@ -65,7 +65,6 @@ Constrained additive ordination.
\examples{
\dontrun{
-data(hspider)
hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
set.seed(123)
p1 = cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
diff --git a/man/lvplot.Rd b/man/lvplot.Rd
index a0e6193..241a66c 100644
--- a/man/lvplot.Rd
+++ b/man/lvplot.Rd
@@ -61,7 +61,6 @@ Constrained additive ordination.
\examples{
\dontrun{
-data(hspider)
hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
set.seed(123)
p1 = cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
diff --git a/man/lvplot.rrvglm.Rd b/man/lvplot.rrvglm.Rd
index f48fad7..ff11405 100644
--- a/man/lvplot.rrvglm.Rd
+++ b/man/lvplot.rrvglm.Rd
@@ -145,7 +145,6 @@ Reduced-rank vector generalized linear models.
\code{\link{rrvglm.control}}.
}
\examples{
-data(pneumo)
n = nrow(pneumo) # x1, x2 and x3 are some unrelated covariates
pneumo = transform(pneumo, slet=scale(log(exposure.time)),
x1 = rnorm(n), x2 = rnorm(n), x3 = rnorm(n))
diff --git a/man/margeff.Rd b/man/margeff.Rd
new file mode 100644
index 0000000..1a29fb0
--- /dev/null
+++ b/man/margeff.Rd
@@ -0,0 +1,132 @@
+\name{margeff}
+\alias{margeff}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Marginal effects for the multinomial logit and cumulative models }
+\description{
+ Marginal effects for the multinomial logit model and
+ cumulative logit/probit/... models: the derivative
+ of the fitted probabilities with respect to each explanatory
+ variable.
+
+}
+\usage{
+margeff(object, subset=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{
+ A \code{\link{vglm}} \code{\link{multinomial}}
+ or
+ \code{\link{cumulative}} object.
+
+ }
+ \item{subset}{
+ Numerical or logical vector, denoting the required observation(s).
+ Recycling is used if possible.
+ The default means all observations.
+
+ }
+
+}
+\details{
+ Computes the derivative of the fitted probabilities
+ of a multinomial logit model
+ or
+ cumulative logit/probit/... model
+ with respect to each explanatory variable.
+
+}
+\value{
+ A \eqn{p} by \eqn{M+1} by \eqn{n} array, where \eqn{p} is the number
+ of explanatory variables and the (hopefully) nominal response has
+ \eqn{M+1} levels, and there are \eqn{n} observations.
+
+ If
+ \code{is.numeric(subset)}
+ and
+ \code{length(subset) == 1} then a
+ \eqn{p} by \eqn{M+1} matrix is returned.
+
+}
+% \references{ ~put references to the literature/web site here ~ }
+\author{ T. W. Yee }
+\section{Warning }{
+ Care is needed in interpretation, e.g., the change is not universally
+ accurate for a unit change in each explanatory variable because
+ eventually the `new' probabilities may become negative or greater
+ than unity. Also, the `new' probabilities will not sum to one.
+
+ This function is not applicable for models with
+ data-dependent terms such as \code{\link{bs}} and
+ \code{\link{poly}}.
+ Also the function should not be applied to models with any terms that
+ have generated more than one column of the LM model matrix,
+ such as \code{\link{bs}} and \code{\link{poly}}.
+ For such try using numerical methods such as finite-differences.
+ The \code{formula} in \code{object} should comprise of simple terms
+ of the form \code{ ~ x2 + x3 + x4}, etc.
+
+}
+
+\note{
+ For \code{\link{multinomial}}
+ this function should handle any value of \code{refLevel} and also
+ any constraint matrices.
+ However, it does not currently handle
+ the \code{xij} or \code{form2} arguments,
+ nor \code{\link{vgam}} objects.
+
+ For \code{\link{multinomial}}
+ if \code{subset} is numeric then the function uses a \code{for} loop over
+ the observations (slow).
+ The default computations use vectorization; this uses more memory than a
+ \code{for} loop but is faster.
+
+
+}
+\seealso{
+ \code{\link{multinomial}},
+ \code{\link{cumulative}},
+ \code{\link{vglm}}.
+
+}
+
+\examples{
+# Not a good example for multinomial() because the response is ordinal!!
+ii = 3; hh = 1/100
+pneumo = transform(pneumo, let = log(exposure.time))
+fit = vglm(cbind(normal, mild, severe) ~ let, multinomial, pneumo)
+fit = vglm(cbind(normal, mild, severe) ~ let,
+ cumulative(reverse=TRUE, parallel=TRUE),
+ data = pneumo)
+fitted(fit)[ii,]
+
+mynewdata = with(pneumo, data.frame(let = let[ii]+hh))
+(newp <- predict(fit, newdata=mynewdata, type="response"))
+
+# Compare the difference. Should be the same as hh --> 0.
+round(dig=3, (newp-fitted(fit)[ii,])/hh) # Finite-difference approximation
+round(dig=3, margeff(fit, subset=ii)["let",])
+
+# Other examples
+round(dig=3, margeff(fit))
+round(dig=3, margeff(fit, subset=2)["let",])
+round(dig=3, margeff(fit, subset=c(FALSE,TRUE))["let",,]) # recycling
+round(dig=3, margeff(fit, subset=c(2,4,6,8))["let",,])
+}
+
+
+% Add one or more standard keywords, see file 'KEYWORDS' in the
+% R documentation directory.
+\keyword{models}
+\keyword{regression}
+
+% set \code{i=1:n}.
+% hh * margeff(fit, i=ii)["let",]
+% cumulative(reverse=TRUE, parallel=TRUE),
+% cumulative(reverse=FALSE, parallel=TRUE),
+% cumulative(reverse=TRUE, parallel=FALSE),
+% cumulative(reverse=FALSE, parallel=FALSE),
+
+
+
diff --git a/man/mbinomial.Rd b/man/mbinomial.Rd
index 4d45fc0..69dd0a7 100644
--- a/man/mbinomial.Rd
+++ b/man/mbinomial.Rd
@@ -145,7 +145,7 @@ fit = vglm(y ~ 1 + ID + x2, trace=TRUE,
dimnames(coef(fit, mat=TRUE))
coef(fit, mat=TRUE)
summary(fit)
-fitted(fit)[1:5]
+head(fitted(fit))
objsizemb = function(object) round(object.size(object) / 2^20, dig=2)
objsizemb(fit) # in Mb
diff --git a/man/mccullagh89.Rd b/man/mccullagh89.Rd
index a8d6ed7..4cf7571 100644
--- a/man/mccullagh89.Rd
+++ b/man/mccullagh89.Rd
@@ -112,7 +112,7 @@ all else fails.
n = 1000
y = rnorm(n, mean=0.0, sd=0.2) # Limit as theta is 0, nu is infinity
fit = vglm(y ~ 1, mccullagh89, trace=TRUE)
-fitted(fit)[1:5]
+head(fitted(fit))
mean(y)
summary(fit)
coef(fit, matrix=TRUE)
diff --git a/man/mckaygamma2.Rd b/man/mckaygamma2.Rd
index 655bdb4..3a1b02b 100644
--- a/man/mckaygamma2.Rd
+++ b/man/mckaygamma2.Rd
@@ -33,9 +33,9 @@ mckaygamma2(la = "loge", lp = "loge", lq = "loge",
}
\details{
The joint probability density function is given by
- \deqn{f(y_1,y_2) = a^{p+q} y_1^{p-1} (y_2-y_1)^{q-1}
+ \deqn{f(y_1,y_2;a,p,q) = a^{p+q} y_1^{p-1} (y_2-y_1)^{q-1}
\exp(-a y_2) / [\Gamma(p) \Gamma(q)]}{%
- f(y1,y2) = a^(p+q) y1^(p-1) (y2-y1)^(q-1)
+ f(y1,y2;a,p,q) = a^(p+q) y1^(p-1) (y2-y1)^(q-1)
exp(-a y2) / [gamma(p) gamma(q)] }
for \eqn{a > 0}, \eqn{p > 0}, \eqn{q > 0} and
\eqn{0 < y_1 < y_2}{0<y1<y2}.
@@ -77,9 +77,9 @@ New York: Wiley.
\author{ T. W. Yee }
\note{
The response must be a two column matrix.
- Currently, the fitted value is a matrix with two columns; the
- first column has values \eqn{(p+q)/a} for the mean of
- \code{pmin(y1,y2)}, while the second column is filled with \code{NA}
+ Currently, the fitted value is a matrix with two columns;
+ the first column has values \eqn{(p+q)/a} for the mean of \code{pmin(y1,y2)},
+ while the second column is filled with \code{NA}
for the unknown mean of \code{pmax(y1,y2)}.
The data are sorted internally and the user need not input the
data presorted.
@@ -98,7 +98,7 @@ fit = vglm(ymat ~ 1, fam=mckaygamma2, trace=TRUE)
coef(fit, matrix=TRUE)
Coef(fit)
vcov(fit)
-fitted(fit)[1:5,]
+head(fitted(fit))
summary(fit)
}
\keyword{models}
diff --git a/man/micmen.Rd b/man/micmen.Rd
index 1a7aebe..daab99c 100644
--- a/man/micmen.Rd
+++ b/man/micmen.Rd
@@ -113,7 +113,6 @@ micmen(rpar = 0.001, divisor = 10, init1 = NULL, init2 = NULL,
% \code{skira}.
}
\examples{
-data(enzyme)
fit = vglm(velocity ~ 1, micmen, enzyme, trace=TRUE, crit="c",
form2 = ~ conc - 1)
\dontrun{
diff --git a/man/model.framevlm.Rd b/man/model.framevlm.Rd
index f4a366b..31ae73d 100644
--- a/man/model.framevlm.Rd
+++ b/man/model.framevlm.Rd
@@ -2,7 +2,7 @@
\alias{model.framevlm}
\title{Construct the Model Frame of a VLM Object}
\usage{
-model.framevlm(object, \dots)
+model.framevlm(object, setupsmart = TRUE, wrapupsmart = TRUE, \dots)
}
\arguments{
\item{object}{a model object from the \pkg{VGAM} \R package
@@ -13,6 +13,13 @@ model.framevlm(object, \dots)
\code{subset}.
See \code{\link[stats]{model.frame}} for more information on these.
}
+
+ \item{setupsmart, wrapupsmart}{
+ Logical.
+ Arguments to determine whether to use smart prediction.
+
+ }
+
}
\description{
This function returns a \code{\link{data.frame}} with the variables.
@@ -48,22 +55,21 @@ model.framevlm(object, \dots)
}
\examples{
# Illustrates smart prediction
-data(pneumo)
pneumo = transform(pneumo, let=log(exposure.time))
fit = vglm(cbind(normal,mild, severe) ~ poly(c(scale(let)), 2),
fam=multinomial,
data=pneumo, trace=TRUE, x=FALSE)
class(fit)
-check1 = model.frame(fit)[1:3,]
+check1 = head(model.frame(fit))
check1
-check2 = model.frame(fit, data=pneumo[1:3,])
+check2 = model.frame(fit, data=head(pneumo))
check2
all.equal(unlist(check1), unlist(check2)) # Should be TRUE
-q0 = predict(fit)[1:3,]
-q1 = predict(fit, newdata=pneumo)[1:3,]
-q2 = predict(fit, newdata=pneumo[1:3,])
+q0 = head(predict(fit))
+q1 = head(predict(fit, newdata=pneumo))
+q2 = predict(fit, newdata=head(pneumo))
all.equal(q0, q1) # Should be TRUE
all.equal(q1, q2) # Should be TRUE
}
diff --git a/man/model.matrixvlm.Rd b/man/model.matrixvlm.Rd
index 8e5703c..137a99f 100644
--- a/man/model.matrixvlm.Rd
+++ b/man/model.matrixvlm.Rd
@@ -2,7 +2,7 @@
\alias{model.matrixvlm}
\title{Construct the Design Matrix of a VLM Object}
\usage{
-model.matrixvlm(object, type=c("vlm","lm","lm2"), \dots)
+model.matrixvlm(object, type=c("vlm","lm","lm2","bothlmlm2"), \dots)
}
\arguments{
\item{object}{an object of a class that inherits from the
@@ -15,6 +15,8 @@ model.matrixvlm(object, type=c("vlm","lm","lm2"), \dots)
to the \code{formula} argument.
The value \code{"lm2"} is the second (LM) model matrix corresponding
to the \code{form2} argument.
+ The value \code{"bothlmlm2"} means both LM and VLM model matrices.
+
}
\item{\dots}{further arguments passed to or from other methods.
These include \code{data} (which
@@ -42,6 +44,9 @@ model.matrixvlm(object, type=c("vlm","lm","lm2"), \dots)
\value{
The design matrix for a regression model with the specified formula
and data.
+ If \code{type="bothlmlm2"} then a list is returned with components
+ \code{"X"} and \code{"Xm2"}.
+
}
\references{
Yee, T. W. and Hastie, T. J. (2003)
@@ -63,7 +68,6 @@ Reduced-rank vector generalized linear models.
}
\examples{
# Illustrates smart prediction
-data(pneumo)
pneumo = transform(pneumo, let=log(exposure.time))
fit = vglm(cbind(normal,mild, severe) ~ poly(c(scale(let)), 2),
fam=multinomial,
@@ -72,15 +76,15 @@ class(fit)
fit at x
model.matrix(fit)
-Check1 = model.matrix(fit, type="lm")[1:3,]
+Check1 = head(model.matrix(fit, type="lm"))
Check1
-Check2 = model.matrix(fit, data=pneumo[1:3,], type="lm")
+Check2 = model.matrix(fit, data=head(pneumo), type="lm")
Check2
all.equal(c(Check1), c(Check2))
-q0 = predict(fit)[1:3,]
-q1 = predict(fit, newdata=pneumo)[1:3,]
-q2 = predict(fit, newdata=pneumo[1:3,])
+q0 = head(predict(fit))
+q1 = head(predict(fit, newdata=pneumo))
+q2 = predict(fit, newdata=head(pneumo))
all.equal(q0, q1) # Should be TRUE
all.equal(q1, q2) # Should be TRUE
}
diff --git a/man/morgenstern.Rd b/man/morgenstern.Rd
index abd3f7d..e9bff0a 100644
--- a/man/morgenstern.Rd
+++ b/man/morgenstern.Rd
@@ -105,7 +105,7 @@ fit = vglm(ymat ~ 1, fam=morgenstern, trace=TRUE)
fit = vglm(ymat ~ 1, fam=morgenstern, trace=TRUE, crit="coef")
coef(fit, matrix=TRUE)
Coef(fit)
-fitted(fit)[1:5,]
+head(fitted(fit))
}
\keyword{models}
\keyword{regression}
diff --git a/man/multinomial.Rd b/man/multinomial.Rd
index a14b703..16bef18 100644
--- a/man/multinomial.Rd
+++ b/man/multinomial.Rd
@@ -3,7 +3,9 @@
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Multinomial Logit Model }
\description{
- Fits a multinomial logit model to an unordered factor response.
+ Fits a multinomial logit model to a (preferably unordered) factor
+ response.
+
}
\usage{
multinomial(zero = NULL, parallel = FALSE, nointercept = NULL,
@@ -18,7 +20,7 @@ multinomial(zero = NULL, parallel = FALSE, nointercept = NULL,
\item{zero}{
An integer-valued vector specifying which
linear/additive predictors are modelled as intercepts only.
- The values must be from the set \{1,2,\ldots,\eqn{M}\}.
+ Any values must be from the set \{1,2,\ldots,\eqn{M}\}.
The default value means none are modelled as intercept-only terms.
}
@@ -30,11 +32,11 @@ multinomial(zero = NULL, parallel = FALSE, nointercept = NULL,
\item{nointercept}{
An integer-valued vector specifying which
linear/additive predictors have no intercepts.
- The values must be from the set \{1,2,\ldots,\eqn{M}\}.
+ Any values must be from the set \{1,2,\ldots,\eqn{M}\}.
}
\item{refLevel}{
- Either a single positive integer or a value of a factor.
+ Either a single positive integer or a value of the factor.
If an integer then it specifies which
column of the response matrix is the reference or baseline level.
The default is the last one (the \eqn{(M+1)}th one).
@@ -52,11 +54,11 @@ multinomial(zero = NULL, parallel = FALSE, nointercept = NULL,
\deqn{\eta_j = \log(P[Y=j]/ P[Y=M+1])}{%
eta_j = log(P[Y=j]/ P[Y=M+1])}
where \eqn{\eta_j}{eta_j} is the \eqn{j}th linear/additive predictor.
- Here, \eqn{j=1,\ldots,M} and \eqn{\eta_{M+1}}{eta_{M+1}} is 0 by
+ Here, \eqn{j=1,\ldots,M}, and \eqn{\eta_{M+1}}{eta_{M+1}} is 0 by
definition. That is, the last level of the factor, or last column of
the response matrix, is taken as the reference level or baseline---this
is for identifiability of the parameters.
- The reference or baseline level can be chosen by using the
+ The reference or baseline level can be changed with the
\code{refLevel} argument.
In almost all the literature, the constraint matrices associated
@@ -66,7 +68,7 @@ multinomial(zero = NULL, parallel = FALSE, nointercept = NULL,
matrices are unknown and to be estimated, then this can be achieved
by fitting the model as a reduced-rank vector generalized linear model
(RR-VGLM; see \code{\link{rrvglm}}). In particular, a multinomial logit
- model with unknown constraint matrices is known as a stereotype model
+ model with unknown constraint matrices is known as a \emph{stereotype} model
(Anderson, 1984), and can be fitted with \code{\link{rrvglm}}.
}
@@ -91,6 +93,11 @@ Agresti, A. (2002)
\emph{Categorical Data Analysis},
2nd ed. New York: Wiley.
+Hastie, T. J., Tibshirani, R. J. and Friedman, J. H. (2009)
+\emph{The Elements of Statistical Learning: Data Mining, Inference and Prediction},
+2nd ed.
+New York: Springer-Verlag.
+
Simonoff, J. S. (2003)
\emph{Analyzing Categorical Data},
New York: Springer-Verlag.
@@ -111,12 +118,13 @@ contains further information and examples.
The response should be either a matrix of counts (with row sums that are
all positive), or a factor. In both cases, the \code{y} slot returned
by \code{\link{vglm}}/\code{\link{vgam}}/\code{\link{rrvglm}} is the
- matrix of counts.
+ matrix of sample proportions.
The multinomial logit model is more appropriate for a nominal
- (unordered) factor response. For an ordinal (ordered) factor
- response, models such as those based on cumulative probabilities
- (see \code{\link{cumulative}}) are more suited.
+ (unordered) factor response than for an ordinal (ordered) factor
+ response.
+ Models more suited for the latter include those based on cumulative
+ probabilities, e.g., \code{\link{cumulative}}.
\code{multinomial} is prone to numerical difficulties if the groups
are separable and/or the fitted probabilities are close to 0 or 1.
@@ -124,19 +132,19 @@ contains further information and examples.
\eqn{P[Y=j]} for \eqn{j=1,\ldots,M+1}.
Here is an example of the usage of the \code{parallel} argument.
- If there are covariates \code{x1}, \code{x2} and \code{x3}, then
- \code{parallel = TRUE ~ x1 + x2 -1} and
- \code{parallel = FALSE ~ x3} are equivalent. This would constrain
- the regression coefficients for \code{x1} and \code{x2} to be
- equal; those of the intercepts and \code{x3} would be different.
+ If there are covariates \code{x2}, \code{x3} and \code{x4}, then
+ \code{parallel = TRUE ~ x2 + x3 - 1} and
+ \code{parallel = FALSE ~ x4} are equivalent. This would constrain
+ the regression coefficients for \code{x2} and \code{x3} to be equal;
+ those of the intercepts and \code{x4} would be different.
- In Example 4 below, a conditional logit model is fitted to a artificial
+ In Example 4 below, a conditional logit model is fitted to an artificial
data set that explores how cost and travel time affect people's
decision about how to travel to work. Walking is the baseline group.
The variable \code{Cost.car} is the difference between the cost of
- travel to work by car and walking, etc. The variable \code{Durn.car}
+ travel to work by car and walking, etc. The variable \code{Time.car}
is the difference between the travel duration/time to work by car and
- walking, etc. For other details about the \code{xij} argument see
+ walking, etc. For other details about the \code{xij} argument see
\code{\link{vglm.control}} and \code{\link{fill}}.
The \code{\link[nnet]{multinom}} function in the \pkg{nnet} package
@@ -151,6 +159,8 @@ contains further information and examples.
% Please let me know if you strongly agree or disagree about this.
\section{Warning }{
+ No check is made to verify that the response is nominal.
+
The arguments \code{zero} and \code{nointercept} can be inputted
with values that fail. For example, \code{multinomial(zero=2,
nointercept=1:3)} means the second linear/additive predictor is
@@ -160,19 +170,21 @@ contains further information and examples.
e.g., \code{multinomial(zero=2, parallel = TRUE ~ x3)}. If in doubt,
apply \code{constraints()} to the fitted object to check.
- No check is made to verify that the response is nominal.
-
}
\seealso{
- \code{\link{acat}},
+ \code{\link{margeff}},
\code{\link{cumulative}},
+ \code{\link{acat}},
\code{\link{cratio}},
\code{\link{sratio}},
\code{\link{dirichlet}},
\code{\link{dirmultinomial}},
\code{\link{rrvglm}},
+ \code{\link{fill1}},
\code{\link[stats:Multinom]{Multinomial}},
\code{\link[base]{iris}}.
+ The author's homepage has further documentation about
+ categorical data analysis using \pkg{VGAM}.
}
% \code{\link[base:Multinom]{rmultinom}}
@@ -187,36 +199,38 @@ coef(fit, matrix=TRUE)
# Example 2a: a simple example
-y = t(rmultinom(10, size = 20, prob=c(0.1,0.2,0.8))) # Counts
-fit = vglm(y ~ 1, multinomial)
-fitted(fit)[1:4,] # Proportions
+ymat = t(rmultinom(10, size = 20, prob=c(0.1,0.2,0.8))) # Counts
+fit = vglm(ymat ~ 1, multinomial)
+head(fitted(fit)) # Proportions
fit at prior.weights # Not recommended for extraction of prior weights
weights(fit, type="prior", matrix=FALSE) # The better method
fit at y # Sample proportions
constraints(fit) # Constraint matrices
# Example 2b: Different reference level used as the baseline
-fit2 = vglm(y ~ 1, multinomial(refLevel=2))
+fit2 = vglm(ymat ~ 1, multinomial(refLevel=2))
coef(fit2, matrix=TRUE)
coef(fit , matrix=TRUE) # Easy to reconcile this output with fit2
# Example 2c: Different input to Example 2a but same result
-w = apply(y, 1, sum) # Prior weights
-yprop = y / w # Sample proportions
+w = apply(ymat, 1, sum) # Prior weights
+yprop = ymat / w # Sample proportions
fitprop = vglm(yprop ~ 1, multinomial, weights=w)
-fitted(fitprop)[1:4,] # Proportions
+head(fitted(fitprop)) # Proportions
weights(fitprop, type="prior", matrix=FALSE)
fitprop at y # Same as the input
# Example 3: The response is a factor.
nn = 10
-yfactor = gl(3, nn, labels = c("Control", "Treatment1", "Treatment2"))
-x = runif(3 * nn)
-fit3a = vglm(yfactor ~ x, multinomial(refLevel=yfactor[12]))
-fit3b = vglm(yfactor ~ x, multinomial(refLevel=2))
+dframe3 = data.frame(yfactor = gl(3, nn, labels=c("Control","Trt1","Trt2")),
+ x = runif(3 * nn))
+myrefLevel = with(dframe3, yfactor[12])
+fit3a = vglm(yfactor ~ x, multinomial(refLevel=myrefLevel), data=dframe3)
+fit3b = vglm(yfactor ~ x, multinomial(refLevel=2), data=dframe3)
coef(fit3a, matrix=TRUE) # "Treatment1" is the reference level
coef(fit3b, matrix=TRUE) # "Treatment1" is the reference level
+margeff(fit3b)
# Example 4: Fit a rank-1 stereotype model
@@ -231,37 +245,41 @@ Coef(fit4)@A # The A matrix
svd(coef(fit4, matrix=TRUE)[-1,])$d # This has rank 1; = C %*% t(A)
-# Example 5: The use of the xij argument (conditional logit model)
+# Example 5: The use of the xij argument (aka conditional logit model)
set.seed(111)
-n = 100 # Number of people who travel to work
-M = 3 # There are M+1 models of transport
-ymat = matrix(0, n, M+1)
-ymat[cbind(1:n, sample(x=M+1, size=n, replace=TRUE))] = 1
+nn = 100 # Number of people who travel to work
+M = 3 # There are M+1 models of transport to go to work
+ymat = matrix(0, nn, M+1)
+ymat[cbind(1:nn, sample(x=M+1, size=nn, replace=TRUE))] = 1
dimnames(ymat) = list(NULL, c("bus","train","car","walk"))
-transport = data.frame(cost.bus=runif(n), cost.train=runif(n),
- cost.car=runif(n), cost.walk=runif(n),
- durn.bus=runif(n), durn.train=runif(n),
- durn.car=runif(n), durn.walk=runif(n))
-transport = round(transport, dig=2) # For convenience
-transport = transform(transport,
- Cost.bus = cost.bus - cost.walk,
- Cost.car = cost.car - cost.walk,
- Cost.train = cost.train - cost.walk,
- Durn.bus = durn.bus - durn.walk,
- Durn.car = durn.car - durn.walk,
- Durn.train = durn.train - durn.walk)
-fit = vglm(ymat ~ Cost.bus + Cost.train + Cost.car +
- Durn.bus + Durn.train + Durn.car,
- fam = multinomial,
+gotowork = data.frame(cost.bus = runif(nn), time.bus = runif(nn),
+ cost.train= runif(nn), time.train= runif(nn),
+ cost.car = runif(nn), time.car = runif(nn),
+ cost.walk = runif(nn), time.walk = runif(nn))
+gotowork = round(gotowork, dig=2) # For convenience
+gotowork = transform(gotowork,
+ Cost.bus = cost.bus - cost.walk,
+ Cost.car = cost.car - cost.walk,
+ Cost.train = cost.train - cost.walk,
+ Cost = cost.train - cost.walk, # for labelling
+ Time.bus = time.bus - time.walk,
+ Time.car = time.car - time.walk,
+ Time.train = time.train - time.walk,
+ Time = time.train - time.walk) # for labelling
+fit = vglm(ymat ~ Cost + Time,
+ multinomial(parall=TRUE ~ Cost + Time - 1),
xij = list(Cost ~ Cost.bus + Cost.train + Cost.car,
- Durn ~ Durn.bus + Durn.train + Durn.car),
- data=transport)
-model.matrix(fit, type="lm")[1:7,] # LM model matrix
-model.matrix(fit, type="vlm")[1:7,] # Big VLM model matrix
+ Time ~ Time.bus + Time.train + Time.car),
+ form2 = ~ Cost + Cost.bus + Cost.train + Cost.car +
+ Time + Time.bus + Time.train + Time.car,
+ data=gotowork, trace=TRUE)
+head(model.matrix(fit, type="lm")) # LM model matrix
+head(model.matrix(fit, type="vlm")) # Big VLM model matrix
coef(fit)
coef(fit, matrix=TRUE)
-coef(fit, matrix=TRUE, compress=FALSE)
+constraints(fit)
summary(fit)
+max(abs(predict(fit)-predict(fit, new=gotowork))) # Should be 0
}
\keyword{models}
\keyword{regression}
diff --git a/man/nakagami.Rd b/man/nakagami.Rd
index cabffe6..0f2e2ef 100644
--- a/man/nakagami.Rd
+++ b/man/nakagami.Rd
@@ -94,7 +94,7 @@ y = sqrt(rgamma(n, shape=shape, scale=Scale/shape))
fit = vglm(y ~ 1, nakagami, trace=TRUE, crit="c")
y = rnaka(n, shape=shape, scale=Scale)
fit = vglm(y ~ 1, nakagami(iscale=3), trace=TRUE)
-fitted(fit)[1:5]
+head(fitted(fit))
mean(y)
coef(fit, matrix=TRUE)
(Cfit = Coef(fit))
diff --git a/man/nakagamiUC.Rd b/man/nakagamiUC.Rd
index 6e60942..46c7add 100644
--- a/man/nakagamiUC.Rd
+++ b/man/nakagamiUC.Rd
@@ -12,7 +12,7 @@
}
\usage{
-dnaka(x, shape, scale=1)
+dnaka(x, shape, scale=1, log = FALSE)
pnaka(q, shape, scale=1)
qnaka(p, shape, scale=1, ...)
rnaka(n, shape, scale=1, Smallno=1.0e-6)
@@ -39,6 +39,11 @@ rnaka(n, shape, scale=1, Smallno=1.0e-6)
Arguments that can be passed into \code{\link[stats]{uniroot}}.
}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
}
\value{
\code{dnaka} gives the density,
diff --git a/man/nbolf.Rd b/man/nbolf.Rd
index b02c375..8e9822f 100644
--- a/man/nbolf.Rd
+++ b/man/nbolf.Rd
@@ -141,9 +141,9 @@ fit = vglm(cuty ~ x2 + x3, fam = cumulative(link="nbolf",
reverse=TRUE, parallel=TRUE, intercept.apply=TRUE,
mv=TRUE, earg=list(cutpoint=cutpoints[2:3], k=k)),
trace=TRUE)
-fit at y[1:5,]
-fitted(fit)[1:5,]
-predict(fit)[1:5,]
+head(fit at y)
+head(fitted(fit))
+head(predict(fit))
coef(fit)
coef(fit, matrix=TRUE)
constraints(fit)
diff --git a/man/negbinomial.Rd b/man/negbinomial.Rd
index 8a44eba..2f98cfc 100644
--- a/man/negbinomial.Rd
+++ b/man/negbinomial.Rd
@@ -19,6 +19,8 @@ negbinomial(lmu = "loge", lk = "loge",
\item{lmu, lk}{
Link functions applied to the \eqn{\mu}{mu} and \eqn{k} parameters.
See \code{\link{Links}} for more choices.
+ Note that the \eqn{k} parameter is the \code{size} argument of
+ \code{\link[stats:NegBinomial]{rnbinom}} etc.
}
\item{emu, ek}{
@@ -76,7 +78,7 @@ negbinomial(lmu = "loge", lk = "loge",
}
\item{method.init}{
- An integer with value \code{1} or \code{2} which
+ An integer with value \code{1} or \code{2} or \code{3} which
specifies the initialization method for the \eqn{\mu}{mu} parameter.
If failure to converge occurs try another value
and/or else specify a value for \code{shrinkage.init}
diff --git a/man/notdocumentedyet.Rd b/man/notdocumentedyet.Rd
index af8e6e9..e410d20 100644
--- a/man/notdocumentedyet.Rd
+++ b/man/notdocumentedyet.Rd
@@ -1,5 +1,25 @@
\name{notdocumentedyet}
\alias{notdocumentedyet}
+%
+%
+%20090330
+\alias{dclogloglap}
+\alias{dlogitlap}
+\alias{dprobitlap}
+\alias{logitlaplace1.control}
+\alias{loglaplace1.control}
+\alias{pclogloglap}
+\alias{plogitlap}
+\alias{pprobitlap}
+\alias{qclogloglap}
+\alias{qlogitlap}
+\alias{qprobitlap}
+\alias{rclogloglap}
+\alias{rlogitlap}
+\alias{rprobitlap}
+%
+%
+%
\alias{A1A2A3}
\alias{AAaa.nohw}
%\alias{AIC}
@@ -9,7 +29,7 @@
%\alias{AIC.vglm}
%\alias{AIC.vlm}
% \alias{Build.terms}
-% \alias{Build.terms.vlm}
+\alias{Build.terms.vlm}
\alias{Coef.cao}
\alias{Coefficients}
\alias{Cut}
@@ -43,7 +63,7 @@
% \alias{attrassigndefault}
% \alias{attrassignlm}
\alias{beta4}
-\alias{betaffqn}
+% \alias{betaffqn}
\alias{biplot}
\alias{biplot.qrrvglm}
% \alias{block.diag}
@@ -123,6 +143,16 @@
\alias{fitted.values}
\alias{fitted.values.uqo}
\alias{fittedvsmooth.spline}
+%
+\alias{variable.names}
+\alias{variable.namesvlm}
+\alias{variable.namesrrvglm}
+\alias{case.names}
+\alias{case.namesvlm}
+%
+\alias{formula}
+\alias{formulavlm}
+\alias{formulaNA.VGAM}
\alias{gammaff}
% \alias{get.arg}
% \alias{get.rrvglm.se1}
@@ -160,7 +190,7 @@
\alias{lms.yjn.control}
\alias{lmscreg.control}
\alias{logLik.vlm}
-\alias{lv.Coef.cao}
+% \alias{lv.Coef.cao} 20090505
\alias{lv.Coef.qrrvglm}
\alias{lv.cao}
\alias{lv.qrrvglm}
@@ -221,6 +251,7 @@
\alias{predictvsmooth.spline}
\alias{predictvsmooth.spline.fit}
% \alias{preplotvgam}
+\alias{print}
% \alias{print.vanova}
% \alias{print.vfamily}
% \alias{print.vgam}
@@ -347,6 +378,7 @@
\alias{vglm.garma.control}
\alias{vglm.multinomial.control}
\alias{vglm.multinomial.deviance.control}
+\alias{dmultinomial}
\alias{vglm.vcategorical.control}
% \alias{vindex}
% \alias{vlabel}
@@ -436,3 +468,11 @@
%}
\keyword{models}
\keyword{regression}
+\keyword{internal}
+
+
+
+
+
+
+
diff --git a/man/nzc.Rd b/man/nzc.Rd
index a1165f5..3da9366 100644
--- a/man/nzc.Rd
+++ b/man/nzc.Rd
@@ -1,7 +1,7 @@
\name{nzc}
\alias{nzc}
\docType{data}
-\title{ Chinese Population in New Zealand 1867--2001}
+\title{ Chinese Population in New Zealand 1867--2001 Data}
\description{
The Chinese population in New Zealand from 1867 to 2001,
along with the whole of the New Zealand population.
@@ -29,7 +29,6 @@
}
\examples{
\dontrun{
-data(nzc)
with(nzc, plot(year, female/(male+female), type="b", ylab="Proportion",
main="Proportion of NZ Chinese that are female",
col="blue", las=1))
diff --git a/man/olympic.Rd b/man/olympic.Rd
new file mode 100644
index 0000000..354c045
--- /dev/null
+++ b/man/olympic.Rd
@@ -0,0 +1,55 @@
+\name{olympic}
+\alias{olympic}
+\docType{data}
+\title{ 2008 Summer Olympic Final Medal Count Data}
+\description{
+ Final count of medal winners by country for the 2008 Summer Olympic
+ games in Beijing.
+
+}
+\usage{data(olympic)}
+\format{
+ A data frame with 87 observations on the following 6 variables.
+ \describe{
+ \item{\code{rank}}{a numeric vector, overall ranking of the countries. }
+ \item{\code{country}}{a character vector. }
+ \item{\code{gold}}{a numeric vector, number of gold medals. }
+ \item{\code{silver}}{a numeric vector, number of silver medals. }
+ \item{\code{bronze}}{a numeric vector, number of bronze medals. }
+ \item{\code{totalmedal}}{a numeric vector, total number of medals. }
+ }
+}
+\details{
+ The event was held during August 8--24, 2008, in Beijing.
+
+% This is a simple two-way contingency table of counts.
+
+}
+\source{
+ \url{http://www.associatedcontent.com/article/979484/2008_summer_olympic_medal_count_total.html}.
+
+}
+\references{
+ The official English website was
+ \url{http://en.beijing2008.cn}.
+
+}
+\seealso{
+ \code{\link{grc}}.
+
+}
+
+\examples{
+summary(olympic)
+## maybe str(olympic) ; plot(olympic) ...
+\dontrun{
+with(head(olympic, n=8),
+ barplot(rbind(gold,silver,bronze),
+ col=c("gold","grey","brown"), # No "silver" or "bronze"!
+ names.arg=country, cex.names=0.5,
+ beside=TRUE, main="2008 Summer Olympic Final Medal Count",
+ ylab="Medal count", las=1,
+ sub="Top 8 countries; 'gold'=gold, 'grey'=silver, 'brown'=bronze"))
+}
+}
+\keyword{datasets}
diff --git a/man/ordpoisson.Rd b/man/ordpoisson.Rd
index 9dc4ede..f0b9ae9 100644
--- a/man/ordpoisson.Rd
+++ b/man/ordpoisson.Rd
@@ -132,9 +132,9 @@ plot(x2, x3, col=ystar, pch=as.character(ystar))
}
table(ystar) / sum(table(ystar))
fit = vglm(ystar ~ x2 + x3, fam = ordpoisson(cutpoi=fcutpts))
-fit at y[1:5,] # This can be input if countdata=TRUE
-fitted(fit)[1:5,]
-predict(fit)[1:5,]
+head(fit at y) # This can be input if countdata=TRUE
+head(fitted(fit))
+head(predict(fit))
coef(fit, matrix=TRUE)
fit at extra
diff --git a/man/oxtemp.Rd b/man/oxtemp.Rd
index 4029ff1..c581990 100644
--- a/man/oxtemp.Rd
+++ b/man/oxtemp.Rd
@@ -23,7 +23,6 @@
% \references{
% }
\examples{
-data(oxtemp)
fit = vglm(maxtemp ~ 1, egev, trace=TRUE, data=oxtemp)
}
\keyword{datasets}
diff --git a/man/pareto1.Rd b/man/pareto1.Rd
index 2e79ec0..46e9ce9 100644
--- a/man/pareto1.Rd
+++ b/man/pareto1.Rd
@@ -158,7 +158,7 @@ alpha = 2; k = exp(3)
y = rpareto(n=1000, location=alpha, shape=k)
fit = vglm(y ~ 1, pareto1, trace=TRUE)
fit at extra # The estimate of alpha is here
-fitted(fit)[1:5]
+head(fitted(fit))
mean(y)
coef(fit, matrix=TRUE)
summary(fit) # Standard errors are incorrect!!
@@ -166,7 +166,7 @@ summary(fit) # Standard errors are incorrect!!
# Here, alpha is assumed known
fit2 = vglm(y ~ 1, pareto1(location=alpha), trace=TRUE, crit="c")
fit2 at extra # alpha stored here
-fitted(fit2)[1:5]
+head(fitted(fit2))
mean(y)
coef(fit2, matrix=TRUE)
summary(fit2) # Standard errors are ok
diff --git a/man/persp.qrrvglm.Rd b/man/persp.qrrvglm.Rd
index 7c6f30d..77238ba 100644
--- a/man/persp.qrrvglm.Rd
+++ b/man/persp.qrrvglm.Rd
@@ -191,7 +191,6 @@ canonical Gaussian ordination.
\code{\link[graphics]{title}}.
}
\examples{\dontrun{
-data(hspider)
hspider[,1:6] = scale(hspider[,1:6]) # Good idea when ITolerances = TRUE
set.seed(111)
r1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
diff --git a/man/plackett.Rd b/man/plackett.Rd
index 60e85a0..64e37a3 100644
--- a/man/plackett.Rd
+++ b/man/plackett.Rd
@@ -107,7 +107,7 @@ fit = vglm(ymat ~ 1, fam=plackett, trace=TRUE)
coef(fit, matrix=TRUE)
Coef(fit)
vcov(fit)
-fitted(fit)[1:5,]
+head(fitted(fit))
summary(fit)
}
\keyword{models}
diff --git a/man/plotdeplot.lmscreg.Rd b/man/plotdeplot.lmscreg.Rd
index 65e6b66..5b020fa 100644
--- a/man/plotdeplot.lmscreg.Rd
+++ b/man/plotdeplot.lmscreg.Rd
@@ -81,7 +81,6 @@ this function should not be called directly.
}
\examples{
-data(bminz)
fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bminz)
\dontrun{
y = seq(15, 43, by=0.25)
@@ -92,8 +91,8 @@ deplot(fit, x0=60, y=y, add=TRUE, col="red", llwd=2) -> a
names(a at post$deplot)
a at post$deplot$newdata
-a at post$deplot$y[1:5]
-a at post$deplot$density[1:5]
+head(a at post$deplot$y)
+head(a at post$deplot$density)
}
}
\keyword{dplot}
diff --git a/man/plotqrrvglm.Rd b/man/plotqrrvglm.Rd
index a307767..a0b2c5e 100644
--- a/man/plotqrrvglm.Rd
+++ b/man/plotqrrvglm.Rd
@@ -58,7 +58,6 @@ by \code{\link{lvplot.qrrvglm}}.
\examples{\dontrun{
# QRR-VGLM on the hunting spiders data
# This is computationally expensive
-data(hspider)
set.seed(111) # This leads to the global solution
# hspider[,1:6]=scale(hspider[,1:6]) # Standardize the environmental variables
p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
diff --git a/man/plotqtplot.lmscreg.Rd b/man/plotqtplot.lmscreg.Rd
index 03c1d44..9c79d06 100644
--- a/man/plotqtplot.lmscreg.Rd
+++ b/man/plotqtplot.lmscreg.Rd
@@ -98,7 +98,6 @@ contains further information and examples.
}
\examples{\dontrun{
-data(bminz)
fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bminz)
qtplot(fit)
qtplot(fit, perc=c(25,50,75,95), lcol="blue", tcol="blue", llwd=2)
diff --git a/man/plotvgam.Rd b/man/plotvgam.Rd
index 8b44a44..2b0df5f 100644
--- a/man/plotvgam.Rd
+++ b/man/plotvgam.Rd
@@ -11,60 +11,68 @@
plotvgam(x, newdata = NULL, y = NULL, residuals = NULL,
rugplot = TRUE, se = FALSE, scale = 0, raw = TRUE,
offset.arg = 0, deriv.arg = 0, overlay = FALSE,
- type.residuals = c("deviance", "working", "pearson", "response"),
+ type.residuals = c("deviance","working","pearson","response"),
plot.arg = TRUE, which.term = NULL, which.cf = NULL,
- control = plotvgam.control(...), ...)
+ control = plotvgam.control(...), varxij = 1, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
+ In the following, \eqn{M} is the number of linear/additive
+ predictors, and \eqn{r} is the number of columns of the
+ constraint matrix of interest.
+
\item{x}{ A fitted \pkg{VGAM} object, e.g., produced by
\code{vgam()}, \code{vglm()}, or \code{rrvglm()}. }
\item{newdata}{ Data frame.
May be used to reconstruct the original data set. }
\item{y}{ Unused. }
- \item{residuals}{ Logical. If \code{TRUE}, residuals are plotted. See
- \code{type.residuals}}
-\item{rugplot}{ Logical. If \code{TRUE}, a rug plot is plotted at the
+ \item{residuals}{
+ Logical. If \code{TRUE} then residuals are plotted.
+ See \code{type.residuals}}
+\item{rugplot}{
+ Logical. If \code{TRUE} then a rug plot is plotted at the
foot of each plot. These values are jittered to expose ties.
}
\item{se}{
- Logical. If \code{TRUE}, approximate \eqn{\pm 2}{+-2} pointwise
+ Logical. If \code{TRUE} then approximate \eqn{\pm 2}{+-2} pointwise
standard error bands are included in the plot.
}
- \item{scale}{ Numerical. By default, each plot will have its own
+ \item{scale}{
+ Numerical. By default, each plot will have its own
y-axis scale. However, by specifying a value, each plot's y-axis
scale will be at least \code{scale} wide.
}
- \item{raw}{ Logical. If \code{TRUE}, the smooth functions are those
- obtained directly by the algorithm, and are
- plotted without
+ \item{raw}{
+ Logical. If \code{TRUE} then the smooth functions are those
+ obtained directly by the algorithm, and are plotted without
having to premultiply with the constraint matrices.
- If \code{FALSE}, the smooth functions have been premultiply by
+ If \code{FALSE} then the smooth functions have been premultiplied by
the constraint matrices.
The \code{raw} argument is directly fed into \code{predict.vgam()}.
+
}
\item{offset.arg}{ Numerical vector of length \eqn{r}.
These are added to the component functions. Useful for
separating out the functions when \code{overlay} is \code{TRUE}.
- If \code{overlay} is \code{TRUE} and there is one covariate, using the
- intercept values as the offsets can be a good idea.
+ If \code{overlay} is \code{TRUE} and there is one covariate then
+ using the intercept values as the offsets can be a good idea.
}
\item{deriv.arg}{ Numerical. The order of the derivative.
Should be assigned an small
integer such as 0, 1, 2. Only applying to \code{s()} terms,
it plots the derivative.
}
- \item{overlay}{ Logical. If \code{TRUE}, component functions of the same
+ \item{overlay}{ Logical. If \code{TRUE} then component functions of the same
covariate are overlaid on each other.
The functions are centered, so \code{offset.arg} can be useful
when \code{overlay} is \code{TRUE}.
}
- \item{type.residuals}{ if \code{residuals} is \code{TRUE}, the first
+ \item{type.residuals}{ if \code{residuals} is \code{TRUE} then the first
possible value
of this vector, is used to specify the type of
residual. }
- \item{plot.arg}{ Logical. If \code{FALSE}, no plot is produced. }
+ \item{plot.arg}{ Logical. If \code{FALSE} then no plot is produced. }
\item{which.term}{ Character or integer vector containing all
terms to be
plotted, e.g., \code{which.term=c("s(age)", "s(height"))} or
@@ -81,9 +89,19 @@ plotvgam(x, newdata = NULL, y = NULL, residuals = NULL,
\code{\link{plotvgam.control}}. This includes line colors,
line widths, line types, etc.
}
- In the above, \eqn{M} is the number of linear/additive
- predictors, and \eqn{r} is the number of columns of the
- constraint matrix of interest.
+
+ \item{varxij}{ Positive integer.
+ Used if \code{xij} of \code{\link{vglm.control}} was used,
+ this chooses which inner argument the component is plotted against.
+ This argument is related to \code{raw=TRUE} and terms such as
+ \code{NS(dum1,dum2)} and constraint matrices that have more than
+ one column. The default would plot the smooth against \code{dum1}
+ but setting \code{varxij=2} could mean plotting the smooth against
+ \code{dum2}.
+ See the \pkg{VGAM} website for further information.
+
+ }
+
}
\details{
Many of \code{plotvgam()}'s options can be found in
@@ -114,9 +132,11 @@ contains further information and examples.
explicitly otherwise.
\code{plotvgam()} is quite buggy at the moment.
- \code{plotvgam()} works in a similar
- manner to S-PLUS's \code{plot.gam()}, however, there is no
- options for interactive construction of the plots yet.
+
+% \code{plotvgam()} works in a similar
+% manner to S-PLUS's \code{plot.gam()}, however, there is no
+% options for interactive construction of the plots yet.
+
}
\seealso{
@@ -126,7 +146,6 @@ contains further information and examples.
\code{\link{vglm}}.
}
\examples{
-data(coalminers)
coalminers = transform(coalminers, Age = (age - 42) / 5)
fit = vgam(cbind(nBnW,nBW,BnW,BW) ~ s(Age), binom2.or(zero=NULL), coalminers)
\dontrun{
diff --git a/man/plotvgam.control.Rd b/man/plotvgam.control.Rd
index 42999cf..476e26d 100644
--- a/man/plotvgam.control.Rd
+++ b/man/plotvgam.control.Rd
@@ -9,7 +9,7 @@
\usage{
plotvgam.control(which.cf = NULL,
xlim = NULL, ylim = NULL, llty = par()$lty,
- slty = if (is.R()) "dashed" else 3, pcex = par()$cex,
+ slty = "dashed", pcex = par()$cex,
pch = par()$pch, pcol = par()$col,
lcol = par()$col, rcol = par()$col,
scol = par()$col, llwd = par()$lwd, slwd = par()$lwd,
@@ -82,10 +82,13 @@ contains further information and examples.
}
\author{ Thomas W. Yee }
-\note{ This function enables \code{plotvgam()} to work in a similar
- manner to S-PLUS's \code{plot.gam()}. However, there is no
- interactive options yet.
-}
+
+%\note{
+% This function enables \code{plotvgam()} to work in a similar
+% manner to S-PLUS's \code{plot.gam()}.
+% However, there is no interactive options yet.
+%
+%}
\seealso{
\code{\link{plotvgam}}.
diff --git a/man/pneumo.Rd b/man/pneumo.Rd
index 6eb2f80..3acc55c 100644
--- a/man/pneumo.Rd
+++ b/man/pneumo.Rd
@@ -1,7 +1,7 @@
\name{pneumo}
\alias{pneumo}
\docType{data}
-\title{Pneumoconiosis amongst a group of coalminers}
+\title{Pneumoconiosis in Coalminers Data}
\description{
The \code{pneumo} data frame has 8 rows and 4 columns.
Exposure time is explanatory, and there are 3 ordinal response variables.
@@ -31,8 +31,6 @@ data set, the two most severe categories were combined.
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
}
\examples{
-data(pneumo)
-
# Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
pneumo = transform(pneumo, let=log(exposure.time))
vglm(cbind(normal,mild,severe) ~ let,
diff --git a/man/poissonp.Rd b/man/poissonp.Rd
index adf0bd1..fb914ba 100644
--- a/man/poissonp.Rd
+++ b/man/poissonp.Rd
@@ -99,7 +99,7 @@ os = 2
fit = vglm(y ~ 1, poissonp(os, 2), tra=TRUE, cri="c")
fit = vglm(y ~ 1, poissonp(os, 3), tra=TRUE, cri="c") # Slow convergence?
fit = vglm(y ~ 1, poissonp(os, 3, idensi=1), tra=TRUE, cri="c")
-fitted(fit)[1:4]
+head(fitted(fit))
mean(y)
coef(fit, matrix=TRUE)
Coef(fit)
diff --git a/man/polf.Rd b/man/polf.Rd
index dfd66a6..fad43a9 100644
--- a/man/polf.Rd
+++ b/man/polf.Rd
@@ -150,9 +150,9 @@ fit = vglm(cuty ~ x2 + x3, fam = cumulative(link="polf",
reverse=TRUE, parallel=TRUE, intercept.apply=TRUE,
mv=TRUE, earg=list(cutpoint=cutpoints[2:3])),
trace=TRUE)
-fit@y[1:5,]
-fitted(fit)[1:5,]
-predict(fit)[1:5,]
+head(fit@y)
+head(fitted(fit))
+head(predict(fit))
coef(fit)
coef(fit, matrix=TRUE)
constraints(fit)
diff --git a/man/polonoUC.Rd b/man/polonoUC.Rd
index 1abcec1..68990ab 100644
--- a/man/polonoUC.Rd
+++ b/man/polonoUC.Rd
@@ -18,7 +18,7 @@ rpolono(n, meanlog=0, sdlog=1)
\item{x}{vector of quantiles.}
% \item{p}{vector of probabilities.}
\item{n}{number of observations.
- Must be a positive integer of length 1.}
+ If \code{length(n) > 1} then the length is taken to be the number required. }
\item{meanlog, sdlog }{
the mean and standard deviation of the normal distribution
(on the log scale).
diff --git a/man/posbinomUC.Rd b/man/posbinomUC.Rd
index 6af3e1f..7f780f7 100644
--- a/man/posbinomUC.Rd
+++ b/man/posbinomUC.Rd
@@ -21,7 +21,8 @@ rposbinom(n, size, prob)
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
- \item{n}{number of observations. Must be a single positive integer. }
+ \item{n}{number of observations.
+ If \code{length(n) > 1} then the length is taken to be the number required. }
\item{size}{number of trials. It is the \eqn{N} symbol in the formula
given in \code{\link{posbinomial}}. }
\item{prob}{probability of success on each trial. }
diff --git a/man/posbinomial.Rd b/man/posbinomial.Rd
index 053b1a2..ac3cf3a 100644
--- a/man/posbinomial.Rd
+++ b/man/posbinomial.Rd
@@ -77,8 +77,8 @@ summary(fit)
summary(fit2)
Coef(fit2) # = MLE of p
Coef(fit) # = MLE of p
-fitted(fit2)[1:2]
-fitted(fit)[1:2]
+head(fitted(fit2))
+head(fitted(fit))
}
\keyword{models}
\keyword{regression}
diff --git a/man/posnegbinUC.Rd b/man/posnegbinUC.Rd
index 1179d33..e7b79c5 100644
--- a/man/posnegbinUC.Rd
+++ b/man/posnegbinUC.Rd
@@ -23,6 +23,7 @@ rposnegbin(n, size, prob=NULL, munb=NULL)
\item{p}{vector of probabilities.}
\item{n}{
number of random values to return.
+ If \code{length(n) > 1} then the length is taken to be the number required.
}
\item{size, prob, munb, log}{
diff --git a/man/posnegbinomial.Rd b/man/posnegbinomial.Rd
index 221f1df..388cbe1 100644
--- a/man/posnegbinomial.Rd
+++ b/man/posnegbinomial.Rd
@@ -144,8 +144,8 @@ fit = vglm(y ~ 1, posnegbinomial, trace=TRUE)
coef(fit, matrix=TRUE)
mean(y) # Sample mean
munb / (1 - (size/(size+munb))^size) # Population mean
-fitted(fit)[1:3,]
-predict(fit)[1:3,]
+head(fitted(fit))
+head(predict(fit))
}
\keyword{models}
\keyword{regression}
diff --git a/man/posnormUC.Rd b/man/posnormUC.Rd
index fea7122..8ccd3b1 100644
--- a/man/posnormUC.Rd
+++ b/man/posnormUC.Rd
@@ -20,7 +20,7 @@ rposnorm(n, mean=0, sd=1)
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
\item{n}{number of observations.
- Must be a positive integer of length 1.}
+ If \code{length(n) > 1} then the length is taken to be the number required. }
\item{mean, sd, log}{ see \code{\link[stats:Normal]{rnorm}}. }
}
\value{
diff --git a/man/pospoisUC.Rd b/man/pospoisUC.Rd
index 62f3c7d..0a50a18 100644
--- a/man/pospoisUC.Rd
+++ b/man/pospoisUC.Rd
@@ -21,7 +21,8 @@ rpospois(n, lambda)
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
- \item{n}{number of observations. Must be a single positive integer. }
+ \item{n}{number of observations.
+ If \code{length(n) > 1} then the length is taken to be the number required. }
\item{lambda}{ vector of positive means (of an ordinary Poisson distribution).
Short vectors are recycled. }
\item{log}{ logical.
diff --git a/man/predict.qrrvglm.Rd b/man/predict.qrrvglm.Rd
index 13fb90a..b9a6bf3 100644
--- a/man/predict.qrrvglm.Rd
+++ b/man/predict.qrrvglm.Rd
@@ -65,7 +65,6 @@ canonical Gaussian ordination.
}
\examples{
-data(hspider)
hspider[,1:6]=scale(hspider[,1:6]) # Standardize the environmental variables
set.seed(1234)
p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
@@ -74,11 +73,11 @@ p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
fam=poissonff, data=hspider, Crow1positive=FALSE, ITol=TRUE)
sort(p1@misc$deviance.Bestof) # A history of all the iterations
-predict(p1)[1:3,]
+head(predict(p1))
# The following should be all zeros
-max(abs(predict(p1, new=hspider[1:3,]) - predict(p1)[1:3,]))
-max(abs(predict(p1, new=hspider[1:3,], type="res") - fitted(p1)[1:3,]))
+max(abs(predict(p1, new=head(hspider)) - head(predict(p1))))
+max(abs(predict(p1, new=head(hspider), type="res") - head(fitted(p1))))
}
\keyword{models}
\keyword{regression}
diff --git a/man/predict.vglm.Rd b/man/predict.vglm.Rd
index a38f17c..c909de5 100644
--- a/man/predict.vglm.Rd
+++ b/man/predict.vglm.Rd
@@ -108,31 +108,30 @@ Reduced-rank vector generalized linear models.
\examples{
# Illustrates smart prediction
-data(pneumo)
pneumo = transform(pneumo, let=log(exposure.time))
fit = vglm(cbind(normal,mild, severe) ~ poly(c(scale(let)), 2),
- fam=cumulative(parallel=TRUE),
+ cumulative(parallel=TRUE),
data=pneumo, trace=TRUE, x=FALSE)
class(fit)
-(q0 = predict(fit)[1:3,])
-(q1 = predict(fit, newdata=pneumo)[1:3,])
-(q2 = predict(fit, newdata=pneumo[1:3,]))
+(q0 = head(predict(fit)))
+(q1 = predict(fit, newdata=head(pneumo)))
+(q2 = predict(fit, newdata=head(pneumo)))
all.equal(q0, q1) # Should be TRUE
all.equal(q1, q2) # Should be TRUE
-predict(fit)[1:3,]
-predict(fit, untransform=TRUE)[1:3,]
+head(predict(fit))
+head(predict(fit, untransform=TRUE))
-p0 = predict(fit, type="res")[1:3,]
-p1 = predict(fit, type="res", newdata=pneumo)[1:3,]
-p2 = predict(fit, type="res", newdata=pneumo[1:3,])
-p3 = fitted(fit)[1:3,]
+p0 = head(predict(fit, type="res"))
+p1 = head(predict(fit, type="res", newdata=pneumo))
+p2 = head(predict(fit, type="res", newdata=pneumo))
+p3 = head(fitted(fit))
all.equal(p0, p1) # Should be TRUE
all.equal(p1, p2) # Should be TRUE
all.equal(p2, p3) # Should be TRUE
-predict(fit, type="t", se=TRUE)
+predict(fit, type="terms", se=TRUE)
}
\keyword{models}
\keyword{regression}
diff --git a/man/qrrvglm.control.Rd b/man/qrrvglm.control.Rd
index 9d6bbbc..a065329 100644
--- a/man/qrrvglm.control.Rd
+++ b/man/qrrvglm.control.Rd
@@ -445,7 +445,6 @@ chances of obtaining the global solution.
\examples{
# Poisson CQO with equal tolerances
-data(hspider)
set.seed(111) # This leads to the global solution
hspider[,1:6]=scale(hspider[,1:6]) # Good idea when ITolerances = TRUE
p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
diff --git a/man/qtplot.gumbel.Rd b/man/qtplot.gumbel.Rd
index 5cb03ec..988eaa3 100644
--- a/man/qtplot.gumbel.Rd
+++ b/man/qtplot.gumbel.Rd
@@ -89,11 +89,10 @@ qtplot.gumbel(object, plot.it = TRUE,
\code{\link{gumbel}}.
}
\examples{
-data(venice)
y = as.matrix(venice[,paste("r",1:10,sep="")])
fit1 = vgam(y ~ s(year, df=3), gumbel(R=365, mpv=TRUE),
data=venice, trace=TRUE, na.action=na.pass)
-fitted(fit1)[1:4,]
+head(fitted(fit1))
\dontrun{
par(mfrow=c(1,1), bty="l", xpd=TRUE, las=1)
@@ -102,7 +101,7 @@ qtplot(fit1, mpv=TRUE, lcol=c(1,2,5), tcol=c(1,2,5), lwd=2,
qtplot(fit1, perc=97, mpv=FALSE, lcol=3, tcol=3,
lwd=2, tadj=0.4, add=TRUE) -> i
-i@post$qtplot$fitted[1:4,]
+head(i@post$qtplot$fitted)
}
}
\keyword{hplot}
diff --git a/man/qtplot.lmscreg.Rd b/man/qtplot.lmscreg.Rd
index 0781c1b..67b7f9c 100644
--- a/man/qtplot.lmscreg.Rd
+++ b/man/qtplot.lmscreg.Rd
@@ -69,7 +69,6 @@ contains further information and examples.
}
\examples{\dontrun{
-data(bminz)
fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bminz)
qtplot(fit)
qtplot(fit, perc=c(25,50,75,95), lcol="blue", tcol="blue", llwd=2)
diff --git a/man/quasibinomialff.Rd b/man/quasibinomialff.Rd
index f28d41a..2ea59b8 100644
--- a/man/quasibinomialff.Rd
+++ b/man/quasibinomialff.Rd
@@ -124,7 +124,6 @@ quasibinomialff()
quasibinomialff(link="probit")
# Nonparametric logistic regression
-data(hunua)
hunua = transform(hunua, a.5 = sqrt(altitude)) # Transformation of altitude
fit1 = vglm(agaaus ~ poly(a.5, 2), quasibinomialff, hunua)
fit2 = vgam(agaaus ~ s(a.5, df=2), quasibinomialff, hunua)
diff --git a/man/rayleigh.Rd b/man/rayleigh.Rd
index cb08447..9175604 100644
--- a/man/rayleigh.Rd
+++ b/man/rayleigh.Rd
@@ -87,7 +87,7 @@ A related distribution is the Maxwell distribution.
n = 1000; a = exp(2)
ystar = rrayleigh(n, a=a)
fit = vglm(ystar ~ 1, rayleigh, trace=TRUE, crit="c")
-fitted(fit)[1:5]
+head(fitted(fit))
mean(ystar)
coef(fit, matrix=TRUE)
Coef(fit)
@@ -102,7 +102,7 @@ extra = list(rightcensored = ystar > U)
fit = vglm(y ~ 1, crayleigh, trace=TRUE, extra=extra)
table(fit@extra$rightcen)
coef(fit, matrix=TRUE)
-fitted(fit)[1:4,]
+head(fitted(fit))
}
\keyword{models}
\keyword{regression}
diff --git a/man/rcqo.Rd b/man/rcqo.Rd
index ce40afa..06ecda6 100644
--- a/man/rcqo.Rd
+++ b/man/rcqo.Rd
@@ -12,7 +12,7 @@ rcqo(n, p, S, Rank = 1,
"Ordinal-negbinomial", "gamma2"),
EqualMaxima = FALSE, EqualTolerances = TRUE, ESOptima = FALSE,
loabundance = if (EqualMaxima) hiabundance else 10,
- hiabundance = 100, sdlv = c(1.5/2^(0:3))[1:Rank],
+ hiabundance = 100, sdlv = head(1.5/2^(0:3), Rank),
sdOptima = ifelse(ESOptima, 1.5/Rank, 1) * ifelse(scalelv, sdlv, 1),
sdTolerances = 0.25, Kvector = 1, Shape = 1,
sqrt = FALSE, Log = FALSE, rhox = 0.5, breaks = 4,
diff --git a/man/recexp1.Rd b/man/recexp1.Rd
index e0de865..57df415 100644
--- a/man/recexp1.Rd
+++ b/man/recexp1.Rd
@@ -59,12 +59,7 @@ recexp1(lrate="loge", irate=NULL, method.init=1)
}
\examples{
rawy = rexp(n <- 10000, rate=exp(1))
-
-# Keep only the records
-delete = c(FALSE, rep(TRUE, len=n-1))
-for(i in 2:length(rawy))
- if(rawy[i] > max(rawy[1:(i-1)])) delete[i] = FALSE
-(y = rawy[!delete])
+y = unique(cummax(rawy)) # Keep only the records
length(y) / y[length(y)] # MLE of rate
@@ -75,3 +70,11 @@ Coef(fit)
\keyword{models}
\keyword{regression}
+%# Keep only the records
+%delete = c(FALSE, rep(TRUE, len=n-1))
+%for(i in 2:length(rawy))
+% if(rawy[i] > max(rawy[1:(i-1)])) delete[i] = FALSE
+%(y = rawy[!delete])
+
+
+
diff --git a/man/recnormal1.Rd b/man/recnormal1.Rd
index 9511207..b07557e 100644
--- a/man/recnormal1.Rd
+++ b/man/recnormal1.Rd
@@ -79,13 +79,8 @@ recnormal1(lmean="identity", lsd="loge",
n = 10000
mymean = 100
# First value is reference value or trivial record
-rawy = c(mymean, rnorm(n, me=mymean, sd=16))
-
-# Keep only observations that are records
-delete = c(FALSE, rep(TRUE, len=n))
-for(i in 2:length(rawy))
- if(rawy[i] > max(rawy[1:(i-1)])) delete[i] = FALSE
-(y = rawy[!delete])
+rawy = c(mymean, rnorm(n, me=mymean, sd=exp(3)))
+(y = unique(cummax(rawy))) # Keep only observations that are records
fit = vglm(y ~ 1, recnormal1, trace=TRUE, maxit=200)
coef(fit, matrix=TRUE)
@@ -95,3 +90,9 @@ summary(fit)
\keyword{models}
\keyword{regression}
+%# Keep only observations that are records
+%delete = c(FALSE, rep(TRUE, len=n))
+%for(i in 2:length(rawy))
+% if(rawy[i] > max(rawy[1:(i-1)])) delete[i] = FALSE
+%(y = rawy[!delete])
+
diff --git a/man/riceUC.Rd b/man/riceUC.Rd
index c702a2e..7532659 100644
--- a/man/riceUC.Rd
+++ b/man/riceUC.Rd
@@ -13,7 +13,7 @@
}
\usage{
-drice(x, vee, sigma)
+drice(x, vee, sigma, log=FALSE)
%price(q, vee, sigma)
%qrice(p, vee, sigma)
rrice(n, vee, sigma)
@@ -25,6 +25,12 @@ rrice(n, vee, sigma)
Must be a positive integer of length 1.}
\item{vee, sigma}{ See \code{\link{riceff}}.
}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{drice} gives the density,
diff --git a/man/rrar.Rd b/man/rrar.Rd
index 0a427a6..5b5cba1 100644
--- a/man/rrar.Rd
+++ b/man/rrar.Rd
@@ -74,12 +74,12 @@ written.
}
\examples{
\dontrun{
-data(usagrain)
year = seq(1961+1/12, 1972+10/12, by=1/12)
par(mar=c(4,4,2,2)+0.1, mfrow=c(2,2))
-for(i in 1:4) {
- plot(year, usagrain[,i], main=names(usagrain)[i], type="l", xlab="", ylab="")
- points(year, usagrain[,i], pch="*")
+for(ii in 1:4) {
+ plot(year, usagrain[,ii], main=names(usagrain)[ii],
+ type="l", xlab="", ylab="")
+ points(year, usagrain[,ii], pch="*")
}
apply(usagrain, 2, mean) # mu vector
cgrain = scale(usagrain, scale=FALSE) # Center the time series only
print(fit@misc$omegahat, dig=3)
print(fit@misc$Phimatrices, dig=2)
par(mar=c(4,4,2,2)+0.1, mfrow=c(4,1))
-for(i in 1:4) {
- plot(year, fit@misc$Z[,i], main=paste("Z", i, sep=""),
+for(ii in 1:4) {
+ plot(year, fit@misc$Z[,ii], main=paste("Z", ii, sep=""),
type="l", xlab="", ylab="")
- points(year, fit@misc$Z[,i], pch="*")
+ points(year, fit at misc$Z[,ii], pch="*")
}
}
}
diff --git a/man/rrvglm-class.Rd b/man/rrvglm-class.Rd
index 3fbfea0..7eb0699 100644
--- a/man/rrvglm-class.Rd
+++ b/man/rrvglm-class.Rd
@@ -252,7 +252,6 @@ Vector generalized additive models.
\examples{
# Rank-1 stereotype model of Anderson (1984)
-data(pneumo)
set.seed(111)
pneumo = transform(pneumo, let=log(exposure.time),
x1=runif(nrow(pneumo))) # x1 is some unrelated covariate
diff --git a/man/rrvglm.Rd b/man/rrvglm.Rd
index d6cf0b3..0c7b539 100644
--- a/man/rrvglm.Rd
+++ b/man/rrvglm.Rd
@@ -219,7 +219,7 @@ Regression and ordered categorical variables.
The arguments of \code{rrvglm} are the same as those of
\code{\link{vglm}} but with some extras in \code{\link{rrvglm.control}}.
- In the example below, a rank-1 stereotype model of Anderson (1984)
+ In the example below, a rank-1 \emph{stereotype} model of Anderson (1984)
is fitted to some car data. The reduced-rank regression is performed,
adjusting for two covariates. Setting a trivial constraint matrix for
the latent variable variables in \eqn{x_2}{x2} avoids a warning message
diff --git a/man/rrvglm.control.Rd b/man/rrvglm.control.Rd
index dcd2564..279717e 100644
--- a/man/rrvglm.control.Rd
+++ b/man/rrvglm.control.Rd
@@ -10,7 +10,7 @@
rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
Corner = TRUE, Uncor = FALSE, Wmat = NULL, Svd.arg = FALSE,
Index.corner = if (length(Structural.zero))
- ((1:1000)[-Structural.zero])[1:Rank] else 1:Rank,
+ head((1:1000)[-Structural.zero], Rank) else 1:Rank,
Alpha = 0.5, Bestof = 1, Cinit = NULL,
Etamat.colmax = 10,
SD.Cinit = 0.02, Structural.zero = NULL,
@@ -197,7 +197,7 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
The arguments in this function begin with an upper case letter to help
avoid interference with those of \code{\link{vglm.control}}.
- In the example below a rank-1 stereotype model (Anderson, 1984)
+ In the example below a rank-1 \emph{stereotype} model (Anderson, 1984)
is fitted.
}
@@ -214,7 +214,6 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
}
\examples{
-data(pneumo)
set.seed(111)
pneumo = transform(pneumo, let=log(exposure.time),
x1 = runif(nrow(pneumo))) # x1 is some unrelated covariate
diff --git a/man/ruge.Rd b/man/ruge.Rd
index 3b224ee..fc99396 100644
--- a/man/ruge.Rd
+++ b/man/ruge.Rd
@@ -32,7 +32,6 @@
%\references{
%}
\examples{
-data(ruge)
lambdahat = with(ruge, weighted.mean(number, w=counts))
(N = with(ruge, sum(counts)))
with(ruge, cbind(number, counts,
diff --git a/man/s.Rd b/man/s.Rd
index 9c92911..6a418c0 100644
--- a/man/s.Rd
+++ b/man/s.Rd
@@ -85,7 +85,6 @@ Vector generalized additive models.
\examples{
# Nonparametric logistic regression
-data(hunua)
fit = vgam(agaaus ~ s(altitude, df=2), binomialff, hunua)
\dontrun{
plot(fit, se=TRUE)}
diff --git a/man/seq2binomial.Rd b/man/seq2binomial.Rd
index 6e01c44..60acea9 100644
--- a/man/seq2binomial.Rd
+++ b/man/seq2binomial.Rd
@@ -105,7 +105,7 @@ y2 = successes2 / successes1
fit = vglm(cbind(y1,y2) ~ x, seq2binomial, trace=TRUE, weight=mvector)
coef(fit)
coef(fit, mat=TRUE)
-fitted(fit)[1:5,]
+head(fitted(fit))
}
\keyword{models}
\keyword{regression}
diff --git a/man/skellam.Rd b/man/skellam.Rd
index ae16fbf..566567e 100644
--- a/man/skellam.Rd
+++ b/man/skellam.Rd
@@ -104,7 +104,8 @@ coef(fit1, matrix=TRUE)
coef(fit2, matrix=TRUE)
summary(fit1)
# Likelihood ratio test for equal means:
-1-pchisq(logLik(fit1)-logLik(fit2), df=fit2@df.residual-fit1@df.residual)
+1-pchisq(2*(logLik(fit1)-logLik(fit2)),
+ df=fit2@df.residual-fit1@df.residual)
}
\keyword{models}
\keyword{regression}
diff --git a/man/skewnormal1.Rd b/man/skewnormal1.Rd
index 797e5d8..240f9f9 100644
--- a/man/skewnormal1.Rd
+++ b/man/skewnormal1.Rd
@@ -103,7 +103,7 @@ distribution.
y = rsnorm(n <- 1000, shape=5)
fit = vglm(y ~ 1, skewnormal1, trace=TRUE)
coef(fit, matrix=TRUE)
-fitted(fit)[1:4,]
+head(fitted(fit))
mean(y)
\dontrun{
hist(y, prob=TRUE)
diff --git a/man/snormUC.Rd b/man/snormUC.Rd
index df1767e..feaef38 100644
--- a/man/snormUC.Rd
+++ b/man/snormUC.Rd
@@ -14,7 +14,7 @@
}
\usage{
-dsnorm(x, location = 0, scale = 1, shape = 0)
+dsnorm(x, location = 0, scale = 1, shape = 0, log = FALSE)
%psnorm(q, lambda)
%qsnorm(p, lambda)
rsnorm(n, location = 0, scale = 1, shape = 0)
@@ -35,6 +35,12 @@ rsnorm(n, location = 0, scale = 1, shape = 0)
The shape parameter. It is called \eqn{\alpha}{alpha} in
\code{\link{skewnormal1}}.
}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\details{
See \code{\link{skewnormal1}}, which currently only estimates the shape
diff --git a/man/sratio.Rd b/man/sratio.Rd
index 4231608..88aa742 100644
--- a/man/sratio.Rd
+++ b/man/sratio.Rd
@@ -118,7 +118,6 @@ contains further information and examples.
}
\examples{
-data(pneumo)
pneumo = transform(pneumo, let=log(exposure.time))
(fit = vglm(cbind(normal,mild,severe) ~ let, sratio(parallel=TRUE), pneumo))
coef(fit, matrix=TRUE)
diff --git a/man/tikuvUC.Rd b/man/tikuvUC.Rd
index d2873f5..be4b92c 100644
--- a/man/tikuvUC.Rd
+++ b/man/tikuvUC.Rd
@@ -12,7 +12,7 @@
}
\usage{
-dtikuv(x, d, mean=0, sigma=1)
+dtikuv(x, d, mean=0, sigma=1, log=FALSE)
ptikuv(q, d, mean=0, sigma=1)
qtikuv(p, d, mean=0, sigma=1, ...)
rtikuv(n, d, mean=0, sigma=1, Smallno=1.0e-6)
@@ -40,6 +40,12 @@ rtikuv(n, d, mean=0, sigma=1, Smallno=1.0e-6)
Arguments that can be passed into \code{\link[stats]{uniroot}}.
}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dtikuv} gives the density,
diff --git a/man/toxop.Rd b/man/toxop.Rd
index 5ed0e7a..58c8899 100644
--- a/man/toxop.Rd
+++ b/man/toxop.Rd
@@ -41,7 +41,6 @@
}
\examples{
-data(toxop)
\dontrun{
with(toxop, plot(rainfall, positive/ssize, col="blue"))
plot(toxop, col="blue")
diff --git a/man/triangle.Rd b/man/triangle.Rd
index 4a6d496..dd40f48 100644
--- a/man/triangle.Rd
+++ b/man/triangle.Rd
@@ -73,9 +73,8 @@ y = rtriangle(n <- 3000, theta=3/4)
fit = vglm(y ~ 1, triangle(link="identity"), trace=TRUE)
coef(fit, matrix=TRUE)
Coef(fit)
-fit@extra$lower[1:5]
-
-fitted(fit)[1:5]
+head(fit@extra$lower)
+head(fitted(fit))
mean(y)
}
\keyword{models}
diff --git a/man/triangleUC.Rd b/man/triangleUC.Rd
index d6b6721..6e1716e 100644
--- a/man/triangleUC.Rd
+++ b/man/triangleUC.Rd
@@ -11,7 +11,7 @@
\code{theta}.
}
\usage{
-dtriangle(x, theta, lower=0, upper=1)
+dtriangle(x, theta, lower=0, upper=1, log=FALSE)
ptriangle(q, theta, lower=0, upper=1)
qtriangle(p, theta, lower=0, upper=1)
rtriangle(n, theta, lower=0, upper=1)
@@ -26,6 +26,12 @@ rtriangle(n, theta, lower=0, upper=1)
\item{lower, upper}{lower and upper limits of the distribution.
Must be finite.
}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dtriangle} gives the density,
diff --git a/man/trplot.Rd b/man/trplot.Rd
index f2ce821..e299b3a 100644
--- a/man/trplot.Rd
+++ b/man/trplot.Rd
@@ -54,7 +54,6 @@ quadratic ordination.
\examples{
\dontrun{
-data(hspider)
hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
set.seed(123)
diff --git a/man/trplot.qrrvglm.Rd b/man/trplot.qrrvglm.Rd
index 9e9db45..6ce103e 100644
--- a/man/trplot.qrrvglm.Rd
+++ b/man/trplot.qrrvglm.Rd
@@ -146,7 +146,6 @@ quadratic ordination.
}
\examples{\dontrun{
-data(hspider)
set.seed(111) # This leads to the global solution
# hspider[,1:6]=scale(hspider[,1:6]) # Standardize the environmental variables
p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
diff --git a/man/undocumented-methods.Rd b/man/undocumented-methods.Rd
index 71ca6f3..35141d3 100644
--- a/man/undocumented-methods.Rd
+++ b/man/undocumented-methods.Rd
@@ -55,6 +55,36 @@
\alias{fitted,vglm-method}
\alias{fitted,uqo-method}
\alias{fitted,vsmooth.spline-method}
+%
+%
+%\alias{case.names,ANY-method}
+\alias{case.names,vlm-method}
+\alias{case.names,vgam-method}
+\alias{case.names,vglm-method}
+\alias{case.names,rrvglm-method}
+\alias{case.names,qrrvglm-method}
+\alias{case.names,grc-method}
+%
+%\alias{variable.names,ANY-method}
+\alias{variable.names,vlm-method}
+\alias{variable.names,vgam-method}
+\alias{variable.names,vglm-method}
+\alias{variable.names,rrvglm-method}
+\alias{variable.names,qrrvglm-method}
+\alias{variable.names,grc-method}
+%
+%
+%\alias{formula,ANY-method}
+\alias{formula,vlm-method}
+\alias{formula,vgam-method}
+\alias{formula,vglm-method}
+\alias{formula,rrvglm-method}
+\alias{formula,qrrvglm-method}
+\alias{formula,grc-method}
+%\alias{formula,uqo-method}
+% \alias{formula,vsmooth.spline-method}
+%
+%
\alias{guplot,numeric-method}
\alias{guplot,vlm-method}
%\alias{model.frame,ANY-method}
@@ -113,12 +143,18 @@
\alias{predict,uqo-method}
\alias{predict,vsmooth.spline-method}
\alias{predict,vsmooth.spline.fit-method}
+%
+%
+% Added 20090505:
+%\alias{print,ANY-method}
+%
\alias{print,vglmff-method}
\alias{print,Coef.cao-method}
\alias{print,summary.cao-method}
\alias{print,qrrvglm-method}
\alias{print,Coef.qrrvglm-method}
\alias{print,rrvglm-method}
+% 20090505
\alias{print,summary.qrrvglm-method}
\alias{print,Coef.rrvglm-method}
\alias{print,vlm-method}
@@ -141,6 +177,7 @@
\alias{residuals,vgam-method}
\alias{residuals,uqo-method}
\alias{residuals,vsmooth.spline-method}
+\alias{resid,qrrvglm-method}
\alias{resid,vlm-method}
\alias{resid,vglm-method}
\alias{resid,vgam-method}
@@ -152,6 +189,7 @@
\alias{show,qrrvglm-method}
\alias{show,Coef.qrrvglm-method}
\alias{show,rrvglm-method}
+% 20090505
\alias{show,summary.qrrvglm-method}
\alias{show,Coef.rrvglm-method}
\alias{show,vlm-method}
@@ -228,6 +266,7 @@
%\keyword{ ~~ other possible keyword(s)}
\keyword{models}
\keyword{regression}
+\keyword{internal}
diff --git a/man/uqo.Rd b/man/uqo.Rd
index 82f4d7c..395267a 100644
--- a/man/uqo.Rd
+++ b/man/uqo.Rd
@@ -219,7 +219,6 @@ this is not done.
}
\examples{
\dontrun{
-data(hspider)
set.seed(123) # This leads to the global solution
hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
diff --git a/man/usagrain.Rd b/man/usagrain.Rd
index 0b254b2..88f76a8 100644
--- a/man/usagrain.Rd
+++ b/man/usagrain.Rd
@@ -1,7 +1,7 @@
\name{usagrain}
\alias{usagrain}
\docType{data}
-\title{USA grain prices}
+\title{USA Grain Prices Data}
\description{
A 4-column matrix.
@@ -33,7 +33,6 @@ Nested reduced-rank autoregressive models for multiple time series.
\bold{83}, 849--856.
}
\examples{
-data(usagrain)
cgrain = scale(usagrain, scale=FALSE) # Center the time series only
fit = vglm(cgrain ~ 1, rrar(Rank=c(4,1)),
eps=1e-3, step=0.5, trace=TRUE, maxit=40)
diff --git a/man/venice.Rd b/man/venice.Rd
index f1bbcb6..0f990fc 100644
--- a/man/venice.Rd
+++ b/man/venice.Rd
@@ -1,7 +1,7 @@
\name{venice}
\alias{venice}
\docType{data}
-\title{ Venice Maximum Sea Levels }
+\title{ Venice Maximum Sea Levels Data}
\description{
The maximum heights of sea levels recorded at Venice, Italy, between
1931 and 1981.
@@ -43,7 +43,6 @@ Maree estreme a Venezia (periodo 1872--1981).
}
\examples{
-data(venice)
\dontrun{
matplot(venice[["year"]], venice[,-1], xlab="Year", ylab="Sea level (cm)")
}
@@ -51,7 +50,7 @@ matplot(venice[["year"]], venice[,-1], xlab="Year", ylab="Sea level (cm)")
y = as.matrix(venice[,paste("r",1:10,sep="")])
fit1 = vgam(y ~ s(year, df=3), gumbel(R=365, mpv=TRUE),
data=venice, trace=TRUE, na.action=na.pass)
-fitted(fit1)[1:4,]
+head(fitted(fit1))
\dontrun{
par(mfrow=c(2,1), xpd=TRUE)
diff --git a/man/vgam-class.Rd b/man/vgam-class.Rd
index 784edc1..11ca00f 100644
--- a/man/vgam-class.Rd
+++ b/man/vgam-class.Rd
@@ -241,7 +241,6 @@ Vector generalized additive models.
\examples{
# Fit a nonparametric proportional odds model
-data(pneumo)
pneumo = transform(pneumo, let=log(exposure.time))
vgam(cbind(normal, mild, severe) ~ s(let),
cumulative(parallel=TRUE), pneumo)
diff --git a/man/vgam.Rd b/man/vgam.Rd
index 7013e1d..1477b8d 100644
--- a/man/vgam.Rd
+++ b/man/vgam.Rd
@@ -267,12 +267,10 @@ The \code{VGAM} Package.
\examples{
# Nonparametric proportional odds model
-data(pneumo)
pneumo = transform(pneumo, let=log(exposure.time))
vgam(cbind(normal,mild,severe) ~ s(let), cumulative(par=TRUE), pneumo)
# Nonparametric logistic regression
-data(hunua)
fit = vgam(agaaus ~ s(altitude, df=2), binomialff, hunua)
\dontrun{
plot(fit, se=TRUE)
diff --git a/man/vgam.control.Rd b/man/vgam.control.Rd
index 788dc32..f83bb5b 100644
--- a/man/vgam.control.Rd
+++ b/man/vgam.control.Rd
@@ -7,14 +7,12 @@
are set using this function.
}
\usage{
-vgam.control(all.knots = FALSE, backchat = if (is.R()) FALSE else TRUE,
- bf.epsilon = 1e-07, bf.maxit = 30,
- checkwz=TRUE,
- criterion = names(.min.criterion.VGAM),
+vgam.control(all.knots = FALSE, bf.epsilon = 1e-07, bf.maxit = 30,
+ checkwz=TRUE, criterion = names(.min.criterion.VGAM),
epsilon = 1e-07, maxit = 30, na.action = na.fail,
nk = NULL, save.weight = FALSE, se.fit = TRUE,
trace = FALSE, wzepsilon = .Machine$double.eps^0.75,
- xij = NULL, ...)
+ ...)
}
%- maybe also `usage' for other objects documented here.
\arguments{
@@ -35,10 +33,6 @@ in the formula.
so that the number of knots is approximately between 50 and 60
for large \eqn{n}.
}
- \item{backchat}{
- logical indicating if a backchat is to be used (not
- applicable in \R).
- }
\item{bf.epsilon}{
tolerance used by the modified vector
backfitting algorithm for testing convergence.
@@ -108,10 +102,12 @@ in the formula.
weight matrices are sufficiently positive.
}
- \item{xij}{
- formula giving terms making up a covariate-dependent term.
- }
+% \item{xij}{
+% formula giving terms making up a covariate-dependent term.
+%
+% }
+
\item{\dots}{
other parameters that may be picked up from control
functions that are specific to the \pkg{VGAM} family function.
@@ -157,6 +153,7 @@ Vector generalized additive models.
\seealso{
\code{\link{vgam}},
+ \code{\link{vglm.control}},
\code{\link{vsmooth.spline}},
\code{\link{vglm}}.
}
@@ -170,3 +167,10 @@ vgam(cbind(normal, mild, severe) ~ s(let, df=3), multinomial,
\keyword{models}
\keyword{regression}
\keyword{smooth}
+
+
+
+% xij = NULL,
+
+
+
diff --git a/man/vglm-class.Rd b/man/vglm-class.Rd
index 7e54b39..cc36354 100644
--- a/man/vglm-class.Rd
+++ b/man/vglm-class.Rd
@@ -223,7 +223,6 @@ Vector generalized additive models.
\examples{
# Multinomial logit model
-data(pneumo)
pneumo = transform(pneumo, let=log(exposure.time))
vglm(cbind(normal,mild,severe) ~ let, multinomial, pneumo)
}
diff --git a/man/vglm.Rd b/man/vglm.Rd
index 39a8de1..9af2e3a 100644
--- a/man/vglm.Rd
+++ b/man/vglm.Rd
@@ -5,7 +5,7 @@
\description{
\code{vglm} is used to fit vector generalized linear models (VGLMs).
This is a large class of models that includes
- generalized linear models (GLMs) as special cases.
+ generalized linear models (GLMs) as a special case.
}
\usage{
@@ -138,10 +138,12 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
}
\item{form2}{
The second (optional) formula.
- Some \pkg{VGAM} family functions such as \code{\link{micmen}})
- use this argument to onput the regressor variable.
+ If argument \code{xij} is used (see \code{\link{vglm.control}}) then
+ \code{form2} needs to have \emph{all} terms in the model.
+ Also, some \pkg{VGAM} family functions such as \code{\link{micmen}}
+ use this argument to input the regressor variable.
If given, the slots \code{@Xm2} and \code{@Ym2} may be assigned.
- Warning: smart prediction is not used for this argument.
+ Note that smart prediction applies to terms in \code{form2} too.
}
\item{qr.arg}{
@@ -172,7 +174,7 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
and
\eqn{\beta_j}{beta_j} is a vector of regression coefficients
to be estimated.
- Here, \eqn{j=1,\ldots,M} where \eqn{M} is finite.
+ Here, \eqn{j=1,\ldots,M}, where \eqn{M} is finite.
Then one can write
\eqn{\eta=(\eta_1,\ldots,\eta_M)^T}{eta=(eta_1,\ldots,\eta_M)^T}
as a vector of linear predictors.
@@ -210,7 +212,7 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
\item{effects}{the effects.}
\item{fitted.values}{
the fitted values, as a matrix.
- This is usually the mean but may be quantiles, or the location
+ This is often the mean but may be quantiles, or the location
parameter, e.g., in the Cauchy model.
}
@@ -221,7 +223,12 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
\item{post}{a list where post-analysis results may be put.}
\item{preplot}{used by \code{\link{plotvgam}}, the plotting parameters
may be put here.}
- \item{prior.weights}{initially supplied weights.}
+ \item{prior.weights}{
+ initially supplied weights
+ (the \code{weights} argument).
+ Also see \code{\link{weightsvglm}}.
+
+ }
\item{qr}{the QR decomposition used in the fitting.}
\item{R}{the \bold{R} matrix in the QR decomposition used in the fitting.}
\item{rank}{numerical rank of the fitted model.}
@@ -234,7 +241,7 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
}
\item{terms}{the \code{\link[stats]{terms}} object used.}
- \item{weights}{the weight matrices at the final IRLS iteration.
+ \item{weights}{the working weight matrices at the final IRLS iteration.
This is in matrix-band form.}
\item{x}{the model matrix (linear model LM, not VGLM).}
\item{xlevels}{the levels of the factors, if any, used in fitting.}
@@ -287,8 +294,8 @@ The \code{VGAM} Package.
(and also \code{\link[gam]{gam}} in the \pkg{gam} library and
\code{\link[mgcv]{gam}} in the \pkg{mgcv} library).
- The smart prediction (\code{\link{smartpred}}) library is packed with
- the \pkg{VGAM} library.
+ The smart prediction (\code{\link{smartpred}}) library is incorporated
+ within the \pkg{VGAM} library.
The theory behind the scaling parameter is currently being made more
rigorous, but it it should give the same value as the scale parameter
@@ -296,9 +303,13 @@ The \code{VGAM} Package.
In Example 5 below, the \code{xij} argument to illustrate covariates
that are specific to a linear predictor. Here, \code{lop}/\code{rop} are
- the ocular pressures of the left/right eye (artificial data). Variables
- \code{leye} and \code{reye} might be the presence/absence of a particular
- disease on the LHS/RHS eye respectively. See \code{\link{fill}}
+ the ocular pressures of the left/right eye (artificial data).
+ Variables \code{leye} and \code{reye} might be the presence/absence of
+ a particular disease on the LHS/RHS eye respectively.
+ See
+ \code{\link{vglm.control}}
+ and
+ \code{\link{fill}}
for more details and examples.
}
@@ -333,57 +344,102 @@ summary(vglm.D93)
# Example 2. Multinomial logit model
-data(pneumo)
pneumo = transform(pneumo, let=log(exposure.time))
vglm(cbind(normal, mild, severe) ~ let, multinomial, pneumo)
# Example 3. Proportional odds model
-fit = vglm(cbind(normal,mild,severe) ~ let, cumulative(par=TRUE), pneumo)
-coef(fit, matrix=TRUE)
-constraints(fit)
-fit at x # LM model matrix
-model.matrix(fit) # Larger VGLM model matrix
+fit3 = vglm(cbind(normal,mild,severe) ~ let, trace=TRUE,
+ cumulative(parallel=TRUE, reverse=TRUE), pneumo)
+coef(fit3, matrix=TRUE)
+constraints(fit3)
+model.matrix(fit3, type="lm") # LM model matrix
+model.matrix(fit3) # Larger VGLM (or VLM) model matrix
# Example 4. Bivariate logistic model
-data(coalminers)
-fit = vglm(cbind(nBnW, nBW, BnW, BW) ~ age, binom2.or, coalminers, trace=TRUE)
-coef(fit, matrix=TRUE)
-fit at y
-
-
-# Example 5. The use of the xij argument
-n = 1000
-eyes = data.frame(lop = runif(n), rop = runif(n))
-eyes = transform(eyes,
- leye = ifelse(runif(n) < logit(-1+2*lop, inverse=TRUE), 1, 0),
- reye = ifelse(runif(n) < logit(-1+2*rop, inverse=TRUE), 1, 0))
-fit = vglm(cbind(leye,reye) ~ lop + rop + fill(lop),
- binom2.or(exchangeable=TRUE, zero=3),
- xij = op ~ lop + rop + fill(lop), data=eyes)
-coef(fit)
-coef(fit, matrix=TRUE)
-coef(fit, matrix=TRUE, compress=FALSE)
-
-# Here's one method to handle the xij argument with a term that
-# produces more than one column in the model matrix.
-POLY3 = function(x, ...) {
- # A cubic
- poly(c(x,...), 3)[1:length(x),]
-}
+fit4 = vglm(cbind(nBnW, nBW, BnW, BW) ~ age, binom2.or, coalminers)
+coef(fit4, matrix=TRUE)
+fit4 at y # Response are proportions
+weights(fit4, type="prior")
+
+
+# Example 5. The use of the xij argument (simple case).
+# The constraint matrix for 'op' has one column.
+nn = 1000
+eyesdat = round(data.frame(lop = runif(nn),
+ rop = runif(nn),
+ op = runif(nn)), dig=2)
+eyesdat = transform(eyesdat, eta1 = -1+2*lop,
+ eta2 = -1+2*lop)
+eyesdat = transform(eyesdat,
+ leye = rbinom(nn, size=1, prob=logit(eta1, inv=TRUE)),
+ reye = rbinom(nn, size=1, prob=logit(eta2, inv=TRUE)))
+fit5 = vglm(cbind(leye,reye) ~ op,
+ binom2.or(exchangeable=TRUE, zero=3),
+ data=eyesdat, trace=TRUE,
+ xij = list(op ~ lop + rop + fill(lop)),
+ form2 = ~ op + lop + rop + fill(lop))
+coef(fit5)
+coef(fit5, matrix=TRUE)
+
-fit = vglm(cbind(leye,reye) ~ POLY3(lop,rop) + POLY3(rop,lop) + fill(POLY3(lop,rop)),
- binom2.or(exchangeable=TRUE, zero=3), data=eyes,
- xij = POLY3(op) ~ POLY3(lop,rop) + POLY3(rop,lop) +
- fill(POLY3(lop,rop)))
-coef(fit)
-coef(fit, matrix=TRUE)
-coef(fit, matrix=TRUE, compress=FALSE)
-predict(fit)[1:4,]
}
\keyword{models}
\keyword{regression}
-%eyes$leye = ifelse(runif(n) < 1/(1+exp(-1+2*eyes$lop)), 1, 0)
-%eyes$reye = ifelse(runif(n) < 1/(1+exp(-1+2*eyes$rop)), 1, 0)
+%eyesdat$leye = ifelse(runif(n) < 1/(1+exp(-1+2*eyesdat$lop)), 1, 0)
+%eyesdat$reye = ifelse(runif(n) < 1/(1+exp(-1+2*eyesdat$rop)), 1, 0)
+%coef(fit, matrix=TRUE, compress=FALSE)
+
+
+
+% 20090506 zz Put these examples elsewhere:
+%
+%# Example 6. The use of the xij argument (complex case).
+%# Here is one method to handle the xij argument with a term that
+%# produces more than one column in the model matrix.
+%# The constraint matrix for 'op' has essentially one column.
+%POLY3 = function(x, ...) {
+% # A cubic; ensures that the basis functions are the same.
+% poly(c(x,...), 3)[1:length(x),]
+% head(poly(c(x,...), 3), length(x), drop=FALSE)
+%}
+%
+%fit6 = vglm(cbind(leye,reye) ~ POLY3(op), trace=TRUE,
+% fam = binom2.or(exchangeable=TRUE, zero=3), data=eyesdat,
+% xij = list(POLY3(op) ~ POLY3(lop,rop) + POLY3(rop,lop) +
+% fill(POLY3(lop,rop))),
+% form2 = ~ POLY3(op) + POLY3(lop,rop) + POLY3(rop,lop) +
+% fill(POLY3(lop,rop)))
+%coef(fit6)
+%coef(fit6, matrix=TRUE)
+%head(predict(fit6))
+%\dontrun{
+%plotvgam(fit6, se=TRUE) # Wrong since it plots against op, not lop.
+%}
+%
+%
+%# Example 7. The use of the xij argument (simple case).
+%# Each constraint matrix has 4 columns.
+%ymat = rdiric(n <- 1000, shape=c(4,7,3,1))
+%mydat = data.frame(x1=runif(n), x2=runif(n), x3=runif(n), x4=runif(n),
+% z1=runif(n), z2=runif(n), z3=runif(n), z4=runif(n),
+% X2=runif(n), Z2=runif(n))
+%mydat = round(mydat, dig=2)
+%fit7 = vglm(ymat ~ X2 + Z2, data=mydat, crit="c",
+% fam = dirichlet(parallel=TRUE), # Intercept is also parallel.
+% xij = list(Z2 ~ z1 + z2 + z3 + z4,
+% X2 ~ x1 + x2 + x3 + x4),
+% form2 = ~ Z2 + z1 + z2 + z3 + z4 +
+% X2 + x1 + x2 + x3 + x4)
+%head(model.matrix(fit7, type="lm")) # LM model matrix
+%head(model.matrix(fit7, type="vlm")) # Big VLM model matrix
+%coef(fit7)
+%coef(fit7, matrix=TRUE)
+%max(abs(predict(fit7)-predict(fit7, new=mydat))) # Predicts correctly
+%summary(fit7)
+
+
+
+
diff --git a/man/vglm.control.Rd b/man/vglm.control.Rd
index c864ca4..21fb827 100644
--- a/man/vglm.control.Rd
+++ b/man/vglm.control.Rd
@@ -8,8 +8,7 @@
}
\usage{
-vglm.control(backchat = if (is.R()) FALSE else TRUE,
- checkwz=TRUE, criterion = names(.min.criterion.VGAM),
+vglm.control(checkwz=TRUE, criterion = names(.min.criterion.VGAM),
epsilon = 1e-07, half.stepsizing = TRUE,
maxit = 30, stepsize = 1, save.weight = FALSE,
trace = FALSE, wzepsilon = .Machine$double.eps^0.75,
@@ -17,11 +16,6 @@ vglm.control(backchat = if (is.R()) FALSE else TRUE,
}
%- maybe also `usage' for other objects documented here.
\arguments{
- \item{backchat}{
- logical indicating if a backchat is to be used
- (not applicable in \R).
-
- }
\item{checkwz}{
logical indicating whether the diagonal elements of
the working weight matrices should be checked whether they are
@@ -84,22 +78,44 @@ vglm.control(backchat = if (is.R()) FALSE else TRUE,
}
\item{wzepsilon}{
- Small positive number used to test whether the diagonals of the working
+ small positive number used to test whether the diagonals of the working
weight matrices are sufficiently positive.
}
\item{xij}{
- formula giving terms making up a covariate-dependent term (a variable
- that takes on different values for each linear/additive predictor.
- For example, the ocular pressure of each eye).
- There should be \eqn{M} unique terms; use
- \code{\link{fill1}}, \code{fill2}, \code{fill3}, etc. if necessary.
+ A formula or a list of formulas.
+ Each formula has a RHS giving \eqn{M} terms making up a
+ covariate-dependent term (whose name is the response).
+ That is, it creates a variable
+ that takes on different values for each linear/additive predictor,
+ e.g., the ocular pressure of each eye.
+ The \eqn{M} terms must be unique;
+ use \code{\link{fill1}}, \code{fill2}, \code{fill3}, etc. if necessary.
Each formula should have a response which is taken as the name of
- that variable, and the terms are enumerated in sequential order.
- With more than one formula, use a list of formulas.
- See Example 2 below.
+ that variable, and the \eqn{M} terms are enumerated in sequential order.
+ Each of the \eqn{M} terms multiply each successive row of the constraint
+ matrix.
+ When \code{xij} is used, the use of \code{form2} is also required
+ to give \emph{every} term used by the model.
}
+% \item{jix}{
+% A formula or a list of formulas specifying
+% which explanatory variables are to be plotted for each \code{xij} term.
+% For example, in the code below,
+% the term \code{BS(dumm)} could be plotted against either
+% \code{dum1} or \code{dum2}, therefore
+% either \code{jix=dum1} or \code{jix=dum2} are ok.
+% This argument is made use of by \code{plotvgam()}.
+% Each formula has a RHS giving \eqn{r_k} unique terms,
+% one for each column of the constraint matrix.
+% Each formula should have a response that matches the \code{formula} argument.
+% The argument \code{jix} is a reversal of \code{xij} to emphasize
+% the same framework for handling terms involving covariates that have
+% different values for each linear/additive predictor.
+%
+% }
+
\item{\dots}{
other parameters that may be picked up from control
functions that are specific to the \pkg{VGAM} family function.
@@ -143,35 +159,105 @@ These are handled using the \code{xij} argument.
\seealso{
\code{\link{vglm}},
\code{\link{fill}}.
+ The author's homepage has further documentation about the
+ \code{xij} argument.
+
}
\examples{
# Example 1.
-data(pneumo)
pneumo = transform(pneumo, let=log(exposure.time))
-vglm(cbind(normal,mild,severe) ~ let, multinomial, pneumo,
+vglm(cbind(normal,mild,severe) ~ let, multinomial, data=pneumo,
crit="coef", step=0.5, trace=TRUE, eps=1e-8, maxit=40)
-# Example 2. The use of the xij argument
-set.seed(111)
-n = 1000
-ymat = rdiric(n, shape=c(4,7,3,1))
+# Example 2. The use of the xij argument (simple case).
+ymat = rdiric(n <- 1000, shape=rep(exp(2), len=4))
mydat = data.frame(x1=runif(n), x2=runif(n), x3=runif(n), x4=runif(n),
z1=runif(n), z2=runif(n), z3=runif(n), z4=runif(n))
+mydat = transform(mydat, X=x1, Z=z1)
mydat = round(mydat, dig=2)
-fit = vglm(ymat ~ x1 + x2 + x3 + x4 + z1 + z2 + z3 + z4,
- fam = dirichlet, data=mydat, crit="c",
- xij = list(z ~ z1 + z2 + z3 + z4,
- x ~ x1 + x2 + x3 + x4))
-model.matrix(fit, type="lm")[1:7,] # LM model matrix
-model.matrix(fit, type="vlm")[1:7,] # Big VLM model matrix
-coef(fit)
-coef(fit, matrix=TRUE)
-coef(fit, matrix=TRUE, compress=FALSE)
-max(abs(predict(fit)-predict(fit, new=mydat))) # Predicts correctly
-summary(fit)
+fit2 = vglm(ymat ~ X + Z,
+ dirichlet(parallel=TRUE), data=mydat, trace=TRUE,
+ xij = list(Z ~ z1 + z2 + z3 + z4,
+ X ~ x1 + x2 + x3 + x4),
+ form2 = ~ Z + z1 + z2 + z3 + z4 +
+ X + x1 + x2 + x3 + x4)
+head(model.matrix(fit2, type="lm")) # LM model matrix
+head(model.matrix(fit2, type="vlm")) # Big VLM model matrix
+coef(fit2)
+coef(fit2, matrix=TRUE)
+max(abs(predict(fit2)-predict(fit2, new=mydat))) # Predicts correctly
+summary(fit2)
+\dontrun{
+# plotvgam(fit2, se=TRUE, xlab="x1", which.term=1) # Bug!
+# plotvgam(fit2, se=TRUE, xlab="z1", which.term=2) # Bug!
+plotvgam(fit2, xlab="x1") # Correct
+plotvgam(fit2, xlab="z1") # Correct
+}
+
+
+
+
+# Example 3. The use of the xij argument (complex case).
+set.seed(123)
+coalminers = transform(coalminers,
+ Age = (age - 42) / 5,
+ dum1 = round(runif(nrow(coalminers)), dig=2),
+ dum2 = round(runif(nrow(coalminers)), dig=2),
+ dum3 = round(runif(nrow(coalminers)), dig=2),
+ dumm = round(runif(nrow(coalminers)), dig=2))
+BS = function(x, ..., df=3) bs(c(x,...), df=df)[1:length(x),,drop=FALSE]
+NS = function(x, ..., df=3) ns(c(x,...), df=df)[1:length(x),,drop=FALSE]
+
+# Equivalently...
+BS = function(x, ..., df=3) head(bs(c(x,...), df=df), length(x), drop=FALSE)
+NS = function(x, ..., df=3) head(ns(c(x,...), df=df), length(x), drop=FALSE)
+
+fit3 = vglm(cbind(nBnW,nBW,BnW,BW) ~ Age + NS(dum1,dum2),
+ fam = binom2.or(exchang=TRUE, zero=3),
+ xij = list(NS(dum1,dum2) ~ NS(dum1,dum2) +
+ NS(dum2,dum1) +
+ fill(NS(dum1))),
+ form2 = ~ NS(dum1,dum2) + NS(dum2,dum1) + fill(NS(dum1)) +
+ dum1 + dum2 + dum3 + Age + age + dumm,
+ data = coalminers, trace=TRUE)
+head(model.matrix(fit3, type="lm")) # LM model matrix
+head(model.matrix(fit3, type="vlm")) # Big VLM model matrix
+coef(fit3)
+coef(fit3, matrix=TRUE)
+\dontrun{
+plotvgam(fit3, se=TRUE, lcol="red", scol="blue", xlab="dum1")
+}
}
\keyword{models}
\keyword{regression}
+
+
+
+
+% zz 20090506 put elsewhere:
+%
+%
+%# Example 4. The use of the xij argument (complex case).
+%# Here is one method to handle the xij argument with a term that
+%# produces more than one column in the model matrix.
+%# The constraint matrix for 'op' has one column.
+%POLY3 = function(x, ...) {
+% # A cubic; ensures that the basis functions are the same.
+% poly(c(x,...), 3)[1:length(x),]
+%}
+%
+%\dontrun{
+%fit4 = vglm(cbind(leye,reye) ~ POLY3(op), trace=TRUE,
+% fam = binom2.or(exchangeable=TRUE, zero=3), data=eyesdata,
+% xij = list(POLY3(op) ~ POLY3(lop,rop) + POLY3(rop,lop) +
+% fill(POLY3(lop,rop))),
+% form2 = ~ POLY3(op) + POLY3(lop,rop) + POLY3(rop,lop) +
+% fill(POLY3(lop,rop)))
+%coef(fit4)
+%coef(fit4, matrix=TRUE)
+%head(predict(fit4))
+%}
+
diff --git a/man/waitakere.Rd b/man/waitakere.Rd
index 19af06f..a8203ed 100644
--- a/man/waitakere.Rd
+++ b/man/waitakere.Rd
@@ -49,10 +49,9 @@
\code{\link{hunua}}.
}
\examples{
-data(waitakere)
fit = vgam(agaaus ~ s(altitude, df=2), binomialff, waitakere)
\dontrun{
plot(fit, se=TRUE, lcol="red", scol="blue") }
-predict(fit, waitakere, type="response")[1:3]
+head(predict(fit, waitakere, type="response"))
}
\keyword{datasets}
diff --git a/man/weightsvglm.Rd b/man/weightsvglm.Rd
index e088731..fec9f50 100644
--- a/man/weightsvglm.Rd
+++ b/man/weightsvglm.Rd
@@ -107,7 +107,6 @@ weightsvglm(object, type = c("prior", "working"),
\code{\link{vglm}}.
}
\examples{
-data(pneumo)
pneumo = transform(pneumo, let=log(exposure.time))
(fit = vglm(cbind(normal, mild, severe) ~ let,
cumulative(parallel=TRUE, reverse=TRUE), pneumo))
diff --git a/man/wffc.P2star.Rd b/man/wffc.P2star.Rd
index 767d69b..ad8110b 100644
--- a/man/wffc.P2star.Rd
+++ b/man/wffc.P2star.Rd
@@ -38,12 +38,11 @@ wffc.P2star(length, min.eligible = 0.18)
}
\references{
- \url{http://www.2008worldflyfishingchamps.com}
- is the official website.
+% \url{http://www.2008worldflyfishingchamps.com}
+% is the official website.
- Yee, T. W. (2008)
- Vector generalized linear and additive models, with
- applications to the 2008 World Fly Fishing Championships.
+ Yee, T. W. (2009)
+ VGLMs and VGAMs: an overview for applications in fisheries research.
In preparation.
}
@@ -58,7 +57,7 @@ wffc.P2star(length, min.eligible = 0.18)
\dontrun{
fishlength = seq(0.0, 0.72, by=0.001)
plot(fishlength, wffc.P2star(fishlength), type="l", col="blue",
- las=1, lty="dashed", lwd=2, las=1,
+ las=1, lty="dashed", lwd=2, las=1, cex.main=0.8,
xlab="Fish length (m)", ylab="Competition points",
main="Current (red) and proposed (blue) WFFC point system")
lines(fishlength, wffc.P1star(fishlength), type="l", col="red", lwd=2)
diff --git a/man/wffc.Rd b/man/wffc.Rd
index 7b98271..1fa8b69 100644
--- a/man/wffc.Rd
+++ b/man/wffc.Rd
@@ -1,7 +1,7 @@
\name{wffc}
\alias{wffc}
\docType{data}
-\title{ 2008 World Fly Fishing Championships }
+\title{ 2008 World Fly Fishing Championships Data}
\description{
Capture records of the 2008 FIPS-MOUCHE
World Fly Fishing Championships held in Rotorua, New Zealand during
@@ -55,7 +55,7 @@ Wales (WAL).
}
}
\details{
- Details may be obtained at the official website and Yee (2008).
+ Details may be obtained at Yee (2009b).
Here is a brief summary.
The three competition days were 28--30 March.
Each session was fixed at 9.00am--12.00pm and 2.30--5.30pm daily.
@@ -106,6 +106,19 @@ is rather poor and furthermore they were not recorded electronically.
Note that some fish may have been caught more than once, hence
these data do not represent individual fish but rather recorded captures.
+ Note also that a few internal discrepancies may be found within and between
+ the data frames
+ \code{\link{wffc}},
+ \code{\link{wffc.nc}},
+ \code{\link{wffc.indiv}},
+ \code{\link{wffc.teams}}.
+ This is due to various reasons, such as
+ competitors being replaced by reserves when sick,
+ fish that were included or excluded upon the local judge's decision,
+ competitors who fished two hours instead of three by mistake, etc.
+ The data has already been cleaned of errors and internal inconsistencies
+ but a few may remain.
+
}
\seealso{
@@ -128,26 +141,29 @@ is rather poor and furthermore they were not recorded electronically.
}
\references{
- \url{http://www.2008worldflyfishingchamps.com}
- is the official website.
+% \url{http://www.2008worldflyfishingchamps.com}
+% is the official website.
+
+ Yee, T. W. (2009a)
+ VGLMs and VGAMs: an overview for applications in fisheries research.
+ In preparation.
- Yee, T. W. (2008)
- Vector generalized linear and additive models, with
- applications to the 2008 World Fly Fishing Championships.
+ Yee, T. W. (2009b)
+ Some issues raised by the analysis of
+ the 2008 World Fly Fishing Championships data.
In preparation.
}
\examples{
-data(wffc)
summary(wffc)
-with(wffc, table(water,session))
+with(wffc, table(water, session))
# Obtain some simple plots
-waihou = with(wffc, wffc[water=="Waihou",])
-waimak = with(wffc, wffc[water=="Waimakariri",])
-whang = with(wffc, wffc[water=="Whanganui",])
-otam = with(wffc, wffc[water=="Otamangakau",])
-roto = with(wffc, wffc[water=="Rotoaira",])
+waihou = subset(wffc, water == "Waihou")
+waimak = subset(wffc, water == "Waimakariri")
+whang = subset(wffc, water == "Whanganui")
+otam = subset(wffc, water == "Otamangakau")
+roto = subset(wffc, water == "Rotoaira")
minlength = min(wffc[,"length"])
maxlength = max(wffc[,"length"])
nwater = c("Waihou"=nrow(waihou), "Waimakariri"=nrow(waimak),
@@ -157,11 +173,11 @@ nwater = c("Waihou"=nrow(waihou), "Waimakariri"=nrow(waimak),
par(mfrow=c(2,3), las=1)
# Overall distribution of length
with(wffc, boxplot(length/10 ~ water, ylim=c(minlength, maxlength)/10,
- border="blue", main="Length (cm)"))
+ border="blue", main="Length (cm)", cex.axis=0.5))
# Overall distribution of LOG length
with(wffc, boxplot(length/10 ~ water, ylim=c(minlength, maxlength)/10,
- border="blue", log="y",
+ border="blue", log="y", cex.axis=0.5,
main="Length (cm) on a log scale"))
# Overall distribution of number of captures
@@ -170,15 +186,16 @@ pie(nwater, border="blue", main="Proportion of captures",
angle=85+30* 1:length(nwater))
# Overall distribution of number of captures
-with(wffc, barplot(nwater, main="Number of captures", cex.names=0.5))
+with(wffc, barplot(nwater, main="Number of captures", cex.names=0.5,
+ col="lightblue"))
# Overall distribution of proportion of number of captures
-with(wffc, barplot(nwater / sum(nwater), cex.names=0.5,
+with(wffc, barplot(nwater / sum(nwater), cex.names=0.5, col="lightblue",
main="Proportion of captures"))
# An interesting lake
-with(roto, hist(length/10, xlab="Fish lengths (cm)", breaks=seq(18, 70, by=3),
- prob=TRUE, ylim=c(0, 0.08), border="blue", ylab="",
- main="Lake Rotoaira", lwd=2))
+with(roto, hist(length/10, xlab="Fish length (cm)", col="lightblue",
+ breaks=seq(18, 70, by=3), prob=TRUE, ylim=c(0, 0.08),
+ border="blue", ylab="", main="Lake Rotoaira", lwd=2))
}
}
\keyword{datasets}
diff --git a/man/wffc.indiv.Rd b/man/wffc.indiv.Rd
index d33cee8..8517296 100644
--- a/man/wffc.indiv.Rd
+++ b/man/wffc.indiv.Rd
@@ -1,7 +1,7 @@
\name{wffc.indiv}
\alias{wffc.indiv}
\docType{data}
-\title{ 2008 World Fly Fishing Championships (Individual results) }
+\title{ 2008 World Fly Fishing Championships (Individual results) Data}
\description{
Individual competitors' results of the 2008 FIPS-MOUCHE
World Fly Fishing Championships held in Rotorua, New Zealand during
@@ -32,18 +32,16 @@
details and links.
}
-\source{
- \url{http://www.2008worldflyfishingchamps.com/}.
-}
+%\source{
+% \url{http://www.2008worldflyfishingchamps.com/}.
+%}
\references{
- Yee, T. W. (2008)
- Vector generalized linear and additive models, with
- applications to the 2008 World Fly Fishing Championships.
+ Yee, T. W. (2009)
+ VGLMs and VGAMs: an overview for applications in fisheries research.
In preparation.
}
\examples{
-data(wffc.indiv)
summary(wffc.indiv)
}
\keyword{datasets}
diff --git a/man/wffc.nc.Rd b/man/wffc.nc.Rd
index a4619aa..318c36f 100644
--- a/man/wffc.nc.Rd
+++ b/man/wffc.nc.Rd
@@ -1,7 +1,7 @@
\name{wffc.nc}
\alias{wffc.nc}
\docType{data}
-\title{ 2008 World Fly Fishing Championships (Number of captures) }
+\title{ 2008 World Fly Fishing Championships (Number of captures) Data}
\description{
Number of captures in the 2008 FIPS-MOUCHE
World Fly Fishing Championships held in Rotorua, New Zealand during
@@ -32,13 +32,12 @@
these data do not represent individual fish.
}
-\source{
- \url{http://www.2008worldflyfishingchamps.com/}.
-}
+%\source{
+% \url{http://www.2008worldflyfishingchamps.com/}.
+%}
\references{
- Yee, T. W. (2008)
- Vector generalized linear and additive models, with
- applications to the 2008 World Fly Fishing Championships.
+ Yee, T. W. (2009)
+ VGLMs and VGAMs: an overview for applications in fisheries research.
In preparation.
}
@@ -46,7 +45,10 @@
\seealso{ \code{\link{DeLury}}. }
\examples{
-data(wffc.nc)
-with(wffc.nc, table(sector, session))
+xtabs( ~ sector + session, wffc.nc)
}
\keyword{datasets}
+
+% with(wffc.nc, table(sector, session))
+
+
diff --git a/man/wffc.teams.Rd b/man/wffc.teams.Rd
index 5c40f8e..596aefa 100644
--- a/man/wffc.teams.Rd
+++ b/man/wffc.teams.Rd
@@ -1,7 +1,7 @@
\name{wffc.teams}
\alias{wffc.teams}
\docType{data}
-\title{ 2008 World Fly Fishing Championships (Team results) }
+\title{ 2008 World Fly Fishing Championships (Team results) Data}
\description{
Team results of the 2008 FIPS-MOUCHE
World Fly Fishing Championships held in Rotorua, New Zealand during
@@ -27,18 +27,16 @@
details and links.
}
-\source{
- \url{http://www.2008worldflyfishingchamps.com/}.
-}
+%\source{
+% \url{http://www.2008worldflyfishingchamps.com/}.
+%}
\references{
- Yee, T. W. (2008)
- Vector generalized linear and additive models, with
- applications to the 2008 World Fly Fishing Championships.
+ Yee, T. W. (2009)
+ VGLMs and VGAMs: an overview for applications in fisheries research.
In preparation.
}
\examples{
-data(wffc.teams)
wffc.teams
}
\keyword{datasets}
diff --git a/man/yip88.Rd b/man/yip88.Rd
index a784410..250b219 100644
--- a/man/yip88.Rd
+++ b/man/yip88.Rd
@@ -106,27 +106,24 @@ contains further information and examples.
\examples{
# Generate some artificial data
-n = 1000
-phi = 0.35
-lambda = 2
-y = rzipois(n, lambda, phi)
+phi = 0.35; lambda = 2
+y = rzipois(n <- 1000, lambda, phi)
table(y)
# Two equivalent ways of fitting the same model
fit1 = vglm(y ~ 1, yip88(n=length(y)), subset=y>0, trace=TRUE, crit="c")
fit2 = vglm(y ~ 1, yip88, trace=TRUE, crit="c")
-true.mean = (1-phi) * lambda
-true.mean
+(true.mean = (1-phi) * lambda)
mean(y)
-fitted(fit1)[1:5,]
+head(fitted(fit1))
fit1 at misc$phi # The estimate of phi
# Compare the ZIP with the positive Poisson distribution
pp = vglm(y ~ 1, pospoisson, subset=y>0, tr=TRUE, crit="c")
coef(pp)
Coef(pp)
-(coef(fit1) - coef(pp)) # Same
-((fitted(fit1) - fitted(pp))[1:5,]) # Different
+coef(fit1) - coef(pp) # Same
+head(fitted(fit1) - fitted(pp)) # Different
# Another example ------------------------------
y = 0:7 # Data from Angers and Biswas (2003)
@@ -139,7 +136,7 @@ fit3 = vglm(yy ~ 1, yip88(n=length(yy)), subset=yy>0, trace=TRUE, crit="c")
fit3 at misc$phi # Estimate of phi (they get 0.5154 with standard error 0.0707)
coef(fit3, matrix=TRUE)
Coef(fit3) # Estimate of lambda (they get 0.6997 with standard error 0.1520)
-fitted(fit3)[1:5]
+head(fitted(fit3))
mean(yy) # compare this with fitted(fit3)
}
\keyword{models}
diff --git a/man/zanegbinUC.Rd b/man/zanegbinUC.Rd
index cdd6baa..21d8dd1 100644
--- a/man/zanegbinUC.Rd
+++ b/man/zanegbinUC.Rd
@@ -21,7 +21,8 @@ rzanegbin(n, p0, size, prob=NULL, munb=NULL)
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
- \item{n}{number of observations. Must be a single positive integer. }
+ \item{n}{number of observations.
+ If \code{length(n) > 1} then the length is taken to be the number required. }
\item{size, prob, munb, log}{
Parameters from the ordinary negative binomial distribution
(see \code{\link[stats:NegBinomial]{dnbinom}}).
@@ -37,12 +38,15 @@ rzanegbin(n, p0, size, prob=NULL, munb=NULL)
}
\details{
The probability function of \eqn{Y} is 0 with probability \code{p0},
- else a positive \eqn{negative binomial(\mu_{nb}, size)}{negative binomial(munb, size)}
+ else a positive
+ \eqn{negative binomial(\mu_{nb}, size)}{negative binomial(munb, size)}
distribution.
}
\value{
\code{dzanegbin} gives the density and
+ \code{pzanegbin} gives the distribution function,
+ \code{qzanegbin} gives the quantile function, and
\code{rzanegbin} generates random deviates.
}
%\references{ }
@@ -59,11 +63,9 @@ rzanegbin(n, p0, size, prob=NULL, munb=NULL)
}
\examples{
-munb = 3
-size = 4
-p0 = 0.3
+munb = 3; size = 4; p0 = 0.3
x = (-1):7
-(i = dzanegbin(x, p0=p0, munb=munb, size=size))
+(ii = dzanegbin(x, p0=p0, munb=munb, size=size))
table(rzanegbin(100, p0=p0, munb=munb, size=size))
\dontrun{
@@ -73,7 +75,7 @@ barplot(rbind(dzanegbin(x, p0=p0, munb=munb, size=size),
beside = TRUE, col = c("blue","green"),
main=paste("ZANB(p0=", p0, ", munb=", munb, ", size=", size, ") (blue) vs",
" NB(mu=", munb, ", size=", size, ") (green)", sep=""),
- names.arg = as.character(x))
+ names.arg = as.character(x), cex.main=0.7, las=1)
}
}
\keyword{distribution}
diff --git a/man/zanegbinomial.Rd b/man/zanegbinomial.Rd
index 9053ebe..8fc0883 100644
--- a/man/zanegbinomial.Rd
+++ b/man/zanegbinomial.Rd
@@ -164,8 +164,8 @@ table(y2)
fit = vglm(cbind(y1,y2) ~ x, zanegbinomial, trace=TRUE)
coef(fit, matrix=TRUE)
-fitted(fit)[1:3,]
-predict(fit)[1:3,]
+head(fitted(fit))
+head(predict(fit))
}
\keyword{models}
\keyword{regression}
diff --git a/man/zapoisUC.Rd b/man/zapoisUC.Rd
index 0d197cb..8125b3e 100644
--- a/man/zapoisUC.Rd
+++ b/man/zapoisUC.Rd
@@ -21,7 +21,8 @@ rzapois(n, lambda, p0 = 0)
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
- \item{n}{number of observations. Must be a single positive integer. }
+ \item{n}{number of observations.
+ If \code{length(n) > 1} then the length is taken to be the number required. }
\item{lambda}{ Vector of positive means. }
\item{p0}{
Probability of zero, called \eqn{p0}.
diff --git a/man/zapoisson.Rd b/man/zapoisson.Rd
index f9f210f..8131b81 100644
--- a/man/zapoisson.Rd
+++ b/man/zapoisson.Rd
@@ -138,9 +138,9 @@ y = ifelse(runif(n) < p0, 0, rpospois(n, lambda))
table(y)
fit = vglm(y ~ x, zapoisson, trace=TRUE)
fit = vglm(y ~ x, zapoisson, trace=TRUE, crit="c")
-fitted(fit)[1:5]
-predict(fit)[1:5,]
-predict(fit, untransform=TRUE)[1:5,]
+head(fitted(fit))
+head(predict(fit))
+head(predict(fit, untransform=TRUE))
coef(fit, matrix=TRUE)
@@ -153,7 +153,7 @@ yy = rep(y,w)
fit3 = vglm(yy ~ 1, zapoisson, trace=TRUE, crit="c")
coef(fit3, matrix=TRUE)
Coef(fit3) # Estimate of lambda (they get 0.6997 with standard error 0.1520)
-fitted(fit3)[1:5]
+head(fitted(fit3))
mean(yy) # compare this with fitted(fit3)
}
\keyword{models}
diff --git a/man/zero.Rd b/man/zero.Rd
index 8320eb6..ac24d13 100644
--- a/man/zero.Rd
+++ b/man/zero.Rd
@@ -89,7 +89,6 @@ args(gpd)
#LMS quantile regression example
-data(bminz)
fit = vglm(BMI ~ bs(age, df=4), fam=lms.bcg(zero=c(1,3)),
data=bminz, trace=TRUE)
coef(fit, matrix=TRUE)
diff --git a/man/dzeta.Rd b/man/zetaUC.Rd
similarity index 81%
rename from man/dzeta.Rd
rename to man/zetaUC.Rd
index 0bd5835..949f7b9 100644
--- a/man/dzeta.Rd
+++ b/man/zetaUC.Rd
@@ -8,18 +8,24 @@
}
% zz p is not a good argument name, esp. with qzeta(p, p)
\usage{
-dzeta(x, p)
+dzeta(x, p, log=FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{ Numerical vector/matrix to evaluate the density. }
- \item{p}{ The parameter \eqn{p}. This must be greater than 1. }
+ \item{p}{ The parameter \eqn{p}. This must be positive. }
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\details{
The density function of the zeta distribution is given by
- \deqn{y^{-p} / \zeta(p)}{%
- y^(-p) / zeta(p)}
- where \eqn{p>1}, \eqn{y=1,2,\ldots}, and \eqn{\zeta}{zeta} is
+ \deqn{y^{-p-1} / \zeta(p+1)}{%
+ y^(-p-1) / zeta(p+1)}
+ where \eqn{p>0}, \eqn{y=1,2,\ldots}, and \eqn{\zeta}{zeta} is
Riemann's zeta function.
}
diff --git a/man/zibinomial.Rd b/man/zibinomial.Rd
index e48f7d9..55c33a9 100644
--- a/man/zibinomial.Rd
+++ b/man/zibinomial.Rd
@@ -8,19 +8,15 @@
}
\usage{
-zibinomial(lphi="logit", link.mu="logit",
+zibinomial(lphi="logit", lmu="logit",
ephi=list(), emu=list(),
iphi=NULL, zero=1, mv=FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lphi}{
- Link function for the parameter \eqn{\phi}{phi}.
- See \code{\link{Links}} for more choices.
-
- }
- \item{link.mu}{
- Link function for the usual binomial probability \eqn{\mu}{mu} parameter.
+ \item{lphi, lmu}{
+ Link functions for the parameter \eqn{\phi}{phi}
+ and the usual binomial probability \eqn{\mu}{mu} parameter.
See \code{\link{Links}} for more choices.
}
@@ -112,18 +108,19 @@ zibinomial(lphi="logit", link.mu="logit",
}
\examples{
size = 10 # number of trials; N in the notation above
-n = 200
-phi = logit(0,inv=TRUE) # 0.50
-mubin = logit(-1,inv=TRUE) # Mean of an ordinary binomial distribution
-sv = rep(size, len=n)
-y = rzibinom(n=n, size=sv, prob=mubin, phi=phi) / sv # A proportion
-table(y)
-fit = vglm(y ~ 1, zibinomial, weight=sv, trace=TRUE)
+nn = 200
+zibdata = data.frame(phi = logit(0, inv=TRUE), # 0.50
+ mubin = logit(-1, inv=TRUE), # Mean of usual binomial
+ sv = rep(size, len=nn))
+zibdata = transform(zibdata,
+ y = rzibinom(n=nn, size=sv, prob=mubin, phi=phi)/sv)
+with(zibdata, table(y))
+fit = vglm(y ~ 1, zibinomial, weight=sv, data=zibdata, trace=TRUE)
coef(fit, matrix=TRUE)
Coef(fit) # Useful for intercept-only models
fit at misc$p0 # Estimate of P(Y=0)
-fitted(fit)[1:4,]
-mean(y) # Compare this with fitted(fit)
+head(fitted(fit))
+with(zibdata, mean(y)) # Compare this with fitted(fit)
summary(fit)
}
\keyword{models}
diff --git a/man/zinegbinomial.Rd b/man/zinegbinomial.Rd
index f268741..1af6e33 100644
--- a/man/zinegbinomial.Rd
+++ b/man/zinegbinomial.Rd
@@ -140,7 +140,7 @@ table(y2)["0"] / sum(table(y2))
fit = vglm(cbind(y1,y2) ~ x, zinegbinomial(zero=NULL), trace=TRUE)
coef(fit, matrix=TRUE)
summary(fit)
-cbind(fitted(fit), (1-phi) * munb)[1:5,]
+head(cbind(fitted(fit), (1-phi) * munb))
vcov(fit)
}
\keyword{models}
diff --git a/man/zipebcom.Rd b/man/zipebcom.Rd
index e61d684..de6dc57 100644
--- a/man/zipebcom.Rd
+++ b/man/zipebcom.Rd
@@ -147,12 +147,21 @@ zipebcom(lmu12="cloglog", lphi12="logit", loratio="loge",
generic function.
}
+\section{Warning }{
+ The fact that the EIM is not of full rank may mean the model is
+ naturally ill-conditioned.
+ Not sure whether there are any negative consequences wrt theory.
+ For now
+ it is certainly safer to fit \code{\link{binom2.or}} to bivariate binary
+ responses.
+
+}
\references{
Yee, T. W. and Dirnbock, T. (2009)
- Models for species' presence/absence data at two time
- points based on constrained ordination and odds ratio.
- In preparation.
+ Models for analysing species' presence/absence data
+ at two time points.
+ Journal of Theoretical Biology, \bold{259}. In press.
}
\author{ Thomas W. Yee }
@@ -167,6 +176,11 @@ zipebcom(lmu12="cloglog", lphi12="logit", loratio="loge",
\pkg{VGAM} family function too.
Even better initial values are usually needed here.
+ The \code{xij} (see \code{\link{vglm.control}}) argument enables
+ environmental variables with different values at the two time points
+ to be entered into an exchangeable \code{\link{binom2.or}} model.
+ See the author's webpage for sample code.
+
}
\seealso{
\code{\link{binom2.or}},
diff --git a/man/zipfUC.Rd b/man/zipfUC.Rd
index adf3a77..66a1def 100644
--- a/man/zipfUC.Rd
+++ b/man/zipfUC.Rd
@@ -11,7 +11,7 @@
}
\usage{
-dzipf(x, N, s)
+dzipf(x, N, s, log = FALSE)
pzipf(q, N, s)
}
\arguments{
@@ -25,6 +25,12 @@ pzipf(q, N, s)
See \code{\link{zipf}} for more details.
}
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
}
\value{
\code{dzipf} gives the density, and
diff --git a/src/vgam.f b/src/vgam.f
index 424ea3a..057c22b 100644
--- a/src/vgam.f
+++ b/src/vgam.f
@@ -766,11 +766,11 @@
23298 continue
return
end
- subroutine vbfa(d8gwha,n,lku8xq,gqai81, p3vlea,jmwo0z,w8xfic,
- &l6xrjt,dof, ynk9ah,uxs1iq,vliac4, vfd2pw,sazp9g,go0l1q,s0, lq8reh,
- &zo5jyl,h4fgoy, ioqzvb,i0qvzl, i83h1, xbig, jrxg6l, ifo4ew, ozuw3p,
- & hwi2tb, nbd5rl, wj5shg, ykdc2t, wk2, zxao0o, phqco4, vb81l0, bmb,
- & rjcq9o, mwk, t5vlzq, j1l0o1, qc7zyb, das4bx, vlni8d, jko0o1,
+ subroutine vbfa( n,lku8xq,gqai81, p3vlea,jmwo0z,w8xfic,l6xrjt,dof,
+ & ynk9ah,uxs1iq,vliac4, vfd2pw,sazp9g,go0l1q,s0, lq8reh,zo5jyl,
+ &h4fgoy, ioqzvb,i0qvzl, i83h1, xbig, jrxg6l, ifo4ew, ozuw3p,
+ &hwi2tb, nbd5rl, wj5shg, ykdc2t, wk2, zxao0o, phqco4, vb81l0, bmb,
+ &rjcq9o, mwk, t5vlzq, j1l0o1, qc7zyb, das4bx, vlni8d, jko0o1,
&mnh3up, fg3pxq)
implicit logical (a-z)
integer d8gwha, n, lku8xq, gqai81(15), ynk9ah(1),uxs1iq(1),vliac4(
@@ -788,6 +788,7 @@
integer ucgi1r
integer sehz7y
integer w3gohz, j0qwtz, zx1610
+ d8gwha = 0
j0qwtz = lku8xq*(lku8xq+1)/2
p=gqai81(2)
q=gqai81(3)
@@ -1262,98 +1263,3 @@
23474 continue
return
end
- subroutine vglmf(xbig,c4bdmu,o9ljyn,d8gwha,nfiumb4, jmwo0z,lq8reh,
- &go0l1q,blq5vu, ioqzvb,i0qvzl, zx1610,i83h1, bgu6fw,zxao0o,jrxg6l,
- &lku8xq,zkjqhi,xhe4cg, zqve1l, vvl1li, cpxbig, das4bx)
- implicit logical (a-z)
- integer c4bdmu,o9ljyn,d8gwha,nfiumb4, zx1610,i83h1(o9ljyn),
- &lku8xq,zkjqhi,xhe4cg, zqve1l(1), vvl1li(1), cpxbig
- double precision xbig(c4bdmu,o9ljyn), jmwo0z(nfiumb4,lku8xq),
- &lq8reh(o9ljyn),go0l1q(lku8xq,nfiumb4),blq5vu(c4bdmu), ioqzvb(
- &c4bdmu,o9ljyn),i0qvzl(o9ljyn), bgu6fw(1), zxao0o(lku8xq,lku8xq,5),
- & jrxg6l(1), das4bx
- integer sehz7y
- call qh4ulb(zqve1l,vvl1li,lku8xq)
- sehz7y = 1
-23476 if(.not.(sehz7y .ne. 0))goto 23477
- call vfit(lku8xq,c4bdmu,o9ljyn,nfiumb4, xbig,jmwo0z,lq8reh,go0l1q,
- &blq5vu, jrxg6l,ioqzvb,i0qvzl,das4bx, zx1610,i83h1, bgu6fw,zxao0o,
- &xhe4cg,zkjqhi,zqve1l,vvl1li)
- if(.not.(d8gwha .ne. 0))goto 23478
- call vcall1(sehz7y,jmwo0z,go0l1q,lq8reh,jrxg6l,xbig,cpxbig)
- goto 23479
-23478 continue
- sehz7y= 0
-23479 continue
- if(.not.(sehz7y .ne. 0))goto 23480
- zx1610=0
-23480 continue
- goto 23476
-23477 continue
- return
- end
- subroutine vfit(lku8xq,c4bdmu,o9ljyn,nfiumb4, xbig,jmwo0z,lq8reh,
- &go0l1q,blq5vu, jrxg6l,ioqzvb,i0qvzl,das4bx, zx1610,i83h1, bgu6fw,
- &zxao0o, xhe4cg, zkjqhi, zqve1l, vvl1li)
- implicit logical (a-z)
- integer lku8xq, c4bdmu, o9ljyn, nfiumb4, zx1610, i83h1(o9ljyn),
- &xhe4cg, zkjqhi, zqve1l(1), vvl1li(1)
- double precision xbig(c4bdmu,o9ljyn), jmwo0z(nfiumb4,lku8xq),
- &lq8reh(o9ljyn), go0l1q(lku8xq,nfiumb4), blq5vu(c4bdmu), jrxg6l(
- &zkjqhi,nfiumb4), ioqzvb(c4bdmu,o9ljyn), i0qvzl(o9ljyn), das4bx,
- &bgu6fw(c4bdmu), zxao0o(lku8xq,lku8xq,5)
- integer w3gohz, d9rjek, nd6mep, xi1mqb, hv3wja, tvyd2b, fjg0qv
- double precision gwu72m, dyb3po, t7sbea
- t7sbea=1.0d-7
- tvyd2b=101
- fjg0qv=1
- dyb3po=1.0d0
- if(.not.(zx1610 .eq. 0))goto 23482
- do 23484 d9rjek=1,o9ljyn
- do 23486 w3gohz=1,c4bdmu
- ioqzvb(w3gohz,d9rjek) = xbig(w3gohz,d9rjek)
-23486 continue
-23484 continue
- do 23488 nd6mep=1,o9ljyn
- i83h1(nd6mep) = nd6mep
-23488 continue
- call mux17f(jrxg6l, ioqzvb, lku8xq, o9ljyn, nfiumb4, zxao0o(1,1,1)
- &, zxao0o(1,1,2), zqve1l, vvl1li, zkjqhi, c4bdmu)
- call dhkt9w(ioqzvb,c4bdmu,c4bdmu,o9ljyn,i0qvzl,i83h1,bgu6fw,
- &zx1610,t7sbea)
-23482 continue
- call mux22f(jrxg6l,jmwo0z,bgu6fw,zkjqhi,zqve1l,vvl1li,nfiumb4,
- &lku8xq,zxao0o)
- nd6mep=1
- do 23490 d9rjek=1,lku8xq
- do 23492 w3gohz=1,nfiumb4
- jmwo0z(w3gohz,d9rjek)=bgu6fw(nd6mep)
- nd6mep=nd6mep+1
-23492 continue
-23490 continue
- call vdqrsl(ioqzvb,c4bdmu,c4bdmu,zx1610,i0qvzl,jmwo0z,bgu6fw(1),
- &blq5vu,lq8reh, bgu6fw(1),go0l1q,tvyd2b,fjg0qv)
- das4bx=0.0d0
- xi1mqb=0
- hv3wja=1
- do 23494 w3gohz=1,nfiumb4
- do 23496 d9rjek=1,lku8xq
- xi1mqb = xi1mqb + 1
- if(.not.(xi1mqb .gt. nfiumb4))goto 23498
- xi1mqb = 1
- hv3wja = hv3wja + 1
-23498 continue
- gwu72m = jmwo0z(xi1mqb,hv3wja) - go0l1q(d9rjek,w3gohz)
- das4bx = das4bx + gwu72m * gwu72m
-23496 continue
-23494 continue
- call vbksf(jrxg6l,go0l1q,lku8xq,nfiumb4,zxao0o,zqve1l,vvl1li,
- &xhe4cg)
- do 23500 d9rjek=1,o9ljyn
- bgu6fw(d9rjek) = lq8reh(d9rjek)
-23500 continue
- do 23502 d9rjek=1,o9ljyn
- lq8reh(i83h1(d9rjek)) = bgu6fw(d9rjek)
-23502 continue
- return
- end
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-vgam.git
More information about the debian-science-commits
mailing list