[r-cran-vgam] 21/63: Import Upstream version 0.8-2
Andreas Tille
tille at debian.org
Tue Jan 24 13:54:26 UTC 2017
This is an automated email from the git hooks/post-receive script.
tille pushed a commit to branch master
in repository r-cran-vgam.
commit 2cb64c15bf4defd0a46c567c2b8eb4f0b43b77e9
Author: Andreas Tille <tille at debian.org>
Date: Tue Jan 24 14:16:51 2017 +0100
Import Upstream version 0.8-2
---
DESCRIPTION | 8 +-
NAMESPACE | 56 +-
NEWS | 59 +
R/aamethods.q | 14 +-
R/add1.vglm.q | 3 +-
R/attrassign.R | 3 +-
R/bAIC.q | 11 +-
R/build.terms.vlm.q | 3 +-
R/calibrate.q | 3 +-
R/cao.R | 3 +-
R/cao.fit.q | 42 +-
R/coef.vlm.q | 3 +-
R/cqo.R | 3 +-
R/cqo.fit.q | 57 +-
R/deviance.vlm.q | 3 +-
R/effects.vglm.q | 3 +-
R/family.aunivariate.R | 1123 ++---
R/family.basics.R | 334 +-
R/family.binomial.R | 940 ++--
R/family.bivariate.R | 33 +-
R/family.categorical.R | 219 +-
R/family.censored.R | 662 ++-
R/family.circular.R | 9 +-
R/family.exp.R | 340 +-
R/family.extremes.R | 1517 +++----
R/family.fishing.R | 92 +-
R/family.functions.R | 113 +-
R/family.genetic.R | 723 ++--
R/family.glmgam.R | 373 +-
R/family.loglin.R | 112 +-
R/family.math.R | 67 +
R/family.mixture.R | 12 +-
R/family.nonlinear.R | 577 ++-
R/family.normal.R | 1236 ++++--
R/family.positive.R | 515 ++-
R/family.qreg.R | 3132 ++++++++++++--
R/family.rcam.R | 1316 ++++++
R/family.rcqo.R | 3 +-
R/family.robust.R | 259 ++
R/family.rrr.R | 847 ++--
R/family.survival.R | 96 +-
R/family.ts.R | 3 +-
R/family.univariate.R | 9832 +++++++++++++++++-------------------------
R/family.vglm.R | 3 +-
R/family.zeroinf.R | 1214 ++++--
R/fitted.vlm.R | 3 +-
R/formula.vlm.q | 3 +-
R/generic.q | 3 +-
R/links.q | 207 +-
R/logLik.vlm.q | 55 +-
R/model.matrix.vglm.q | 3 +-
R/mux.q | 214 +-
R/plot.vglm.q | 3 +-
R/predict.vgam.q | 3 +-
R/predict.vglm.q | 3 +-
R/predict.vlm.q | 3 +-
R/print.vglm.q | 3 +-
R/print.vlm.q | 3 +-
R/qrrvglm.control.q | 5 +-
R/qtplot.q | 3 +-
R/residuals.vlm.q | 3 +-
R/rrvglm.R | 3 +-
R/rrvglm.control.q | 24 +-
R/rrvglm.fit.q | 37 +-
R/s.q | 3 +-
R/s.vam.q | 3 +-
R/smart.R | 357 +-
R/step.vglm.q | 3 +-
R/summary.vgam.q | 3 +-
R/summary.vglm.q | 3 +-
R/summary.vlm.q | 3 +-
R/uqo.R | 11 +-
R/vgam.R | 3 +-
R/vgam.control.q | 3 +-
R/vgam.fit.q | 22 +-
R/vgam.match.q | 3 +-
R/vglm.R | 3 +-
R/vglm.control.q | 42 +-
R/vglm.fit.q | 24 +-
R/vlm.R | 3 +-
R/vlm.wfit.q | 3 +-
R/vsmooth.spline.q | 3 +-
R/zzz.R | 3 +-
data/alclevels.R | 22 +
data/alcoff.R | 22 +
data/azprocedure.R | 2289 ++++++++++
data/crashbc.R | 18 +
data/crashf.R | 18 +
data/crashi.R | 21 +
data/crashmc.R | 19 +
data/crashp.R | 18 +
data/crashtr.R | 18 +
data/gala.R | 28 +
data/mmt.R | 328 ++
inst/doc/categoricalVGAM.pdf | Bin 2958722 -> 2955340 bytes
man/CommonVGAMffArguments.Rd | 144 +-
man/Inv.gaussian.Rd | 26 +-
man/Links.Rd | 86 +-
man/RayleighUC.Rd | 42 +-
man/Rcam.Rd | 72 +
man/VGAM-package.Rd | 6 +-
man/acat.Rd | 12 +-
man/alaplace3.Rd | 176 +-
man/alaplaceUC.Rd | 38 +-
man/amh.Rd | 25 +-
man/amlbinomial.Rd | 20 +-
man/auuc.Rd | 10 +-
man/betabin.ab.Rd | 32 +-
man/betabinomial.Rd | 43 +-
man/binom2.or.Rd | 18 +-
man/binom2.rho.Rd | 88 +-
man/binom2.rhoUC.Rd | 38 +-
man/binomialff.Rd | 64 +-
man/bisa.Rd | 41 +-
man/cauchit.Rd | 1 +
man/cauchy.Rd | 32 +-
man/chisq.Rd | 15 +-
man/cloglog.Rd | 1 +
man/cnormal1.Rd | 13 +-
man/constraints.Rd | 69 +-
man/crashes.Rd | 131 +
man/cratio.Rd | 13 +-
man/cumulative.Rd | 56 +-
man/dexpbinomial.Rd | 85 +-
man/dirmul.old.Rd | 58 +-
man/dirmultinomial.Rd | 36 +-
man/eunifUC.Rd | 3 +-
man/expgeometric.Rd | 90 +
man/exponential.Rd | 26 +-
man/gamma2.Rd | 23 +-
man/gamma2.ab.Rd | 43 +-
man/gaussianff.Rd | 1 +
man/gengamma.Rd | 9 +-
man/genpoisson.Rd | 35 +-
man/genrayleigh.Rd | 94 +
man/genrayleighUC.Rd | 78 +
man/geometric.Rd | 1 +
man/get.smart.Rd | 11 +-
man/grc.Rd | 230 +-
man/huber.Rd | 109 +
man/huberUC.Rd | 111 +
man/inv.gaussianff.Rd | 34 +-
man/is.smart.Rd | 11 +-
man/koenker.Rd | 120 +
man/koenkerUC.Rd | 93 +
man/kumar.Rd | 46 +-
man/kumarUC.Rd | 37 +-
man/lambertW.Rd | 80 +
man/laplace.Rd | 17 +-
man/laplaceUC.Rd | 39 +-
man/loge.Rd | 12 +-
man/logistic.Rd | 108 +-
man/logit.Rd | 1 +
man/loglaplace.Rd | 2 +-
man/lognormal.Rd | 44 +-
man/maxwellUC.Rd | 40 +-
man/micmen.Rd | 43 +-
man/moffset.Rd | 92 +
man/multinomial.Rd | 100 +-
man/negbinomial.Rd | 145 +-
man/normal1.Rd | 53 +-
man/notdocumentedyet.Rd | 39 +-
man/plotdeplot.lmscreg.Rd | 72 +-
man/plotqrrvglm.Rd | 12 +-
man/plotrcam0.Rd | 173 +
man/posbinomUC.Rd | 7 +-
man/posbinomial.Rd | 11 +-
man/posnegbinUC.Rd | 23 +-
man/posnegbinomial.Rd | 55 +-
man/pospoisUC.Rd | 44 +-
man/pospoisson.Rd | 4 +-
man/prentice74.Rd | 10 +-
man/probit.Rd | 1 +
man/propodds.Rd | 20 +-
man/put.smart.Rd | 9 +
man/quasipoissonff.Rd | 11 +-
man/rayleigh.Rd | 67 +-
man/rrvglm-class.Rd | 17 +-
man/rrvglm.Rd | 202 +-
man/rrvglm.control.Rd | 30 +-
man/setup.smart.Rd | 72 +-
man/simplex.Rd | 16 +-
man/skewnormal1.Rd | 61 +-
man/smart.expression.Rd | 5 +
man/smart.mode.is.Rd | 10 +-
man/smartpred.Rd | 69 +-
man/snormUC.Rd | 34 +-
man/sratio.Rd | 14 +-
man/studentt.Rd | 145 +-
man/tikuv.Rd | 5 +-
man/tobit.Rd | 22 +-
man/undocumented-methods.Rd | 1 +
man/vgam.Rd | 37 +-
man/vglm.Rd | 58 +-
man/vglmff-class.Rd | 62 +-
man/vonmises.Rd | 2 +-
man/wald.Rd | 16 +-
man/weibull.Rd | 10 +-
man/weightsvglm.Rd | 23 +-
man/wffc.P2star.Rd | 74 +-
man/wrapup.smart.Rd | 12 +-
man/zanegbinUC.Rd | 21 +-
man/zanegbinomial.Rd | 36 +-
man/zapoisson.Rd | 64 +-
man/zibinomial.Rd | 14 +-
man/zinegbinomial.Rd | 44 +-
man/zipoisson.Rd | 192 +-
src/caqo3.c | 19 +-
src/vgam3.c | 18 +-
src/zeta3.c | 43 +
210 files changed, 22698 insertions(+), 12636 deletions(-)
diff --git a/DESCRIPTION b/DESCRIPTION
index e700c2d..1ba07f4 100755
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,6 +1,6 @@
Package: VGAM
-Version: 0.8-1
-Date: 2010-06-22
+Version: 0.8-2
+Date: 2011-02-09
Title: Vector Generalized Linear and Additive Models
Author: Thomas W. Yee <t.yee at auckland.ac.nz>
Maintainer: Thomas Yee <t.yee at auckland.ac.nz>
@@ -15,6 +15,6 @@ License: GPL-2
URL: http://www.stat.auckland.ac.nz/~yee/VGAM
LazyLoad: yes
LazyData: yes
-Packaged: 2010-06-22 04:48:23 UTC; tyee001
+Packaged: 2011-02-09 03:49:01 UTC; tyee001
Repository: CRAN
-Date/Publication: 2010-06-22 08:29:15
+Date/Publication: 2011-02-09 10:10:07
diff --git a/NAMESPACE b/NAMESPACE
index 66a1142..e9339ef 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -11,6 +12,13 @@ useDynLib(VGAM)
+
+
+export(confint_rrnb, confint_nb1)
+export(vcovrrvglm)
+
+
+
export(
Build.terms.vlm,
procVec,
@@ -25,6 +33,22 @@ variable.namesvlm
)
+
+export(expgeometric, dexpgeom, pexpgeom, qexpgeom, rexpgeom,
+genrayleigh, dgenray, pgenray, qgenray, rgenray)
+
+
+
+
+export(Rcam, plotrcam0,
+rcam, summaryrcam)
+export(moffset)
+
+
+
+
+
+
export(
d2theta.deta2, Deviance.categorical.data.vgam,
lm2qrrvlm.model.matrix,
@@ -59,7 +83,7 @@ maxwell, dmaxwell, pmaxwell, qmaxwell, rmaxwell,
morgenstern,
dfgm, pfgm, rfgm, fgm,
gumbelIbiv,
-erf, erfc, lerch,
+erf, erfc, lerch, lambertW,
tpareto1, dtpareto, qtpareto, rtpareto, ptpareto,
pareto1, dpareto, qpareto, rpareto, ppareto,
paretoIV, dparetoIV, qparetoIV, rparetoIV, pparetoIV,
@@ -74,7 +98,8 @@ recnormal1, recexp1,
crayleigh, rayleigh, drayleigh, prayleigh, qrayleigh, rrayleigh,
drice, rrice, riceff,
dskellam, rskellam, skellam,
-dinv.gaussian, pinv.gaussian, rinv.gaussian, wald, expexp1, expexp)
+inv.gaussianff, dinv.gaussian, pinv.gaussian, rinv.gaussian, wald,
+expexp1, expexp)
export(A1A2A3, a2m, AAaa.nohw,
@@ -119,7 +144,7 @@ formulavlm, formulaNA.VGAM,
garma, gaussianff,
hypersecant, hypersecant.1,
hyperg,
-invbinomial, InverseBrat, inverse.gaussianff, inv.gaussianff,
+invbinomial, InverseBrat, inverse.gaussianff,
is.Numeric,
mccullagh89, leipnik, levy,
lms.bcg.control, lms.bcn.control, lmscreg.control,
@@ -153,6 +178,7 @@ predict.qrrvglm, predict.rrvglm, predict.uqo, predict.vgam,
predict.vglm, predict.vlm, predictvsmooth.spline,
predictvsmooth.spline.fit,
process.binomial2.data.vgam, process.categorical.data.vgam,
+negzero.expression,
qtplot,
qtplot.default, qtplot.gumbel, qtplot.lms.bcg,
qtplot.lms.bcn, qtplot.lms.yjn, qtplot.lms.yjn2, qtplot.vextremes, qtplot.vglm,
@@ -256,7 +282,7 @@ export(dgpd, pgpd, qgpd, rgpd, gpd)
export(dgev, pgev, qgev, rgev, gev, egev)
export(dlaplace, plaplace, qlaplace, rlaplace, laplace)
export(dalap, palap, qalap, ralap,
- alaplace1.control,
+ alaplace1.control, alaplace2.control, alaplace3.control,
alaplace1, alaplace2, alaplace3)
export(dloglap, ploglap, qloglap, rloglap)
export(loglaplace1.control, loglaplace1)
@@ -279,7 +305,7 @@ betaprime,
betaII,
zipebcom,
binom2.or, dbinom2.or, rbinom2.or,
-binom2.rho, dbinom2.rho, rbinom2.rho,
+binom2.rho, dbinom2.rho, rbinom2.rho, binom2.Rho,
binomialff, biplot.rrvglm, brat,
bratt, Brat, calibrate.qrrvglm.control, calibrate.qrrvglm,
calibrate, cao.control,
@@ -304,7 +330,7 @@ export(
dbenf, pbenf, qbenf, rbenf,
genbetaII, genpoisson, geometric,
dlino, plino, qlino, rlino, lino,
-grc,
+grc,
dhzeta, phzeta, qhzeta, rhzeta, hzeta,
nidentity, identity,
prentice74,
@@ -318,12 +344,17 @@ lvplot, lvplot.rrvglm, lv, Max, MNSs,
dmultinomial, multinomial, margeff)
export(
+huber, dhuber, edhuber, phuber, qhuber, rhuber)
+
+
+export(
slash, dslash, pslash, rslash)
export(
deunif, peunif, qeunif, reunif,
denorm, penorm, qenorm, renorm,
+koenker, dkoenker, pkoenker, qkoenker, rkoenker,
deexp, peexp, qeexp, reexp)
@@ -346,7 +377,9 @@ rrvglm.optim.control)
export(eta2theta,
rrvglm,
simplex, dsimplex, rsimplex,
-sratio, s, studentt, Tol, trplot.qrrvglm,
+sratio, s,
+studentt, studentt2, studentt3, Kayfun.studentt,
+Tol, trplot.qrrvglm,
trplot,
rcqo,
cqo,
@@ -359,7 +392,8 @@ dzanegbin, pzanegbin, qzanegbin, rzanegbin, zanegbinomial,
dzapois, pzapois, qzapois, rzapois, zapoisson,
dzibinom, pzibinom, qzibinom, rzibinom, zibinomial,
dzinegbin, pzinegbin, qzinegbin, rzinegbin, zinegbinomial,
-dzipois, pzipois, qzipois, rzipois, zipoisson,
+dzipois, pzipois, qzipois, rzipois,
+zipoisson, zipoissonff,
mix2exp, mix2normal1, mix2poisson,
mix2exp.control, mix2normal1.control, mix2poisson.control,
skewnormal1, dsnorm, rsnorm,
@@ -369,12 +403,12 @@ tikuv, dtikuv, ptikuv, qtikuv, rtikuv)
export(DeLury,
- wffc.P1, wffc.P1star, wffc.P2, wffc.P2star
+ wffc.P1, wffc.P1star, wffc.P2, wffc.P2star, wffc.P3, wffc.P3star
)
exportClasses("vglmff", "vlm", "vglm", "vgam",
-"rrvglm", "qrrvglm", "grc",
+"rrvglm", "qrrvglm", "grc", "rcam",
"vlmsmall", "uqo", "cao",
"summary.vgam", "summary.vglm","summary.vlm",
"summary.qrrvglm",
diff --git a/NEWS b/NEWS
index 2e2a0aa..7ef7c77 100755
--- a/NEWS
+++ b/NEWS
@@ -4,6 +4,65 @@
* *
**************************************************
+ CHANGES IN VGAM VERSION 0.8-2
+
+NEW FEATURES
+
+ o Objects of class "vglmff" have an "infos" slot to give
+ information about the family.
+ o New functions: lambertW(), rcam(), wffc.P3(), wffc.P3star(),
+ confint_rrnb(), confint_nb1().
+ o New VGAM family functions:
+ binom2.Rho(), [dpqr]expgeometric(),
+ [dpqr]genrayleigh(), [dpqr]huber(),
+ [dpqr]koenker(), studentt[23](), zipoissonff().
+ o Argument 'imethod' changed to 'method.init' for some
+ families, e.g., cnormal1(), tobit(), weibull().
+ o Improvements have been made to binom2.rho().
+ o Improved family functions:
+ negbinomial() has a new argument 'parallel',
+ micmen() has more initial value choices and fitting algorithms,
+ kumar(), studentt() and studentt2() now implement the EIM,
+ normal1() can handle multiple responses.
+ o Argument names changed:
+ 'init.rho' renamed to 'irho' in binom2.rho(),
+ 'a' renamed to 'scale' in rayleigh(),
+ 'Structural.zero' renamed to 'szero' throughout.
+ o zapoisson() permutes the linear/additive predictors.
+ o Several families such as negbinomial(), z[ai]negbinomial(),
+ zapoisson(), gamma2(), handle zero = c(-2, 3), say,
+ i.e., negative and positive values.
+ o New data sets: about half a dozen road crash data frames of
+ 2009 NZ data.
+ o constraints(vglmfit) now has a 'type' argument that can be fed
+ into the original fit (type = "lm") as the constraints argument.
+ o vchol() takes drastic action to avoid infinite looping: it sets
+ the working weights to be proportional to the order-M diagonal
+ matrix.
+ o lognormal() and lognormal3() now have zero = 2 as the default
+ (was zero = NULL).
+ o Some variable names within grc() changed, e.g., Row. and not Row.
+ o The smartpred package within VGAM has updated poly(), ns(),
+ bs() and scale() for R version 2.12.0 (2010-10-15).
+ Calls to smartpredenv are now VGAM:::smartpredenv.
+
+
+BUG FIXES
+
+ o VGAM:::VGAMenv is used now to avoid problems locating
+ this environment.
+ o Input of the mustart, etastart and coefstart arguments at
+ the solution should result in only one iteration being needed.
+ o binomialff() and categorical families (e.g., multinomial) only
+ accept a factor or non-negative counts as the response.
+ This allows the 'weights' vector to have any value really.
+ In the past the denominator of a sample proportion was allowed
+ via the 'weights' argument.
+ o wffc.P1() had some rounding problems, e.g., with 0.280 m.
+
+
+
+
CHANGES IN VGAM VERSION 0.8-1
NEW FEATURES
diff --git a/R/aamethods.q b/R/aamethods.q
index 42a1d0e..ee52a58 100644
--- a/R/aamethods.q
+++ b/R/aamethods.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -15,7 +16,7 @@ is.Numeric <- function(x, allowable.length=Inf, integer.valued=FALSE, positive=F
(if(positive) all(x>0) else TRUE)) TRUE else FALSE
-VGAMenv = new.env()
+VGAMenv <- new.env()
@@ -52,6 +53,7 @@ setClass("vglmff", representation(
"deviance" = "function",
"fini" = "expression",
"first" = "expression",
+ "infos" = "function", # Added 20101203
"initialize" = "expression",
"inverse" = "function",
"last" = "expression",
@@ -313,9 +315,15 @@ new("vglm", "extra"=from at extra,
- setClass("grc", representation(not.needed="numeric"),
+ setClass("rcam", representation(not.needed="numeric"),
contains = "rrvglm")
+ setClass("grc", representation(not.needed="numeric"),
+ contains = "rrvglm")
+
+setMethod("summary", "rcam",
+ function(object, ...)
+ summary.rcam(object, ...))
setMethod("summary", "grc",
function(object, ...)
diff --git a/R/add1.vglm.q b/R/add1.vglm.q
index 77388a5..c13eb59 100644
--- a/R/add1.vglm.q
+++ b/R/add1.vglm.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/attrassign.R b/R/attrassign.R
index fc7806c..842a4b0 100644
--- a/R/attrassign.R
+++ b/R/attrassign.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/bAIC.q b/R/bAIC.q
index 81bc7dc..d9d5b60 100644
--- a/R/bAIC.q
+++ b/R/bAIC.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -32,10 +33,10 @@ AICrrvglm = function(object, ..., k=2) {
estdisp = object at misc$estimated.dispersion
no.dpar = if (length(estdisp) && is.logical(estdisp) && estdisp)
length(object at misc$dispersion) else 0
- Structural.zero = object at control$Structural.zero
+ szero = object at control$szero
MMM = object at misc$M
Rank = object at control$Rank
- elts.tildeA = (MMM - Rank - length(Structural.zero)) * Rank
+ elts.tildeA = (MMM - Rank - length(szero)) * Rank
-2 * logLik.vlm(object, ...) +
k * (length(coefvlm(object)) + no.dpar + elts.tildeA)
}
@@ -46,10 +47,10 @@ AICqrrvglm = function(object, ..., k=2) {
estdisp = object at misc$estimated.dispersion
no.dpar = if (length(estdisp) && is.logical(estdisp) && estdisp)
length(object at misc$dispersion) else 0
- Structural.zero = object at control$Structural.zero
+ szero = object at control$szero
MMM = object at misc$M
Rank = object at control$Rank
- elts.tildeA = (MMM - Rank - length(Structural.zero)) * Rank
+ elts.tildeA = (MMM - Rank - length(szero)) * Rank
EqualTolerances = object at control$EqualTolerances
ITolerances = object at control$ITolerances
diff --git a/R/build.terms.vlm.q b/R/build.terms.vlm.q
index f70fcea..4678dc2 100644
--- a/R/build.terms.vlm.q
+++ b/R/build.terms.vlm.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/calibrate.q b/R/calibrate.q
index 8acffda..d039fb2 100644
--- a/R/calibrate.q
+++ b/R/calibrate.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/cao.R b/R/cao.R
index 2f4edf9..35c536e 100644
--- a/R/cao.R
+++ b/R/cao.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/cao.fit.q b/R/cao.fit.q
index e053fad..336fb08 100644
--- a/R/cao.fit.q
+++ b/R/cao.fit.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -51,19 +52,30 @@ cao.fit <- function(x, y, w=rep(1, length(x[, 1])),
"gamma2"=5, "gaussianff"=8,
0) # stop("cannot fit this model using fast algorithm")
if (!modelno) stop("the family function does not work with cao()")
- if (modelno == 1) modelno = get("modelno", envir = VGAMenv)
+ if (modelno == 1) modelno = get("modelno", envir = VGAM:::VGAMenv)
eval(rrr.init.expression)
if (length(etastart)) {
eta <- etastart
- mu <- if (length(mustart)) mustart else family at inverse(eta, extra)
- } else {
- if (length(mustart))
- mu <- mustart
- eta <- family at link(mu, extra)
+ mu <- if (length(mustart)) mustart else
+ if (length(body(slot(family, "inverse"))))
+ slot(family, "inverse")(eta, extra) else
+ warning("argument 'etastart' assigned a value ",
+ "but there is no 'inverse' slot to use it")
+ }
+
+ if (length(mustart)) {
+ mu <- mustart
+ if (length(body(slot(family, "link")))) {
+ eta <- slot(family, "link")(mu, extra)
+ } else {
+ warning("argument 'mustart' assigned a value ",
+ "but there is no 'link' slot to use it")
+ }
}
+
M <- if (is.matrix(eta)) ncol(eta) else 1
@@ -510,7 +522,7 @@ callcaoc = function(cmatrix,
nstar = if (Nice21) ifelse(modelno %in% c(3, 5), n * 2, n) else n * M
lenbeta = pstar. * ifelse(Nice21, NOS, 1) # Holds the linear coeffs
- inited = if (exists(".VGAM.CAO.etamat", envir=VGAMenv)) 1 else 0
+ inited = if (exists(".VGAM.CAO.etamat", envir=VGAM:::VGAMenv)) 1 else 0
usethiseta = if (inited == 1)
getfromVGAMenv("etamat", prefix = ".VGAM.CAO.") else t(etamat)
@@ -630,7 +642,7 @@ flush.console()
if (ans1$errcode == 0) {
assign2VGAMenv(c("etamat", "beta"), ans1, prefix=".VGAM.CAO.")
- assign(".VGAM.CAO.cmatrix", matrix(cmatrix,p2,Rank), envir=VGAMenv)
+ assign(".VGAM.CAO.cmatrix", matrix(cmatrix,p2,Rank), envir=VGAM:::VGAMenv)
} else {
cat("warning in callcaoc: error code =", ans1$errcode, "\n")
cat("warning in callcaoc: npetc[14] =", ans1$npetc[14], "\n")
@@ -747,12 +759,12 @@ calldcaoc = function(cmatrix,
lenbeta = pstar. * ifelse(Nice21, NOS, 1)
if (TRUE) {
- inited = if (exists(".VGAM.CAO.etamat", envir = VGAMenv)) 1 else 0
+ inited = if (exists(".VGAM.CAO.etamat", envir = VGAM:::VGAMenv)) 1 else 0
usethiseta = if (inited == 1) get(".VGAM.CAO.etamat",
- envir = VGAMenv) else t(etamat)
+ envir = VGAM:::VGAMenv) else t(etamat)
}
usethisbeta = if (inited == 2) get(".VGAM.CAO.beta",
- envir = VGAMenv) else double(lenbeta)
+ envir = VGAM:::VGAMenv) else double(lenbeta)
@@ -897,9 +909,9 @@ warning("20100405; this is new:")
kindex = as.integer(smooth.frame$kindex))
flush.console()
- assign(".VGAM.CAO.etamat", ans1$etamat, envir = VGAMenv)
- assign(".VGAM.CAO.z", ans1$zedd, envir=VGAMenv) # z; minus any offset
- assign(".VGAM.CAO.U", ans1$U, envir=VGAMenv) # U
+ assign(".VGAM.CAO.etamat", ans1$etamat, envir = VGAM:::VGAMenv)
+ assign(".VGAM.CAO.z", ans1$zedd, envir=VGAM:::VGAMenv) # z; minus any offset
+ assign(".VGAM.CAO.U", ans1$U, envir=VGAM:::VGAMenv) # U
if (ans1$errcode == 0) {
} else {
cat("warning in calldcaoc: error code =", ans1$errcode, "\n")
diff --git a/R/coef.vlm.q b/R/coef.vlm.q
index 678fdb6..12d03dd 100644
--- a/R/coef.vlm.q
+++ b/R/coef.vlm.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/cqo.R b/R/cqo.R
index 24fe945..f3413eb 100644
--- a/R/cqo.R
+++ b/R/cqo.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/cqo.fit.q b/R/cqo.fit.q
index 74bdd09..2543871 100644
--- a/R/cqo.fit.q
+++ b/R/cqo.fit.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -42,7 +43,7 @@ callcqoc = function(cmatrix, etamat, xmat, ymat, wvec,
flush.console()
}
rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance",
- "cmatrix", "ocmatrix"), prefix=".VGAM.CQO.")
+ "cmatrix", "ocmatrix"), prefix = ".VGAM.CQO.")
}
} else {
numat = xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
@@ -55,7 +56,7 @@ callcqoc = function(cmatrix, etamat, xmat, ymat, wvec,
}
inited = if (is.R()) {
- if (exists(".VGAM.CQO.etamat", envir = VGAMenv)) 1 else 0
+ if (exists(".VGAM.CQO.etamat", envir = VGAM:::VGAMenv)) 1 else 0
} else 0
@@ -97,10 +98,10 @@ callcqoc = function(cmatrix, etamat, xmat, ymat, wvec,
if (ans1$errcode[1] == 0) {
assign2VGAMenv(c("etamat", "z", "U", "beta", "deviance"),
- ans1, prefix=".VGAM.CQO.")
+ ans1, prefix = ".VGAM.CQO.")
if (is.R()) {
- assign(".VGAM.CQO.cmatrix", cmatrix, envir = VGAMenv)
- assign(".VGAM.CQO.ocmatrix", ocmatrix, envir = VGAMenv)
+ assign(".VGAM.CQO.cmatrix", cmatrix, envir = VGAM:::VGAMenv)
+ assign(".VGAM.CQO.ocmatrix", ocmatrix, envir = VGAM:::VGAMenv)
} else {
.VGAM.CQO.cmatrix <<- cmatrix
.VGAM.CQO.ocmatrix <<- ocmatrix
@@ -113,7 +114,7 @@ callcqoc = function(cmatrix, etamat, xmat, ymat, wvec,
print( ans1$errcode[-1] )
}
rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance",
- "cmatrix", "ocmatrix"), prefix=".VGAM.CQO.")
+ "cmatrix", "ocmatrix"), prefix = ".VGAM.CQO.")
}
if (control$trace)
flush.console()
@@ -162,7 +163,7 @@ calldcqo = function(cmatrix, etamat, xmat, ymat, wvec,
flush.console()
}
rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance",
- "cmatrix", "ocmatrix"), prefix=".VGAM.CQO.")
+ "cmatrix", "ocmatrix"), prefix = ".VGAM.CQO.")
}
} else {
numat = xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
@@ -175,7 +176,7 @@ calldcqo = function(cmatrix, etamat, xmat, ymat, wvec,
}
inited = if (is.R()) {
- if (exists(".VGAM.CQO.etamat", envir = VGAMenv)) 1 else 0
+ if (exists(".VGAM.CQO.etamat", envir = VGAM:::VGAMenv)) 1 else 0
} else 0
@@ -309,15 +310,27 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
eval(rrr.init.expression)
+
if (length(etastart)) {
eta <- etastart
- mu <- if (length(mustart)) mustart else family at inverse(eta, extra)
- } else {
- if (length(mustart))
- mu <- mustart
- eta <- family at link(mu, extra)
+ mu <- if (length(mustart)) mustart else
+ if (length(body(slot(family, "inverse"))))
+ slot(family, "inverse")(eta, extra) else
+ warning("argument 'etastart' assigned a value ",
+ "but there is no 'inverse' slot to use it")
}
+ if (length(mustart)) {
+ mu <- mustart
+ if (length(body(slot(family, "link")))) {
+ eta <- slot(family, "link")(mu, extra)
+ } else {
+ warning("argument 'mustart' assigned a value ",
+ "but there is no 'link' slot to use it")
+ }
+ }
+
+
M <- if (is.matrix(eta)) ncol(eta) else 1
if (is.character(rrcontrol$Dzero)) {
@@ -400,8 +413,8 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
if (modelno == 3 || modelno == 5)
Amat[c(FALSE,TRUE),] <- 0 # Intercept only for log(k)
- if (length(control$Structural.zero))
- Amat[control$Structural.zero,] = 0
+ if (length(control$szero))
+ Amat[control$szero,] = 0
rrcontrol$Ainit = control$Ainit = Amat # Good for valt()
rrcontrol$Cinit = control$Cinit = Cmat # Good for valt()
@@ -433,7 +446,7 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
}
rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance",
- "cmatrix", "ocmatrix"), prefix=".VGAM.CQO.")
+ "cmatrix", "ocmatrix"), prefix = ".VGAM.CQO.")
eval(cqo.init.derivative.expression)
for(iter in 1:control$optim.maxit) {
@@ -682,8 +695,8 @@ cqo.init.derivative.expression <- expression({
NOS = ifelse(modelno == 3 || modelno == 5, M/2, M)
canfitok = if (is.R())
- (exists("CQO.FastAlgorithm", envir=VGAMenv) &&
- get("CQO.FastAlgorithm", envir = VGAMenv)) else
+ (exists("CQO.FastAlgorithm", envir=VGAM:::VGAMenv) &&
+ get("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)) else
(exists("CQO.FastAlgorithm",inherits=TRUE) && CQO.FastAlgorithm)
if (!canfitok)
stop("cannot fit this model using fast algorithm")
@@ -714,8 +727,8 @@ cqo.derivative.expression <- expression({
modelno=modelno, Control=control,
n=n, M=M, p1star=p1star, p2star=p2star, nice31=nice31)
- z = matrix(getfromVGAMenv("z", prefix=".VGAM.CQO."), n, M)
- U = matrix(getfromVGAMenv("U", prefix=".VGAM.CQO."), M, n)
+ z = matrix(getfromVGAMenv("z", prefix = ".VGAM.CQO."), n, M)
+ U = matrix(getfromVGAMenv("U", prefix = ".VGAM.CQO."), M, n)
}
@@ -771,7 +784,7 @@ cqo.derivative.expression <- expression({
cqo.end.expression = expression({
- rmfromVGAMenv(c("etamat"), prefix=".VGAM.CQO.")
+ rmfromVGAMenv(c("etamat"), prefix = ".VGAM.CQO.")
if (control$Quadratic) {
diff --git a/R/deviance.vlm.q b/R/deviance.vlm.q
index aaee232..938af81 100644
--- a/R/deviance.vlm.q
+++ b/R/deviance.vlm.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/effects.vglm.q b/R/effects.vglm.q
index da66875..c8e6950 100644
--- a/R/effects.vglm.q
+++ b/R/effects.vglm.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/family.aunivariate.R b/R/family.aunivariate.R
index 2fe3069..17aafc9 100644
--- a/R/family.aunivariate.R
+++ b/R/family.aunivariate.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -11,259 +12,275 @@
-dkumar = function(x, shape1, shape2, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
+dkumar <- function(x, shape1, shape2, log = FALSE) {
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
- N = max(length(x), length(shape1), length(shape2))
- x = rep(x, len=N); shape1 = rep(shape1, len=N); shape2 = rep(shape2, len=N)
+ N <- max(length(x), length(shape1), length(shape2))
+ x <- rep(x, len = N); shape1 <- rep(shape1, len = N);
+ shape2 <- rep(shape2, len = N)
- logdensity = rep(log(0), len=N)
- xok = (0 <= x & x <= 1)
- logdensity[xok] = log(shape1[xok]) + log(shape2[xok]) +
- (shape1[xok]-1) * log(x[xok]) +
- (shape2[xok]-1) * log1p(-x[xok]^shape1[xok])
- logdensity[shape1 <= 0] = NaN
- logdensity[shape2 <= 0] = NaN
- if (log.arg) logdensity else exp(logdensity)
+ logdensity <- rep(log(0), len = N)
+ xok <- (0 <= x & x <= 1)
+ logdensity[xok] <- log(shape1[xok]) + log(shape2[xok]) +
+ (shape1[xok] - 1) * log(x[xok]) +
+ (shape2[xok] - 1) * log1p(-x[xok]^shape1[xok])
+
+ logdensity[shape1 <= 0] <- NaN
+ logdensity[shape2 <= 0] <- NaN
+ if (log.arg) logdensity else exp(logdensity)
}
-rkumar = function(n, shape1, shape2) {
- ans = (1 - (1 - runif(n))^(1/shape2))^(1/shape1)
- ans[(shape1 <= 0) | (shape2 <= 0)] = NaN
- ans
+rkumar <- function(n, shape1, shape2) {
+ ans <- (1 - (runif(n))^(1/shape2))^(1/shape1)
+ ans[(shape1 <= 0) | (shape2 <= 0)] <- NaN
+ ans
}
-qkumar = function(p, shape1, shape2) {
+qkumar <- function(p, shape1, shape2) {
- ans = (1 - (1 - p)^(1/shape2))^(1/shape1)
- ans[(shape1 <= 0) | (shape2 <= 0)] = NaN
- ans[p < 0] = NaN
- ans[p > 1] = NaN
- ans
+ ans <- (1.0 - (1.0 - p)^(1/shape2))^(1/shape1)
+ ans[(shape1 <= 0) | (shape2 <= 0)] = NaN
+ ans[p < 0] <- NaN
+ ans[p > 1] <- NaN
+ ans
}
pkumar = function(q, shape1, shape2) {
- ans = 1 - (1 - q^shape1)^shape2
- ans[q <= 0] = 0
- ans[q >= 1] = 1
- ans[(shape1 <= 0) | (shape2 <= 0)] = NaN
- ans
-}
-
-
-kumar.control <- function(save.weight=TRUE, ...)
-{
- list(save.weight=save.weight)
+ ans <- 1.0 - (1.0 - q^shape1)^shape2
+ ans[q <= 0] <- 0
+ ans[q >= 1] <- 1
+ ans[(shape1 <= 0) | (shape2 <= 0)] <- NaN
+ ans
}
- kumar = function(lshape1="loge", lshape2="loge",
- eshape1=list(), eshape2=list(),
- ishape1=NULL, ishape2=NULL,
- nsimEIM=500, zero=NULL)
-{
- if (mode(lshape1) != "character" && mode(lshape1) != "name")
- lshape1 = as.character(substitute(lshape1))
- if (mode(lshape2) != "character" && mode(lshape2) != "name")
- lshape2 = as.character(substitute(lshape2))
- if (length(ishape1) && (!is.Numeric(ishape1, allow=1, positive=TRUE)))
- stop("bad input for argument 'ishape1'")
- if (length(ishape2) && !is.Numeric(ishape2))
- stop("bad input for argument 'ishape2'")
- if (!is.list(eshape1)) eshape1 = list()
- if (!is.list(eshape2)) eshape2 = list()
- if (!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 50)
- stop("'nsimEIM' should be an integer greater than 50")
- new("vglmff",
- blurb=c("Kumaraswamy distribution\n\n",
- "Links: ",
- namesof("shape1", lshape1, earg= eshape1), ", ",
- namesof("shape2", lshape2, earg= eshape2, tag=FALSE), "\n",
- "Mean: ",
- "shape2 * beta(1+1/shape1, shape2)"),
- constraints=eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
- if (ncol(y <- cbind(y)) != 1)
- stop("the response must be a vector or one-column matrix")
- if (any((y <= 0) | (y >=1)))
- stop("the response must be in (0,1)")
- predictors.names = c(
- namesof("shape1", .lshape1, earg= .eshape1, tag=FALSE),
- namesof("shape2", .lshape2, earg= .eshape2, tag=FALSE))
- if (!length(etastart)) {
- kumar.Loglikfun = function(shape1, y, x, w, extraargs) {
- if (any(round(w) != w)) warning("assuming w is integer-valued")
- medy = median(rep(y, w))
- shape2 = log(0.5) / log1p(-(medy^shape1))
- sum(w * (log(shape1) + log(shape2) + (shape1-1)*log(y) +
- (shape2-1)*log1p(-y^shape1)))
- }
- shape1.grid = seq(0.4, 6.0, len=19)
- shape1.init = if (length( .ishape1 )) .ishape1 else
- getMaxMin(shape1.grid, objfun=kumar.Loglikfun, y=y, x=x, w=w)
- shape1.init = rep(shape1.init, length=length(y))
- medy = median(rep(y, w))
- shape2.init = if (length( .ishape2 )) .ishape2 else
- log(0.5) / log1p(-(medy^shape1.init))
- shape2.init = rep(shape2.init, length=length(y))
- etastart = cbind(theta2eta(shape1.init, .lshape1, earg= .eshape1),
- theta2eta(shape2.init, .lshape2, earg= .eshape2))
- }
- }), list( .lshape1=lshape1, .lshape2=lshape2,
- .ishape1=ishape1, .ishape2=ishape2,
- .eshape1=eshape1, .eshape2=eshape2 ))),
- inverse=eval(substitute(function(eta, extra=NULL){
- shape1 = eta2theta(eta[,1], link= .lshape1, earg= .eshape1)
- shape2 = eta2theta(eta[,2], link= .lshape2, earg= .eshape2)
- shape2 * (base::beta(1+1/shape1,shape2))
- }, list( .lshape1=lshape1, .lshape2=lshape2,
- .eshape1=eshape1, .eshape2=eshape2 ))),
- last=eval(substitute(expression({
- misc$link = c("shape1"= .lshape1, "shape2"= .lshape2)
- misc$earg = list("shape1"= .eshape1, "shape2"= .eshape2)
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
- }), list( .lshape1=lshape1, .lshape2=lshape2,
- .eshape1=eshape1, .eshape2=eshape2, .nsimEIM=nsimEIM ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
- shape1 = eta2theta(eta[,1], link= .lshape1, earg= .eshape1)
- shape2 = eta2theta(eta[,2], link= .lshape2, earg= .eshape2)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dkumar(x=y, shape1=shape1, shape2=shape2, log=TRUE))
- }
- }, list( .lshape1=lshape1, .lshape2=lshape2,
- .eshape1=eshape1, .eshape2=eshape2 ))),
- vfamily=c("kumar"),
- deriv=eval(substitute(expression({
- shape1 = eta2theta(eta[,1], link= .lshape1, earg= .eshape1)
- shape2 = eta2theta(eta[,2], link= .lshape2, earg= .eshape2)
- dshape1.deta = dtheta.deta(shape1, link= .lshape1, earg= .eshape1)
- dshape2.deta = dtheta.deta(shape2, link= .lshape2, earg= .eshape2)
- dl.dshape1 = 1/shape1 + log(y) - (shape2-1) * log(y) *
- (y^shape1) / (1-y^shape1)
- dl.dshape2 = 1/shape2 + log1p(-y^shape1)
- w * cbind(dl.dshape1 * dshape1.deta,
- dl.dshape2 * dshape2.deta)
- }), list( .lshape1=lshape1, .lshape2=lshape2,
- .eshape1=eshape1, .eshape2=eshape2, .nsimEIM=nsimEIM ))),
- weight = eval(substitute(expression({
- run.mean = 0
- for(ii in 1:( .nsimEIM )) {
- ysim = rkumar(n, shape1=shape1, shape2=shape2)
- dl.dshape1 = 1/shape1 + log(ysim) - (shape2-1) * log(ysim) *
- (ysim^shape1) / (1-ysim^shape1)
- dl.dshape2 = 1/shape2 + log1p(-ysim^shape1)
- d2l.dshape1shape2 = ysim^shape1 * log(ysim) / (1-ysim^shape1)
- d2l.dshape22 = 1 / shape2^2
- d2l.dshape12 = 1 / shape1^2 +
- (shape2-1)* ((log(ysim))^2) * (ysim^shape1) / (1 - ysim^shape1)^2
- rm(ysim)
- temp3 = matrix(0, n, dimm(M))
- temp3[,iam(1,1,M=M)] = d2l.dshape12
- temp3[,iam(2,2,M=M)] = d2l.dshape22
- temp3[,iam(1,2,M=M)] = d2l.dshape1shape2
- run.mean = ((ii-1) * run.mean + temp3) / ii
- }
- wz = if (intercept.only)
- matrix(colMeans(run.mean), n, dimm(M), byrow=TRUE) else run.mean
- dtheta.detas = cbind(dshape1.deta, dshape2.deta)
- index0 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
- wz = wz * dtheta.detas[,index0$row] * dtheta.detas[,index0$col]
- w * wz
- }), list( .lshape1=lshape1, .lshape2=lshape2,
- .eshape1=eshape1, .eshape2=eshape2, .nsimEIM=nsimEIM ))))
+ kumar <- function(lshape1 = "loge", lshape2 = "loge",
+ eshape1 = list(), eshape2 = list(),
+ ishape1 = NULL, ishape2 = NULL,
+ grid.shape1 = c(0.4, 6.0),
+ tol12 = 1.0e-4, zero = NULL)
+{
+ if (mode(lshape1) != "character" && mode(lshape1) != "name")
+ lshape1 <- as.character(substitute(lshape1))
+ if (mode(lshape2) != "character" && mode(lshape2) != "name")
+ lshape2 <- as.character(substitute(lshape2))
+ if (length(ishape1) &&
+ (!is.Numeric(ishape1, allow = 1, positive = TRUE)))
+ stop("bad input for argument 'ishape1'")
+ if (length(ishape2) && !is.Numeric(ishape2))
+ stop("bad input for argument 'ishape2'")
+
+ if (!is.list(eshape1)) eshape1 = list()
+ if (!is.list(eshape2)) eshape2 = list()
+
+ if (!is.Numeric(tol12, allow = 1, posit = TRUE))
+ stop("bad input for argument 'tol12'")
+ if (!is.Numeric(grid.shape1, allow = 2, posit = TRUE))
+ stop("bad input for argument 'grid.shape1'")
+
+ new("vglmff",
+ blurb = c("Kumaraswamy distribution\n\n",
+ "Links: ",
+ namesof("shape1", lshape1, earg = eshape1, tag = FALSE), ", ",
+ namesof("shape2", lshape2, earg = eshape2, tag = FALSE), "\n",
+ "Mean: ",
+ "shape2 * beta(1+1/shape1, shape2)"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (ncol(y <- cbind(y)) != 1)
+ stop("the response must be a vector or one-column matrix")
+ if (any((y <= 0) | (y >=1)))
+ stop("the response must be in (0,1)")
+
+ predictors.names <- c(
+ namesof("shape1", .lshape1, earg = .eshape1, tag = FALSE),
+ namesof("shape2", .lshape2, earg = .eshape2, tag = FALSE))
+ if (!length(etastart)) {
+
+
+
+ kumar.Loglikfun <- function(shape1, y, x, w, extraargs) {
+ if (any(round(w) != w))
+ warning("assuming w is integer-valued")
+ medy <- median(rep(y, w))
+ shape2 <- log(0.5) / log1p(-(medy^shape1))
+ sum(w * (log(shape1) + log(shape2) + (shape1-1)*log(y) +
+ (shape2-1)*log1p(-y^shape1)))
+ }
+
+ shape1.grid <- seq( .grid.shape1[1], .grid.shape1[2], len = 19)
+ shape1.init <- if (length( .ishape1 )) .ishape1 else
+ getMaxMin(shape1.grid, objfun=kumar.Loglikfun, y=y, x=x, w=w)
+ shape1.init <- rep(shape1.init, length = length(y))
+
+ medy <- median(rep(y, w))
+ shape2.init <- if (length( .ishape2 )) .ishape2 else
+ log(0.5) / log1p(-(medy^shape1.init))
+ shape2.init <- rep(shape2.init, length = length(y))
+ etastart <- cbind(
+ theta2eta(shape1.init, .lshape1, earg = .eshape1),
+ theta2eta(shape2.init, .lshape2, earg = .eshape2))
+ }
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2,
+ .ishape1 = ishape1, .ishape2 = ishape2,
+ .eshape1 = eshape1, .eshape2 = eshape2,
+ .grid.shape1 = grid.shape1 ))),
+ inverse = eval(substitute(function(eta, extra = NULL){
+ shape1 <- eta2theta(eta[,1], link = .lshape1, earg = .eshape1)
+ shape2 <- eta2theta(eta[,2], link = .lshape2, earg = .eshape2)
+ shape2 * (base::beta(1 + 1/shape1, shape2))
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ last = eval(substitute(expression({
+ misc$link <- c("shape1" = .lshape1, "shape2" = .lshape2)
+ misc$earg <- list("shape1" = .eshape1, "shape2" = .eshape2)
+ misc$expected = TRUE
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ shape1 <- eta2theta(eta[,1], link = .lshape1, earg = .eshape1)
+ shape2 <- eta2theta(eta[,2], link = .lshape2, earg = .eshape2)
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ sum(w * dkumar(x=y, shape1 = shape1, shape2 = shape2, log = TRUE))
+ }
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ vfamily = c("kumar"),
+ deriv = eval(substitute(expression({
+ shape1 <- eta2theta(eta[,1], link = .lshape1, earg = .eshape1)
+ shape2 <- eta2theta(eta[,2], link = .lshape2, earg = .eshape2)
+ dshape1.deta <- dtheta.deta(shape1, link = .lshape1, earg = .eshape1)
+ dshape2.deta <- dtheta.deta(shape2, link = .lshape2, earg = .eshape2)
+
+ dl.dshape1 <- 1 / shape1 + log(y) - (shape2-1) * log(y) *
+ (y^shape1) / (1-y^shape1)
+ dl.dshape2 <- 1 / shape2 + log1p(-y^shape1)
+
+ w * cbind(dl.dshape1 * dshape1.deta,
+ dl.dshape2 * dshape2.deta)
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ weight = eval(substitute(expression({
+ ed2l.dshape11 <- (1 + (shape2 / (shape2 - 2)) *
+ ((digamma(shape2) - digamma(2))^2 -
+ (trigamma(shape2) - trigamma(2)))) / shape1^2
+ ed2l.dshape22 <- 1.0 / shape2^2
+ ed2l.dshape12 <-
+ -((digamma(1 + shape2) - digamma(2)) / (shape2 - 1.0)) / shape1
+
+ index1 <- (abs(shape2 - 1.0) < .tol12)
+ if (any(index1))
+ ed2l.dshape12[index1] <- -trigamma(2) / shape1[index1]
+
+ index2 <- (abs(shape2 - 2.0) < .tol12)
+ if (any(index2))
+ ed2l.dshape11[index2] <-
+ (1.0 - 2.0 * psigamma(2.0, deriv = 2)) / shape1[index2]^2
+
+ wz <- matrix(0, n, dimm(M))
+ wz[, iam(1, 1, M = M)] <- ed2l.dshape11 * dshape1.deta^2
+ wz[, iam(2, 2, M = M)] <- ed2l.dshape22 * dshape2.deta^2
+ wz[, iam(1, 2, M = M)] <- ed2l.dshape12 * dshape1.deta * dshape2.deta
+
+ w * wz
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2,
+ .eshape1 = eshape1, .eshape2 = eshape2, .tol12 = tol12 ))))
}
-drice = function(x, vee, sigma, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
+drice <- function(x, vee, sigma, log = FALSE) {
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
- N = max(length(x), length(vee), length(sigma))
- x = rep(x, len=N); vee = rep(vee, len=N); sigma = rep(sigma, len=N)
+ N <- max(length(x), length(vee), length(sigma))
+ x <- rep(x, len = N); vee <- rep(vee, len = N);
+ sigma <- rep(sigma, len = N)
- logdensity = rep(log(0), len=N)
- xok = (x > 0)
- x.abs = abs(x[xok]*vee[xok]/sigma[xok]^2)
- logdensity[xok] = log(x[xok]) - 2 * log(sigma[xok]) +
- (-(x[xok]^2+vee[xok]^2)/(2*sigma[xok]^2)) +
- log(besselI(x.abs, nu=0, expon.scaled = TRUE)) + x.abs
- logdensity[sigma <= 0] = NaN
- logdensity[vee < 0] = NaN
- if (log.arg) logdensity else exp(logdensity)
+ logdensity <- rep(log(0), len = N)
+ xok <- (x > 0)
+ x.abs <- abs(x[xok] * vee[xok] / sigma[xok]^2)
+ logdensity[xok] <- log(x[xok]) - 2 * log(sigma[xok]) +
+ (-(x[xok]^2+vee[xok]^2)/(2*sigma[xok]^2)) +
+ log(besselI(x.abs, nu=0, expon.scaled = TRUE)) + x.abs
+ logdensity[sigma <= 0] <- NaN
+ logdensity[vee < 0] <- NaN
+ if (log.arg) logdensity else exp(logdensity)
}
-rrice = function(n, vee, sigma) {
- if (!is.Numeric(n, integ=TRUE,allow=1)) stop("bad input for argument 'n'")
- theta = 1 # any number
- X = rnorm(n, mean=vee * cos(theta), sd=sigma)
- Y = rnorm(n, mean=vee * sin(theta), sd=sigma)
- sqrt(X^2 + Y^2)
+rrice <- function(n, vee, sigma) {
+ if (!is.Numeric(n, integ = TRUE, allow = 1))
+ stop("bad input for argument 'n'")
+ theta <- 1 # any number
+ X <- rnorm(n, mean = vee * cos(theta), sd = sigma)
+ Y <- rnorm(n, mean = vee * sin(theta), sd = sigma)
+ sqrt(X^2 + Y^2)
}
-riceff.control <- function(save.weight=TRUE, ...)
+riceff.control <- function(save.weight = TRUE, ...)
{
- list(save.weight=save.weight)
+ list(save.weight = save.weight)
}
- riceff = function(lvee="loge", lsigma="loge",
- evee=list(), esigma=list(),
- ivee=NULL, isigma=NULL,
- nsimEIM=100, zero=NULL)
+ riceff = function(lvee = "loge", lsigma = "loge",
+ evee = list(), esigma = list(),
+ ivee = NULL, isigma = NULL,
+ nsimEIM = 100, zero = NULL)
{
if (mode(lvee) != "character" && mode(lvee) != "name")
lvee = as.character(substitute(lvee))
if (mode(lsigma) != "character" && mode(lsigma) != "name")
lsigma = as.character(substitute(lsigma))
- if (length(ivee) && !is.Numeric(ivee, positive=TRUE))
+ if (length(ivee) && !is.Numeric(ivee, positive = TRUE))
stop("bad input for argument 'ivee'")
- if (length(isigma) && !is.Numeric(isigma, positive=TRUE))
+ if (length(isigma) && !is.Numeric(isigma, positive = TRUE))
stop("bad input for argument 'isigma'")
if (!is.list(evee)) evee = list()
if (!is.list(esigma)) esigma = list()
- if (!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 50)
+ if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 50)
stop("'nsimEIM' should be an integer greater than 50")
new("vglmff",
- blurb=c("Rice distribution\n\n",
+ blurb = c("Rice distribution\n\n",
"Links: ",
- namesof("vee", lvee, earg= evee, tag=FALSE), ", ",
- namesof("sigma", lsigma, earg= esigma, tag=FALSE), "\n",
+ namesof("vee", lvee, earg = evee, tag = FALSE), ", ",
+ namesof("sigma", lsigma, earg = esigma, tag = FALSE), "\n",
"Mean: ",
"sigma*sqrt(pi/2)*exp(z/2)*((1-z)*",
"besselI(-z/2,nu=0)-z*besselI(-z/2,nu=1)) where z=-vee^2/(2*sigma^2)"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(y <- cbind(y)) != 1)
stop("the response must be a vector or one-column matrix")
if (any((y <= 0)))
stop("the response must be in (0,Inf)")
predictors.names = c(
- namesof("vee", .lvee, earg= .evee, tag=FALSE),
- namesof("sigma", .lsigma, earg= .esigma, tag=FALSE))
+ namesof("vee", .lvee, earg = .evee, tag = FALSE),
+ namesof("sigma", .lsigma, earg = .esigma, tag = FALSE))
if (!length(etastart)) {
riceff.Loglikfun = function(vee, y, x, w, extraargs) {
sigma.init = sd(rep(y, w))
@@ -275,48 +292,49 @@ riceff.control <- function(save.weight=TRUE, ...)
quantile(rep(y,w), probs = seq(0, 1, 0.2))["80%"], len=11)
vee.init = if (length( .ivee )) .ivee else
getMaxMin(vee.grid, objfun=riceff.Loglikfun, y=y, x=x, w=w)
- vee.init = rep(vee.init, length=length(y))
+ vee.init = rep(vee.init, length = length(y))
sigma.init = if (length( .isigma )) .isigma else
sqrt(max((weighted.mean(y^2, w) - vee.init^2)/2, 0.001))
- sigma.init = rep(sigma.init, length=length(y))
- etastart = cbind(theta2eta(vee.init, .lvee, earg= .evee),
- theta2eta(sigma.init, .lsigma, earg= .esigma))
+ sigma.init = rep(sigma.init, length = length(y))
+ etastart = cbind(theta2eta(vee.init, .lvee, earg = .evee),
+ theta2eta(sigma.init, .lsigma, earg = .esigma))
}
- }), list( .lvee=lvee, .lsigma=lsigma,
- .ivee=ivee, .isigma=isigma,
- .evee=evee, .esigma=esigma ))),
- inverse=eval(substitute(function(eta, extra=NULL){
- vee = eta2theta(eta[,1], link= .lvee, earg= .evee)
- sigma = eta2theta(eta[,2], link= .lsigma, earg= .esigma)
+ }), list( .lvee = lvee, .lsigma = lsigma,
+ .ivee = ivee, .isigma=isigma,
+ .evee = evee, .esigma = esigma ))),
+ inverse = eval(substitute(function(eta, extra = NULL){
+ vee = eta2theta(eta[,1], link = .lvee, earg = .evee)
+ sigma = eta2theta(eta[,2], link = .lsigma, earg = .esigma)
temp9 = -vee^2 / (2*sigma^2)
- sigma * sqrt(pi/2) * ((1-temp9) * besselI(-temp9/2,nu=0,expon=TRUE) -
- temp9 * besselI(-temp9/2,nu=1,expon=TRUE))
- }, list( .lvee=lvee, .lsigma=lsigma,
- .evee=evee, .esigma=esigma ))),
- last=eval(substitute(expression({
- misc$link = c("vee"= .lvee, "sigma"= .lsigma)
- misc$earg = list("vee"= .evee, "sigma"= .esigma)
+ sigma * sqrt(pi/2) * ((1-temp9) * besselI(-temp9/2,nu=0,expon = TRUE) -
+ temp9 * besselI(-temp9/2,nu=1,expon = TRUE))
+ }, list( .lvee = lvee, .lsigma = lsigma,
+ .evee = evee, .esigma = esigma ))),
+ last = eval(substitute(expression({
+ misc$link <- c("vee" = .lvee, "sigma" = .lsigma)
+ misc$earg <- list("vee" = .evee, "sigma" = .esigma)
misc$expected = TRUE
misc$nsimEIM = .nsimEIM
- }), list( .lvee=lvee, .lsigma=lsigma,
- .evee=evee, .esigma=esigma, .nsimEIM=nsimEIM ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
- vee = eta2theta(eta[,1], link= .lvee, earg= .evee)
- sigma = eta2theta(eta[,2], link= .lsigma, earg= .esigma)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * drice(x=y, vee=vee, sigma=sigma, log = TRUE))
+ }), list( .lvee = lvee, .lsigma = lsigma,
+ .evee = evee, .esigma = esigma, .nsimEIM = nsimEIM ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
+ vee = eta2theta(eta[,1], link = .lvee, earg = .evee)
+ sigma = eta2theta(eta[,2], link = .lsigma, earg = .esigma)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(w * drice(x=y, vee = vee, sigma=sigma, log = TRUE))
}
- }, list( .lvee=lvee, .lsigma=lsigma,
- .evee=evee, .esigma=esigma ))),
- vfamily=c("riceff"),
- deriv=eval(substitute(expression({
- vee = eta2theta(eta[,1], link= .lvee, earg= .evee)
- sigma = eta2theta(eta[,2], link= .lsigma, earg= .esigma)
- dvee.deta = dtheta.deta(vee, link= .lvee, earg= .evee)
- dsigma.deta = dtheta.deta(sigma, link= .lsigma, earg= .esigma)
+ }, list( .lvee = lvee, .lsigma = lsigma,
+ .evee = evee, .esigma = esigma ))),
+ vfamily = c("riceff"),
+ deriv = eval(substitute(expression({
+ vee = eta2theta(eta[,1], link = .lvee, earg = .evee)
+ sigma = eta2theta(eta[,2], link = .lsigma, earg = .esigma)
+ dvee.deta = dtheta.deta(vee, link = .lvee, earg = .evee)
+ dsigma.deta = dtheta.deta(sigma, link = .lsigma, earg = .esigma)
temp8 = y * vee / sigma^2
dl.dvee = -vee/sigma^2 + (y/sigma^2) *
besselI(temp8, nu=1) / besselI(temp8, nu=0)
@@ -324,12 +342,12 @@ riceff.control <- function(save.weight=TRUE, ...)
besselI(temp8, nu=1) / besselI(temp8, nu=0)
w * cbind(dl.dvee * dvee.deta,
dl.dsigma * dsigma.deta)
- }), list( .lvee=lvee, .lsigma=lsigma,
- .evee=evee, .esigma=esigma, .nsimEIM=nsimEIM ))),
+ }), list( .lvee = lvee, .lsigma = lsigma,
+ .evee = evee, .esigma = esigma, .nsimEIM = nsimEIM ))),
weight = eval(substitute(expression({
run.var = run.cov = 0
for(ii in 1:( .nsimEIM )) {
- ysim = rrice(n, vee=vee, sigma=sigma)
+ ysim = rrice(n, vee = vee, sigma=sigma)
temp8 = ysim * vee / sigma^2
dl.dvee = -vee/sigma^2 + (ysim/sigma^2) *
besselI(temp8, nu=1) / besselI(temp8, nu=0)
@@ -343,20 +361,20 @@ riceff.control <- function(save.weight=TRUE, ...)
}
wz = if (intercept.only)
matrix(colMeans(cbind(run.var, run.cov)),
- n, dimm(M), byrow=TRUE) else cbind(run.var, run.cov)
+ n, dimm(M), byrow = TRUE) else cbind(run.var, run.cov)
dtheta.detas = cbind(dvee.deta, dsigma.deta)
- index0 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
+ index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
wz = wz * dtheta.detas[,index0$row] * dtheta.detas[,index0$col]
w * wz
- }), list( .lvee=lvee, .lsigma=lsigma,
- .evee=evee, .esigma=esigma, .nsimEIM=nsimEIM ))))
+ }), list( .lvee = lvee, .lsigma = lsigma,
+ .evee = evee, .esigma = esigma, .nsimEIM = nsimEIM ))))
}
-dskellam = function(x, mu1, mu2, log=FALSE) {
+dskellam = function(x, mu1, mu2, log = FALSE) {
log.arg = log; rm(log)
if ( !is.logical( log.arg ) || length( log.arg )!=1 )
stop("bad input for 'log.arg'")
@@ -370,14 +388,14 @@ dskellam = function(x, mu1, mu2, log=FALSE) {
if (log.arg) {
ans = -mu1 - mu2 + 2 * sqrt(mu1*mu2) +
0.5 * x * log(mu1) - 0.5 * x * log(mu2) +
- log(besselI(2 * sqrt(mu1*mu2), nu=x, expon=TRUE))
- ans[ok3] = dpois(x=-x[ok3], lambda=mu2[ok3], log=TRUE)
- ans[ok4] = dpois(x=-x[ok4], lambda=mu1[ok4], log=TRUE)
- ans[ok5] = dpois(x= x[ok5], lambda=0.0, log=TRUE)
+ log(besselI(2 * sqrt(mu1*mu2), nu=x, expon = TRUE))
+ ans[ok3] = dpois(x = -x[ok3], lambda = mu2[ok3], log = TRUE)
+ ans[ok4] = dpois(x = -x[ok4], lambda = mu1[ok4], log = TRUE)
+ ans[ok5] = dpois(x = x[ok5], lambda = 0.0, log = TRUE)
ans[x != round(x)] = log(0.0)
} else {
ans = (mu1/mu2)^(x/2) * exp(-mu1-mu2 + 2 * sqrt(mu1*mu2)) *
- besselI(2 * sqrt(mu1*mu2), nu=x, expon=TRUE)
+ besselI(2 * sqrt(mu1*mu2), nu=x, expon = TRUE)
ans[ok3] = dpois(x=-x[ok3], lambda=mu2[ok3])
ans[ok4] = dpois(x=-x[ok4], lambda=mu1[ok4])
ans[ok5] = dpois(x= x[ok5], lambda=0.0)
@@ -398,50 +416,51 @@ rskellam = function(n, mu1, mu2) {
-skellam.control <- function(save.weight=TRUE, ...)
+skellam.control <- function(save.weight = TRUE, ...)
{
- list(save.weight=save.weight)
+ list(save.weight = save.weight)
}
- skellam = function(lmu1="loge", lmu2="loge",
- emu1=list(), emu2=list(),
- imu1=NULL, imu2=NULL,
- nsimEIM=100, parallel=FALSE, zero=NULL)
+ skellam = function(lmu1 = "loge", lmu2 = "loge",
+ emu1 = list(), emu2= list(),
+ imu1 = NULL, imu2 = NULL,
+ nsimEIM = 100, parallel = FALSE, zero = NULL)
{
if (mode(lmu1) != "character" && mode(lmu1) != "name")
lmu1 = as.character(substitute(lmu1))
if (mode(lmu2) != "character" && mode(lmu2) != "name")
lmu2 = as.character(substitute(lmu2))
- if (length(imu1) && !is.Numeric(imu1, positive=TRUE))
+ if (length(imu1) && !is.Numeric(imu1, positive = TRUE))
stop("bad input for argument 'imu1'")
- if (length(imu2) && !is.Numeric(imu2, positive=TRUE))
+ if (length(imu2) && !is.Numeric(imu2, positive = TRUE))
stop("bad input for argument 'imu2'")
if (!is.list(emu1)) emu1 = list()
if (!is.list(emu2)) emu2 = list()
- if (!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 50)
+ if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) ||
+ nsimEIM <= 50)
stop("'nsimEIM' should be an integer greater than 50")
new("vglmff",
- blurb=c("Skellam distribution\n\n",
+ blurb = c("Skellam distribution\n\n",
"Links: ",
- namesof("mu1", lmu1, earg= emu1, tag=FALSE), ", ",
- namesof("mu2", lmu2, earg= emu2, tag=FALSE), "\n",
+ namesof("mu1", lmu1, earg = emu1, tag = FALSE), ", ",
+ namesof("mu2", lmu2, earg = emu2, tag = FALSE), "\n",
"Mean: mu1-mu2", "\n",
"Variance: mu1+mu2"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints,
- int=TRUE)
+ int = TRUE)
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .parallel=parallel, .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .parallel=parallel, .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(y <- cbind(y)) != 1)
stop("the response must be a vector or one-column matrix")
if (any((y != round(y))))
stop("the response should be integer-valued")
predictors.names = c(
- namesof("mu1", .lmu1, earg= .emu1, tag=FALSE),
- namesof("mu2", .lmu2, earg= .emu2, tag=FALSE))
+ namesof("mu1", .lmu1, earg = .emu1, tag = FALSE),
+ namesof("mu2", .lmu2, earg = .emu2, tag = FALSE))
if (!length(etastart)) {
junk = lm.wfit(x=x, y=y, w=w)
var.y.est = sum(w * junk$resid^2) / junk$df.residual
@@ -450,70 +469,71 @@ skellam.control <- function(save.weight=TRUE, ...)
mu2.init = max((var.y.est - mean.init)/2, 0.01)
mu1.init = rep(if(length( .imu1)) .imu1 else mu1.init, length=n)
mu2.init = rep(if(length( .imu2)) .imu2 else mu2.init, length=n)
- etastart = cbind(theta2eta(mu1.init, .lmu1, earg= .emu1),
- theta2eta(mu2.init, .lmu2, earg= .emu2))
+ etastart = cbind(theta2eta(mu1.init, .lmu1, earg = .emu1),
+ theta2eta(mu2.init, .lmu2, earg = .emu2))
}
- }), list( .lmu1=lmu1, .lmu2=lmu2,
+ }), list( .lmu1 = lmu1, .lmu2 = lmu2,
.imu1=imu1, .imu2=imu2,
- .emu1=emu1, .emu2=emu2 ))),
- inverse=eval(substitute(function(eta, extra=NULL){
- mu1 = eta2theta(eta[,1], link= .lmu1, earg= .emu1)
- mu2 = eta2theta(eta[,2], link= .lmu2, earg= .emu2)
+ .emu1 = emu1, .emu2 = emu2 ))),
+ inverse = eval(substitute(function(eta, extra = NULL){
+ mu1 = eta2theta(eta[,1], link = .lmu1, earg = .emu1)
+ mu2 = eta2theta(eta[,2], link = .lmu2, earg = .emu2)
mu1 - mu2
- }, list( .lmu1=lmu1, .lmu2=lmu2,
- .emu1=emu1, .emu2=emu2 ))),
- last=eval(substitute(expression({
- misc$link = c("mu1"= .lmu1, "mu2"= .lmu2)
- misc$earg = list("mu1"= .emu1, "mu2"= .emu2)
+ }, list( .lmu1 = lmu1, .lmu2 = lmu2,
+ .emu1 = emu1, .emu2 = emu2 ))),
+ last = eval(substitute(expression({
+ misc$link <- c("mu1" = .lmu1, "mu2" = .lmu2)
+ misc$earg <- list("mu1" = .emu1, "mu2" = .emu2)
misc$expected = TRUE
misc$nsimEIM = .nsimEIM
- }), list( .lmu1=lmu1, .lmu2=lmu2,
- .emu1=emu1, .emu2=emu2, .nsimEIM=nsimEIM ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
- mu1 = eta2theta(eta[,1], link= .lmu1, earg= .emu1)
- mu2 = eta2theta(eta[,2], link= .lmu2, earg= .emu2)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ }), list( .lmu1 = lmu1, .lmu2 = lmu2,
+ .emu1 = emu1, .emu2 = emu2, .nsimEIM = nsimEIM ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
+ mu1 = eta2theta(eta[,1], link = .lmu1, earg = .emu1)
+ mu2 = eta2theta(eta[,2], link = .lmu2, earg = .emu2)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
- if ( is.logical( .parallel ) && length( .parallel )==1 &&
+ if ( is.logical( .parallel ) && length( .parallel )== 1 &&
.parallel )
- sum(w * log(besselI(2*mu1, nu=y, expon=TRUE))) else
+ sum(w * log(besselI(2*mu1, nu=y, expon = TRUE))) else
sum(w * (-mu1 - mu2 +
0.5 * y * log(mu1) -
0.5 * y * log(mu2) +
- 2 * sqrt(mu1*mu2) + # Use this when expon=TRUE
- log(besselI(2 * sqrt(mu1*mu2), nu=y, expon=TRUE))))
+ 2 * sqrt(mu1*mu2) + # Use this when expon = TRUE
+ log(besselI(2 * sqrt(mu1*mu2), nu=y, expon = TRUE))))
}
- }, list( .lmu1=lmu1, .lmu2=lmu2,
+ }, list( .lmu1 = lmu1, .lmu2 = lmu2,
.parallel=parallel,
- .emu1=emu1, .emu2=emu2 ))),
- vfamily=c("skellam"),
- deriv=eval(substitute(expression({
- mu1 = eta2theta(eta[,1], link= .lmu1, earg= .emu1)
- mu2 = eta2theta(eta[,2], link= .lmu2, earg= .emu2)
- dmu1.deta = dtheta.deta(mu1, link= .lmu1, earg= .emu1)
- dmu2.deta = dtheta.deta(mu2, link= .lmu2, earg= .emu2)
+ .emu1 = emu1, .emu2 = emu2 ))),
+ vfamily = c("skellam"),
+ deriv = eval(substitute(expression({
+ mu1 = eta2theta(eta[,1], link = .lmu1, earg = .emu1)
+ mu2 = eta2theta(eta[,2], link = .lmu2, earg = .emu2)
+ dmu1.deta = dtheta.deta(mu1, link = .lmu1, earg = .emu1)
+ dmu2.deta = dtheta.deta(mu2, link = .lmu2, earg = .emu2)
temp8 = 2 * sqrt(mu1*mu2)
- temp9 = besselI(temp8, nu=y, expon=TRUE)
- temp7 = (besselI(temp8, nu=y-1, expon=TRUE) +
- besselI(temp8, nu=y+1, expon=TRUE)) / 2
+ temp9 = besselI(temp8, nu=y, expon = TRUE)
+ temp7 = (besselI(temp8, nu=y-1, expon = TRUE) +
+ besselI(temp8, nu=y+1, expon = TRUE)) / 2
temp6 = temp7 / temp9
dl.dmu1 = -1 + 0.5 * y / mu1 + sqrt(mu2/mu1) * temp6
dl.dmu2 = -1 - 0.5 * y / mu2 + sqrt(mu1/mu2) * temp6
w * cbind(dl.dmu1 * dmu1.deta,
dl.dmu2 * dmu2.deta)
- }), list( .lmu1=lmu1, .lmu2=lmu2,
- .emu1=emu1, .emu2=emu2, .nsimEIM=nsimEIM ))),
+ }), list( .lmu1 = lmu1, .lmu2 = lmu2,
+ .emu1 = emu1, .emu2 = emu2, .nsimEIM = nsimEIM ))),
weight = eval(substitute(expression({
run.var = run.cov = 0
for(ii in 1:( .nsimEIM )) {
ysim = rskellam(n, mu1=mu1, mu2=mu2)
- temp9 = besselI(temp8, nu=ysim, expon=TRUE)
- temp7 = (besselI(temp8, nu=ysim-1, expon=TRUE) +
- besselI(temp8, nu=ysim+1, expon=TRUE)) / 2
+ temp9 = besselI(temp8, nu=ysim, expon = TRUE)
+ temp7 = (besselI(temp8, nu=ysim-1, expon = TRUE) +
+ besselI(temp8, nu=ysim+1, expon = TRUE)) / 2
temp6 = temp7 / temp9
dl.dmu1 = -1 + 0.5 * ysim/mu1 + sqrt(mu2/mu1) * temp6
dl.dmu2 = -1 - 0.5 * ysim/mu2 + sqrt(mu1/mu2) * temp6
@@ -524,20 +544,20 @@ skellam.control <- function(save.weight=TRUE, ...)
}
wz = if (intercept.only)
matrix(colMeans(cbind(run.var, run.cov)),
- n, dimm(M), byrow=TRUE) else cbind(run.var, run.cov)
+ n, dimm(M), byrow = TRUE) else cbind(run.var, run.cov)
dtheta.detas = cbind(dmu1.deta, dmu2.deta)
- index0 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
+ index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
wz = wz * dtheta.detas[,index0$row] * dtheta.detas[,index0$col]
w * wz
- }), list( .lmu1=lmu1, .lmu2=lmu2,
- .emu1=emu1, .emu2=emu2, .nsimEIM=nsimEIM ))))
+ }), list( .lmu1 = lmu1, .lmu2 = lmu2,
+ .emu1 = emu1, .emu2 = emu2, .nsimEIM = nsimEIM ))))
}
-dyules = function(x, rho, log=FALSE) {
+dyules = function(x, rho, log = FALSE) {
log.arg = log
rm(log)
if ( !is.logical( log.arg ) || length( log.arg )!=1 )
@@ -555,7 +575,8 @@ dyules = function(x, rho, log=FALSE) {
ryules = function(n, rho) {
- if (!is.Numeric(n, integ=TRUE,allow=1)) stop("bad input for argument 'n'")
+ if (!is.Numeric(n, integ = TRUE, allow = 1))
+ stop("bad input for argument 'n'")
rgeom(n, prob=exp(-rexp(n, rate=rho))) + 1
}
@@ -571,29 +592,30 @@ pyules = function(q, rho) {
-yulesimon.control <- function(save.weight=TRUE, ...)
+yulesimon.control <- function(save.weight = TRUE, ...)
{
- list(save.weight=save.weight)
+ list(save.weight = save.weight)
}
- yulesimon = function(link="loge", earg=list(), irho=NULL, nsimEIM=200)
+ yulesimon = function(link = "loge", earg = list(), irho = NULL, nsimEIM = 200)
{
- if (length(irho) && !is.Numeric(irho, positi=TRUE))
+ if (length(irho) && !is.Numeric(irho, positi = TRUE))
stop("argument 'irho' must be > 0")
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
- if (!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 50)
+ if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) ||
+ nsimEIM <= 50)
stop("'nsimEIM' should be an integer greater than 50")
new("vglmff",
- blurb=c("Yule-Simon distribution f(y) = rho*beta(y,rho+1), rho>0, y=1,2,..\n\n",
+ blurb = c("Yule-Simon distribution f(y) = rho*beta(y,rho+1), rho>0, y=1,2,..\n\n",
"Link: ",
- namesof("p", link, earg=earg), "\n\n",
+ namesof("p", link, earg =earg), "\n\n",
"Mean: rho/(rho-1), provided rho>1\n",
"Variance: rho^2 / ((rho-1)^2 * (rho-2)), provided rho>2"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
y = as.numeric(y)
if (any(y < 1))
stop("all y values must be in 1,2,3,...")
@@ -601,41 +623,42 @@ yulesimon.control <- function(save.weight=TRUE, ...)
warning("y should be integer-valued")
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("rho", .link, earg=.earg, tag=FALSE)
+ predictors.names = namesof("rho", .link, earg =.earg, tag = FALSE)
if (!length(etastart)) {
wmeany = weighted.mean(y,w) + 1/8
rho.init = wmeany / (wmeany - 1)
- rho.init = rep( if (length( .irho )) .irho else rho.init, len=n)
- etastart = theta2eta(rho.init, .link, earg=.earg)
+ rho.init = rep( if (length( .irho )) .irho else rho.init, len = n)
+ etastart = theta2eta(rho.init, .link, earg =.earg)
}
- }), list( .link=link, .earg=earg, .irho=irho ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- ans = rho = eta2theta(eta, .link, earg=.earg)
+ }), list( .link=link, .earg =earg, .irho=irho ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ ans = rho = eta2theta(eta, .link, earg =.earg)
ans[rho>1] = rho / (rho - 1)
ans[rho<=1] = NA
ans
- }, list( .link=link, .earg=earg ))),
- last=eval(substitute(expression({
- misc$link = c(rho= .link)
- misc$earg = list(rho = .earg)
+ }, list( .link=link, .earg =earg ))),
+ last = eval(substitute(expression({
+ misc$link <- c(rho = .link)
+ misc$earg <- list(rho = .earg)
misc$expected = TRUE
misc$nsimEIM = .nsimEIM
- }), list( .link=link, .earg=earg, .nsimEIM=nsimEIM ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- rho = eta2theta(eta, .link, earg=.earg)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dyules(x=y, rho=rho, log=TRUE))
+ }), list( .link=link, .earg =earg, .nsimEIM = nsimEIM ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra = NULL) {
+ rho = eta2theta(eta, .link, earg =.earg)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dyules(x=y, rho=rho, log = TRUE))
}
- }, list( .link=link, .earg=earg ))),
- vfamily=c("yulesimon"),
- deriv=eval(substitute(expression({
- rho = eta2theta(eta, .link, earg=.earg)
+ }, list( .link=link, .earg =earg ))),
+ vfamily = c("yulesimon"),
+ deriv = eval(substitute(expression({
+ rho = eta2theta(eta, .link, earg =.earg)
dl.drho = 1/rho + digamma(1+rho) - digamma(1+rho+y)
- drho.deta = dtheta.deta(rho, .link, earg=.earg)
+ drho.deta = dtheta.deta(rho, .link, earg =.earg)
w * dl.drho * drho.deta
- }), list( .link=link, .earg=earg ))),
+ }), list( .link=link, .earg =earg ))),
weight = eval(substitute(expression({
run.var = 0
for(ii in 1:( .nsimEIM )) {
@@ -647,20 +670,20 @@ yulesimon.control <- function(save.weight=TRUE, ...)
}
wz = if (intercept.only)
matrix(colMeans(cbind(run.var)),
- n, dimm(M), byrow=TRUE) else cbind(run.var)
+ n, dimm(M), byrow = TRUE) else cbind(run.var)
wz = wz * drho.deta^2
w * wz
- }), list( .nsimEIM=nsimEIM ))))
+ }), list( .nsimEIM = nsimEIM ))))
}
-dslash <- function(x, mu=0, sigma=1, log=FALSE,
+dslash <- function(x, mu=0, sigma=1, log = FALSE,
smallno=.Machine$double.eps*1000){
log.arg = log
rm(log)
@@ -697,112 +720,119 @@ rslash <- function (n, mu=0, sigma=1){
stop("bad input for argument 'n'")
if (any(sigma <= 0))
stop("argument 'sigma' must be positive")
- rnorm(n=n, mean=mu, sd=sigma) / runif(n=n)
+ rnorm(n = n, mean=mu, sd=sigma) / runif(n = n)
}
-slash.control <- function(save.weight=TRUE, ...)
+slash.control <- function(save.weight = TRUE, ...)
{
- list(save.weight=save.weight)
+ list(save.weight = save.weight)
}
- slash = function(lmu="identity", lsigma="loge", emu=list(), esigma=list(),
- imu=NULL, isigma=NULL,
+ slash = function(lmu = "identity", lsigma = "loge",
+ emu = list(), esigma = list(),
+ imu = NULL, isigma = NULL,
iprobs = c(0.1, 0.9),
- nsimEIM=250, zero=NULL,
+ nsimEIM = 250, zero = NULL,
smallno = .Machine$double.eps*1000)
{
if (mode(lmu) != "character" && mode(lmu) != "name")
lmu = as.character(substitute(lmu))
if (mode(lsigma) != "character" && mode(lsigma) != "name")
lsigma = as.character(substitute(lsigma))
- if (length(isigma) && !is.Numeric(isigma, posit=TRUE))
+ if (length(isigma) && !is.Numeric(isigma, posit = TRUE))
stop("'isigma' must be > 0")
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(emu)) emu = list()
if (!is.list(esigma)) esigma = list()
- if (!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 50)
+ if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 50)
stop("'nsimEIM' should be an integer greater than 50")
- if (!is.Numeric(iprobs, posit=TRUE) || max(iprobs) >= 1 ||
+ if (!is.Numeric(iprobs, posit = TRUE) || max(iprobs) >= 1 ||
length(iprobs)!=2)
stop("bad input for argument 'iprobs'")
- if (!is.Numeric(smallno, posit=TRUE) || smallno > 0.1)
+ if (!is.Numeric(smallno, posit = TRUE) || smallno > 0.1)
stop("bad input for argument 'smallno'")
new("vglmff",
- blurb=c("Slash distribution\n\n",
+ blurb = c("Slash distribution\n\n",
"Links: ",
- namesof("mu", lmu, earg= emu), ", ",
- namesof("sigma", lsigma, earg= esigma, tag=FALSE), "\n",
+ namesof("mu", lmu, earg = emu), ", ",
+ namesof("sigma", lsigma, earg = esigma, tag = FALSE), "\n",
paste(
"1-exp(-(((y-mu)/sigma)^2)/2))/(sqrt(2*pi)*sigma*((y-mu)/sigma)^2)",
"\ty!=mu",
"\n1/(2*sigma*sqrt(2*pi))",
"\t\t\t\t\t\t\ty=mu\n")),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(y <- cbind(y)) != 1)
stop("the response must be a vector or one-column matrix")
predictors.names = c(
- namesof("mu", .lmu, earg= .emu, tag=FALSE),
- namesof("sigma", .lsigma, earg= .esigma, tag=FALSE))
+ namesof("mu", .lmu, earg = .emu, tag = FALSE),
+ namesof("sigma", .lsigma, earg = .esigma, tag = FALSE))
if (!length(etastart)) {
slash.Loglikfun = function(mu, y, x, w, extraargs) {
sigma = if (is.Numeric(.isigma)) .isigma else
- max(0.01, ((quantile(rep(y,w), prob=0.75)/2)-mu)/qnorm(0.75))
+ max(0.01,
+ ((quantile(rep(y,w), prob=0.75)/2)-mu)/qnorm(0.75))
zedd = (y-mu)/sigma
- sum(w * ifelse(abs(zedd)<.smallno, -log(2*sigma*sqrt(2*pi)),
- log1p(-exp(-zedd^2/2)) - log(sqrt(2*pi)*sigma*zedd^2)))
+ sum(w * ifelse(abs(zedd)<.smallno,
+ -log(2*sigma*sqrt(2*pi)),
+ log1p(-exp(-zedd^2/2)) -
+ log(sqrt(2*pi) * sigma * zedd^2)))
}
iprobs = .iprobs
mu.grid = quantile(rep(y,w), probs=iprobs)
mu.grid = seq(mu.grid[1], mu.grid[2], length=100)
mu.init = if (length( .imu )) .imu else
- getMaxMin(mu.grid, objfun=slash.Loglikfun, y=y, x=x, w=w)
+ getMaxMin(mu.grid, objfun=slash.Loglikfun,
+ y=y, x=x, w=w)
sigma.init = if (is.Numeric(.isigma)) .isigma else
max(0.01, ((quantile(rep(y,w), prob=0.75)/2)-mu.init)/qnorm(0.75))
- mu.init = rep(mu.init, length=length(y))
+ mu.init = rep(mu.init, length = length(y))
etastart = matrix(0, n, 2)
- etastart[,1] = theta2eta(mu.init, .lmu, earg=.emu)
- etastart[,2] = theta2eta(sigma.init, .lsigma, earg=.esigma)
+ etastart[,1] = theta2eta(mu.init, .lmu, earg =.emu)
+ etastart[,2] = theta2eta(sigma.init, .lsigma, earg =.esigma)
}
- }), list( .lmu=lmu, .lsigma=lsigma,
- .imu=imu, .isigma=isigma,
- .emu=emu, .esigma=esigma,
- .iprobs=iprobs, .smallno=smallno))),
- inverse=eval(substitute(function(eta, extra=NULL){
- NA * eta2theta(eta[,1], link= .lmu, earg= .emu)
- }, list( .lmu=lmu, .emu=emu ))),
- last=eval(substitute(expression({
- misc$link = c("mu"= .lmu, "sigma"= .lsigma)
- misc$earg = list("mu"= .emu, "sigma"= .esigma)
+ }), list( .lmu = lmu, .lsigma = lsigma,
+ .imu = imu, .isigma = isigma,
+ .emu = emu, .esigma = esigma,
+ .iprobs=iprobs, .smallno = smallno))),
+ inverse = eval(substitute(function(eta, extra = NULL){
+ NA * eta2theta(eta[,1], link = .lmu, earg = .emu)
+ }, list( .lmu = lmu, .emu = emu ))),
+ last = eval(substitute(expression({
+ misc$link <- c("mu" = .lmu, "sigma" = .lsigma)
+ misc$earg <- list("mu" = .emu, "sigma" = .esigma)
misc$expected = TRUE
misc$nsimEIM = .nsimEIM
- }), list( .lmu=lmu, .lsigma=lsigma,
- .emu=emu, .esigma=esigma, .nsimEIM=nsimEIM ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
- mu = eta2theta(eta[,1], link= .lmu, earg= .emu)
- sigma = eta2theta(eta[,2], link= .lsigma, earg= .esigma)
- zedd = (y-mu)/sigma
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dslash(x=y, mu=mu, sigma=sigma, log=TRUE,
+ }), list( .lmu = lmu, .lsigma = lsigma,
+ .emu = emu, .esigma = esigma, .nsimEIM = nsimEIM ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
+ mu = eta2theta(eta[,1], link = .lmu, earg = .emu)
+ sigma = eta2theta(eta[,2], link = .lsigma, earg = .esigma)
+ zedd = (y - mu) / sigma
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dslash(x=y, mu=mu, sigma=sigma, log = TRUE,
smallno= .smallno))
}
- }, list( .lmu=lmu, .lsigma=lsigma,
- .emu=emu, .esigma=esigma, .smallno=smallno ))),
- vfamily=c("slash"),
- deriv=eval(substitute(expression({
- mu = eta2theta(eta[,1], link= .lmu, earg= .emu)
- sigma = eta2theta(eta[,2], link= .lsigma, earg= .esigma)
- dmu.deta = dtheta.deta(mu, link= .lmu, earg= .emu)
- dsigma.deta = dtheta.deta(sigma, link= .lsigma, earg= .esigma)
+ }, list( .lmu = lmu, .lsigma = lsigma,
+ .emu = emu, .esigma = esigma, .smallno = smallno ))),
+ vfamily = c("slash"),
+ deriv = eval(substitute(expression({
+ mu = eta2theta(eta[,1], link = .lmu, earg = .emu)
+ sigma = eta2theta(eta[,2], link = .lsigma, earg = .esigma)
+ dmu.deta = dtheta.deta(mu, link = .lmu, earg = .emu)
+ dsigma.deta = dtheta.deta(sigma, link = .lsigma, earg = .esigma)
zedd = (y-mu)/sigma
d3 = deriv3(~ w * log(1-exp(-(((y-mu)/sigma)^2)/2))-
- log(sqrt(2*pi)*sigma*((y-mu)/sigma)^2), c("mu", "sigma"))
+ log(sqrt(2*pi)*sigma*((y-mu)/sigma)^2),
+ c("mu", "sigma"))
eval.d3 = eval(d3)
dl.dthetas = attr(eval.d3, "gradient")
dl.dmu = dl.dthetas[,1]
@@ -813,11 +843,11 @@ slash.control <- function(save.weight=TRUE, ...)
ans = w * cbind(dl.dmu * dmu.deta,
dl.dsigma * dsigma.deta)
ans
- }), list( .lmu=lmu, .lsigma=lsigma,
- .emu=emu, .esigma=esigma, .smallno=smallno ))),
+ }), list( .lmu = lmu, .lsigma = lsigma,
+ .emu = emu, .esigma = esigma, .smallno = smallno ))),
weight=eval(substitute(expression({
run.varcov = 0
- ind1 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
+ ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
sd3 = deriv3(~ w * log(1-exp(-(((ysim-mu)/sigma)^2)/2))-
log(sqrt(2*pi)*sigma*((ysim-mu)/sigma)^2),
c("mu", "sigma"))
@@ -836,26 +866,26 @@ slash.control <- function(save.weight=TRUE, ...)
temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
}
wz = if (intercept.only)
- matrix(colMeans(run.varcov, na.rm=FALSE),
- n, ncol(run.varcov), byrow=TRUE) else run.varcov
+ matrix(colMeans(run.varcov, na.rm = FALSE),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
dthetas.detas = cbind(dmu.deta, dsigma.deta)
wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
w * wz
- }), list( .lmu=lmu, .lsigma=lsigma,
- .emu=emu, .esigma=esigma,
- .nsimEIM=nsimEIM, .smallno=smallno ))))
+ }), list( .lmu = lmu, .lsigma = lsigma,
+ .emu = emu, .esigma = esigma,
+ .nsimEIM = nsimEIM, .smallno = smallno ))))
}
-dnefghs = function(x, tau, log=FALSE) {
+dnefghs = function(x, tau, log = FALSE) {
if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
rm(log)
N = max(length(x), length(tau))
- x = rep(x, len=N); tau = rep(tau, len=N);
+ x = rep(x, len = N); tau = rep(tau, len = N);
logdensity = log(sin(pi*tau)) + (1-tau)*x - log(pi) - log1p(exp(x))
logdensity[tau < 0] = NaN
@@ -865,28 +895,29 @@ dnefghs = function(x, tau, log=FALSE) {
- nefghs = function(link="logit", earg=list(), itau=NULL, method.init=1)
+ nefghs <- function(link = "logit", earg = list(), itau = NULL,
+ method.init = 1)
{
- if (length(itau) && !is.Numeric(itau, positi=TRUE) || any(itau >= 1))
+ if (length(itau) && !is.Numeric(itau, positi = TRUE) || any(itau >= 1))
stop("argument 'itau' must be in (0,1)")
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
method.init > 2)
stop("'method.init' must be 1 or 2")
new("vglmff",
- blurb=c("Natural exponential family generalized hyperbolic ",
+ blurb = c("Natural exponential family generalized hyperbolic ",
"secant distribution\n",
"f(y) = sin(pi*tau)*exp((1-tau)*y)/(pi*(1+exp(y))\n\n",
"Link: ",
- namesof("tau", link, earg=earg), "\n\n",
+ namesof("tau", link, earg =earg), "\n\n",
"Mean: pi / tan(pi * tau)\n"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("tau", .link, earg=.earg, tag=FALSE)
+ predictors.names = namesof("tau", .link, earg =.earg, tag = FALSE)
if (!length(etastart)) {
wmeany = if ( .method.init == 1) weighted.mean(y,w) else
@@ -895,34 +926,36 @@ dnefghs = function(x, tau, log=FALSE) {
tau.init = atan(pi / wmeany) / pi + 0.5
tau.init[tau.init < 0.03] = 0.03
tau.init[tau.init > 0.97] = 0.97
- tau.init = rep( if (length( .itau )) .itau else tau.init, len=n)
- etastart = theta2eta(tau.init, .link, earg=.earg)
+ tau.init = rep( if (length( .itau )) .itau else tau.init, len = n)
+ etastart = theta2eta(tau.init, .link, earg =.earg)
}
- }), list( .link=link, .earg=earg, .itau=itau, .method.init=method.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- tau = eta2theta(eta, .link, earg=.earg)
+ }), list( .link = link, .earg = earg, .itau = itau,
+ .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ tau = eta2theta(eta, .link, earg =.earg)
pi / tan(pi * tau)
- }, list( .link=link, .earg=earg ))),
- last=eval(substitute(expression({
- misc$link = c(tau= .link)
- misc$earg = list(tau = .earg)
+ }, list( .link=link, .earg =earg ))),
+ last = eval(substitute(expression({
+ misc$link <- c(tau = .link)
+ misc$earg <- list(tau = .earg)
misc$expected = TRUE
misc$method.init= .method.init
- }), list( .link=link, .earg=earg, .method.init=method.init ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- tau = eta2theta(eta, .link, earg=.earg)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dnefghs(x=y, tau=tau, log=TRUE))
+ }), list( .link=link, .earg =earg, .method.init = method.init ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra = NULL) {
+ tau = eta2theta(eta, .link, earg =.earg)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dnefghs(x=y, tau=tau, log = TRUE))
}
- }, list( .link=link, .earg=earg ))),
- vfamily=c("nefghs"),
- deriv=eval(substitute(expression({
- tau = eta2theta(eta, .link, earg=.earg)
+ }, list( .link=link, .earg =earg ))),
+ vfamily = c("nefghs"),
+ deriv = eval(substitute(expression({
+ tau = eta2theta(eta, .link, earg =.earg)
dl.dtau = pi / tan(pi * tau) - y
- dtau.deta = dtheta.deta(tau, .link, earg=.earg)
+ dtau.deta = dtheta.deta(tau, .link, earg =.earg)
w * dl.dtau * dtau.deta
- }), list( .link=link, .earg=earg ))),
+ }), list( .link=link, .earg =earg ))),
weight = eval(substitute(expression({
d2l.dtau2 = (pi / sin(pi * tau))^2
wz = d2l.dtau2 * dtau.deta^2
@@ -933,7 +966,7 @@ dnefghs = function(x, tau, log=FALSE) {
-dlogF = function(x, shape1, shape2, log=FALSE) {
+dlogF = function(x, shape1, shape2, log = FALSE) {
if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
rm(log)
@@ -946,15 +979,15 @@ dlogF = function(x, shape1, shape2, log=FALSE) {
- logF = function(lshape1="loge", lshape2="loge",
- eshape1=list(), eshape2=list(),
- ishape1=NULL, ishape2=1,
- method.init=1)
+ logF = function(lshape1 = "loge", lshape2 = "loge",
+ eshape1 = list(), eshape2 = list(),
+ ishape1 = NULL, ishape2 = 1,
+ method.init = 1)
{
- if (length(ishape1) && !is.Numeric(ishape1, positi=TRUE))
+ if (length(ishape1) && !is.Numeric(ishape1, positi = TRUE))
stop("argument 'ishape1' must be positive")
if ( # length(ishape2) &&
- !is.Numeric(ishape2, positi=TRUE))
+ !is.Numeric(ishape2, positi = TRUE))
stop("argument 'ishape2' must be positive")
if (mode(lshape1) != "character" && mode(lshape1) != "name")
lshape1 = as.character(substitute(lshape1))
@@ -962,173 +995,185 @@ dlogF = function(x, shape1, shape2, log=FALSE) {
lshape2 = as.character(substitute(lshape2))
if (!is.list(eshape1)) eshape1 = list()
if (!is.list(eshape2)) eshape2 = list()
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
method.init > 2)
stop("'method.init' must be 1 or 2")
new("vglmff",
- blurb=c("log F distribution\n",
+ blurb = c("log F distribution\n",
"f(y) = exp(-shape2*y)/(beta(shape1,shape2)*",
"(1+exp(-y))^(shape1+shape2))\n\n",
"Link: ",
- namesof("shape1", lshape1, earg=eshape1),
+ namesof("shape1", lshape1, earg =eshape1),
", ",
- namesof("shape2", lshape2, earg=eshape2),
+ namesof("shape2", lshape2, earg =eshape2),
"\n\n",
"Mean: digamma(shape1) - digamma(shape2)"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names = c(
- namesof("shape1", .lshape1, earg=.eshape1, tag=FALSE),
- namesof("shape2", .lshape2, earg=.eshape2, tag=FALSE))
+ namesof("shape1", .lshape1, earg = .eshape1, tag = FALSE),
+ namesof("shape2", .lshape2, earg = .eshape2, tag = FALSE))
if (!length(etastart)) {
wmeany = if ( .method.init == 1) weighted.mean(y,w) else
median(rep(y,w))
- shape1.init = shape2.init = rep( .ishape2, len=n)
- shape1.init = if (length( .ishape1)) rep( .ishape1, len=n) else {
+ shape1.init = shape2.init = rep( .ishape2, len = n)
+ shape1.init = if (length( .ishape1))
+ rep( .ishape1, len = n) else {
index1 = (y > wmeany)
shape1.init[index1] = shape2.init[index1] + 1/1
shape1.init[!index1] = shape2.init[!index1] - 1/1
shape1.init = pmax(shape1.init, 1/8)
shape1.init
}
- etastart = cbind(theta2eta(shape1.init, .lshape1, earg=.eshape1),
- theta2eta(shape2.init, .lshape2, earg=.eshape2))
+ etastart =
+ cbind(theta2eta(shape1.init, .lshape1, earg = .eshape1),
+ theta2eta(shape2.init, .lshape2, earg = .eshape2))
}
- }), list( .lshape1=lshape1, .lshape2=lshape2,
- .eshape1=eshape1, .eshape2=eshape2,
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2,
+ .eshape1 = eshape1, .eshape2 = eshape2,
.ishape1=ishape1, .ishape2=ishape2,
- .method.init=method.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- shape1 = eta2theta(eta[,1], .lshape1, earg=.eshape1)
- shape2 = eta2theta(eta[,2], .lshape2, earg=.eshape2)
+ .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ shape1 = eta2theta(eta[,1], .lshape1, earg = .eshape1)
+ shape2 = eta2theta(eta[,2], .lshape2, earg = .eshape2)
digamma(shape1) - digamma(shape2)
- }, list( .lshape1=lshape1, .lshape2=lshape2,
- .eshape1=eshape1, .eshape2=eshape2 ))),
- last=eval(substitute(expression({
- misc$link = c(shape1= .lshape1, shape2= .lshape2)
- misc$earg = list(shape1= .eshape1, shape2= .eshape2)
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ last = eval(substitute(expression({
+ misc$link <- c(shape1 = .lshape1, shape2 = .lshape2)
+ misc$earg <- list(shape1 = .eshape1, shape2 = .eshape2)
misc$expected = TRUE
misc$method.init= .method.init
- }), list( .lshape1=lshape1, .lshape2=lshape2,
- .eshape1=eshape1, .eshape2=eshape2,
- .method.init=method.init ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- shape1 = eta2theta(eta[,1], .lshape1, earg=.eshape1)
- shape2 = eta2theta(eta[,2], .lshape2, earg=.eshape2)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dlogF(x=y, shape1=shape1, shape2=shape2, log=TRUE))
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2,
+ .eshape1 = eshape1, .eshape2 = eshape2,
+ .method.init = method.init ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra = NULL) {
+ shape1 = eta2theta(eta[,1], .lshape1, earg = .eshape1)
+ shape2 = eta2theta(eta[,2], .lshape2, earg = .eshape2)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dlogF(x=y, shape1 = shape1, shape2 = shape2, log = TRUE))
}
- }, list( .lshape1=lshape1, .lshape2=lshape2,
- .eshape1=eshape1, .eshape2=eshape2 ))),
- vfamily=c("logF"),
- deriv=eval(substitute(expression({
- shape1 = eta2theta(eta[,1], .lshape1, earg=.eshape1)
- shape2 = eta2theta(eta[,2], .lshape2, earg=.eshape2)
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ vfamily = c("logF"),
+ deriv = eval(substitute(expression({
+ shape1 = eta2theta(eta[,1], .lshape1, earg = .eshape1)
+ shape2 = eta2theta(eta[,2], .lshape2, earg = .eshape2)
tmp888 = digamma(shape1 + shape2) - log1p(exp(-y))
dl.dshape1 = tmp888 - digamma(shape1)
dl.dshape2 = tmp888 - digamma(shape2) - y
- dshape1.deta = dtheta.deta(shape1, .lshape1, earg=.eshape1)
- dshape2.deta = dtheta.deta(shape2, .lshape2, earg=.eshape2)
+ dshape1.deta = dtheta.deta(shape1, .lshape1, earg = .eshape1)
+ dshape2.deta = dtheta.deta(shape2, .lshape2, earg = .eshape2)
w * cbind(dl.dshape1 * dshape1.deta, dl.dshape2 * dshape2.deta)
- }), list( .lshape1=lshape1, .lshape2=lshape2,
- .eshape1=eshape1, .eshape2=eshape2 ))),
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
weight = eval(substitute(expression({
tmp888 = trigamma(shape1 + shape2)
d2l.dshape12 = trigamma(shape1) - tmp888
d2l.dshape22 = trigamma(shape2) - tmp888
d2l.dshape1shape2 = -tmp888
wz = matrix(0, n, dimm(M))
- wz[,iam(1,1,M=M)] = d2l.dshape12 * dshape1.deta^2
- wz[,iam(2,2,M=M)] = d2l.dshape22 * dshape2.deta^2
- wz[,iam(1,2,M=M)] = d2l.dshape1shape2 * dshape1.deta * dshape2.deta
+ wz[,iam(1,1,M = M)] = d2l.dshape12 * dshape1.deta^2
+ wz[,iam(2,2,M = M)] = d2l.dshape22 * dshape2.deta^2
+ wz[,iam(1,2,M = M)] = d2l.dshape1shape2 * dshape1.deta *
+ dshape2.deta
w * wz
- }), list( .lshape1=lshape1, .lshape2=lshape2,
- .eshape1=eshape1, .eshape2=eshape2 ))))
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))))
}
-dbenf = function(x, ndigits=1, log=FALSE) {
- if (!is.Numeric(ndigits, allow=1, posit=TRUE, integ=TRUE) || ndigits > 2)
- stop("argument 'ndigits' must be 1 or 2")
- lowerlimit = ifelse(ndigits==1, 1, 10)
- upperlimit = ifelse(ndigits==1, 9, 99)
- log.arg = log; rm(log)
- ans = x * NA
- indexTF = is.finite(x) & (x >= lowerlimit)
- ans[indexTF] = log10(1 + 1/x[indexTF])
- ans[!is.na(x) & !is.nan(x) & ((x < lowerlimit) |
- (x > upperlimit) |
- (x != round(x)))] = 0.0
- if (log.arg) log(ans) else ans
+dbenf <- function(x, ndigits = 1, log = FALSE) {
+ if (!is.Numeric(ndigits, allow = 1, posit = TRUE, integ = TRUE) ||
+ ndigits > 2)
+ stop("argument 'ndigits' must be 1 or 2")
+ lowerlimit <- ifelse(ndigits == 1, 1, 10)
+ upperlimit <- ifelse(ndigits == 1, 9, 99)
+ log.arg <- log; rm(log)
+ ans <- x * NA
+ indexTF <- is.finite(x) & (x >= lowerlimit)
+
+ ans[indexTF] <- log10(1 + 1/x[indexTF])
+ ans[!is.na(x) & !is.nan(x) &
+ ((x < lowerlimit) |
+ (x > upperlimit) |
+ (x != round(x)))] <- 0.0
+ if (log.arg) log(ans) else ans
}
-rbenf = function(n, ndigits=1) {
- if (!is.Numeric(ndigits, allow=1, posit=TRUE, integ=TRUE) || ndigits > 2)
- stop("argument 'ndigits' must be 1 or 2")
- lowerlimit = ifelse(ndigits==1, 1, 10)
- upperlimit = ifelse(ndigits==1, 9, 99)
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
- stop("bad input for argument 'n'") else n
- myrunif = runif(use.n)
- ans = rep(lowerlimit, length = use.n)
- for(ii in (lowerlimit+1):upperlimit) {
- indexTF = (pbenf(ii-1, ndigits=ndigits) < myrunif) &
- (myrunif <= pbenf(ii, ndigits=ndigits))
- ans[indexTF] = ii
- }
- ans
+rbenf <- function(n, ndigits = 1) {
+ if (!is.Numeric(ndigits, allow = 1, posit = TRUE, integ = TRUE) ||
+ ndigits > 2)
+ stop("argument 'ndigits' must be 1 or 2")
+ lowerlimit <- ifelse(ndigits == 1, 1, 10)
+ upperlimit <- ifelse(ndigits == 1, 9, 99)
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
+ stop("bad input for argument 'n'") else n
+ myrunif <- runif(use.n)
+
+ ans <- rep(lowerlimit, length = use.n)
+ for(ii in (lowerlimit+1):upperlimit) {
+ indexTF <- (pbenf(ii-1, ndigits = ndigits) < myrunif) &
+ (myrunif <= pbenf(ii, ndigits=ndigits))
+ ans[indexTF] <- ii
+ }
+ ans
}
-pbenf = function(q, ndigits=1, log.p=FALSE) {
- if (!is.Numeric(ndigits, allow=1, posit=TRUE, integ=TRUE) || ndigits > 2)
- stop("argument 'ndigits' must be 1 or 2")
- lowerlimit = ifelse(ndigits==1, 1, 10)
- upperlimit = ifelse(ndigits==1, 9, 99)
- ans = q * NA
- floorq = floor(q)
- indexTF = is.finite(q) & (floorq >= lowerlimit)
- ans[indexTF] = log10(1 + floorq[indexTF]) - ifelse(ndigits==1, 0, 1)
- ans[!is.na(q) & !is.nan(q) & (q>=upperlimit)] = 1
- ans[!is.na(q) & !is.nan(q) & (q< lowerlimit)] = 0
- if (log.p) log(ans) else ans
+pbenf <- function(q, ndigits = 1, log.p = FALSE) {
+ if (!is.Numeric(ndigits, allow = 1, posit = TRUE, integ = TRUE) ||
+ ndigits > 2)
+ stop("argument 'ndigits' must be 1 or 2")
+ lowerlimit <- ifelse(ndigits == 1, 1, 10)
+ upperlimit <- ifelse(ndigits == 1, 9, 99)
+
+ ans <- q * NA
+ floorq <- floor(q)
+ indexTF <- is.finite(q) & (floorq >= lowerlimit)
+ ans[indexTF] <- log10(1 + floorq[indexTF]) - ifelse(ndigits == 1, 0, 1)
+ ans[!is.na(q) & !is.nan(q) & (q >= upperlimit)] <- 1
+ ans[!is.na(q) & !is.nan(q) & (q < lowerlimit)] <- 0
+ if (log.p) log(ans) else ans
}
-qbenf = function(p, ndigits=1) {
- if (!is.Numeric(ndigits, allow=1, posit=TRUE, integ=TRUE) || ndigits > 2)
- stop("argument 'ndigits' must be 1 or 2")
- lowerlimit = ifelse(ndigits==1, 1, 10)
- upperlimit = ifelse(ndigits==1, 9, 99)
- bad = !is.na(p) & !is.nan(p) & ((p < 0) | (p > 1))
- if (any(bad))
- stop("bad input for 'p'")
+qbenf <- function(p, ndigits = 1) {
+ if (!is.Numeric(ndigits, allow = 1, posit = TRUE, integ = TRUE) ||
+ ndigits > 2)
+ stop("argument 'ndigits' must be 1 or 2")
+ lowerlimit <- ifelse(ndigits == 1, 1, 10)
+ upperlimit <- ifelse(ndigits == 1, 9, 99)
+ bad <- !is.na(p) & !is.nan(p) & ((p < 0) | (p > 1))
+ if (any(bad))
+ stop("bad input for 'p'")
- ans = rep(lowerlimit, length = length(p))
- for(ii in (lowerlimit+1):upperlimit) {
- indexTF = is.finite(p) &
- (pbenf(ii-1, ndigits=ndigits) < p) &
- (p <= pbenf(ii, ndigits=ndigits))
- ans[indexTF] = ii
- }
+ ans <- rep(lowerlimit, length = length(p))
+ for(ii in (lowerlimit+1):upperlimit) {
+ indexTF <- is.finite(p) &
+ (pbenf(ii-1, ndigits=ndigits) < p) &
+ (p <= pbenf(ii, ndigits=ndigits))
+ ans[indexTF] <- ii
+ }
- ans[is.na(p) | is.nan(p)] = NA
- ans[!is.na(p) & !is.nan(p) & (p==0)] = lowerlimit
- ans[!is.na(p) & !is.nan(p) & (p==1)] = upperlimit
- ans
+ ans[ is.na(p) | is.nan(p)] <- NA
+ ans[!is.na(p) & !is.nan(p) & (p == 0)] <- lowerlimit
+ ans[!is.na(p) & !is.nan(p) & (p == 1)] <- upperlimit
+ ans
}
diff --git a/R/family.basics.R b/R/family.basics.R
index 174d882..9a61c5e 100644
--- a/R/family.basics.R
+++ b/R/family.basics.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -31,8 +32,8 @@ getind <- function(constraints, M, ncolx) {
ansx <- c(ansx, ii)
}
}
- ans[[kk]] <- list(xindex=ansx,
- X_vlmindex=(1:ncol(temp2))[temp2[kk,] != 0])
+ ans[[kk]] <- list(xindex = ansx,
+ X_vlmindex = (1:ncol(temp2))[temp2[kk,] != 0])
}
ans[[M+1]] <- ncol(temp2)
@@ -42,7 +43,7 @@ getind <- function(constraints, M, ncolx) {
cm.vgam <- function(cm, x, bool, constraints,
- intercept.apply=FALSE, overwrite=FALSE)
+ intercept.apply = FALSE, overwrite = FALSE)
{
@@ -65,7 +66,7 @@ getind <- function(constraints, M, ncolx) {
names(constraints) <- nasgn
}
if (!is.list(constraints))
- stop("'constraints' must be a list")
+ stop("argument 'constraints' must be a list")
if (length(constraints) != length(nasgn) ||
any(sort(names(constraints)) != sort(nasgn))) {
@@ -78,7 +79,7 @@ getind <- function(constraints, M, ncolx) {
if (is.logical(bool)) {
if (bool) {
- if (intercept.apply && any(nasgn=="(Intercept)"))
+ if (intercept.apply && any(nasgn == "(Intercept)"))
constraints[["(Intercept)"]] <- cm
if (length(ninasgn))
for (ii in ninasgn)
@@ -87,18 +88,12 @@ getind <- function(constraints, M, ncolx) {
return(constraints)
}
} else {
- if (!is.R()) {
- warn.save <- options()$warn
- options(warn=-1)
- tbool <- terms(bool) # Sqawks if FALSE or TRUE is response
- options(warn=warn.save) # Restore the warnings
- } else
- tbool <- terms(bool)
+ tbool <- terms(bool)
if (attr(tbool, "response")) {
- i <- attr(tbool, "factors")
- default <- dimnames(i)[[1]]
+ ii <- attr(tbool, "factors")
+ default <- dimnames(ii)[[1]]
default <- default[1]
- default <- parse(text=default[1])[[1]]
+ default <- parse(text = default[1])[[1]]
default <- as.logical(eval(default))
} else {
default <- TRUE
@@ -152,7 +147,7 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M)
warning("Constraint matrix of (Intercept) not diagonal")
temp <- constraints[["(Intercept)"]]
- temp <- temp[,-nointercept,drop=FALSE] # Will have M rows & at least 1 coln
+ temp <- temp[,-nointercept,drop = FALSE] # Will have M rows & at least 1 coln
constraints[["(Intercept)"]] <- temp
constraints
}
@@ -168,14 +163,16 @@ cm.zero.vgam <- function(constraints, x, zero, M)
constraints <- vector("list", length(nasgn)) # list()
names(constraints) <- nasgn
}
- if (!is.list(constraints)) stop("'constraints' must be a list")
+ if (!is.list(constraints))
+ stop("'constraints' must be a list")
for (ii in 1:length(asgn))
constraints[[nasgn[ii]]] <- if (is.null(constraints[[nasgn[ii]]]))
diag(M) else eval(constraints[[nasgn[ii]]])
if (is.null(zero))
return(constraints)
- if (!is.numeric(zero)) stop("'zero' must be numeric")
+ if (!is.numeric(zero))
+ stop("'zero' must be numeric")
if (any(zero < 1 | zero > M))
stop("'zero' out of range")
if (nasgn[1] != "(Intercept)")
@@ -183,22 +180,23 @@ cm.zero.vgam <- function(constraints, x, zero, M)
if (2 <= length(constraints))
for (ii in 2:length(constraints)) {
- temp <- constraints[[nasgn[ii]]]
- temp[zero,] <- 0
+ Hmatk <- constraints[[nasgn[ii]]]
+ Hmatk[zero, ] <- 0
index <- NULL
- for (kk in 1:ncol(temp))
- if (all(temp[,kk] == 0)) index <- c(index,kk)
- if (length(index) == ncol(temp))
+ for (kk in 1:ncol(Hmatk))
+ if (all(Hmatk[,kk] == 0)) index <- c(index, kk)
+ if (length(index) == ncol(Hmatk))
stop("constraint matrix has no columns!")
if (!is.null(index))
- temp <- temp[,-index,drop=FALSE]
- constraints[[nasgn[ii]]] <- temp
+ Hmatk <- Hmatk[, -index, drop = FALSE]
+ constraints[[nasgn[ii]]] <- Hmatk
}
constraints
}
-process.constraints <- function(constraints, x, M, by.col=TRUE, specialCM=NULL)
+process.constraints <- function(constraints, x, M,
+ by.col = TRUE, specialCM = NULL)
{
@@ -222,30 +220,31 @@ process.constraints <- function(constraints, x, M, by.col=TRUE, specialCM=NULL)
lenconstraints <- length(constraints)
if (lenconstraints > 0)
- for (i in 1:lenconstraints) {
- constraints[[i]] <- eval(constraints[[i]])
- if (!is.null(constraints[[i]]) && !is.matrix(constraints[[i]]))
- stop("'constraints[[",i,"]]' is not a matrix")
+ for (ii in 1:lenconstraints) {
+ constraints[[ii]] <- eval(constraints[[ii]])
+ if (!is.null (constraints[[ii]]) &&
+ !is.matrix(constraints[[ii]]))
+ stop("'constraints[[", ii, "]]' is not a matrix")
}
if (is.null(names(constraints)))
names(constraints) <- rep(nasgn, length=lenconstraints)
- temp <- if (!is.R()) list() else {
+ temp <- if (!is.R()) list() else {
junk <- vector("list", length(nasgn))
names(junk) <- nasgn
junk
}
- for (i in 1:length(nasgn))
- temp[[nasgn[i]]] <-
- if (is.null(constraints[[nasgn[i]]])) diag(M) else
- eval(constraints[[nasgn[i]]])
+ for (ii in 1:length(nasgn))
+ temp[[nasgn[ii]]] <-
+ if (is.null(constraints[[nasgn[ii]]])) diag(M) else
+ eval(constraints[[nasgn[ii]]])
- for (i in 1:length(asgn)) {
- if (!is.matrix(temp[[i]])) {
+ for (ii in 1:length(asgn)) {
+ if (!is.matrix(temp[[ii]])) {
stop("not a constraint matrix")
}
- if (ncol(temp[[i]]) > M)
+ if (ncol(temp[[ii]]) > M)
stop("constraint matrix has too many columns")
}
@@ -259,7 +258,8 @@ process.constraints <- function(constraints, x, M, by.col=TRUE, specialCM=NULL)
ictr = 0
for (jay in cols) {
ictr = ictr + 1
- cm = if (is.list(specialCM) && any(nasgn[ii] == names(specialCM))) {
+ cm = if (is.list(specialCM) &&
+ any(nasgn[ii] == names(specialCM))) {
slist = specialCM[[(nasgn[ii])]]
slist[[ictr]]
} else constraints[[ii]]
@@ -305,11 +305,11 @@ process.constraints <- function(constraints, x, M, by.col=TRUE, specialCM=NULL)
add.constraints <- function(constraints, new.constraints,
- overwrite=FALSE, check=FALSE)
+ overwrite = FALSE, check = FALSE)
{
empty.list <- function(l)
- (is.null(l) || (is.list(l) && length(l)==0))
+ (is.null(l) || (is.list(l) && length(l) == 0))
if (empty.list(constraints))
if (is.list(new.constraints))
@@ -323,21 +323,21 @@ add.constraints <- function(constraints, new.constraints,
if (is.null(nc) || is.null(nn))
stop("lists must have names")
- if (any(nc=="") || any(nn==""))
+ if (any(nc == "") || any(nn == ""))
stop("lists must have names")
if (!empty.list(constraints) && !empty.list(new.constraints)) {
- for (i in nn) {
- if (any(i==nc)) {
+ for (ii in nn) {
+ if (any(ii == nc)) {
if (check &&
- (!(all(dim(constraints[[i]])==dim(new.constraints[[i]])) &&
- all(constraints[[i]]==new.constraints[[i]]))))
+ (!(all(dim(constraints[[ii]]) == dim(new.constraints[[ii]])) &&
+ all( constraints[[ii]] == new.constraints[[ii]]))))
stop("apparent contradiction in the specification ",
"of the constraints")
if (overwrite)
- constraints[[i]] <- new.constraints[[i]]
+ constraints[[ii]] <- new.constraints[[ii]]
} else
- constraints[[i]] <- new.constraints[[i]]
+ constraints[[ii]] <- new.constraints[[ii]]
}
} else {
if (!empty.list(constraints))
@@ -355,14 +355,16 @@ add.constraints <- function(constraints, new.constraints,
-iam <- function(j, k, M, hbw=M, both=FALSE, diagonal=TRUE)
+iam <- function(j, k, M, hbw = M, both = FALSE, diagonal = TRUE)
{
+ jay <- j
+ kay <- k
- if (M==1)
+ if (M == 1)
if (!diagonal) stop("cannot handle this")
- if (M==1)
- if (both) return(list(row.index=1, col.index=1)) else return(1)
+ if (M == 1)
+ if (both) return(list(row.index = 1, col.index = 1)) else return(1)
upper <- if (diagonal) M else M-1
i2 <- as.list(upper:1)
@@ -371,20 +373,22 @@ iam <- function(j, k, M, hbw=M, both=FALSE, diagonal=TRUE)
i1 <- matrix(1:M, M, M)
- i1 <- if (diagonal) c(i1[row(i1)>=col(i1)]) else c(i1[row(i1)>col(i1)])
+ i1 <- if (diagonal) c(i1[row(i1) >= col(i1)]) else
+ c(i1[row(i1) > col(i1)])
- if (both) list(row.index=i2, col.index=i1) else {
- if (j > M || k > M || j < 1 || k < 1)
+ if (both) list(row.index = i2, col.index = i1) else {
+ if (jay > M || kay > M || jay < 1 || kay < 1)
stop("range error in j or k")
- both <- (i1==j & i2==k) | (i1==k & i2==j)
+ both <- (i1 == jay & i2 == kay) |
+ (i1 == kay & i2 == jay)
(1:length(i2))[both]
}
}
-dimm <- function(M, hbw=M)
+dimm <- function(M, hbw = M)
{
@@ -401,13 +405,13 @@ dimm <- function(M, hbw=M)
-m2avglm <- function(object, upper=FALSE, allow.vector=FALSE) {
+m2avglm <- function(object, upper = FALSE, allow.vector = FALSE) {
m2adefault(wweights(object), M=object at misc$M,
upper=upper, allow.vector=allow.vector)
}
-m2adefault <- function(m, M, upper=FALSE, allow.vector=FALSE)
+m2adefault <- function(m, M, upper = FALSE, allow.vector = FALSE)
{
if (!is.numeric(m))
stop("argument 'm' is not numeric")
@@ -416,7 +420,7 @@ m2adefault <- function(m, M, upper=FALSE, allow.vector=FALSE)
m <- cbind(m)
n <- nrow(m)
dimm <- ncol(m)
- index <- iam(NA, NA, M=M, both=TRUE, diag=TRUE)
+ index <- iam(NA, NA, M=M, both = TRUE, diag = TRUE)
if (dimm > length(index$row.index))
stop("bad value for M; it is too small")
if (dimm < M) {
@@ -428,7 +432,7 @@ m2adefault <- function(m, M, upper=FALSE, allow.vector=FALSE)
as.integer(index$row-1),
as.integer(index$col-1),
as.integer(n), as.integer(M),
- as.integer(as.numeric(upper)), NAOK=TRUE)
+ as.integer(as.numeric(upper)), NAOK = TRUE)
dim(fred$ans) <- c(M,M,n)
alpn <- NULL
dimnames(fred$ans) <- list(alpn, alpn, dimnames(m)[[1]])
@@ -436,12 +440,12 @@ m2adefault <- function(m, M, upper=FALSE, allow.vector=FALSE)
}
-a2m <- function(a, hbw=M)
+a2m <- function(a, hbw = M)
{
- if (is.matrix(a) && ncol(a)==nrow(a))
+ if (is.matrix(a) && ncol(a) == nrow(a))
a <- array(a, c(nrow(a), ncol(a), 1))
if (!is.array(a))
dim(a) <- c(1,1,length(a))
@@ -449,14 +453,14 @@ a2m <- function(a, hbw=M)
M <- dim(a)[1]
n <- dim(a)[3]
dimm.value <- dimm(M, hbw)
- index <- iam(NA, NA, M, both=TRUE, diag=TRUE)
+ index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
fred <- dotC(name="a2m", as.double(a), m=double(dimm.value*n),
as.integer(dimm.value),
as.integer(index$row-1),
as.integer(index$col-1),
- as.integer(n), as.integer(M), NAOK=TRUE)
+ as.integer(n), as.integer(M), NAOK = TRUE)
dim(fred$m) <- c(dimm.value,n)
fred$m <- t(fred$m)
@@ -468,22 +472,23 @@ a2m <- function(a, hbw=M)
}
-vindex <- function(M, row.arg=FALSE, col.arg=FALSE, length.arg=M*(M+1)/2)
+vindex <- function(M, row.arg = FALSE, col.arg = FALSE,
+ length.arg = M*(M+1)/2)
{
if ((row.arg + col.arg) != 1)
stop("only one of row and col must be TRUE")
- if (M==1) {
+ if (M == 1) {
ans <- 1
} else {
if (row.arg) {
i1 <- matrix(1:M, M, M)
- ans <- c(i1[row(i1)+col(i1)<=(M+1)])
+ ans <- c(i1[row(i1)+col(i1) <= (M+1)])
} else {
i1 <- matrix(1:M, M, M)
- ans <- c(i1[row(i1)>=col(i1)])
+ ans <- c(i1[row(i1) >= col(i1)])
}
}
if (length.arg>length(ans))
@@ -494,11 +499,11 @@ vindex <- function(M, row.arg=FALSE, col.arg=FALSE, length.arg=M*(M+1)/2)
if(!exists("is.R")) is.R <- function()
- exists("version") && !is.null(version$language) && version$language=="R"
+ exists("version") && !is.null(version$language) && version$language == "R"
-wweights = function(object, matrix.arg=TRUE, deriv.arg=FALSE,
- ignore.slot=FALSE, checkwz=TRUE) {
+wweights <- function(object, matrix.arg = TRUE, deriv.arg = FALSE,
+ ignore.slot = FALSE, checkwz = TRUE) {
@@ -510,25 +515,25 @@ wweights = function(object, matrix.arg=TRUE, deriv.arg=FALSE,
M <- object at misc$M # Done below
n <- object at misc$n # Done below
- if (any(slotNames(object)=="extra")) {
+ if (any(slotNames(object) == "extra")) {
extra <- object at extra
- if (length(extra)==1 && !length(names(extra))) {
+ if (length(extra) == 1 && !length(names(extra))) {
# Usage was something like vglm(..., extra = 5)
# so, internally, extra == 5 and not a list
extra <- extra[[1]]
}
}
mu <- object at fitted.values
- if (any(slotNames(object)=="predictors"))
+ if (any(slotNames(object) == "predictors"))
eta <- object at predictors
mt <- terms(object) # object at terms$terms; 11/8/03
Blist <- constraints <- object at constraints
new.coeffs <- object at coefficients
- if (any(slotNames(object)=="iter"))
+ if (any(slotNames(object) == "iter"))
iter <- object at iter
w <- rep(1, n)
- if (any(slotNames(object)=="prior.weights"))
+ if (any(slotNames(object) == "prior.weights"))
w <- object at prior.weights
if (!length(w))
w <- rep(1, n)
@@ -538,17 +543,17 @@ wweights = function(object, matrix.arg=TRUE, deriv.arg=FALSE,
x <- model.matrixvlm(object, type="lm")
y <- object at y
- if (any(slotNames(object)=="control"))
- for (i in names(object at control)) {
- assign(i, object at control[[i]])
+ if (any(slotNames(object) == "control"))
+ for (ii in names(object at control)) {
+ assign(ii, object at control[[ii]])
}
if (length(object at misc))
- for (i in names(object at misc)) {
- assign(i, object at misc[[i]])
+ for (ii in names(object at misc)) {
+ assign(ii, object at misc[[ii]])
}
- if (any(slotNames(object)=="family")) {
+ if (any(slotNames(object) == "family")) {
expr <- object at family@deriv
deriv.mu <- eval(expr)
# Need to compute wz only if it couldn't be extracted from the object
@@ -566,7 +571,7 @@ wweights = function(object, matrix.arg=TRUE, deriv.arg=FALSE,
}
-pweights = function(object, ...) {
+pweights <- function(object, ...) {
ans = object at prior.weights
if (length(ans)) {
ans
@@ -579,7 +584,7 @@ pweights = function(object, ...) {
}
-procVec = function(vec, yn, Default) {
+procVec <- function(vec, yn, Default) {
@@ -590,7 +595,7 @@ procVec = function(vec, yn, Default) {
nvec <- names(vec) # vec[""] undefined
named = length(nvec) # FALSE for c(1,3)
if (named) {
- index = (1:L)[nvec==""]
+ index = (1:L)[nvec == ""]
default = if (length(index)) vec[index] else Default
} else {
default = vec
@@ -625,17 +630,19 @@ setMethod("m2a", "vglm",
}
-weightsvglm = function(object, type = c("prior", "working"),
- matrix.arg=TRUE, ignore.slot=FALSE,
- deriv.arg=FALSE, ...) {
+weightsvglm <- function(object, type = c("prior", "working"),
+ matrix.arg = TRUE, ignore.slot = FALSE,
+ deriv.arg = FALSE, ...) {
weightsvlm(object, type = type, matrix.arg=matrix.arg,
ignore.slot=ignore.slot,
deriv.arg=deriv.arg, ...)
}
-weightsvlm = function(object, type = c("prior", "working"),
- matrix.arg=TRUE, ignore.slot=FALSE,
- deriv.arg=FALSE, ...) {
+
+
+weightsvlm <- function(object, type = c("prior", "working"),
+ matrix.arg = TRUE, ignore.slot = FALSE,
+ deriv.arg = FALSE, ...) {
if (mode(type) != "character" && mode(type) != "name")
type = as.character(substitute(type))
type = match.arg(type, c("prior", "working"))[1]
@@ -645,7 +652,7 @@ weightsvlm = function(object, type = c("prior", "working"),
matrix.arg=matrix.arg, deriv.arg=deriv.arg,
ignore.slot=ignore.slot, ...)
} else {
- if (deriv.arg) stop("cannot set 'deriv=TRUE' when 'type=\"prior\"'")
+ if (deriv.arg) stop("cannot set 'deriv = TRUE' when 'type=\"prior\"'")
ans = pweights(object)
if (matrix.arg) as.matrix(ans) else c(ans)
}
@@ -664,38 +671,35 @@ setMethod("weights", "vglm",
-dotFortran = function(name, ..., NAOK = FALSE, DUP = TRUE,
- PACKAGE="VGAM") {
+dotFortran <- function(name, ..., NAOK = FALSE, DUP = TRUE,
+ PACKAGE = "VGAM") {
if (is.R()) {
- .Fortran(name=name, ..., NAOK = NAOK, DUP = DUP, PACKAGE=PACKAGE)
+ .Fortran(name=name, ..., NAOK = NAOK, DUP = DUP, PACKAGE = PACKAGE)
} else {
- stop()
+ stop()
}
}
-dotC = function(name, ..., NAOK = FALSE, DUP = TRUE, PACKAGE="VGAM") {
- if (is.R()) {
- .C(name=name, ..., NAOK = NAOK, DUP = DUP, PACKAGE=PACKAGE)
- } else {
- stop()
- }
+dotC <- function(name, ..., NAOK = FALSE, DUP = TRUE, PACKAGE="VGAM") {
+ .C(name=name, ..., NAOK = NAOK, DUP = DUP, PACKAGE = PACKAGE)
}
-qnupdate = function(w, wzold, dderiv, deta, M, keeppd=TRUE,
- trace=FALSE, reset=FALSE, effpos=.Machine$double.eps^0.75) {
+qnupdate <- function(w, wzold, dderiv, deta, M, keeppd = TRUE,
+ trace = FALSE, reset = FALSE,
+ effpos=.Machine$double.eps^0.75) {
- if (M ==1) {
+ if (M == 1) {
dderiv = cbind(dderiv)
deta = cbind(deta)
}
- Bs = mux22(t(wzold), deta, M=M, upper=FALSE, as.mat=TRUE) # n x M
+ Bs = mux22(t(wzold), deta, M=M, upper = FALSE, as.mat = TRUE) # n x M
sBs = c( (deta * Bs) %*% rep(1, M) ) # should have positive values
sy = c( (dderiv * deta) %*% rep(1, M) )
wznew = wzold
- index = iam(NA, NA, M=M, both=TRUE)
+ index = iam(NA, NA, M=M, both = TRUE)
index$row.index = rep(index$row.index, len=ncol(wzold))
index$col.index = rep(index$col.index, len=ncol(wzold))
updateThese = if (keeppd) (sy > effpos) else rep(TRUE, len=length(sy))
@@ -720,20 +724,21 @@ qnupdate = function(w, wzold, dderiv, deta, M, keeppd=TRUE,
-mbesselI0 = function(x, deriv.arg=0) {
- if (!is.Numeric(deriv.arg, allow=1, integer=TRUE, positi=TRUE) && deriv.arg!=0)
- stop("deriv.arg must be a single non-negative integer")
- if (!(deriv.arg==0 || deriv.arg==1 || deriv.arg==2))
- stop("deriv must be 0, 1, or 2")
+mbesselI0 <- function(x, deriv.arg = 0) {
+ if (!is.Numeric(deriv.arg, allow = 1, integer = TRUE, posit = TRUE) &&
+ deriv.arg != 0)
+ stop("argument 'deriv.arg' must be a single non-negative integer")
+ if (!(deriv.arg == 0 || deriv.arg == 1 || deriv.arg == 2))
+ stop("argument 'deriv' must be 0, 1, or 2")
if (!is.Numeric(x))
- stop("bad input for x")
- n = length(x)
+ stop("bad input for argument 'x'")
+ nn = length(x)
if (FALSE) {
}
# Use finite differences
- ans = matrix(as.numeric(NA), nrow=n, ncol=deriv.arg+1)
- ans[,1] = besselI(x, nu=0)
+ ans = matrix(as.numeric(NA), nrow=nn, ncol=deriv.arg+1)
+ ans[, 1] = besselI(x, nu=0)
if (deriv.arg>=1) ans[,2] = besselI(x, nu=1)
if (deriv.arg>=2) ans[,3] = ans[,1] - ans[,2] / x
ans
@@ -741,14 +746,14 @@ mbesselI0 = function(x, deriv.arg=0) {
-VGAM.matrix.norm = function(A, power=2, suppressWarning=FALSE) {
+VGAM.matrix.norm <- function(A, power=2, suppressWarning = FALSE) {
if ((nrow(A) != ncol(A)) && !suppressWarning)
warning("norms should be calculated for square matrices; A is not square")
- if (power=="F") {
+ if (power == "F") {
sqrt(sum(A^2))
- } else if (power==1) {
+ } else if (power == 1) {
max(colSums(abs(A)))
- } else if (power==2) {
+ } else if (power == 2) {
sqrt(max(eigen(t(A) %*% A)$value))
} else if (!is.finite(power)) {
max(colSums(abs(A)))
@@ -760,51 +765,52 @@ VGAM.matrix.norm = function(A, power=2, suppressWarning=FALSE) {
-rmfromVGAMenv = function(varnames, prefix="") {
- evarnames = paste(prefix, varnames, sep="")
+rmfromVGAMenv <- function(varnames, prefix = "") {
+ evarnames = paste(prefix, varnames, sep = "")
if (is.R()) {
- for (i in evarnames) {
- mytext1 = "exists(x=i, envir = VGAMenv)"
- myexp1 = parse(text=mytext1)
+ for (ii in evarnames) {
+ mytext1 = "exists(x = ii, envir = VGAM:::VGAMenv)"
+ myexp1 = parse(text = mytext1)
is.there = eval(myexp1)
if (is.there) {
- rm(list=i, envir = VGAMenv)
+ rm(list = ii, envir = VGAM:::VGAMenv)
}
}
} else {
warning("this code needs checking 9")
- for (i in evarnames)
- while(exists(i, inherits=TRUE))
- rm(i, inherits=TRUE)
+ for (ii in evarnames)
+ while(exists(ii, inherits = TRUE))
+ rm(ii, inherits = TRUE)
}
}
-existsinVGAMenv = function(varnames, prefix="") {
+existsinVGAMenv <- function(varnames, prefix="") {
evarnames = paste(prefix, varnames, sep="")
ans = NULL
if (is.R()) {
- for (i in evarnames) {
- mytext1 = "exists(x=i, envir = VGAMenv)"
- myexp1 = parse(text=mytext1)
+ for (ii in evarnames) {
+ mytext1 = "exists(x = ii, envir = VGAM:::VGAMenv)"
+ myexp1 = parse(text = mytext1)
is.there = eval(myexp1)
ans = c(ans, is.there)
}
} else {
warning("this code needs checking 8")
- for (i in evarnames) {
- is.there = exists(i, inherits=TRUE)
+ for (ii in evarnames) {
+ is.there = exists(ii, inherits = TRUE)
ans = c(ans, is.there)
}
}
ans
}
-assign2VGAMenv = function(varnames, mylist, prefix="") {
+assign2VGAMenv <- function(varnames, mylist, prefix="") {
evarnames = paste(prefix, varnames, sep="")
if (is.R()) {
- for (i in 1:length(varnames)) {
- assign(evarnames[i], mylist[[(varnames[i])]], envir = VGAMenv)
+ for (ii in 1:length(varnames)) {
+ assign(evarnames[ii], mylist[[(varnames[ii])]],
+ envir = VGAM:::VGAMenv)
}
} else {
stop("uncomment the lines below")
@@ -815,14 +821,10 @@ assign2VGAMenv = function(varnames, mylist, prefix="") {
-getfromVGAMenv = function(varname, prefix="") {
+getfromVGAMenv <- function(varname, prefix="") {
varname = paste(prefix, varname, sep="")
if (length(varname) > 1) stop("'varname' must be of length 1")
- if (is.R()) {
- get(varname, envir = VGAMenv)
- } else {
- get(varname)
- }
+ get(varname, envir = VGAM:::VGAMenv)
}
@@ -831,13 +833,13 @@ lerch <- function(x, s, v, tolerance=1.0e-10, iter=100) {
stop("bad input in x, s, and/or v")
if (is.complex(c(x,s,v)))
stop("complex arguments not allowed in x, s and v")
- if (!is.Numeric(tolerance, allow=1, posi=TRUE) || tolerance > 0.01)
+ if (!is.Numeric(tolerance, allow=1, posi = TRUE) || tolerance > 0.01)
stop("bad input for argument 'tolerance'")
- if (!is.Numeric(iter, allow=1, integ=TRUE, posi=TRUE))
+ if (!is.Numeric(iter, allow=1, integ = TRUE, posi = TRUE))
stop("bad input for argument 'iter'")
L = max(length(x), length(s), length(v))
x = rep(x, length=L); s = rep(s, length=L); v = rep(v, length=L);
- xok = abs(x) < 1 & !(v <= 0 & v==round(v))
+ xok = abs(x) < 1 & !(v <= 0 & v == round(v))
x[!xok] = 0 # Fix this later
ans = dotC(name="lerchphi123", err=integer(L), as.integer(L),
as.double(x), as.double(s), as.double(v),
@@ -848,10 +850,42 @@ lerch <- function(x, s, v, tolerance=1.0e-10, iter=100) {
+negzero.expression <- expression({
+
+
+
+
+
+
+
+ posdotzero <- dotzero[dotzero > 0]
+ negdotzero <- dotzero[dotzero < 0]
+
+ bigUniqInt <- 1080
+ zneg_index <- if (length(negdotzero)) {
+
+ if (!is.Numeric(-negdotzero, posit = TRUE, integ = TRUE) ||
+ max(-negdotzero) > Musual)
+ stop("bad input for argument 'zero'")
+
+ zneg_index <- rep(0:bigUniqInt, rep(length(negdotzero),
+ 1 + bigUniqInt)) * Musual + abs(negdotzero)
+ sort(intersect(zneg_index, 1:M))
+ } else {
+ NULL
+ }
+
+ zpos_index <- if (length(posdotzero)) posdotzero else NULL
+ z_Index <- if (!length(dotzero)) NULL else
+ unique(sort(c(zneg_index, zpos_index)))
+
+ constraints <- cm.zero.vgam(constraints, x, z_Index, M)
+})
+
+
-
diff --git a/R/family.binomial.R b/R/family.binomial.R
index 8b406a7..ca8306a 100644
--- a/R/family.binomial.R
+++ b/R/family.binomial.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -28,8 +29,8 @@ process.binomial2.data.vgam <- expression({
stop("response must have 4 levels")
nn <- length(yf)
y <- matrix(0, nn, llev)
- y[cbind(1:nn,as.vector(unclass(yf)))] <- 1
- colnamesy <- paste(lev, ":", c("00","01","10","11"), sep="")
+ y[cbind(1:nn, as.vector(unclass(yf)))] <- 1
+ colnamesy <- paste(lev, ":", c("00", "01", "10", "11"), sep = "")
dimnames(y) <- list(names(yf), colnamesy)
input.type <- 1
} else if (ncol(y) == 2) {
@@ -38,8 +39,8 @@ process.binomial2.data.vgam <- expression({
col.index <- y[,2] + 2*y[,1] + 1 # 1:4
nn <- nrow(y)
y <- matrix(0, nn, 4)
- y[cbind(1:nn,col.index)] <- 1
- dimnames(y) <- list(dimnames(y)[[1]], c("00","01","10","11"))
+ y[cbind(1:nn, col.index)] <- 1
+ dimnames(y) <- list(dimnames(y)[[1]], c("00", "01", "10", "11"))
input.type <- 2
} else if (ncol(y) == 4) {
input.type <- 3
@@ -47,13 +48,17 @@ process.binomial2.data.vgam <- expression({
stop("response unrecognized")
- nvec <- drop(y %*% rep(1, ncol(y)))
+
+ nvec <- rowSums(y)
w <- w * nvec
y <- y / nvec # Convert to proportions
- mu <- y + (1/ncol(y) - y)/nvec
- dimnames(mu) <- dimnames(y)
+ if (length(mustart) + length(etastart) == 0) {
+ mu <- y + (1 / ncol(y) - y) / nvec
+ dimnames(mu) <- dimnames(y)
+ mustart <- mu
+ }
})
@@ -68,220 +73,240 @@ betabinomial.control <- function(save.weight=TRUE, ...)
- betabinomial = function(lmu="logit", lrho="logit",
- emu=list(), erho=list(), irho=NULL,
- method.init=1, shrinkage.init=0.95,
- nsimEIM=NULL, zero=2)
+ betabinomial <- function(lmu = "logit", lrho = "logit",
+ emu = list(), erho = list(), irho = NULL,
+ method.init = 1, shrinkage.init = 0.95,
+ nsimEIM = NULL, zero = 2)
{
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(lrho) != "character" && mode(lrho) != "name")
- lrho = as.character(substitute(lrho))
- if (!is.list(emu )) emu = list()
- if (!is.list(erho)) erho = list()
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 4) stop("argument 'method.init' must be 1, 2, 3 or 4")
- if (!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
- shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
- if (!is.null(nsimEIM)) {
- if (!is.Numeric(nsimEIM, allow=1, integ=TRUE))
- stop("bad input for argument 'nsimEIM'")
- if (nsimEIM <= 10)
- warning("'nsimEIM' should be an integer greater than 10, say")
- }
+ if (mode(lmu) != "character" && mode(lmu) != "name")
+ lmu <- as.character(substitute(lmu))
+ if (mode(lrho) != "character" && mode(lrho) != "name")
+ lrho <- as.character(substitute(lrho))
+
+ if (!is.list(emu )) emu <- list()
+ if (!is.list(erho)) erho <- list()
+
+ if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ method.init > 4) stop("argument 'method.init' must be 1, 2, 3 or 4")
+
+ if (!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
+ shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
+ if (!is.null(nsimEIM)) {
+ if (!is.Numeric(nsimEIM, allow=1, integ=TRUE))
+ stop("bad input for argument 'nsimEIM'")
+ if (nsimEIM <= 10)
+ warning("'nsimEIM' should be an integer greater than 10, say")
+ }
- new("vglmff",
- blurb = c("Beta-binomial model\n",
+ new("vglmff",
+ blurb = c("Beta-binomial model\n",
"Links: ",
namesof("mu", lmu, earg = emu), ", ",
namesof("rho", lrho, earg = erho), "\n",
"Mean: mu", "\n",
"Variance: mu*(1-mu)*(1+(w-1)*rho)/w"),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (!all(w == 1))
- extra$orig.w = w
-
- if (is.null( .nsimEIM)) {
- save.weight <- control$save.weight <- FALSE
- }
-
- eval(binomialff()@initialize) # Note: n,w,y,mustart is changed
-
-
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- if (max(abs(ycounts - round(ycounts))) > 1.0e-6)
- warning("the response (as counts) does not appear to ",
- "be integer-valued. Am rounding to integer values.")
- ycounts = round(ycounts) # Make sure it is an integer
- predictors.names = c(namesof("mu", .lmu, earg = .emu, tag=FALSE),
- namesof("rho", .lrho, earg = .erho, tag=FALSE))
- if (!length(etastart)) {
- betabinomial.Loglikfun = function(rhoval, y, x, w, extraargs) {
- shape1 = extraargs$mustart * (1-rhoval) / rhoval
- shape2 = (1-extraargs$mustart) * (1-rhoval) / rhoval
- ycounts = extraargs$ycounts # Ought to be integer-valued
- nvec = extraargs$nvec
- sum(dbetabin.ab(x=ycounts, size=nvec, shape1=shape1,
- shape2=shape2, log=TRUE))
- }
- rho.grid = rvar = seq(0.05, 0.95, len=21) #
- mustart.use = if ( .method.init == 1) {
- rep(weighted.mean(y, w), len=n)
- } else if ( .method.init == 2) {
- .sinit * weighted.mean(y, w) + (1- .sinit) * y
- } else if ( .method.init == 3) {
- y.matrix = cbind(y)
- mat.temp = matrix(colMeans(y.matrix), nrow(y.matrix),
- ncol(y.matrix), byrow = TRUE)
- 0.5 * mustart + 0.5 * mat.temp
- } else {
- mustart
- }
- try.this = getMaxMin(rho.grid, objfun=betabinomial.Loglikfun,
- y=y, x=x, w=w, extraargs=list(
- ycounts=ycounts,
- nvec = if (is.numeric(extra$orig.w))
- round(w / extra$orig.w) else round(w),
- mustart=mustart.use))
- init.rho = if (is.Numeric( .irho )) rep( .irho, length=n) else
- rep(try.this, len=n)
- }
- etastart = cbind(theta2eta(mustart.use, .lmu, earg = .emu),
- theta2eta(init.rho, .lrho, earg = .erho))
- }), list( .lmu = lmu, .lrho = lrho,
- .emu = emu, .erho = erho,
- .method.init = method.init, .sinit = shrinkage.init,
- .nsimEIM = nsimEIM, .irho=irho ))),
- inverse = eval(substitute(function(eta, extra = NULL)
- eta2theta(eta[,1], .lmu, earg = .emu),
- list( .lmu = lmu, .emu = emu ))),
- last = eval(substitute(expression({
- misc$link <- c(mu = .lmu, rho = .lrho)
- misc$earg <- list(mu = .emu, rho = .erho)
- misc$zero <- .zero
- misc$expected <- TRUE
- misc$nsimEIM = .nsimEIM
- }), list( .lmu = lmu, .lrho = lrho,
- .emu = emu, .erho = erho,
- .nsimEIM = nsimEIM, .zero = zero ))),
- loglikelihood = eval(substitute(
- function(mu,y,w,residuals=FALSE, eta, extra = NULL) {
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
-
- mymu = eta2theta(eta[,1], .lmu, earg = .emu)
- rho = eta2theta(eta[,2], .lrho, earg = .erho)
- smallno = 1.0e4 * .Machine$double.eps
-
- if (max(abs(ycounts - round(ycounts))) > smallno)
- warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
-
- rho = pmax(rho, smallno)
- rho = pmin(rho, 1-smallno)
- shape1 = mymu * (1 - rho) / rho
- shape2 = (1-mymu) * (1 - rho) / rho
-
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (!all(w == 1))
+ extra$orig.w <- w
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dbetabin.ab(x = ycounts, size = nvec, shape1 = shape1,
- shape2 = shape2, log = TRUE ))
- }
- }, list( .lmu = lmu, .lrho = lrho,
- .emu = emu, .erho = erho ))),
- vfamily = c("betabinomial"),
- deriv = eval(substitute(expression({
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
+ if (is.null( .nsimEIM)) {
+ save.weight <- control$save.weight <- FALSE
+ }
- ycounts = round(ycounts)
- mymu = eta2theta(eta[,1], .lmu, earg = .emu)
- rho = eta2theta(eta[,2], .lrho, earg = .erho)
- smallno = 100 * .Machine$double.eps
- rho = pmax(rho, smallno)
- rho = pmin(rho, 1-smallno)
- shape1 = mymu * (1 - rho) / rho
- shape2 = (1-mymu) * (1 - rho) / rho
- dshape1.dmu = (1 - rho) / rho
- dshape2.dmu = -(1 - rho) / rho
- dshape1.drho = -mymu / rho^2
- dshape2.drho = -(1 - mymu) / rho^2
- dmu.deta = dtheta.deta(mymu, .lmu, earg = .emu)
- drho.deta = dtheta.deta(rho, .lrho, earg = .erho)
- dl.dmu = dshape1.dmu * (digamma(shape1+ycounts) -
- digamma(shape2+nvec-ycounts) -
- digamma(shape1) + digamma(shape2))
- dl.drho = (-1/rho^2) * (mymu * digamma(shape1+ycounts) +
- (1-mymu) * digamma(shape2+nvec-ycounts) -
- digamma(shape1+shape2+nvec) -
- mymu * digamma(shape1) -
- (1-mymu)*digamma(shape2) + digamma(shape1+shape2))
- (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- cbind(dl.dmu * dmu.deta, dl.drho * drho.deta)
- }), list( .lmu = lmu, .lrho = lrho,
- .emu = emu, .erho = erho ))),
- weight = eval(substitute(expression({
- if (is.null( .nsimEIM)) {
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(2)
- wz11 = -(expected.betabin.ab(nvec, shape1, shape2, TRUE) -
- trigamma(shape1+shape2+nvec) -
- trigamma(shape1) + trigamma(shape1+shape2))
- wz22 = -(expected.betabin.ab(nvec, shape1, shape2, FALSE) -
- trigamma(shape1+shape2+nvec) -
- trigamma(shape2) + trigamma(shape1+shape2))
- wz21 = -(trigamma(shape1+shape2) - trigamma(shape1+shape2+nvec))
- wz[,iam(1,1,M)] = dmu.deta^2 * (wz11 * dshape1.dmu^2 +
- wz22 * dshape2.dmu^2 +
- 2 * wz21 * dshape1.dmu * dshape2.dmu)
- wz[,iam(2,2,M)] = drho.deta^2 * (wz11 * dshape1.drho^2 +
- wz22 * dshape2.drho^2 +
- 2 * wz21 * dshape1.drho * dshape2.drho)
- wz[,iam(2,1,M)] = dmu.deta * drho.deta *
- (dshape1.dmu*(wz11*dshape1.drho + wz21*dshape2.drho) +
- dshape2.dmu*(wz21*dshape1.drho + wz22*dshape2.drho))
- wz * (if (is.numeric(extra$orig.w)) extra$orig.w else 1)
- } else {
- run.varcov = 0
- ind1 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
- dthetas.detas = cbind(dmu.deta, drho.deta)
+ mustart.orig <- mustart
+ eval(binomialff()@initialize) # Note: n,w,y,mustart is changed
+ if (length(mustart.orig))
+ mustart <- mustart.orig # Retain it if inputted
+
+
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ if (max(abs(ycounts - round(ycounts))) > 1.0e-6)
+ warning("the response (as counts) does not appear to ",
+ "be integer-valued. Am rounding to integer values.")
+ ycounts <- round(ycounts) # Make sure it is an integer
+ predictors.names <- c(namesof("mu", .lmu, earg = .emu, tag=FALSE),
+ namesof("rho", .lrho, earg = .erho, tag=FALSE))
+ if (!length(etastart)) {
+ betabinomial.Loglikfun = function(rhoval, y, x, w, extraargs) {
+ shape1 <- extraargs$mustart * (1-rhoval) / rhoval
+ shape2 <- (1-extraargs$mustart) * (1-rhoval) / rhoval
+ ycounts <- extraargs$ycounts # Ought to be integer-valued
+ nvec <- extraargs$nvec
+ sum(dbetabin.ab(x = ycounts, size=nvec, shape1=shape1,
+ shape2=shape2, log=TRUE))
+ }
+ rho.grid <- seq(0.05, 0.95, len=21) # rvar =
+ mustart.use =
+ if (length(mustart.orig)) {
+ mustart.orig
+ } else if ( .method.init == 1) {
+ rep(weighted.mean(y, w), len = n)
+ } else if ( .method.init == 2) {
+ .sinit * weighted.mean(y, w) + (1 - .sinit) * y
+ } else if ( .method.init == 3) {
+ y.matrix <- cbind(y)
+ mat.temp <- matrix(colMeans(y.matrix), nrow(y.matrix),
+ ncol(y.matrix), byrow = TRUE)
+ 0.5 * mustart + 0.5 * mat.temp
+ } else {
+ mustart
+ }
+ try.this <- getMaxMin(rho.grid, objfun=betabinomial.Loglikfun,
+ y = y, x = x, w = w, extraargs = list(
+ ycounts=ycounts,
+ nvec = if (is.numeric(extra$orig.w))
+ round(w / extra$orig.w) else round(w),
+ mustart = mustart.use))
+ init.rho <- if (is.Numeric( .irho )) rep( .irho, length=n) else
+ rep(try.this, len=n)
+ etastart <- cbind(theta2eta(mustart.use, .lmu, earg = .emu),
+ theta2eta(init.rho, .lrho, earg = .erho))
+ mustart <- NULL # Since etastart has been computed.
+ }
+ }), list( .lmu = lmu, .lrho = lrho,
+ .emu = emu, .erho = erho,
+ .method.init = method.init, .sinit = shrinkage.init,
+ .nsimEIM = nsimEIM, .irho = irho ))),
+ inverse = eval(substitute(function(eta, extra = NULL)
+ eta2theta(eta[,1], .lmu, earg = .emu),
+ list( .lmu = lmu, .emu = emu ))),
+ last = eval(substitute(expression({
+ misc$link <- c(mu = .lmu, rho = .lrho)
+ misc$earg <- list(mu = .emu, rho = .erho)
+ misc$zero <- .zero
+ misc$expected <- TRUE
+ misc$nsimEIM = .nsimEIM
+ }), list( .lmu = lmu, .lrho = lrho,
+ .emu = emu, .erho = erho,
+ .nsimEIM = nsimEIM, .zero = zero ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals=FALSE, eta, extra = NULL) {
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+
+ mymu <- eta2theta(eta[,1], .lmu, earg = .emu)
+ rho <- eta2theta(eta[,2], .lrho, earg = .erho)
+ smallno <- 1.0e4 * .Machine$double.eps
+
+ if (max(abs(ycounts - round(ycounts))) > smallno)
+ warning("converting 'ycounts' to integer in @loglikelihood")
+ ycounts <- round(ycounts)
+
+ rho <- pmax(rho, smallno)
+ rho <- pmin(rho, 1 - smallno)
+ shape1 <- mymu * (1 - rho) / rho
+ shape2 <- (1 - mymu) * (1 - rho) / rho
+
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dbetabin.ab(x = ycounts, size = nvec, shape1 = shape1,
+ shape2 = shape2, log = TRUE ))
+ }
+ }, list( .lmu = lmu, .lrho = lrho,
+ .emu = emu, .erho = erho ))),
+ vfamily = c("betabinomial"),
+ deriv = eval(substitute(expression({
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+
+ ycounts <- round(ycounts)
+ mymu <- eta2theta(eta[,1], .lmu, earg = .emu)
+ rho <- eta2theta(eta[,2], .lrho, earg = .erho)
+ smallno <- 100 * .Machine$double.eps
+ rho <- pmax(rho, smallno)
+ rho <- pmin(rho, 1-smallno)
+
+ shape1 <- mymu * (1 - rho) / rho
+ shape2 <- (1 - mymu) * (1 - rho) / rho
+ dshape1.dmu <- (1 - rho) / rho
+ dshape2.dmu <- -(1 - rho) / rho
+ dshape1.drho <- -mymu / rho^2
+ dshape2.drho <- -(1 - mymu) / rho^2
+
+ dmu.deta <- dtheta.deta(mymu, .lmu, earg = .emu)
+ drho.deta <- dtheta.deta(rho, .lrho, earg = .erho)
+
+ dl.dmu <- dshape1.dmu * (digamma(shape1+ycounts) -
+ digamma(shape2+nvec-ycounts) -
+ digamma(shape1) + digamma(shape2))
+ dl.drho <- (-1/rho^2) * (mymu * digamma(shape1 + ycounts) +
+ (1 - mymu) * digamma(shape2 + nvec - ycounts) -
+ digamma(shape1 + shape2 + nvec) -
+ mymu * digamma(shape1) -
+ (1 - mymu)*digamma(shape2) + digamma(shape1+shape2))
+
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ cbind(dl.dmu * dmu.deta, dl.drho * drho.deta)
+ }), list( .lmu = lmu, .lrho = lrho,
+ .emu = emu, .erho = erho ))),
+ weight = eval(substitute(expression({
+ if (is.null( .nsimEIM )) {
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(2)
+ wz11 <- -(expected.betabin.ab(nvec, shape1, shape2, TRUE) -
+ trigamma(shape1+shape2+nvec) -
+ trigamma(shape1) + trigamma(shape1+shape2))
+ wz22 <- -(expected.betabin.ab(nvec, shape1, shape2, FALSE) -
+ trigamma(shape1+shape2+nvec) -
+ trigamma(shape2) + trigamma(shape1+shape2))
+ wz21 <- -(trigamma(shape1+shape2) - trigamma(shape1+shape2+nvec))
+
+ wz[,iam(1, 1, M)] <- dmu.deta^2 * (wz11 * dshape1.dmu^2 +
+ wz22 * dshape2.dmu^2 +
+ 2 * wz21 * dshape1.dmu * dshape2.dmu)
+ wz[,iam(2, 2, M)] <- drho.deta^2 * (wz11 * dshape1.drho^2 +
+ wz22 * dshape2.drho^2 +
+ 2 * wz21 * dshape1.drho * dshape2.drho)
+ wz[,iam(2, 1, M)] <- dmu.deta * drho.deta *
+ (dshape1.dmu*(wz11*dshape1.drho + wz21*dshape2.drho) +
+ dshape2.dmu*(wz21*dshape1.drho + wz22*dshape2.drho))
+
+ wz * (if (is.numeric(extra$orig.w)) extra$orig.w else 1)
+ } else {
+ run.varcov <- 0
+ ind1 <- iam(NA, NA, M=M, both=TRUE, diag=TRUE)
+ dthetas.detas <- cbind(dmu.deta, drho.deta)
+
+ for (ii in 1:( .nsimEIM )) {
+ ysim <- rbetabin.ab(n=n, size=nvec, shape1=shape1,
+ shape2=shape2)
+ dl.dmu <- dshape1.dmu * (digamma(shape1+ysim) -
+ digamma(shape2+nvec-ysim) -
+ digamma(shape1) + digamma(shape2))
+ dl.drho <- (-1/rho^2) * (mymu * digamma(shape1+ysim) +
+ (1-mymu) * digamma(shape2+nvec-ysim) -
+ digamma(shape1+shape2+nvec) -
+ mymu * digamma(shape1) -
+ (1-mymu)*digamma(shape2) + digamma(shape1+shape2))
+
+
+ temp3 <- cbind(dl.dmu, dl.drho) # n x M matrix
+ run.varcov <- run.varcov +
+ temp3[,ind1$row.index] * temp3[,ind1$col.index]
+ }
+ run.varcov <- run.varcov / .nsimEIM
- for (ii in 1:( .nsimEIM )) {
- ysim = rbetabin.ab(n=n, size=nvec, shape1=shape1,
- shape2=shape2)
- dl.dmu = dshape1.dmu * (digamma(shape1+ysim) -
- digamma(shape2+nvec-ysim) -
- digamma(shape1) + digamma(shape2))
- dl.drho = (-1/rho^2) * (mymu * digamma(shape1+ysim) +
- (1-mymu) * digamma(shape2+nvec-ysim) -
- digamma(shape1+shape2+nvec) -
- mymu * digamma(shape1) -
- (1-mymu)*digamma(shape2) + digamma(shape1+shape2))
- rm(ysim)
- temp3 = cbind(dl.dmu, dl.drho) # n x M matrix
- run.varcov = ((ii-1) * run.varcov +
- temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
- }
- wz = if (intercept.only)
- matrix(colMeans(run.varcov),
- n, ncol(run.varcov), byrow = TRUE) else run.varcov
+ wz <- if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
- wz * (if (is.numeric(extra$orig.w)) extra$orig.w else 1)
- }
- }), list( .lmu = lmu, .lrho = lrho,
- .emu = emu, .erho = erho,
- .nsimEIM = nsimEIM ))))
+ wz <- wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
+ wz * (if (is.numeric(extra$orig.w)) extra$orig.w else 1)
+ }
+ }), list( .lmu = lmu, .lrho = lrho,
+ .emu = emu, .erho = erho,
+ .nsimEIM = nsimEIM ))))
}
@@ -376,11 +401,12 @@ rbinom2.or = function(n, mu1,
- binom2.or = function(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
- emu = list(), emu1 = emu, emu2 = emu, eoratio = list(),
- imu1 = NULL, imu2 = NULL, ioratio = NULL,
- zero = 3, exchangeable = FALSE, tol = 0.001,
- morerobust = FALSE)
+ binom2.or = function(lmu = "logit", lmu1 = lmu, lmu2 = lmu,
+ loratio = "loge",
+ emu = list(), emu1 = emu, emu2 = emu, eoratio = list(),
+ imu1 = NULL, imu2 = NULL, ioratio = NULL,
+ zero = 3, exchangeable = FALSE, tol = 0.001,
+ morerobust = FALSE)
{
if (mode(lmu) != "character" && mode(lmu) != "name")
lmu = as.character(substitute(lmu))
@@ -413,19 +439,26 @@ rbinom2.or = function(n, mu1,
}), list( .exchangeable = exchangeable, .zero = zero ))),
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
+ mustart.orig = mustart
eval(process.binomial2.data.vgam)
- predictors.names = c(namesof("mu1", .lmu1, earg = .emu1, short=TRUE),
- namesof("mu2", .lmu2, earg = .emu2, short=TRUE),
- namesof("oratio", .loratio, earg = .eoratio, short=TRUE))
+ if (length(mustart.orig))
+ mustart = mustart.orig # Retain it if inputted
+
+ predictors.names =
+ c(namesof("mu1", .lmu1, earg = .emu1, short = TRUE),
+ namesof("mu2", .lmu2, earg = .emu2, short = TRUE),
+ namesof("oratio", .loratio, earg = .eoratio, short = TRUE))
if (!length(etastart)) {
- pmargin = cbind(mu[,3]+mu[,4], mu[,2]+mu[,4])
- ioratio = if (length( .ioratio)) rep( .ioratio, len=n) else
- mu[,4]*mu[,1]/(mu[,2]*mu[,3])
- if (length( .imu1)) pmargin[,1] = .imu1
- if (length( .imu2)) pmargin[,2] = .imu2
+ pmargin = cbind(mustart[, 3] + mustart[, 4],
+ mustart[, 2] + mustart[, 4])
+ ioratio = if (length( .ioratio)) rep( .ioratio, len = n) else
+ mustart[, 4] * mustart[, 1] / (mustart[, 2] *
+ mustart[, 3])
+ if (length( .imu1 )) pmargin[, 1] = .imu1
+ if (length( .imu2 )) pmargin[, 2] = .imu2
etastart = cbind(theta2eta(pmargin[,1], .lmu1, earg = .emu1),
- theta2eta(pmargin[,2], .lmu2, earg = .emu2),
+ theta2eta(pmargin[,2], .lmu2, earg = .emu2),
theta2eta(ioratio, .loratio, earg = .eoratio))
}
}), list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
@@ -442,7 +475,10 @@ rbinom2.or = function(n, mu1,
(a.temp-temp)/(2*(oratio-1)))
pj2 = pmargin[,2] - pj4
pj3 = pmargin[,1] - pj4
- cbind("00" = 1-pj4-pj2-pj3, "01" = pj2, "10" = pj3, "11" = pj4)
+ cbind("00" = 1-pj4-pj2-pj3,
+ "01" = pj2,
+ "10" = pj3,
+ "11" = pj4)
}, list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
.emu1 = emu1, .emu2 = emu2, .eoratio = eoratio,
.tol = tol ))),
@@ -457,9 +493,9 @@ rbinom2.or = function(n, mu1,
link = eval(substitute(function(mu, extra = NULL) {
pmargin = cbind(mu[,3]+mu[,4], mu[,2]+mu[,4])
oratio = mu[,4]*mu[,1] / (mu[,2]*mu[,3])
- cbind(theta2eta(pmargin[,1], .lmu1, earg = .emu1),
- theta2eta(pmargin[,2], .lmu2, earg = .emu2),
- theta2eta(oratio, .loratio, earg = .eoratio))
+ cbind(theta2eta(pmargin[,1], .lmu1, earg = .emu1),
+ theta2eta(pmargin[,2], .lmu2, earg = .emu2),
+ theta2eta(oratio, .loratio, earg = .eoratio))
}, list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
.emu1 = emu1, .emu2 = emu2, .eoratio = eoratio ))),
loglikelihood = eval(substitute(
@@ -491,13 +527,14 @@ rbinom2.or = function(n, mu1,
deriv = eval(substitute(expression({
smallno = 1.0e4 * .Machine$double.eps
mu.use = mu
- mu.use[mu.use < smallno] = smallno
- mu.use[mu.use > 1-smallno] = 1-smallno
- pmargin = cbind(mu.use[,3]+mu.use[,4], mu.use[,2]+mu.use[,4])
- pmargin[,1] = pmax(smallno, pmargin[,1])
- pmargin[,1] = pmin(1-smallno, pmargin[,1])
- pmargin[,2] = pmax(smallno, pmargin[,2])
- pmargin[,2] = pmin(1-smallno, pmargin[,2])
+ mu.use[mu.use < smallno] = smallno
+ mu.use[mu.use > 1 - smallno] = 1 - smallno
+ pmargin = cbind(mu.use[, 3] + mu.use[, 4],
+ mu.use[, 2] + mu.use[, 4])
+ pmargin[, 1] = pmax( smallno, pmargin[, 1])
+ pmargin[, 1] = pmin(1 - smallno, pmargin[, 1])
+ pmargin[, 2] = pmax( smallno, pmargin[, 2])
+ pmargin[, 2] = pmin(1 - smallno, pmargin[, 2])
oratio = mu.use[,4]*mu.use[,1] / (mu.use[,2]*mu.use[,3])
use.oratio = pmax(smallno, oratio)
@@ -521,7 +558,7 @@ rbinom2.or = function(n, mu1,
w * cbind(dl.dmu1 * dtheta.deta(pmargin[,1], .lmu1, earg = .emu1),
dl.dmu2 * dtheta.deta(pmargin[,2], .lmu2, earg = .emu2),
- dl.doratio * dtheta.deta(oratio, .loratio, earg = .eoratio))
+ dl.doratio * dtheta.deta(oratio, .loratio, earg = .eoratio))
}), list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
.emu1 = emu1, .emu2 = emu2, .eoratio = eoratio ))),
weight = eval(substitute(expression({
@@ -537,7 +574,7 @@ rbinom2.or = function(n, mu1,
wz[,iam(2,2,M)] = (pqmargin[,1] * Vab / myDelta) *
dtheta.deta(pmargin[,2], .lmu2, earg = .emu2)^2
wz[,iam(3,3,M)] = (Vab / use.oratio^2) *
- dtheta.deta(use.oratio, .loratio, earg = .eoratio)^2
+ dtheta.deta(use.oratio, .loratio, earg = .eoratio)^2
wz[,iam(1,2,M)] = (Vab * Deltapi / myDelta) *
dtheta.deta(pmargin[,1], .lmu1, earg = .emu1) *
dtheta.deta(pmargin[,2], .lmu2, earg = .emu2)
@@ -565,17 +602,17 @@ dbinom2.rho = function(mu1,
stop("argument 'exchangeable' is TRUE but 'mu1' and 'mu2' differ")
}
- n = max(length(mu1), length(mu2), length(rho))
- rho = rep(rho, len=n)
- mu1 = rep(mu1, len=n)
- mu2 = rep(mu2, len=n)
+ nn = max(length(mu1), length(mu2), length(rho))
+ rho = rep(rho, len = nn)
+ mu1 = rep(mu1, len = nn)
+ mu2 = rep(mu2, len = nn)
eta1 = qnorm(mu1)
eta2 = qnorm(mu2)
p11 = pnorm2(eta1, eta2, rho)
p01 = mu2 - p11
p10 = mu1 - p11
- p00 = 1 - p01 - p10 - p11
- matrix(c(p00,p01,p10,p11), n, 4, dimnames=list(NULL,colnames))
+ p00 = 1.0 - p01 - p10 - p11
+ matrix(c(p00, p01, p10, p11), nn, 4, dimnames = list(NULL, colnames))
}
@@ -624,6 +661,8 @@ rbinom2.rho = function(n, mu1,
+
+
binom2.rho.control <- function(save.weight=TRUE, ...)
{
list(save.weight=save.weight)
@@ -631,23 +670,26 @@ binom2.rho.control <- function(save.weight=TRUE, ...)
- binom2.rho = function(lrho="rhobit", erho=list(),
- imu1=NULL, imu2=NULL,
- init.rho=NULL,
- zero=3, exchangeable=FALSE, nsimEIM=NULL)
+ binom2.rho = function(lrho = "rhobit", erho = list(),
+ imu1 = NULL, imu2 = NULL, irho = NULL,
+ method.init = 1,
+ zero = 3, exchangeable = FALSE, nsimEIM = NULL)
{
+
if (mode(lrho) != "character" && mode(lrho) != "name")
lrho = as.character(substitute(lrho))
if (!is.list(erho)) erho = list()
lmu12 = "probit"
emu12 = list()
if (is.Numeric(nsimEIM)) {
- if (!is.Numeric(nsimEIM, allow=1, integ=TRUE))
+ if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE))
stop("bad input for argument 'nsimEIM'")
if (nsimEIM <= 100)
warning("'nsimEIM' should be an integer greater than 100")
}
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+ method.init > 2) stop("argument 'method.init' must be 1 or 2")
new("vglmff",
blurb = c("Bivariate probit model\n",
@@ -662,39 +704,98 @@ binom2.rho.control <- function(save.weight=TRUE, ...)
}), list( .exchangeable = exchangeable, .zero = zero ))),
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
+ mustart.orig = mustart
eval(process.binomial2.data.vgam)
+ if (length(mustart.orig))
+ mustart = mustart.orig # Retain it if inputted
+
predictors.names = c(
- namesof("mu1", .lmu12, earg = .emu12, short=TRUE),
- namesof("mu2", .lmu12, earg = .emu12, short=TRUE),
- namesof("rho", .lrho, earg = .erho, short=TRUE))
+ namesof("mu1", .lmu12, earg = .emu12, short = TRUE),
+ namesof("mu2", .lmu12, earg = .emu12, short = TRUE),
+ namesof("rho", .lrho, earg = .erho, short = TRUE))
if (is.null( .nsimEIM)) {
save.weight <- control$save.weight <- FALSE
}
+
+
+ ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ if (max(abs(ycounts - round(ycounts))) > 1.0e-6)
+ warning("the response (as counts) does not appear to ",
+ "be integer-valued. Am rounding to integer values.")
+ ycounts = round(ycounts) # Make sure it is an integer
+ nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+
+
if (is.null(etastart)) {
- mu1.init= if (is.Numeric(.imu1)) rep(.imu1, len=n) else
- mu[,3] + mu[,4]
- mu2.init= if (is.Numeric(.imu2)) rep(.imu2, len=n) else
- mu[,2] + mu[,4]
- rho.init = if (is.Numeric(.init.rho)) rep( .init.rho, len=n) else {
- temp4 = oratio = mu[,1] * mu[,4] / (mu[,2] * mu[,3])
- temp4[oratio <= 0.1] = -0.6
- temp4[oratio > 0.1] = -0.4
- temp4[oratio > 0.5] = -0.2
- temp4[oratio > 0.9] = -0.1
- temp4[oratio > 1.1] = 0.1
- temp4[oratio > 2.0] = 0.3
- temp4[oratio > 6.0] = 0.6
- temp4[oratio > 15.0] = 0.8
- temp4
+ if (length(mustart.orig)) {
+ mu1.init = mustart.orig[,3] + mustart.orig[,4]
+ mu2.init = mustart.orig[,2] + mustart.orig[,4]
+ } else if ( .method.init == 1) {
+ glm1.fit = glm(cbind(ycounts[,3] + ycounts[,4],
+ ycounts[,1] + ycounts[,2]) ~ x - 1,
+ fam = binomial("probit"))
+ glm2.fit = glm(cbind(ycounts[,2] + ycounts[,4],
+ ycounts[,1] + ycounts[,3]) ~ x - 1,
+ fam = binomial("probit"))
+ mu1.init = fitted(glm1.fit)
+ mu2.init = fitted(glm2.fit)
+ } else if ( .method.init == 2) {
+ mu1.init = if (is.Numeric( .imu1 )) rep( .imu1, len=n) else
+ mu[,3] + mu[,4]
+ mu2.init = if (is.Numeric( .imu2 )) rep( .imu2, len=n) else
+ mu[,2] + mu[,4]
+ } else {
+ stop("bad value for 'method.init'")
+ }
+
+
+
+ binom2.rho.Loglikfun = function(rhoval, y, x, w, extraargs) {
+ init.mu1 = extraargs$initmu1
+ init.mu2 = extraargs$initmu2
+ ycounts = extraargs$ycounts
+ nvec = extraargs$nvec
+ eta1 = qnorm(init.mu1)
+ eta2 = qnorm(init.mu2)
+ p11 = pnorm2(eta1, eta2, rhoval)
+ p01 = pmin(init.mu2 - p11, init.mu2)
+ p10 = pmin(init.mu1 - p11, init.mu1)
+ p00 = 1.0 - p01 - p10 - p11
+ mumat = abs(cbind("00" = p00, "01" = p01, "10" = p10, "11" = p11))
+ mumat = mumat / rowSums(mumat)
+ mumat[mumat < 1.0e-100] = 1.0e-100
+
+ sum((if (is.numeric(extraargs$orig.w)) extraargs$orig.w else 1) *
+ dmultinomial(x = ycounts, size = nvec, prob = mumat,
+ log = TRUE, docheck = FALSE))
}
- etastart = cbind(theta2eta(mu1.init, .lmu12, earg = .emu12),
- theta2eta(mu2.init, .lmu12, earg = .emu12),
- theta2eta(rho.init, .lrho, earg = .erho))
+ rho.grid = seq(-0.95, 0.95, len=31)
+ try.this = getMaxMin(rho.grid, objfun=binom2.rho.Loglikfun,
+ y=y, x=x, w=w, extraargs=list(
+ orig.w = extra$orig.w,
+ ycounts = ycounts,
+ initmu1 = mu1.init,
+ initmu2 = mu2.init,
+ nvec = nvec
+ ))
+
+
+ rho.init = if (is.Numeric( .irho ))
+ rep( .irho, len = n) else {
+ try.this
+ }
+ etastart = cbind(theta2eta(mu1.init, .lmu12, earg = .emu12),
+ theta2eta(mu2.init, .lmu12, earg = .emu12),
+ theta2eta(rho.init, .lrho, earg = .erho))
+ mustart <- NULL # Since etastart has been computed.
}
}), list( .lmu12 = lmu12, .emu12 = emu12, .nsimEIM = nsimEIM,
.lrho = lrho, .erho = erho,
- .imu1 = imu1, .imu2 = imu2, .init.rho = init.rho ))),
+ .method.init = method.init,
+ .imu1 = imu1, .imu2 = imu2, .irho = irho ))),
inverse = eval(substitute(function(eta, extra = NULL) {
pmargin = cbind(eta2theta(eta[,1], .lmu12, earg = .emu12),
eta2theta(eta[,2], .lmu12, earg = .emu12))
@@ -702,8 +803,8 @@ binom2.rho.control <- function(save.weight=TRUE, ...)
p11 = pnorm2(eta[,1], eta[,2], rho)
p01 = pmin(pmargin[,2] - p11, pmargin[,2])
p10 = pmin(pmargin[,1] - p11, pmargin[,1])
- p00 = 1 - p01 - p10 - p11
- ansmat = abs(cbind("00"=p00, "01"=p01, "10"=p10, "11"=p11))
+ p00 = 1.0 - p01 - p10 - p11
+ ansmat = abs(cbind("00" = p00, "01" = p01, "10" = p10, "11" = p11))
ansmat / rowSums(ansmat)
}, list( .lmu12 = lmu12, .emu12 = emu12, .lrho = lrho, .erho = erho ))),
last = eval(substitute(expression({
@@ -711,26 +812,25 @@ binom2.rho.control <- function(save.weight=TRUE, ...)
misc$earg = list(mu1 = .emu12, mu2 = .emu12, rho = .erho)
misc$nsimEIM = .nsimEIM
misc$expected = TRUE
- }), list( .lmu12 = lmu12, .lrho = lrho,
- .emu12 = emu12, .erho = erho,
- .nsimEIM = nsimEIM ))),
-
+ }), list( .lmu12 = lmu12, .lrho = lrho, .nsimEIM = nsimEIM,
+ .emu12 = emu12, .erho = erho ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
+ stop("loglikelihood residuals not implemented yet") else {
ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
+
+ smallno = 1.0e4 * .Machine$double.eps
+ if (max(abs(ycounts - round(ycounts))) > smallno)
+ warning("converting 'ycounts' to integer in @loglikelihood")
+ ycounts = round(ycounts)
+
nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
- smallno = 1.0e4 * .Machine$double.eps
- if (max(abs(ycounts - round(ycounts))) > smallno)
- warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
-
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
log = TRUE, docheck = FALSE))
@@ -738,6 +838,11 @@ binom2.rho.control <- function(save.weight=TRUE, ...)
}, list( .erho = erho ))),
vfamily = c("binom2.rho", "binom2"),
deriv = eval(substitute(expression({
+ nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+ ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+
pmargin = cbind(eta2theta(eta[,1], .lmu12, earg = .emu12),
eta2theta(eta[,2], .lmu12, earg = .emu12))
rhovec = eta2theta(eta[,3], .lrho, earg = .erho)
@@ -746,11 +851,11 @@ binom2.rho.control <- function(save.weight=TRUE, ...)
p10 = pmargin[,1]-p11
p00 = 1-p01-p10-p11
- ABmat = (eta[,1:2] - rhovec*eta[,2:1]) / sqrt(1-rhovec^2)
+ ABmat = (eta[,1:2] - rhovec * eta[,2:1]) / sqrt(1.0 - rhovec^2)
PhiA = pnorm(ABmat[,1])
PhiB = pnorm(ABmat[,2])
- onemPhiA = pnorm(ABmat[,1], lower.tail=FALSE)
- onemPhiB = pnorm(ABmat[,2], lower.tail=FALSE)
+ onemPhiA = pnorm(ABmat[,1], lower.tail = FALSE)
+ onemPhiB = pnorm(ABmat[,2], lower.tail = FALSE)
smallno = 1000 * .Machine$double.eps
p00[p00 < smallno] = smallno
@@ -759,27 +864,33 @@ binom2.rho.control <- function(save.weight=TRUE, ...)
p11[p11 < smallno] = smallno
dprob00 = dnorm2(eta[,1], eta[,2], rhovec)
- dl.dprob1 = PhiB*(y[,4]/p11-y[,2]/p01) + onemPhiB*(y[,3]/p10-y[,1]/p00)
- dl.dprob2 = PhiA*(y[,4]/p11-y[,3]/p10) + onemPhiA*(y[,2]/p01-y[,1]/p00)
- dl.drho = (y[,4]/p11-y[,3]/p10-y[,2]/p01+y[,1]/p00) * dprob00
+ dl.dprob1 = PhiB * (ycounts[,4]/p11 - ycounts[,2]/p01) +
+ onemPhiB * (ycounts[,3]/p10 - ycounts[,1]/p00)
+ dl.dprob2 = PhiA * (ycounts[,4]/p11 - ycounts[,3]/p10) +
+ onemPhiA * (ycounts[,2]/p01 - ycounts[,1]/p00)
+ dl.drho = (ycounts[,4]/p11 - ycounts[,3]/p10 -
+ ycounts[,2]/p01 + ycounts[,1]/p00) * dprob00
dprob1.deta = dtheta.deta(pmargin[,1], .lmu12, earg = .emu12)
dprob2.deta = dtheta.deta(pmargin[,2], .lmu12, earg = .emu12)
drho.deta = dtheta.deta(rhovec, .lrho, earg = .erho)
dthetas.detas = cbind(dprob1.deta, dprob2.deta, drho.deta)
- w * cbind(dl.dprob1, dl.dprob2, dl.drho) * dthetas.detas
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ cbind(dl.dprob1, dl.dprob2, dl.drho) * dthetas.detas
}), list( .lmu12 = lmu12, .emu12 = emu12, .lrho = lrho, .erho = erho ))),
weight = eval(substitute(expression({
- if (is.null( .nsimEIM)) {
- d2l.dprob1prob1 = PhiB^2 *(1/p11+1/p01) + onemPhiB^2 *(1/p10+1/p00)
- d2l.dprob2prob2 = PhiA^2 *(1/p11+1/p10) + onemPhiA^2 *(1/p01+1/p00)
- d2l.dprob1prob2 = PhiA * (PhiB/p11 - onemPhiB/p10) +
- onemPhiA * (onemPhiB/p00 - PhiB/p01)
- d2l.dprob1rho = (PhiB*(1/p11+1/p01) -
- onemPhiB*(1/p10+1/p00)) * dprob00
- d2l.dprob2rho = (PhiA*(1/p11+1/p10) -
- onemPhiA*(1/p01+1/p00)) * dprob00
- d2l.drho2 = (1/p11+1/p01+1/p10+1/p00) * dprob00^2
+ if (is.null( .nsimEIM )) {
+ d2l.dprob1prob1 = PhiB^2 * (1/p11 + 1/p01) +
+ onemPhiB^2 * (1/p10 + 1/p00)
+ d2l.dprob2prob2 = PhiA^2 * (1/p11 + 1/p10) +
+ onemPhiA^2 * (1/p01 + 1/p00)
+ d2l.dprob1prob2 = PhiA * ( PhiB/p11 - onemPhiB/p10) +
+ onemPhiA * (onemPhiB/p00 - PhiB/p01)
+ d2l.dprob1rho = (PhiB * (1/p11 + 1/p01) -
+ onemPhiB * (1/p10 + 1/p00)) * dprob00
+ d2l.dprob2rho = (PhiA * (1/p11 + 1/p10) -
+ onemPhiA * (1/p01 + 1/p00)) * dprob00
+ d2l.drho2 = (1/p11 + 1/p01 + 1/p10 + 1/p00) * dprob00^2
wz = matrix(0, n, dimm(M)) # 6=dimm(M)
wz[,iam(1,1,M)] = d2l.dprob1prob1 * dprob1.deta^2
wz[,iam(2,2,M)] = d2l.dprob2prob2 * dprob2.deta^2
@@ -789,16 +900,16 @@ binom2.rho.control <- function(save.weight=TRUE, ...)
wz[,iam(3,3,M)] = d2l.drho2 * drho.deta^2
} else {
run.varcov = 0
- ind1 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
+ ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
for (ii in 1:( .nsimEIM )) {
- ysim = rbinom2.rho(n=n, mu1=pmargin[,1], mu2=pmargin[,2],
- twoCols=FALSE, rho=rhovec)
- dl.dprob1 = PhiB * (ysim[,4]/p11-ysim[,2]/p01) +
- onemPhiB * (ysim[,3]/p10-ysim[,1]/p00)
- dl.dprob2 = PhiA * (ysim[,4]/p11-ysim[,3]/p10) +
- onemPhiA * (ysim[,2]/p01-ysim[,1]/p00)
- dl.drho = (ysim[,4]/p11-ysim[,3]/p10 -
- ysim[,2]/p01+ysim[,1]/p00) * dprob00
+ ysim = rbinom2.rho(n, mu1 = pmargin[,1], mu2 = pmargin[,2],
+ twoCols = FALSE, rho = rhovec)
+ dl.dprob1 = PhiB * (ysim[,4]/p11 - ysim[,2]/p01) +
+ onemPhiB * (ysim[,3]/p10 - ysim[,1]/p00)
+ dl.dprob2 = PhiA * (ysim[,4]/p11 - ysim[,3]/p10) +
+ onemPhiA * (ysim[,2]/p01 - ysim[,1]/p00)
+ dl.drho = (ysim[,4]/p11 - ysim[,3]/p10 -
+ ysim[,2]/p01 + ysim[,1]/p00) * dprob00
rm(ysim)
temp3 = cbind(dl.dprob1, dl.dprob2, dl.drho)
@@ -812,14 +923,23 @@ binom2.rho.control <- function(save.weight=TRUE, ...)
wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
}
w * wz
- }), list( .nsimEIM = nsimEIM, .lmu12 = lmu12, .emu12 = emu12,
- .lrho = lrho, .erho = erho ))))
+ }), list( .nsimEIM = nsimEIM ))))
}
-dnorm2 <- function(x, y, r)
- exp(-0.5*(x^2+y^2-2*x*y*r)/(1-r^2)) / (2*pi*sqrt(1-r^2))
+# Density of the standard bivariate normal at (x, y) with correlation 'rho'.
+# Returns the log-density when 'log = TRUE'; vectorized over x, y and rho.
+# NOTE(review): assumes abs(rho) < 1 -- rho = +/-1 divides by zero here;
+# confirm that all callers clamp rho away from the boundary.
+dnorm2 <- function(x, y, rho = 0, log = FALSE) {
+# Copy then remove the 'log' argument so that the calls to base log()
+# below refer to the function, not the logical flag shadowing it.
+ log.arg = log
+ rm(log)
+ if (log.arg) {
+# log1p(-rho^2) is more accurate than log(1 - rho^2) for small rho.
+ (-0.5*(x^2 + y^2 - 2*x*y*rho)/(1.0-rho^2)) - log(2) - log(pi) -
+ 0.5 * log1p(-rho^2)
+ } else {
+ exp(-0.5*(x^2 + y^2 - 2*x*y*rho)/(1.0-rho^2)) / (2*pi*sqrt(1.0-rho^2))
+ }
+}
+
+
pnorm2 <- function(ah, ak, r) {
@@ -1080,23 +1200,32 @@ betabin.ab.control <- function(save.weight=TRUE, ...)
}
# Compute initial values for mustart -------
+ mustart.orig = mustart
eval(binomialff()@initialize) # Note: n,w,y,mustart is changed
+ if (length(mustart.orig))
+ mustart = mustart.orig # Retain it if inputted
predictors.names =
c(namesof("shape1", .lshape12, earg = .earg, tag=FALSE),
namesof("shape2", .lshape12, earg = .earg, tag=FALSE))
if (!length(etastart)) {
+
+ mustart.use = if (length(mustart.orig)) mustart.orig else
+ mustart
+
shape1 = rep( .i1, len = n)
- shape2 = if (length( .i2 )) rep( .i2, len = n) else {
- if ( .method.init == 1) {
- shape1 * (1 / weighted.mean(y, w) - 1)
- } else if ( .method.init == 2) {
- temp777 = .sinit * weighted.mean(y, w) + (1- .sinit) * y
- shape1 * (1 / temp777 - 1)
- } else {
- shape1 * (1 / weighted.mean(mustart, w) - 1)
- }
- }
+ shape2 = if (length( .i2 )) {
+ rep( .i2, len = n)
+ } else if (length(mustart.orig)) {
+ shape1 * (1 / mustart.use - 1)
+ } else if ( .method.init == 1) {
+ shape1 * (1 / weighted.mean(y, w) - 1)
+ } else if ( .method.init == 2) {
+ temp777 = .sinit * weighted.mean(y, w) + (1- .sinit) * y
+ shape1 * (1 / temp777 - 1)
+ } else {
+ shape1 * (1 / weighted.mean(mustart.use, w) - 1)
+ }
ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
if (max(abs(ycounts - round(ycounts))) > 1.0e-6)
@@ -1105,6 +1234,7 @@ betabin.ab.control <- function(save.weight=TRUE, ...)
ycounts = round(ycounts) # Make sure it is an integer
etastart = cbind(theta2eta(shape1, .lshape12, earg = .earg),
theta2eta(shape2, .lshape12, earg = .earg))
+ mustart <- NULL # Since etastart has been computed.
}
}), list( .lshape12 = lshape12, .earg = earg, .i1 = i1, .i2 = i2,
.nsimEIM = nsimEIM,
@@ -1503,9 +1633,9 @@ zipebcom = function(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
initialize = eval(substitute(expression({
eval(process.binomial2.data.vgam)
predictors.names = c(
- namesof("mu12", .lmu12, earg = .emu12, short=TRUE),
- namesof("phi12", .lphi12, earg = .ephi12, short=TRUE),
- namesof("oratio", .loratio, earg = .eoratio, short=TRUE))
+ namesof("mu12", .lmu12, earg = .emu12, short = TRUE),
+ namesof("phi12", .lphi12, earg = .ephi12, short = TRUE),
+ namesof("oratio", .loratio, earg = .eoratio, short = TRUE))
propY1.eq.0 = weighted.mean(y[,'00'], w) + weighted.mean(y[,'01'], w)
propY2.eq.0 = weighted.mean(y[,'00'], w) + weighted.mean(y[,'10'], w)
@@ -1632,10 +1762,12 @@ zipebcom = function(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
- lusted68 = function(lrhopos = "loge", lrhoneg = "loge",
- erhopos = list(), erhoneg = list(),
- irhopos = NULL, irhoneg = NULL,
- iprob1 = NULL, iprob2 = NULL, zero = NULL)
+
+if (FALSE)
+ lusted68 <- function(lrhopos = "loge", lrhoneg = "loge",
+ erhopos = list(), erhoneg = list(),
+ irhopos = NULL, irhoneg = NULL,
+ iprob1 = NULL, iprob2 = NULL, zero = NULL)
{
print("hi 20100603")
if (mode(lrhopos) != "character" && mode(lrhopos) != "name")
@@ -1831,3 +1963,153 @@ zipebcom = function(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
+# Bivariate probit model with the correlation parameter rho held FIXED at a
+# known value (contrast binom2.rho above, which estimates rho as a third
+# linear predictor).  Returns a "vglmff" family object for vglm()/vgam().
+#   rho          -- the known latent-normal correlation.
+#   imu1, imu2   -- optional initial values for the two marginal probabilities.
+#   exchangeable -- if TRUE, constrain the two probit predictors to be equal.
+#   nsimEIM      -- if numeric, the expected information matrix is estimated
+#                   by simulation with this many replicates; otherwise the
+#                   exact expected information is used.
+ binom2.Rho = function(rho = 0, imu1 = NULL, imu2 = NULL,
+ exchangeable=FALSE, nsimEIM = NULL)
+{
+ lmu12 = "probit"
+ emu12 = list()
+# Validate nsimEIM only when supplied; small values give noisy EIM estimates.
+ if (is.Numeric(nsimEIM)) {
+ if (!is.Numeric(nsimEIM, allow=1, integ=TRUE))
+ stop("bad input for argument 'nsimEIM'")
+ if (nsimEIM <= 100)
+ warning("'nsimEIM' should be an integer greater than 100")
+ }
+
+ new("vglmff",
+ blurb = c("Bivariate probit model with rho = ", format(rho), "\n",
+ "Links: ",
+ namesof("mu1", lmu12, earg = emu12), ", ",
+ namesof("mu2", lmu12, earg = emu12)),
+# Exchangeability: force both probit predictors to share one coefficient.
+ constraints = eval(substitute(expression({
+ constraints = cm.vgam(matrix(c(1, 1), 2, 1), x,
+ .exchangeable, constraints, intercept.apply = TRUE)
+ }), list( .exchangeable = exchangeable ))),
+ deviance = Deviance.categorical.data.vgam,
+ initialize = eval(substitute(expression({
+ eval(process.binomial2.data.vgam)
+ predictors.names = c(
+ namesof("mu1", .lmu12, earg = .emu12, short = TRUE),
+ namesof("mu2", .lmu12, earg = .emu12, short = TRUE))
+
+ if (is.null( .nsimEIM)) {
+ save.weight <- control$save.weight <- FALSE
+ }
+# Initial marginals: user-supplied imu1/imu2 if given, else sums of the
+# fitted 4-column joint probabilities mu (columns: 00, 01, 10, 11).
+ if (is.null(etastart)) {
+ mu1.init= if (is.Numeric(.imu1)) rep(.imu1, len=n) else
+ mu[,3] + mu[,4]
+ mu2.init= if (is.Numeric(.imu2)) rep(.imu2, len=n) else
+ mu[,2] + mu[,4]
+ etastart = cbind(theta2eta(mu1.init, .lmu12, earg = .emu12),
+ theta2eta(mu2.init, .lmu12, earg = .emu12))
+ }
+ }), list( .lmu12 = lmu12, .emu12 = emu12, .nsimEIM = nsimEIM,
+ .imu1 = imu1, .imu2 = imu2 ))),
+# inverse: map the 2 probit predictors to the 4 joint cell probabilities.
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ pmargin = cbind(eta2theta(eta[,1], .lmu12, earg = .emu12),
+ eta2theta(eta[,2], .lmu12, earg = .emu12))
+ rhovec = rep( .rho, len = nrow(eta))
+ p11 = pnorm2(eta[,1], eta[,2], rhovec)
+ p01 = pmin(pmargin[,2] - p11, pmargin[,2])
+ p10 = pmin(pmargin[,1] - p11, pmargin[,1])
+ p00 = 1 - p01 - p10 - p11
+# abs() plus renormalization guards against tiny negative probabilities
+# produced by floating-point cancellation.
+ ansmat = abs(cbind("00"=p00, "01"=p01, "10"=p10, "11"=p11))
+ ansmat / rowSums(ansmat)
+ }, list( .lmu12 = lmu12, .emu12 = emu12, .rho = rho ))),
+ last = eval(substitute(expression({
+ misc$link = c(mu1 = .lmu12, mu2 = .lmu12)
+ misc$earg = list(mu1 = .emu12, mu2 = .emu12)
+ misc$nsimEIM = .nsimEIM
+ misc$expected = TRUE
+ misc$rho = .rho
+ }), list( .lmu12 = lmu12, .emu12 = emu12, .rho = rho,
+ .nsimEIM = nsimEIM ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+# Undo prior-weight scaling (extra$orig.w) to recover integer counts.
+ ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+
+ smallno = 1.0e4 * .Machine$double.eps
+ if (max(abs(ycounts - round(ycounts))) > smallno)
+ warning("converting 'ycounts' to integer in @loglikelihood")
+ ycounts = round(ycounts)
+
+ sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dmultinomial(x = ycounts, size = nvec, prob = mu,
+ log = TRUE, docheck = FALSE))
+ }
+ }, list( .rho = rho ))),
+ vfamily = c("binom2.Rho", "binom2"),
+ deriv = eval(substitute(expression({
+ pmargin = cbind(eta2theta(eta[,1], .lmu12, earg = .emu12),
+ eta2theta(eta[,2], .lmu12, earg = .emu12))
+ rhovec = rep( .rho, len = nrow(eta))
+ p11 = pnorm2(eta[,1], eta[,2], rhovec)
+ p01 = pmargin[,2]-p11
+ p10 = pmargin[,1]-p11
+ p00 = 1-p01-p10-p11
+
+ ABmat = (eta[,1:2] - rhovec*eta[,2:1]) / sqrt(1-rhovec^2)
+ PhiA = pnorm(ABmat[,1])
+ PhiB = pnorm(ABmat[,2])
+ onemPhiA = pnorm(ABmat[,1], lower.tail=FALSE)
+ onemPhiB = pnorm(ABmat[,2], lower.tail=FALSE)
+
+# Clamp cell probabilities away from zero before dividing by them.
+ smallno = 1000 * .Machine$double.eps
+ p00[p00 < smallno] = smallno
+ p01[p01 < smallno] = smallno
+ p10[p10 < smallno] = smallno
+ p11[p11 < smallno] = smallno
+
+ dprob00 = dnorm2(eta[,1], eta[,2], rhovec)
+ dl.dprob1 = PhiB*(y[,4]/p11-y[,2]/p01) + onemPhiB*(y[,3]/p10-y[,1]/p00)
+ dl.dprob2 = PhiA*(y[,4]/p11-y[,3]/p10) + onemPhiA*(y[,2]/p01-y[,1]/p00)
+ dprob1.deta = dtheta.deta(pmargin[,1], .lmu12, earg = .emu12)
+ dprob2.deta = dtheta.deta(pmargin[,2], .lmu12, earg = .emu12)
+ dthetas.detas = cbind(dprob1.deta, dprob2.deta)
+
+ w * cbind(dl.dprob1, dl.dprob2) * dthetas.detas
+ }), list( .lmu12 = lmu12, .emu12 = emu12, .rho = rho ))),
+ weight = eval(substitute(expression({
+# Exact expected information when nsimEIM is NULL; otherwise estimate it
+# by simulating responses from the fitted model (simulated Fisher scoring).
+ if (is.null( .nsimEIM)) {
+ d2l.dprob1prob1 = PhiB^2 *(1/p11+1/p01) + onemPhiB^2 *(1/p10+1/p00)
+ d2l.dprob2prob2 = PhiA^2 *(1/p11+1/p10) + onemPhiA^2 *(1/p01+1/p00)
+ d2l.dprob1prob2 = PhiA * (PhiB/p11 - onemPhiB/p10) +
+ onemPhiA * (onemPhiB/p00 - PhiB/p01)
+ wz = matrix(0, n, dimm(M)) # 6=dimm(M)
+ wz[,iam(1,1,M)] = d2l.dprob1prob1 * dprob1.deta^2
+ wz[,iam(2,2,M)] = d2l.dprob2prob2 * dprob2.deta^2
+ wz[,iam(1,2,M)] = d2l.dprob1prob2 * dprob1.deta * dprob2.deta
+ } else {
+ run.varcov = 0
+ ind1 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
+ for (ii in 1:( .nsimEIM )) {
+ ysim = rbinom2.rho(n=n, mu1=pmargin[,1], mu2=pmargin[,2],
+ twoCols=FALSE, rho=rhovec)
+ dl.dprob1 = PhiB * (ysim[,4]/p11-ysim[,2]/p01) +
+ onemPhiB * (ysim[,3]/p10-ysim[,1]/p00)
+ dl.dprob2 = PhiA * (ysim[,4]/p11-ysim[,3]/p10) +
+ onemPhiA * (ysim[,2]/p01-ysim[,1]/p00)
+
+ rm(ysim)
+ temp3 = cbind(dl.dprob1, dl.dprob2)
+# Running mean of the outer product of score vectors across simulations.
+ run.varcov = ((ii-1) * run.varcov +
+ temp3[,ind1$row.index] * temp3[,ind1$col.index]) / ii
+ }
+# Intercept-only models share one EIM; average it over the observations.
+ wz = if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
+
+ wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
+ }
+ w * wz
+ }), list( .nsimEIM = nsimEIM ))))
+}
+
+
+
+
+
diff --git a/R/family.bivariate.R b/R/family.bivariate.R
index 4e062d8..b5377cf 100644
--- a/R/family.bivariate.R
+++ b/R/family.bivariate.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -94,7 +95,8 @@ bilogistic4.control <- function(save.weight=TRUE, ...)
Scale2 = eta2theta(eta[,4], .lscale)
zedd1 = (y[,1]-loc1) / Scale1
zedd2 = (y[,2]-loc2) / Scale2
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
sum(w * (-zedd1 - zedd2 - 3 * log1p(exp(-zedd1)+exp(-zedd2)) -
log(Scale1) - log(Scale2)))
}, list(.lscale=lscale, .llocation=llocation))),
@@ -260,7 +262,8 @@ dbilogis4 = function(x1, x2, loc1=0, scale1=1, loc2=0, scale2=1, log=FALSE) {
alphap = eta2theta(eta[,2], .lap)
beta = eta2theta(eta[,3], .lb)
betap = eta2theta(eta[,4], .lbp)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
tmp88 = extra$y1.lt.y2
ell1 = log(alpha[tmp88]) + log(betap[tmp88]) -
betap[tmp88] * y[tmp88,2] -
@@ -414,7 +417,8 @@ dbilogis4 = function(x1, x2, loc1=0, scale1=1, loc2=0, scale2=1, log=FALSE) {
a = eta2theta(eta[,1], .lscale)
p = eta2theta(eta[,2], .lshape1)
q = eta2theta(eta[,3], .lshape2)
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
sum(w * (-(p+q)*log(a) - lgamma(p) - lgamma(q) +
(p-1)*log(y[,1]) + (q-1)*log(y[,2]-y[,1]) - y[,2] / a))
}, list( .lscale=lscale, .lshape1=lshape1, .lshape2=lshape2 ))),
@@ -593,7 +597,8 @@ frank.control <- function(save.weight=TRUE, ...)
loglikelihood= eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
apar = eta2theta(eta, .lapar, earg= .eapar )
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
sum(w * dfrank(x1=y[,1], x2=y[,2], alpha=apar, log = TRUE))
}
}, list(.lapar=lapar, .eapar=eapar ))),
@@ -695,7 +700,8 @@ frank.control <- function(save.weight=TRUE, ...)
loglikelihood= eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
theta = eta2theta(eta, .ltheta)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
sum(w * (-exp(-theta)*y[,1]/theta - theta*y[,2]))
}
}, list(.ltheta=ltheta))),
@@ -778,7 +784,8 @@ frank.control <- function(save.weight=TRUE, ...)
function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
alpha = eta2theta(eta, .lapar, earg= .earg )
alpha[abs(alpha) < .tola0 ] = .tola0
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
denom = (1 + alpha - 2*alpha*(exp(-y[,1]) + exp(-y[,2])) +
4*alpha*exp(-y[,1] - y[,2]))
sum(w * (-y[,1] - y[,2] + log(denom)))
@@ -952,7 +959,8 @@ fgm.control <- function(save.weight=TRUE, ...)
loglikelihood= eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
alpha = eta2theta(eta, .lapar, earg= .earg )
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
sum(w * dfgm(x1=y[,1], x2=y[,2], alpha=alpha, log=TRUE))
}
}, list( .lapar=lapar, .earg=earg ))),
@@ -1040,7 +1048,8 @@ fgm.control <- function(save.weight=TRUE, ...)
loglikelihood= eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
alpha = eta2theta(eta, .lapar, earg= .earg )
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
denom = (alpha*y[,1] - 1) * (alpha*y[,2] - 1) + alpha
mytolerance = .Machine$double.xmin
bad <- (denom <= mytolerance) # Range violation
@@ -1229,7 +1238,8 @@ plackett.control <- function(save.weight=TRUE, ...)
loglikelihood= eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
oratio = eta2theta(eta, .link, earg= .earg )
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
sum(w * dplack(x1= y[,1], x2= y[,2], oratio=oratio, log=TRUE))
}
}, list( .link=link, .earg=earg ))),
@@ -1398,7 +1408,8 @@ amh.control <- function(save.weight=TRUE, ...)
loglikelihood= eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
alpha = eta2theta(eta, .lalpha, earg= .ealpha )
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
sum(w * damh(x1=y[,1], x2=y[,2], alpha=alpha, log=TRUE))
}
}, list( .lalpha=lalpha, .earg=ealpha ))),
diff --git a/R/family.categorical.R b/R/family.categorical.R
index d39e0d7..7f37bb1 100644
--- a/R/family.categorical.R
+++ b/R/family.categorical.R
@@ -1,44 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
-
-
-
-
-
-
-
-dmultinomial = function(x, size = NULL, prob, log = FALSE,
- dochecking=TRUE, smallno = 1.0e-7) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
-
- x = as.matrix(x)
- prob = as.matrix(prob)
- if (((K <- ncol(x)) <= 1) || ncol(prob) != K)
- stop("'x' and 'prob' must be matrices with two or more columns")
- if (dochecking) {
- if (min(prob) < 0)
- stop("'prob' contains some negative values")
- if (any(abs((rsprob <- rowSums(prob)) - 1) > smallno))
- stop("some rows of 'prob' do not add to unity")
- if (any(abs(x - round(x)) > smallno))
- stop("'x' should be integer valued")
- if (length(size)) {
- if (any(abs(size - rowSums(x)) > smallno))
- stop("rowSums(x) does not agree with 'size'")
- } else {
- size = round(rowSums(x))
- }
- } else {
- if (!length(size))
- size = round(rowSums(prob))
- }
- logdensity = lgamma(size + 1) + rowSums(x * log(prob) - lgamma(x + 1))
- if (log.arg) logdensity else exp(logdensity)
-}
-
-
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -54,8 +16,6 @@ process.categorical.data.vgam = expression({
if (!all(w == 1))
extra$orig.w = w
- print("head(extra$orig.w)")
- print( head(extra$orig.w) )
if (!is.matrix(y)) {
yf = as.factor(y)
@@ -67,20 +27,24 @@ process.categorical.data.vgam = expression({
dimnames(y) = list(names(yf), lev)
if (llev <= 1)
- stop("the response matrix does not have 2 or more columns")
+ stop("the response matrix does not have 2 or more columns")
} else {
nn = nrow(y)
}
nvec = rowSums(y)
+ if (min(y) < 0 || any(round(y) != y))
+ stop("the response must be non-negative counts (integers)")
+
if (!exists("delete.zero.colns") ||
(exists("delete.zero.colns") && delete.zero.colns)) {
sumy2 = colSums(y)
if (any(index <- sumy2==0)) {
y = y[,!index, drop = FALSE]
sumy2 = sumy2[!index]
- if (all(index) || ncol(y)<=1) stop("'y' matrix has 0 or 1 columns")
+ if (all(index) || ncol(y)<=1)
+ stop("'y' matrix has 0 or 1 columns")
warning("Deleted ", sum(!index),
" columns of the response matrix due to zero counts")
}
@@ -91,20 +55,22 @@ process.categorical.data.vgam = expression({
smiss <- sum(miss)
warning("Deleted ", smiss,
" rows of the response matrix due to zero counts")
- x = x[!miss,,drop = FALSE]
- y = y[!miss,,drop = FALSE]
+ x = x[!miss,, drop = FALSE]
+ y = y[!miss,, drop = FALSE]
w = cbind(w)
- w = w[!miss,,drop = FALSE]
+ w = w[!miss,, drop = FALSE]
nvec = nvec[!miss]
nn = nn - smiss
}
w = w * nvec
+
nvec[nvec == 0] = 1
y = prop.table(y, 1) # Convert to proportions
- if (!length(mustart)) {
+
+ if (length(mustart) + length(etastart) == 0) {
mustart = y + (1 / ncol(y) - y) / nvec
}
})
@@ -154,6 +120,45 @@ Deviance.categorical.data.vgam <-
+dmultinomial = function(x, size = NULL, prob, log = FALSE,
+ dochecking=TRUE, smallno = 1.0e-7) {
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ x = as.matrix(x)
+ prob = as.matrix(prob)
+ if (((K <- ncol(x)) <= 1) || ncol(prob) != K)
+ stop("'x' and 'prob' must be matrices with two or more columns")
+ if (dochecking) {
+ if (min(prob) < 0)
+ stop("'prob' contains some negative values")
+ if (any(abs((rsprob <- rowSums(prob)) - 1) > smallno))
+ stop("some rows of 'prob' do not add to unity")
+ if (any(abs(x - round(x)) > smallno))
+ stop("'x' should be integer valued")
+ if (length(size)) {
+ if (any(abs(size - rowSums(x)) > smallno))
+ stop("rowSums(x) does not agree with 'size'")
+ } else {
+ size = round(rowSums(x))
+ }
+ } else {
+ if (!length(size))
+ size = round(rowSums(prob))
+ }
+ logdensity = lgamma(size + 1) + rowSums(x * log(prob) - lgamma(x + 1))
+ if (log.arg) logdensity else exp(logdensity)
+}
+
+
+
+
+
+
+
+
+
sratio = function(link = "logit", earg = list(),
parallel = FALSE, reverse = FALSE, zero = NULL)
{
@@ -182,7 +187,7 @@ Deviance.categorical.data.vgam <-
mynames = if ( .reverse)
paste("P[Y=", 2:(M+1),"|Y<=", 2:(M+1),"]", sep="") else
paste("P[Y=", 1:M, "|Y>=", 1:M, "]", sep="")
- predictors.names = namesof(mynames, .link, short=TRUE, earg = .earg)
+ predictors.names = namesof(mynames, .link, short = TRUE, earg = .earg)
y.names = paste("mu", 1:(M+1), sep="")
extra$mymat = if ( .reverse ) tapplymat1(y, "cumsum") else
tapplymat1(y[,ncol(y):1], "cumsum")[,ncol(y):1]
@@ -310,7 +315,7 @@ Deviance.categorical.data.vgam <-
mynames = if ( .reverse )
paste("P[Y<",2:(M+1),"|Y<=",2:(M+1),"]", sep="") else
paste("P[Y>",1:M,"|Y>=",1:M,"]", sep="")
- predictors.names = namesof(mynames, .link, short=TRUE, earg = .earg)
+ predictors.names = namesof(mynames, .link, short = TRUE, earg = .earg)
y.names = paste("mu", 1:(M+1), sep="")
extra$mymat = if ( .reverse ) tapplymat1(y, "cumsum") else
tapplymat1(y[,ncol(y):1], "cumsum")[,ncol(y):1]
@@ -446,8 +451,8 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic=TRUE, ...)
- multinomial = function(zero = NULL, parallel = FALSE, nointercept=NULL,
- refLevel="last")
+ multinomial = function(zero = NULL, parallel = FALSE, nointercept = NULL,
+ refLevel = "last")
{
if (length(refLevel) != 1) stop("the length of 'refLevel' must be one")
if (is.character(refLevel)) {
@@ -644,9 +649,9 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic=TRUE, ...)
cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
aindex = (iii-1)*(Llevels) + 1:(Llevels)
totdev = totdev + Deviance.categorical.data.vgam(
- mu=mu[,aindex,drop=FALSE],
- y=y[,aindex,drop=FALSE], w=w, residuals=residuals,
- eta=eta[,cindex,drop=FALSE], extra=extra)
+ mu=mu[,aindex, drop = FALSE],
+ y=y[,aindex, drop = FALSE], w=w, residuals=residuals,
+ eta=eta[,cindex, drop = FALSE], extra=extra)
}
totdev
} else {
@@ -688,20 +693,23 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic=TRUE, ...)
paste("P[",Y.names,"<=",1:(Llevels-1),"]", sep=""))
y.names = c(y.names, paste(mu.names, 1:Llevels, sep=""))
}
- predictors.names = namesof(mynames, .link, short=TRUE, earg = .earg)
+ predictors.names = namesof(mynames, .link, short = TRUE, earg = .earg)
extra$NOS = NOS
extra$Llevels = Llevels
} else {
delete.zero.colns=TRUE # Cannot have FALSE since then prob(Y=jay)=0
eval(process.categorical.data.vgam)
M = ncol(y)-1
- mynames = if ( .reverse ) paste("P[Y>=",2:(1+M),"]", sep="") else
- paste("P[Y<=",1:M,"]", sep="")
- predictors.names = namesof(mynames, .link, short=TRUE, earg = .earg)
- y.names = paste("mu", 1:(M+1), sep="")
+ mynames = if ( .reverse )
+ paste("P[Y>=", 2:(1+M), "]", sep = "") else
+ paste("P[Y<=", 1:M, "]", sep = "")
+ predictors.names =
+ namesof(mynames, .link, short = TRUE, earg = .earg)
+ y.names = paste("mu", 1:(M+1), sep = "")
if (ncol(cbind(w)) == 1) {
- for(iii in 1:ncol(y))
- mustart[,iii] = weighted.mean(y[,iii], w)
+ if (length(mustart) && all(c(y) %in% c(0, 1)))
+ for(iii in 1:ncol(y))
+ mustart[,iii] = weighted.mean(y[,iii], w)
}
if (length(dimnames(y)))
@@ -718,12 +726,12 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic=TRUE, ...)
cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
aindex = (iii-1)*(Llevels) + 1:(Llevels)
if ( .reverse ) {
- ccump = cbind(1,eta2theta(eta[,cindex,drop=FALSE], .link,
+ ccump = cbind(1,eta2theta(eta[,cindex, drop = FALSE], .link,
earg= .earg))
fv.matrix[,aindex] =
cbind(-tapplymat1(ccump, "diff"), ccump[,ncol(ccump)])
} else {
- cump = cbind(eta2theta(eta[,cindex,drop=FALSE], .link,
+ cump = cbind(eta2theta(eta[,cindex, drop = FALSE], .link,
earg= .earg), 1)
fv.matrix[,aindex] =
cbind(cump[,1], tapplymat1(cump, "diff"))
@@ -744,7 +752,7 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic=TRUE, ...)
fv.matrix
}
answer
- }, list( .link = link, .reverse = reverse, .earg= earg, .mv = mv ))),
+ }, list( .link = link, .reverse = reverse, .earg = earg, .mv = mv ))),
last = eval(substitute(expression({
if ( .mv ) {
misc$link = .link
@@ -762,7 +770,7 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic=TRUE, ...)
misc$parallel = .parallel
misc$mv = .mv
}), list( .link = link, .reverse = reverse, .parallel = parallel,
- .mv = mv, .earg= earg ))),
+ .mv = mv, .earg = earg ))),
link = eval(substitute( function(mu, extra = NULL) {
answer =
if ( .mv ) {
@@ -814,11 +822,11 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic=TRUE, ...)
for(iii in 1:NOS) {
cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
aindex = (iii-1)*(Llevels) + 1:(Llevels-1)
- cump = eta2theta(eta[,cindex,drop=FALSE], .link, earg = .earg)
+ cump = eta2theta(eta[,cindex, drop = FALSE], .link, earg = .earg)
dcump.deta[,cindex] = dtheta.deta(cump, .link, earg = .earg)
resmat[,cindex] =
- (y[,aindex,drop=FALSE]/mu.use[,aindex,drop=FALSE] -
- y[,1+aindex,drop=FALSE]/mu.use[,1+aindex,drop=FALSE])
+ (y[,aindex, drop = FALSE]/mu.use[,aindex, drop = FALSE] -
+ y[,1+aindex, drop = FALSE]/mu.use[,1+aindex, drop = FALSE])
}
(if ( .reverse) -w else w) * dcump.deta * resmat
} else {
@@ -828,7 +836,7 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic=TRUE, ...)
(y[,-(M+1)]/mu.use[,-(M+1)] - y[,-1]/mu.use[,-1])
}
deriv.answer
- }), list( .link = link, .reverse = reverse, .earg= earg, .mv=mv ))),
+ }), list( .link = link, .reverse = reverse, .earg = earg, .mv=mv ))),
weight = eval(substitute(expression({
if ( .mv ) {
NOS = extra$NOS
@@ -837,8 +845,8 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic=TRUE, ...)
for(iii in 1:NOS) {
cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
aindex = (iii-1)*(Llevels) + 1:(Llevels-1)
- wz[,cindex] = w * dcump.deta[,cindex,drop=FALSE]^2 *
- (1/mu.use[,aindex,drop=FALSE] + 1/mu.use[,1+aindex,drop=FALSE])
+ wz[,cindex] = w * dcump.deta[,cindex, drop = FALSE]^2 *
+ (1/mu.use[,aindex, drop = FALSE] + 1/mu.use[,1+aindex, drop = FALSE])
}
if (Llevels-1 > 1) {
iii = 1
@@ -910,13 +918,11 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic=TRUE, ...)
delete.zero.colns = TRUE
eval(process.categorical.data.vgam)
M = ncol(y) - 1
- print("y 20100607")
- print( y )
mynames = if ( .reverse )
paste("P[Y=", 1:M, "]/P[Y=", 2:(M+1), "]", sep="") else
paste("P[Y=", 2:(M+1), "]/P[Y=", 1:M, "]", sep="")
- predictors.names = namesof(mynames, .link, short=TRUE, earg = .earg)
+ predictors.names = namesof(mynames, .link, short = TRUE, earg = .earg)
y.names = paste("mu", 1:(M+1), sep="")
if (length(dimnames(y)))
extra$dimnamesy2 = dimnames(y)[[2]]
@@ -927,7 +933,7 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic=TRUE, ...)
M = ncol(eta)
fv.matrix = if ( .reverse ) {
zetar = eta2theta(eta, .link, earg = .earg )
- temp = tapplymat1(zetar[,M:1], "cumprod")[,M:1,drop=FALSE]
+ temp = tapplymat1(zetar[,M:1], "cumprod")[,M:1, drop = FALSE]
cbind(temp,1) / drop(1 + temp %*% rep(1,ncol(temp)))
} else {
zeta = eta2theta(eta, .link, earg = .earg )
@@ -1067,7 +1073,7 @@ acat.deriv = function(zeta, reverse, M, n)
extra$ybrat.indices = .brat.indices(NCo=M+1, are.ties=FALSE)
uindex = if ( .refgp =="last") 1:M else (1:(M+1))[-( .refgp ) ]
- predictors.names=namesof(paste("alpha",uindex,sep=""),"loge",short=TRUE)
+ predictors.names=namesof(paste("alpha",uindex,sep=""),"loge",short = TRUE)
}), list( .refgp = refgp, .init.alpha=init.alpha ))),
inverse = eval(substitute( function(eta, extra = NULL) {
probs = NULL
@@ -1202,8 +1208,8 @@ bratt = function(refgp="last",
uindex = if (refgp=="last") 1:(NCo-1) else (1:(NCo))[-refgp ]
predictors.names=c(
- namesof(paste("alpha",uindex,sep=""),"loge",short=TRUE),
- namesof("alpha0", "loge", short=TRUE))
+ namesof(paste("alpha",uindex,sep=""),"loge",short = TRUE),
+ namesof("alpha0", "loge", short = TRUE))
}), list( .refgp = refgp,
.i0 = i0,
.init.alpha=init.alpha ))),
@@ -1232,7 +1238,8 @@ bratt = function(refgp="last",
misc$alpha0 = alpha0
}), list( .refgp = refgp, .refvalue = refvalue ))),
loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
sum(w * (y * log(mu) + 0.5 * extra$ties * log(attr(mu, "probtie"))))
},
vfamily = c("bratt"),
@@ -1427,7 +1434,7 @@ tapplymat1 = function(mat, function.arg=c("cumsum", "diff", "cumprod"))
dimnames(fred$mat) = dimnames(mat)
switch(function.arg,
cumsum =fred$mat,
- diff =fred$mat[,-1,drop=FALSE],
+ diff =fred$mat[,-1, drop = FALSE],
cumprod=fred$mat)
}
@@ -1515,7 +1522,7 @@ tapplymat1 = function(mat, function.arg=c("cumsum", "diff", "cumprod"))
extra$cutpoints = cp.vector
extra$n = n
mynames = if (M > 1) paste("mu",1:M,sep="") else "mu"
- predictors.names = namesof(mynames, .link, short=TRUE, earg = .earg)
+ predictors.names = namesof(mynames, .link, short = TRUE, earg = .earg)
}), list( .link = link, .countdata = countdata, .earg = earg,
.cutpoints=cutpoints, .NOS=NOS, .Levels=Levels,
.init.mu = init.mu
@@ -1524,7 +1531,7 @@ tapplymat1 = function(mat, function.arg=c("cumsum", "diff", "cumprod"))
mu = eta2theta(eta, link= .link, earg = .earg) # Poisson means
mu = cbind(mu)
mu
- }, list( .link = link, .earg= earg, .countdata = countdata ))),
+ }, list( .link = link, .earg = earg, .countdata = countdata ))),
last = eval(substitute(expression({
if ( .countdata ) {
misc$link = .link
@@ -1539,9 +1546,10 @@ tapplymat1 = function(mat, function.arg=c("cumsum", "diff", "cumprod"))
misc$parameters = mynames
misc$countdata = .countdata
misc$true.mu = FALSE # $fitted is not a true mu
- }), list( .link = link, .countdata = countdata, .earg= earg ))),
+ }), list( .link = link, .countdata = countdata, .earg = earg ))),
loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
probs = ordpoissonProbs(extra, mu)
index0 = y == 0
probs[index0] = 1
@@ -1571,7 +1579,7 @@ tapplymat1 = function(mat, function.arg=c("cumsum", "diff", "cumprod"))
}
resmat = w * resmat * dmu.deta
resmat
- }), list( .link = link, .earg= earg, .countdata=countdata ))),
+ }), list( .link = link, .earg = earg, .countdata=countdata ))),
weight = eval(substitute(expression({
d2l.dmu2 = matrix(0, n, M) # Diagonal matrix
cptr = 1
@@ -1674,7 +1682,7 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
for(ii in 1:length(constraints))
constraints[[ii]] =
- (constraints[[ii]])[interleave.VGAM(M, M=2),,drop=FALSE]
+ (constraints[[ii]])[interleave.VGAM(M, M=2),, drop = FALSE]
}), list( .parallel = parallel, .sparallel=sparallel ))),
deviance=eval(substitute(
function(mu, y, w, residuals=FALSE, eta, extra = NULL) {
@@ -1694,9 +1702,9 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
mynames = if ( .reverse ) paste("P[Y>=",2:(1+J),"]", sep="") else
paste("P[Y<=",1:J,"]", sep="")
predictors.names = c(
- namesof(mynames, .link, short=TRUE, earg = .earg),
+ namesof(mynames, .link, short = TRUE, earg = .earg),
namesof(paste("scale_", 1:J, sep=""),
- .lscale, short=TRUE, earg = .escale))
+ .lscale, short = TRUE, earg = .escale))
y.names = paste("mu", 1:(J+1), sep="")
if (length(dimnames(y)))
@@ -1704,13 +1712,13 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
predictors.names = predictors.names[interleave.VGAM(M, M=2)]
- }), list( .link = link, .lscale=lscale, .reverse = reverse,
- .earg= earg, .escale=escale ))),
+ }), list( .link = link, .lscale = lscale, .reverse = reverse,
+ .earg = earg, .escale = escale ))),
inverse = eval(substitute( function(eta, extra = NULL) {
J = extra$J
M = 2*J
- etamat1 = eta[,2*(1:J)-1,drop=FALSE]
- etamat2 = eta[,2*(1:J), drop=FALSE]
+ etamat1 = eta[,2*(1:J)-1, drop = FALSE]
+ etamat2 = eta[,2*(1:J), drop = FALSE]
scalemat = eta2theta(etamat2, .lscale, earg = .escale)
fv.matrix =
if ( .reverse ) {
@@ -1723,8 +1731,8 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
if (length(extra$dimnamesy2))
dimnames(fv.matrix) = list(dimnames(eta)[[1]], extra$dimnamesy2)
fv.matrix
- }, list( .link = link, .lscale=lscale, .reverse = reverse,
- .earg= earg, .escale=escale ))),
+ }, list( .link = link, .lscale = lscale, .reverse = reverse,
+ .earg = earg, .escale = escale ))),
last = eval(substitute(expression({
J = extra$J
misc$link = c(rep( .link, length=J),
@@ -1738,9 +1746,9 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
misc$reverse = .reverse
misc$parallel = .parallel
misc$sparallel = .sparallel
- }), list( .link = link, .lscale=lscale,
+ }), list( .link = link, .lscale = lscale,
.reverse = reverse, .parallel = parallel, .sparallel=sparallel,
- .earg = earg, .escale=escale ))),
+ .earg = earg, .escale = escale ))),
link = eval(substitute( function(mu, extra = NULL) {
cump = tapplymat1(as.matrix(mu), "cumsum")
J = ncol(as.matrix(mu)) - 1
@@ -1752,8 +1760,8 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
nrow(as.matrix(mu)), J, byrow=TRUE))
answer = answer[,interleave.VGAM(M, M=2)]
answer
- }, list( .link = link, .lscale=lscale, .reverse = reverse,
- .iscale=iscale, .earg = earg, .escale=escale ))),
+ }, list( .link = link, .lscale = lscale, .reverse = reverse,
+ .iscale=iscale, .earg = earg, .escale = escale ))),
loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
@@ -1778,8 +1786,8 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
J = extra$J
mu.use = pmax(mu, .Machine$double.eps * 1.0e-0)
- etamat1 = eta[,2*(1:J)-1,drop=FALSE]
- etamat2 = eta[,2*(1:J), drop=FALSE]
+ etamat1 = eta[,2*(1:J)-1, drop = FALSE]
+ etamat2 = eta[,2*(1:J), drop = FALSE]
scalemat = eta2theta(etamat2, .lscale, earg = .escale)
cump = eta2theta(etamat1 / scalemat, .link, earg = .earg)
@@ -1793,8 +1801,8 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
ans = ans[,interleave.VGAM(M, M=2)]
if (ooz) ans[,c(TRUE,FALSE)] = 0 else ans[,c(FALSE,TRUE)] = 0
ans
- }), list( .link = link, .lscale=lscale, .reverse = reverse,
- .earg= earg, .escale=escale ))),
+ }), list( .link = link, .lscale = lscale, .reverse = reverse,
+ .earg = earg, .escale = escale ))),
weight = eval(substitute(expression({
wz = matrix(0, n, 2*(2*M-3))
@@ -1836,7 +1844,8 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
wz[,iam(2*ii,2*ii+1,M=M)] = if (ooz) wz0[,ii] else 0
}
wz
- }), list( .link = link, .lscale=lscale, .earg = earg, .escale=escale ))))
+ }), list( .link = link, .lscale = lscale, .earg = earg,
+ .escale = escale ))))
}
@@ -2004,7 +2013,7 @@ prplot = function(object,
use.y[,ii] = eta2theta(use.y[,ii], link=object at misc$link[[ii]],
earg=object at misc$earg[[ii]])
}
- if (ncol(use.y) != MM) use.y = use.y[,1:MM,drop=FALSE]
+ if (ncol(use.y) != MM) use.y = use.y[,1:MM, drop = FALSE]
use.x = (object at preplot[[1]])$x
myxlab = if (length(control$xlab)) control$xlab else (object at preplot[[1]])$xlab
diff --git a/R/family.censored.R b/R/family.censored.R
index 360d68c..6746b25 100644
--- a/R/family.censored.R
+++ b/R/family.censored.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -11,7 +12,7 @@
-cenpoisson = function(link = "loge", earg = list(), imu = NULL) {
+ cenpoisson = function(link = "loge", earg = list(), imu = NULL) {
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg))
@@ -28,7 +29,7 @@ cenpoisson = function(link = "loge", earg = list(), imu = NULL) {
if (any(y != round(y)))
warning("the response should be integer-valued")
centype = attr(y, "type")
- if (centype=="right") {
+ if (centype == "right") {
temp = y[, 2]
extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
extra$rightcensored = ifelse(temp == 0, TRUE, FALSE)
@@ -36,7 +37,7 @@ cenpoisson = function(link = "loge", earg = list(), imu = NULL) {
extra$interval = rep(FALSE, len=n)
init.mu = pmax(y[,1], 1/8)
} else
- if (centype=="left") {
+ if (centype == "left") {
temp = y[, 2]
extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
extra$rightcensored = rep(FALSE, len=n)
@@ -44,7 +45,7 @@ cenpoisson = function(link = "loge", earg = list(), imu = NULL) {
extra$interval = rep(FALSE, len=n)
init.mu = pmax(y[,1], 1/8)
} else
- if (centype=="interval" || centype=="interval2") {
+ if (centype == "interval" || centype == "interval2") {
temp = y[, 3]
extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
extra$rightcensored = ifelse(temp == 0, TRUE, FALSE)
@@ -54,12 +55,12 @@ cenpoisson = function(link = "loge", earg = list(), imu = NULL) {
if (any(extra$uncensored))
init.mu[extra$uncensored] = pmax(y[extra$uncensored,1], 1/8)
if (any(extra$rightcensored))
- init.mu[extra$rightcensored] = pmax(y[extra$rightcensored,1], 1/8)
+ init.mu[extra$rightcensored] = pmax(y[extra$rightcensored,1], 1/8)
if (any(extra$leftcensored))
- init.mu[extra$leftcensored] = pmax(y[extra$leftcensored,1], 1/8)
+ init.mu[extra$leftcensored] = pmax(y[extra$leftcensored,1], 1/8)
} else
- if (centype=="counting") {
- stop("type=='counting' not compatible with cenpoisson()")
+ if (centype == "counting") {
+ stop("type == 'counting' not compatible with cenpoisson()")
init.mu = pmax(y[,1], 1/8)
stop("currently not working")
} else
@@ -67,7 +68,7 @@ cenpoisson = function(link = "loge", earg = list(), imu = NULL) {
if (length( .imu )) init.mu = 0 * y[,1] + .imu
- predictors.names = namesof("mu", .link, earg= .earg, short=TRUE)
+ predictors.names = namesof("mu", .link, earg = .earg, short=TRUE)
if (!length(etastart))
etastart = theta2eta(init.mu, link = .link, earg = .earg)
}), list( .link = link, .earg = earg, .imu = imu))),
@@ -83,7 +84,8 @@ cenpoisson = function(link = "loge", earg = list(), imu = NULL) {
link = eval(substitute(function(mu, extra = NULL) {
theta2eta(mu, link = .link, earg = .earg)
}, list( .link = link, .earg = earg ))),
- loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL) {
cen0 = extra$uncensored
cenL = extra$leftcensored
cenU = extra$rightcensored
@@ -129,11 +131,11 @@ cenpoisson = function(link = "loge", earg = list(), imu = NULL) {
dl.dlambda[cenI] =
(-densm02[cenI]+densm12[cenI]) / (Queue2[cenI]-Queue1[cenI])
}
- dlambda.deta = dtheta.deta(theta=lambda, link= .link, earg= .earg)
+ dlambda.deta = dtheta.deta(theta=lambda, link= .link, earg = .earg)
w * dl.dlambda * dlambda.deta
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- d2lambda.deta2 = d2theta.deta2(theta=lambda, link= .link, earg= .earg)
+ d2lambda.deta2 = d2theta.deta2(theta=lambda, link= .link, earg = .earg)
d2l.dlambda2 = 1 / lambda # uncensored; Fisher scoring
if (any(cenU)) {
densm2 = dpois(yllim-2, lambda)
@@ -161,8 +163,8 @@ cenpoisson = function(link = "loge", earg = list(), imu = NULL) {
if (FALSE)
-cexpon =
-ecexpon = function(link="loge", location=0)
+ cexpon =
+ ecexpon = function(link="loge", location=0)
{
if (!is.Numeric(location, allow=1))
stop("bad input for 'location'")
@@ -170,53 +172,54 @@ ecexpon = function(link="loge", location=0)
link = as.character(substitute(link))
new("vglmff",
- blurb=c("Censored exponential distribution\n\n",
- "Link: ", namesof("rate", link, tag= TRUE), "\n",
- "Mean: ", "mu = ", location, " + 1 / ",
- namesof("rate", link, tag= FALSE), "\n",
- "Variance: ",
- if (location==0) "Exponential: mu^2" else
- paste("(mu-", location, ")^2", sep="")),
- initialize=eval(substitute(expression({
+ blurb = c("Censored exponential distribution\n\n",
+ "Link: ", namesof("rate", link, tag = TRUE), "\n",
+ "Mean: ", "mu = ", location, " + 1 / ",
+ namesof("rate", link, tag = FALSE), "\n",
+ "Variance: ",
+ if (location == 0) "Exponential: mu^2" else
+ paste("(mu-", location, ")^2", sep="")),
+ initialize = eval(substitute(expression({
extra$location = .location # This is passed into, e.g., link, deriv etc.
if (any(y[,1] <= extra$location))
stop("all responses must be greater than ", extra$location)
- predictors.names = namesof("rate", .link, tag= FALSE)
+ predictors.names = namesof("rate", .link, tag = FALSE)
type <- attr(y, "type")
- if (type=="right" || type=="left"){
+ if (type == "right" || type == "left"){
mu = y[,1] + (abs(y[,1] - extra$location) < 0.001) / 8
}else
- if (type=="interval"){
+ if (type == "interval"){
temp <- y[,3]
- mu = ifelse(temp==3, y[,2] + (abs(y[,2] - extra$location) < 0.001)/8,
+ mu = ifelse(temp == 3, y[,2] + (abs(y[,2] - extra$location)
+ < 0.001)/8,
y[,1] + (abs(y[,1] - extra$location) < 0.001) / 8)
}
if (!length(etastart))
etastart = theta2eta(1/(mu-extra$location), .link)
- if (type=="right") {
+ if (type == "right") {
temp <- y[, 2]
extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
extra$rightcensored = ifelse(temp == 0, TRUE, FALSE)
extra$leftcensored = rep(FALSE, len=n)
extra$interval = rep(FALSE, len=n)
} else
- if (type=="left") {
+ if (type == "left") {
temp <- y[, 2]
extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
extra$rightcensored = rep(FALSE, len=n)
extra$leftcensored = ifelse(temp == 0, TRUE, FALSE)
extra$interval = rep(FALSE, len=n)
} else
- if (type=="counting") {
- stop("type=='counting' not recognized")
+ if (type == "counting") {
+ stop("type == 'counting' not recognized")
extra$uncensored = rep(temp == 1, TRUE, FALSE)
extra$interval = rep(FALSE, len=n)
extra$leftcensored = rep(FALSE, len=n)
extra$rightcensored = rep(FALSE, len=n)
extra$counting = ifelse(temp == 0, TRUE, FALSE)
} else
- if (type=="interval") {
+ if (type == "interval") {
temp <- y[, 3]
extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
extra$rightcensored = ifelse(temp == 0, TRUE, FALSE)
@@ -228,33 +231,34 @@ ecexpon = function(link="loge", location=0)
#if(!length(extra$rightcensored)) extra$rightcensored = rep(FALSE, len=n)
#if(any(extra$rightcensored & extra$leftcensored))
# stop("some observations are both right and left censored!")
- }), list( .location=location, .link=link ))),
- inverse=eval(substitute(function(eta, extra=NULL)
+ }), list( .location=location, .link = link ))),
+ inverse = eval(substitute(function(eta, extra = NULL)
extra$location + 1 / eta2theta(eta, .link),
- list( .link=link ) )),
- last=eval(substitute(expression({
+ list( .link = link ) )),
+ last = eval(substitute(expression({
misc$location = extra$location
misc$link = c("rate" = .link)
- }), list( .link=link ))),
- link=eval(substitute(function(mu, extra=NULL)
+ }), list( .link = link ))),
+ link=eval(substitute(function(mu, extra = NULL)
theta2eta(1/(mu-extra$location), .link),
- list( .link=link ) )),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ list( .link = link ) )),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
rate = 1 / (mu - extra$location)
cen0 = extra$uncensored
cenL = extra$leftcensored
cenU = extra$rightcensored
cenI = extra$interval
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
sum(w[cenL] * log1p(-exp(-rate[cenL]*(y[cenL,1]-extra$location)))) +
sum(w[cenU] * (-rate[cenU]*(y[cenU,1]-extra$location))) +
sum(w[cen0] * (log(rate[cen0]) - rate[cen0]*(y[cen0,1]-extra$location)))+
sum(w[cenI] * log(-exp(-rate[cenI]*(y[cenI,2]-extra$location))+
exp(-rate[cenI]*(y[cenI,1]-extra$location))))
- }, list( .link=link ))),
- vfamily=c("ecexpon"),
- deriv=eval(substitute(expression({
+ }, list( .link = link ))),
+ vfamily = c("ecexpon"),
+ deriv = eval(substitute(expression({
rate = 1 / (mu - extra$location)
cen0 = extra$uncensored
cenL = extra$leftcensored
@@ -274,8 +278,8 @@ ecexpon = function(link="loge", location=0)
(-tmp200b[cenI]+tmp200[cenI])
drate.deta = dtheta.deta(rate, .link)
w * dl.drate * drate.deta
- }), list( .link=link ) )),
- weight=eval(substitute(expression({
+ }), list( .link = link ) )),
+ weight = eval(substitute(expression({
A123 = ((mu-extra$location)^2) # uncensored d2l.drate2
Lowpt = ifelse(cenL, y[,1], extra$location)
Lowpt = ifelse(cenI, y[,1], Lowpt) #interval censored
@@ -290,59 +294,63 @@ ecexpon = function(link="loge", location=0)
exp(-rate*(Upppt-extra$location))) * A123
wz = w * (drate.deta^2) * d2l.drate2
wz
- }), list( .link=link ))))
+ }), list( .link = link ))))
}
-cnormal1 = function(lmu="identity", lsd="loge", imethod=1, zero=2)
+
+
+ cnormal1 = function(lmu="identity", lsd="loge", method.init=1, zero=2)
{
if (mode(lmu) != "character" && mode(lmu) != "name")
lmu = as.character(substitute(lmu))
if (mode(lsd) != "character" && mode(lsd) != "name")
lsd = as.character(substitute(lsd))
- if (!is.Numeric(imethod, allow=1, integer=TRUE, positi=TRUE) || imethod > 2)
- stop("imethod must be 1 or 2")
+ if (!is.Numeric(method.init, allow=1, integer=TRUE, positi=TRUE) ||
+ method.init > 2)
+ stop("'method.init' must be 1 or 2")
new("vglmff",
- blurb=c("Censored univariate normal\n\n",
- "Links: ", namesof("mu", lmu, tag= TRUE), "; ",
- namesof("sd", lsd, tag= TRUE), "\n",
- "Conditional variance: sd^2"),
- constraints=eval(substitute(expression({
+ blurb = c("Censored univariate normal\n\n",
+ "Links: ", namesof("mu", lmu, tag = TRUE), "; ",
+ namesof("sd", lsd, tag = TRUE), "\n",
+ "Conditional variance: sd^2"),
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
y = cbind(y)
- if (ncol(y)>1) stop("the response must be a vector or a 1-column matrix")
+ if (ncol(y) > 1)
+ stop("the response must be a vector or a 1-column matrix")
- if (!length(extra$leftcensored)) extra$leftcensored = rep(FALSE, len=n)
- if (!length(extra$rightcensored)) extra$rightcensored = rep(FALSE, len=n)
+ if (!length(extra$leftcensored))
+ extra$leftcensored = rep(FALSE, len=n)
+ if (!length(extra$rightcensored))
+ extra$rightcensored = rep(FALSE, len=n)
if (any(extra$rightcensored & extra$leftcensored))
stop("some observations are both right and left censored!")
- predictors.names =
- c(namesof("mu", .lmu, tag= FALSE),
- namesof("sd", .lsd, tag= FALSE))
+ predictors.names = c(namesof("mu", .lmu, tag = FALSE),
+ namesof("sd", .lsd, tag = FALSE))
if (!length(etastart)) {
anyc = extra$leftcensored | extra$rightcensored
- i11 = if ( .imethod == 1) anyc else FALSE # can be all data
- junk=if(is.R()) lm.wfit(x=cbind(x[!i11,]),y=y[!i11],w=w[!i11]) else
- lm.wfit(x=cbind(x[!i11,]), y=y[!i11], w=w[!i11],method="qr")
+ i11 = if ( .method.init == 1) anyc else FALSE # can be all data
+ junk = lm.wfit(x=cbind(x[!i11,]),y=y[!i11],w=w[!i11])
sd.y.est = sqrt( sum(w[!i11] * junk$resid^2) / junk$df.residual )
etastart = cbind(mu=y, rep(theta2eta(sd.y.est, .lsd), length=n))
if (any(anyc)) etastart[anyc,1] = x[anyc,,drop=FALSE] %*% junk$coeff
}
- }), list( .lmu=lmu, .lsd=lsd, .imethod=imethod ))),
- inverse=eval(substitute( function(eta, extra=NULL) {
+ }), list( .lmu = lmu, .lsd = lsd, .method.init = method.init ))),
+ inverse = eval(substitute( function(eta, extra = NULL) {
eta2theta(eta[,1], .lmu)
- }, list( .lmu=lmu ))),
- last=eval(substitute(expression({
+ }, list( .lmu = lmu ))),
+ last = eval(substitute(expression({
misc$link = c("mu"= .lmu, "sd"= .lsd)
misc$expected = TRUE
- }), list( .lmu=lmu, .lsd=lsd ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ }), list( .lmu = lmu, .lsd = lsd ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
cenL = extra$leftcensored
cenU = extra$rightcensored
cen0 = !cenL & !cenU # uncensored obsns
@@ -353,11 +361,12 @@ cnormal1 = function(lmu="identity", lsd="loge", imethod=1, zero=2)
ell1 = -log(sd[cen0]) - 0.5 * ((y[cen0] - mum[cen0])/sd[cen0])^2
ell2 = log1p(-pnorm((mum[cenL] - Lower[cenL])/sd[cenL]))
ell3 = log1p(-pnorm(( Upper[cenU] - mum[cenU])/sd[cenU]))
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
sum(w[cen0] * ell1) + sum(w[cenL] * ell2) + sum(w[cenU] * ell3)
- }, list( .lmu=lmu, .lsd=lsd ))),
- vfamily=c("tobit"),
- deriv=eval(substitute(expression({
+ }, list( .lmu = lmu, .lsd = lsd ))),
+ vfamily = c("cnormal1"),
+ deriv = eval(substitute(expression({
cenL = extra$leftcensored
cenU = extra$rightcensored
cen0 = !cenL & !cenU # uncensored obsns
@@ -390,11 +399,11 @@ cnormal1 = function(lmu="identity", lsd="loge", imethod=1, zero=2)
rm(fred21)
}
w * cbind(dl.dmu * dmu.deta, dl.dsd * dsd.deta)
- }), list( .lmu=lmu, .lsd=lsd ))),
- weight=eval(substitute(expression({
+ }), list( .lmu = lmu, .lsd = lsd ))),
+ weight = eval(substitute(expression({
A1 = 1 - pnorm((mum - Lower) / sd) # Lower
A3 = 1 - pnorm(( Upper - mum) / sd) # Upper
- A2 = 1 - A1 - A3 # Middle; uncensored
+ A2 = 1 - A1 - A3 # Middle; uncensored
wz = matrix(0, n, 3)
wz[,iam(1,1,M)] = A2 * 1 / sd^2 # ed2l.dmu2
wz[,iam(2,2,M)] = A2 * 2 / sd^2 # ed2l.dsd2
@@ -406,7 +415,8 @@ cnormal1 = function(lmu="identity", lsd="loge", imethod=1, zero=2)
wz.cenL11 = phiL * (phiL - (1-PhiL)*temp21L) / temp31L
wz.cenL22 = mumL * phiL * ((1-PhiL) * (2 - temp21L^2) +
mumL * phiL / sd) / (sd * temp31L)
- wz.cenL12 = phiL * ((1-PhiL)*(temp21L^2 - 1) - temp21L*phiL) / temp31L
+ wz.cenL12 = phiL * ((1-PhiL)*(temp21L^2 - 1) -
+ temp21L*phiL) / temp31L
wz.cenL11[!is.finite(wz.cenL11)] = 0
wz.cenL22[!is.finite(wz.cenL22)] = 0
wz.cenL12[!is.finite(wz.cenL12)] = 0
@@ -422,7 +432,8 @@ cnormal1 = function(lmu="identity", lsd="loge", imethod=1, zero=2)
wzcenU11 = phiU * (phiU - tmp8) / temp31U
tmp9 = (1-PhiU) * (2 - temp21U^2)
wzcenU22 = mumU * phiU * (tmp9 + mumU * phiU / sd) / (sd * temp31U)
- wzcenU12 = -phiU * ((1-PhiU)*(temp21U^2 - 1) - temp21U*phiU) / temp31U
+ wzcenU12 = -phiU * ((1-PhiU)*(temp21U^2 - 1) -
+ temp21U*phiU) / temp31U
wzcenU11[!is.finite(wzcenU11)] = 0 # Needed when Upper==Inf
wzcenU22[!is.finite(wzcenU22)] = 0 # Needed when Upper==Inf
wzcenU12[!is.finite(wzcenU12)] = 0 # Needed when Upper==Inf
@@ -433,190 +444,220 @@ cnormal1 = function(lmu="identity", lsd="loge", imethod=1, zero=2)
wz[,iam(2,2,M)] = w * wz[,iam(2,2,M)] * dsd.deta^2
wz[,iam(1,2,M)] = w * wz[,iam(1,2,M)] * dmu.deta * dsd.deta
wz
- }), list( .lmu=lmu, .lsd=lsd ))))
+ }), list( .lmu = lmu, .lsd = lsd ))))
}
-crayleigh = function(link="loge", earg = list(), expected=FALSE) {
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.logical(expected) || length(expected) != 1)
- stop("bad input for argument 'expected'")
- if (!is.list(earg)) earg = list()
+
+ crayleigh = function(lscale = "loge", escale = list(),
+ oim = TRUE) {
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if (!is.logical(oim) || length(oim) != 1)
+ stop("bad input for argument 'oim'")
+ if (!is.list(escale)) escale = list()
new("vglmff",
- blurb=c("Censored Rayleigh distribution",
- "f(y) = y*exp(-0.5*(y/a)^2)/a^2, y>0, a>0\n",
- "Link: ",
- namesof("a", link, earg= earg ), "\n", "\n",
- "Mean: a * sqrt(pi / 2)"),
- initialize=eval(substitute(expression({
+ blurb = c("Censored Rayleigh distribution\n\n",
+ "f(y) = y*exp(-0.5*(y/scale)^2)/scale^2, y>0, scale>0\n",
+ "Link: ",
+ namesof("scale", lscale, earg = escale ), "\n", "\n",
+ "Mean: scale * sqrt(pi / 2)"),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
- if (length(extra$leftcensored)) stop("cannot handle left-censored data")
- if (!length(extra$rightcensored)) extra$rightcensored = rep(FALSE, len=n)
- predictors.names = namesof("a", .link, earg= .earg, tag= FALSE)
+
+ if (length(extra$leftcensored))
+ stop("cannot handle left-censored data")
+ if (!length(extra$rightcensored))
+ extra$rightcensored = rep(FALSE, len=n)
+
+ predictors.names =
+ namesof("scale", .lscale, earg = .escale, tag = FALSE)
if (!length(etastart)) {
a.init = (y+1/8) / sqrt(pi/2)
- etastart = theta2eta(a.init, .link, earg= .earg )
+ etastart = theta2eta(a.init, .lscale, earg = .escale )
}
- }), list( .link=link, .earg=earg ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- a = eta2theta(eta, .link, earg= .earg )
- a * sqrt(pi/2)
- }, list( .link=link, .earg=earg ))),
- last=eval(substitute(expression({
- misc$link = c("a"= .link)
- misc$earg = list(a= .earg)
- misc$expected = .expected
- }), list( .link=link, .earg=earg, .expected=expected ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- a = eta2theta(eta, .link, earg= .earg )
+ }), list( .lscale = lscale, .escale = escale ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ Scale = eta2theta(eta, .lscale, earg = .escale )
+ Scale * sqrt(pi/2)
+ }, list( .lscale = lscale, .escale = escale ))),
+ last = eval(substitute(expression({
+ misc$link = c("scale" = .lscale)
+ misc$earg = list("scale" = .escale)
+ misc$oim = .oim
+ }), list( .lscale = lscale, .escale = escale,
+ .oim = oim ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
+ Scale = eta2theta(eta, .lscale, earg = .escale )
cen0 = !extra$rightcensored # uncensored obsns
cenU = extra$rightcensored
- if (residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w[cen0]*(log(y[cen0]) - 2*log(a[cen0]) - 0.5*(y[cen0]/a[cen0])^2)) -
- 0.5 * sum(w[cenU] * (y[cenU]/a[cenU])^2)
- }, list( .link=link, .earg=earg ))),
- vfamily=c("crayleigh"),
- deriv=eval(substitute(expression({
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(w[cen0] * (log(y[cen0]) - 2*log(Scale[cen0]) -
+ 0.5*(y[cen0]/Scale[cen0])^2)) -
+ sum(w[cenU] * (y[cenU]/Scale[cenU])^2) * 0.5
+ }, list( .lscale = lscale, .escale = escale ))),
+ vfamily = c("crayleigh"),
+ deriv = eval(substitute(expression({
cen0 = !extra$rightcensored # uncensored obsns
cenU = extra$rightcensored
- a = eta2theta(eta, .link, earg= .earg )
- dl.da = ((y/a)^2 - 2) / a
- da.deta = dtheta.deta(a, .link, earg= .earg )
- dl.da[cenU] = y[cenU]^2 / a[cenU]^3
- w * dl.da * da.deta
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
- ed2l.da2 = 4 / a^2
- wz = da.deta^2 * ed2l.da2
- if ( .expected) {
- ed2l.da2[cenU] = 6 / (a[cenU])^2
- wz[cenU] = (da.deta[cenU])^2 * ed2l.da2[cenU]
+ Scale = eta2theta(eta, .lscale, earg = .escale )
+ dl.dScale = ((y/Scale)^2 - 2) / Scale
+ dScale.deta = dtheta.deta(Scale, .lscale, earg = .escale )
+ dl.dScale[cenU] = y[cenU]^2 / Scale[cenU]^3
+ w * dl.dScale * dScale.deta
+ }), list( .lscale = lscale, .escale = escale ))),
+ weight = eval(substitute(expression({
+ ed2l.dScale2 = 4 / Scale^2
+ wz = dScale.deta^2 * ed2l.dScale2
+ if ( .oim ) {
+ d2l.dScale2 = 3 * (y[cenU])^2 / (Scale[cenU])^4
+ d2Scale.deta2 = d2theta.deta2(Scale[cenU], .lscale, earg = .escale )
+ wz[cenU] = (dScale.deta[cenU])^2 * d2l.dScale2 - dl.dScale[cenU] * d2Scale.deta2
} else {
- d2l.da2 = 3 * (y[cenU])^2 / (a[cenU])^4
- d2a.deta2 = d2theta.deta2(a[cenU], .link, earg= .earg )
- wz[cenU] = (da.deta[cenU])^2 * d2l.da2 - dl.da[cenU] * d2a.deta2
+ ed2l.dScale2[cenU] = 6 / (Scale[cenU])^2
+ wz[cenU] = (dScale.deta[cenU])^2 * ed2l.dScale2[cenU]
}
w * wz
- }), list( .link=link, .earg=earg, .expected=expected ))))
+ }), list( .lscale = lscale, .escale = escale,
+ .oim = oim ))))
}
-weibull =
-weibull.sev = function(lshape="loge", lscale="loge",
- eshape=list(), escale=list(),
- ishape=NULL, iscale=NULL,
+
+
+
+
+
+
+ weibull =
+ weibull.sev = function(lshape = "loge", lscale = "loge",
+ eshape = list(), escale = list(),
+ ishape = NULL, iscale = NULL,
nrfs = 1,
- imethod=1, zero=2)
+ method.init = 1, zero = 2)
{
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument 'zero'")
- if (!is.Numeric(imethod, allow=1, integer=TRUE, positi=TRUE) || imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
- if (!is.list(eshape)) eshape = list()
- if (!is.list(escale)) escale = list()
- if (!is.Numeric(nrfs, allow=1) || nrfs<0 || nrfs > 1)
- stop("bad input for 'nrfs'")
-
- new("vglmff",
- blurb=c("Weibull distribution\n\n",
+ if (mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument 'zero'")
+ if (!is.Numeric(method.init, allow=1, integer=TRUE, positi=TRUE) ||
+ method.init > 2)
+ stop("argument 'method.init' must be 1 or 2")
+ if (!is.list(eshape)) eshape = list()
+ if (!is.list(escale)) escale = list()
+ if (!is.Numeric(nrfs, allow=1) || nrfs < 0 || nrfs > 1)
+ stop("bad input for 'nrfs'")
+
+ new("vglmff",
+ blurb = c("Weibull distribution\n\n",
"Links: ",
- namesof("shape", lshape, earg= eshape), ", ",
- namesof("scale", lscale, earg= escale), "\n",
+ namesof("shape", lshape, earg = eshape), ", ",
+ namesof("scale", lscale, earg = escale), "\n",
"Mean: scale * gamma(1 + 1/shape)\n",
- "Variance: scale^2 * (gamma(1 + 2/shape) - gamma(1 + 1/shape)^2)"),
- constraints=eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
- y = cbind(y)
- if (ncol(y)>1) stop("the response must be a vector or a 1-column matrix")
+ "Variance: scale^2 * (gamma(1 + 2/shape) - ",
+ "gamma(1 + 1/shape)^2)"),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ y = cbind(y)
+ if (ncol(y)>1)
+ stop("the response must be a vector or a 1-column matrix")
+
+ if (is.SurvS4(y))
+ stop("only uncensored observations are allowed; don't use SurvS4()")
+
+ predictors.names =
+ c(namesof("shape", .lshape, earg = .eshape, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale, tag = FALSE))
+
+ if (!length(.ishape) || !length(.iscale)) {
+ anyc = FALSE # extra$leftcensored | extra$rightcensored
+ i11 = if ( .method.init == 1) anyc else FALSE # can be all data
+ qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
+ init.shape = if (length( .ishape)) .ishape else 1
+ xvec = log(-log1p(-qvec))
+ fit0 = lsfit(x=xvec, y=log(quantile(y[!i11], qvec)))
+ }
- if (is.SurvS4(y))
- stop("only uncensored observations are allowed; don't use SurvS4()")
+ if (!length(etastart)) {
+ shape = rep(if(length(.ishape)) .ishape else 1/fit0$coef["X"],len=n)
+ scale = rep(if(length(.iscale)) .iscale else
+ exp(fit0$coef["Intercept"]), len=n)
+ etastart =
+ cbind(theta2eta(shape, .lshape, earg = .eshape ),
+ theta2eta(scale, .lscale, earg = .escale ))
+ }
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape,
+ .iscale = iscale, .ishape = ishape,
+ .method.init = method.init ) )),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ shape = eta2theta(eta[,1], .lshape, earg = .eshape )
+ scale = eta2theta(eta[,2], .lscale, earg = .escale )
+ scale * gamma(1+1/shape)
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ) )),
+ last = eval(substitute(expression({
+ if (regnotok <- any(shape <= 2))
+ warning("MLE regularity conditions are violated ",
+ "(shape <= 2) at the final iteration")
+
+
+ misc$link = c(shape = .lshape, scale = .lscale)
+ misc$earg = list(shape = .eshape, scale = .escale)
+ misc$nrfs = .nrfs
+ misc$RegCondOK = !regnotok # Save this for later
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape, .nrfs = nrfs ) )),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE,eta, extra = NULL) {
+ shape = eta2theta(eta[,1], .lshape, earg = .eshape )
+ scale = eta2theta(eta[,2], .lscale, earg = .escale )
+ ell1 = (log(shape) - log(scale) + (shape-1) *
+ log(y/scale) - (y / scale)^shape)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(w * ell1)
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ) )),
+ vfamily = c("weibull.sev"),
+ deriv = eval(substitute(expression({
+ shape = eta2theta(eta[,1], .lshape, earg = .eshape )
+ scale = eta2theta(eta[,2], .lscale, earg = .escale )
+ dl.dshape = 1/shape + log(y/scale) - log(y/scale) * (y/scale)^shape
+ dl.dscale = (shape/scale) * (-1 + (y/scale)^shape)
+ dshape.deta = dtheta.deta(shape, .lshape, earg = .eshape )
+ dscale.deta = dtheta.deta(scale, .lscale, earg = .escale )
+ w * cbind( dl.dshape * dshape.deta, dl.dscale * dscale.deta )
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ) )),
+ weight = eval(substitute(expression({
+ EulerM = -digamma(1.0)
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+
+
+ ed2l.dshape = (6*(EulerM-1)^2 +pi^2)/(6*shape^2) # Kleiber&Kotz (2003)
+ ed2l.dscale = (shape/scale)^2
+ ed2l.dshapescale = (EulerM-1)/scale
+ wz[,iam(1,1,M)] = ed2l.dshape * dshape.deta^2
+ wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
+ wz[,iam(1,2,M)] = ed2l.dshapescale * dscale.deta * dshape.deta
+
+ wz = w * wz
+ wz
+ }), list( .eshape = eshape, .nrfs = nrfs ))))
+}
- predictors.names =
- c(namesof("shape", .lshape, earg= .eshape, tag=FALSE),
- namesof("scale", .lscale, earg= .escale, tag=FALSE))
- if (!length(.ishape) || !length(.iscale)) {
- anyc = FALSE # extra$leftcensored | extra$rightcensored
- i11 = if ( .imethod == 1) anyc else FALSE # can be all data
- qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
- init.shape = if (length( .ishape)) .ishape else 1
- xvec = log(-log1p(-qvec))
- fit0 = lsfit(x=xvec, y=log(quantile(y[!i11], qvec)))
- }
- if (!length(etastart)) {
- shape = rep(if(length(.ishape)) .ishape else 1/fit0$coef["X"],len=n)
- scale = rep(if(length(.iscale)) .iscale else
- exp(fit0$coef["Intercept"]), len=n)
- etastart =
- cbind(theta2eta(shape, .lshape, earg= .eshape ),
- theta2eta(scale, .lscale, earg= .escale ))
- }
- }), list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape,
- .iscale=iscale, .ishape=ishape, .imethod=imethod ) )),
- inverse=eval(substitute(function(eta, extra=NULL) {
- shape = eta2theta(eta[,1], .lshape, earg= .eshape )
- scale = eta2theta(eta[,2], .lscale, earg= .escale )
- scale * gamma(1+1/shape)
- }, list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape ) )),
- last=eval(substitute(expression({
- if (regnotok <- any(shape <= 2))
- warning(paste("MLE regularity conditions are violated",
- "(shape <= 2) at the final iteration"))
- misc$link = c(shape= .lshape, scale= .lscale)
- misc$earg= list(shape= .eshape, scale= .escale)
- misc$nrfs = .nrfs
- misc$RegCondOK = !regnotok # Save this for later
- }), list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape, .nrfs=nrfs ) )),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
- shape = eta2theta(eta[,1], .lshape, earg= .eshape )
- scale = eta2theta(eta[,2], .lscale, earg= .escale )
- ell1 = (log(shape) - log(scale) + (shape-1) *
- log(y/scale) - (y / scale)^shape)
- if (residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * ell1)
- }, list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape ) )),
- vfamily=c("weibull.sev"),
- deriv=eval(substitute(expression({
- shape = eta2theta(eta[,1], .lshape, earg= .eshape )
- scale = eta2theta(eta[,2], .lscale, earg= .escale )
- dl.dshape = 1/shape + log(y/scale) - log(y/scale) * (y/scale)^shape
- dl.dscale = (shape/scale) * (-1 + (y/scale)^shape)
- dshape.deta = dtheta.deta(shape, .lshape, earg= .eshape )
- dscale.deta = dtheta.deta(scale, .lscale, earg= .escale )
- w * cbind( dl.dshape * dshape.deta, dl.dscale * dscale.deta )
- }), list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape ) )),
- weight=eval(substitute(expression({
- EulerM = -digamma(1.0)
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
- ed2l.dshape = (6*(EulerM-1)^2 +pi^2)/(6*shape^2) # Kleiber & Kotz (2003)
- ed2l.dscale = (shape/scale)^2
- ed2l.dshapescale = (EulerM-1)/scale
- wz[,iam(1,1,M)] = ed2l.dshape * dshape.deta^2
- wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
- wz[,iam(1,2,M)] = ed2l.dshapescale * dscale.deta * dshape.deta
- wz = w * wz
- wz
- }), list( .eshape=eshape, .nrfs=nrfs ))))
-}
@@ -793,6 +834,153 @@ setMethod("show", "SurvS4",
+if (FALSE)
+ weibullff = function(lscale = "loge", lshape = "loge",
+ escale = list(), eshape = list(),
+ iscale = NULL, ishape = NULL,
+ nrfs = 1,
+ method.init = 1, zero = 1)
+{
+
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if (mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+
+ if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument 'zero'")
+
+ if (!is.Numeric(method.init, allow=1, integer=TRUE, positi=TRUE) ||
+ method.init > 2)
+ stop("argument 'method.init' must be 1 or 2")
+
+ if (!is.list(eshape)) eshape = list()
+ if (!is.list(escale)) escale = list()
+ if (!is.Numeric(nrfs, allow=1) || nrfs < 0 || nrfs > 1)
+ stop("bad input for 'nrfs'")
+
+ new("vglmff",
+ blurb = c("Weibull distribution\n\n",
+ "Links: ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape", lshape, earg = eshape), "\n",
+ "Mean: scale * gamma(1 + 1/shape)\n",
+ "Variance: scale^2 * (gamma(1 + 2/shape) - ",
+ "gamma(1 + 1/shape)^2)"),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ y = cbind(y)
+ if (ncol(y)>1)
+ stop("the response must be a vector or a 1-column matrix")
+
+ if (is.SurvS4(y))
+ stop("only uncensored observations are allowed; don't use SurvS4()")
+
+ predictors.names =
+ c(namesof("scale", .lscale, earg = .escale, tag = FALSE),
+ namesof("shape", .lshape, earg = .eshape, tag = FALSE))
+
+ if (!length(.ishape) || !length(.iscale)) {
+ anyc = FALSE # extra$leftcensored | extra$rightcensored
+ i11 = if ( .method.init == 1) anyc else FALSE # can be all data
+ qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
+ init.shape = if (length( .ishape)) .ishape else 1
+ xvec = log(-log1p(-qvec))
+ fit0 = lsfit(x=xvec, y=log(quantile(y[!i11], qvec)))
+ }
+
+ if (!length(etastart)) {
+ shape = rep(if(length(.ishape)) .ishape else 1/fit0$coef["X"],len=n)
+ scale = rep(if(length(.iscale)) .iscale else
+ exp(fit0$coef["Intercept"]), len=n)
+ etastart =
+ cbind(theta2eta(scale, .lscale, earg = .escale ),
+ theta2eta(shape, .lshape, earg = .eshape ))
+ }
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape,
+ .iscale = iscale, .ishape = ishape,
+ .method.init = method.init ) )),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ scale = eta2theta(eta[,1], .lscale, earg = .escale )
+ shape = eta2theta(eta[,2], .lshape, earg = .eshape )
+ scale * gamma(1+1/shape)
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ) )),
+ last = eval(substitute(expression({
+ if (regnotok <- any(shape <= 2))
+ warning("MLE regularity conditions are violated ",
+ "(shape <= 2) at the final iteration")
+
+
+ misc$link = c(scale = .lscale, shape = .lshape)
+ misc$earg = list(scale = .escale, shape = .eshape)
+ misc$nrfs = .nrfs
+ misc$RegCondOK = !regnotok # Save this for later
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape, .nrfs = nrfs ) )),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE,eta, extra = NULL) {
+ scale = eta2theta(eta[,1], .lscale, earg = .escale )
+ shape = eta2theta(eta[,2], .lshape, earg = .eshape )
+ ell2 = dweibull(x = y, shape = shape, scale = scale, log = TRUE)
+
+
+ ell1 = (log(shape) - log(scale) + (shape-1) *
+ log(y/scale) - (y / scale)^shape)
+
+ print("max(abs(ell1 - ell2))")
+ print( max(abs(ell1 - ell2)) )
+
+
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(w * ell1)
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ) )),
+ vfamily = c("weibullff"),
+ deriv = eval(substitute(expression({
+ scale = eta2theta(eta[,1], .lscale, earg = .escale )
+ shape = eta2theta(eta[,2], .lshape, earg = .eshape )
+
+ dl.dshape = 1/shape + log(y/scale) - log(y/scale) * (y/scale)^shape
+ dl.dscale = (shape/scale) * (-1 + (y/scale)^shape)
+
+ dshape.deta = dtheta.deta(shape, .lshape, earg = .eshape )
+ dscale.deta = dtheta.deta(scale, .lscale, earg = .escale )
+
+ w * cbind( dl.dscale * dscale.deta,
+ dl.dshape * dshape.deta)
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ) )),
+ weight = eval(substitute(expression({
+ EulerM = -digamma(1.0)
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+
+
+ ed2l.dshape = (6*(EulerM-1)^2 +pi^2)/(6*shape^2) # Kleiber&Kotz (2003)
+ ed2l.dscale = (shape/scale)^2
+ ed2l.dshapescale = (EulerM-1)/scale
+ wz[,iam(1,1,M)] = ed2l.dscale * dscale.deta^2
+ wz[,iam(2,2,M)] = ed2l.dshape * dshape.deta^2
+ wz[,iam(1,2,M)] = ed2l.dshapescale * dscale.deta * dshape.deta
+
+ wz = w * wz
+ wz
+ }), list( .eshape = eshape, .nrfs = nrfs ))))
+}
+
+
+
+
+
+
+
+
+
+
diff --git a/R/family.circular.R b/R/family.circular.R
index 3dc9d80..e32284b 100644
--- a/R/family.circular.R
+++ b/R/family.circular.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -160,7 +161,8 @@ cardioid.control <- function(save.weight=TRUE, ...)
function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
mu = eta2theta(eta[,1], link= .lmu, earg= .emu)
rho = eta2theta(eta[,2], link= .lrho, earg= .erho)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
sum(w * dcard(x=y, mu=mu, rho=rho, log=TRUE))
}
}, list( .lmu=lmu, .lrho=lrho,
@@ -269,7 +271,8 @@ cardioid.control <- function(save.weight=TRUE, ...)
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
location = eta2theta(eta[,1], .llocation, earg= .elocation)
Scale = eta2theta(eta[,2], .lscale, earg= .escale)
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
sum(w * (Scale * cos(y - location) -
log(mbesselI0(x=Scale ))))
}, list( .escale=escale, .lscale=lscale,
diff --git a/R/family.exp.R b/R/family.exp.R
index ba5fe48..107a844 100644
--- a/R/family.exp.R
+++ b/R/family.exp.R
@@ -1,36 +1,18 @@
-# These functions are Copyright (C) 1998-2010 T. W. Yee All rights reserved.
-
-# Families for expectile regression are put in this file
-# 20100324;
-# Last modified: 20100324, 20100326, 20100329, 20100331,
-
-# Yet to do:
-# 1. lms.bcn(expectiles = FALSE). If lms.bcn(expectiles = TRUE) then
-# expectiles, and not quantiles, are the fitted values.
-# This is LMS-BCN expectile regression, a new method.
-# 2. Improve the approximations (initial values) for each of the
-# three distributions. See the zzs below.
-# 3. For peunif(q) etc.: use e or q as first argument??
-# For deunif(x) etc.: use e or x as first argument??
-# For qeunif(x) etc.: rename to eeunif(x)?
-
-# Done:
-# 1. For norm, exp and unif distributions:
-# qenorm(0.25) returns the 0.25-expectile of a standard normal,
-# penorm(1.25) returns the tau (in (0,1)) for an expectile of 1.25.
-# This is based on the paper by M C Jones (1994) in Stat Prob Letters.
-
-# Notes:
-# 1.
-
-# ======================================================================
-# Expectiles for uniform distribution ----------------------------------
-# 20100324
-# The [et]norm() here were adapted from MC Jones paper.
+# These functions are
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+
+
+
+
+
+
+
qeunif <- function(p, min = 0, max = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
-# Using Newton-Raphson may be a problem at the boundaries.
-# The secant method may be better.
ppp = p
vsmallno = sqrt(.Machine$double.eps)
@@ -41,39 +23,21 @@ qeunif <- function(p, min = 0, max = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
stop("argument 'Tol_nr' is not a single positive value, or is too large")
nrok = ppp >= vsmallno & ppp <= 1.0 - vsmallno & is.finite(ppp)
-# A beta function seems to approximate it ok near the middle.
-# This can be improved zz.
eee = qbeta(ppp, shape1 = 3, shape2 = 3)
-# A different quadratic fits each boundary well (asymptotic expansion).
eee[ppp < smallno] = sqrt(ppp[ppp < smallno])
eee[ppp > 1.0 - smallno] = 1.0 - sqrt(1.0 - ppp[ppp > 1.0 - smallno])
-#lines(ppp, eee, col="purple", type="b")
-#print("initial eee")
-#isample = sample(length(eee))
-#isample = 1:length(eee)
-#print( head(eee[isample]) )
-#print( (eee[isample]) )
-#cat("\n")
for(iii in 1:Maxit_nr) {
realdiff <- (peunif(eee[nrok]) - ppp[nrok]) / deunif(eee[nrok])
-# #print("max(abs(realdiff))")
-# #print( max(abs(realdiff)) )
eee[nrok] = eee[nrok] - realdiff
-# cat("Iteration ", iii, "\n")
-# #print( head(eee[isample]) )
-# #print( (eee[isample]) )
-# cat("\n")
if (all(abs(realdiff) / (1.0 + abs(realdiff)) < Tol_nr )) break
if (iii == Maxit_nr) warning("did not converge")
}
-# Check again (on the standard uniform distribution);
if (max(abs(peunif(eee[nrok]) - ppp[nrok])) > Tol_nr)
warning("did not converge on the second check")
-# zz; Needs checking, esp. near the boundary of 1.0:
eee[ppp < vsmallno] = sqrt( ppp[ppp < vsmallno])
eee[ppp > 1.0 - vsmallno] = 1.0 - sqrt(1.0 - ppp[ppp > 1.0 - vsmallno])
eee[ppp == 0] = 0
@@ -85,8 +49,6 @@ qeunif <- function(p, min = 0, max = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
peunif <- function(q, min = 0, max = 1, log = FALSE) {
-# zz use e or x ??
-# This is G(y).
if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
rm(log)
if (any(min >= max))
@@ -109,7 +71,6 @@ peunif <- function(q, min = 0, max = 1, log = FALSE) {
deunif <- function(x, min = 0, max = 1, log = FALSE) {
-# This is g(x).
if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
rm(log)
if (any(min >= max))
@@ -126,8 +87,6 @@ deunif <- function(x, min = 0, max = 1, log = FALSE) {
gunif <- function(y)
as.numeric(y >= 0 & y <= 1) * 2*y*(1-y) / (2*y*(1-y) - 1)^2
ans = gunif(eee) / (max - min)
-# ans[eee < 0.0] = 0.0
-# ans[eee > 1.0] = 0.0
}
ans
}
@@ -145,50 +104,29 @@ reunif <- function(n, min = 0, max = 1) {
-# ======================================================================
-# Expectiles for normal distribution -----------------------------------
-# 20100324
-# The [et]norm() here were adapted from MC Jones paper.
qenorm <- function(p, mean = 0, sd = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
ppp = p
if (!is.Numeric( Tol_nr, allow = 1, posit = TRUE) || Tol_nr > 0.10)
- stop("argument 'Tol_nr' is not a single positive value, or is too large")
-# if (!is.Numeric( sd, posit = TRUE))
-# stop("argument 'sd' must contain positive values")
+ stop("argument 'Tol_nr' is not a single ",
+ "positive value, or is too large")
nrok = is.finite(ppp)
-# A N(0, sd = 2/3) approximation is good according to the paper.
eee = qnorm(ppp, sd = 2/3)
-# lines(ppp, eee, col="purple", type="b")
-##print("initial eee")
-#isample = sample(length(eee))
-#isample = 1:length(eee)
-##print( head(eee[isample]) )
-##print( (eee[isample]) )
-# cat("\n")
gnorm = function(y) dnorm(y) / (y * (1-2*pnorm(y)) - 2*dnorm(y))^2
for(iii in 1:Maxit_nr) {
realdiff <- (penorm(eee[nrok]) - ppp[nrok]) / gnorm(eee[nrok])
-# #print("max(abs(realdiff))")
-# #print( max(abs(realdiff)) )
eee[nrok] = eee[nrok] - realdiff
-# cat("Iteration ", iii, "\n")
-# #print( head(eee[isample]) )
-# #print( (eee[isample]) )
-# cat("\n")
if (all(abs(realdiff) / (1.0 + abs(realdiff)) < Tol_nr )) break
if (iii == Maxit_nr) warning("did not converge")
}
-# Check again (on the standard normal distribution);
if (max(abs(penorm(eee[nrok]) - ppp[nrok])) > Tol_nr)
warning("did not converge on the second check")
-# zz; Needs checking, esp. near the boundary of 1.0:
eee[ppp == 0] = -Inf
eee[ppp == 1] = Inf
eee[ppp < 0] = NA
@@ -218,7 +156,6 @@ penorm <- function(q, mean = 0, sd = 1, log = FALSE) {
denorm <- function(x, mean = 0, sd = 1, log = FALSE) {
-# This is g(x).
if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
rm(log)
@@ -248,10 +185,6 @@ renorm <- function(n, mean = 0, sd = 1) {
-# ======================================================================
-# Expectiles for exponential distribution ------------------------------
-# 20100324
-# The [et]exp() here were adapted from MC Jones paper.
qeexp <- function(p, rate = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
@@ -261,44 +194,25 @@ qeexp <- function(p, rate = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
stop("argument 'Tol_nr' is not a single positive value, or is too large")
nrok = ppp >= vsmallno & is.finite(ppp)
-# 20100401; An approximation: (zz improve this!!)
-# eee = qf(0.8 * ppp, df1 = 4.0, df2 = 44) * 1.5
-# 20100408; This is a piecewise approximation, and looks ok.
eee = qf(1.0 * ppp, df1 = 4.0, df2 = 44)
if ( any(rangex <- ppp < 0.8) )
- eee[rangex] = qrayleigh(ppp[rangex], a = 0.8)
+ eee[rangex] = qrayleigh(ppp[rangex], scale = 0.8)
-# A different quadratic fits each boundary well (asymptotic expansion). zz
eee[ppp < vsmallno] = sqrt(ppp[ppp < vsmallno])
-#lines(ppp,eee,col="purple",type="b") # See what the initial values were like
-##print("initial eee")
-#isample = sample(length(eee))
-#isample = 1:length(eee)
-##print( head(eee[isample]) )
-##print( (eee[isample]) )
-##cat("\n")
for(iii in 1:Maxit_nr) {
realdiff <- (peexp(eee[nrok]) - ppp[nrok]) / deexp(eee[nrok])
-# #print("max(abs(realdiff))")
-# #print( max(abs(realdiff)) )
eee[nrok] = eee[nrok] - realdiff
-# cat("Iteration ", iii, "\n")
-# #print( head(eee[isample]) )
-# #print( (eee[isample]) )
-# cat("\n")
if (all(abs(realdiff) / (1.0 + abs(realdiff)) < Tol_nr )) break
if (iii == Maxit_nr) warning("did not converge")
}
-# Check again (on the standard exponential distribution);
if (max(abs(peexp(eee[nrok]) - ppp[nrok])) > Tol_nr)
warning("did not converge on the second check")
-# zz; Needs checking, esp. near the boundary of 1.0:
eee[ppp < vsmallno] = sqrt(ppp[ppp < vsmallno])
eee[ppp == 0] = 0
eee[ppp == 1] = Inf
@@ -315,13 +229,11 @@ peexp <- function(q, rate = 1, log = FALSE) {
eee = q * rate
if (log.arg) {
tmp1 = -expm1(-eee) - eee
-# logGofy = log(tmp1) - log(2 * tmp1 + eee - 1.0)
logGofy = log1p(- eee - exp(-eee)) - log(2 * tmp1 + eee - 1.0)
logGofy[eee < 0] = log(0.0)
logGofy[eee >= Inf] = log(1.0)
logGofy
} else {
-# tmp1 = 1 - eee - exp(-eee)
tmp1 = -expm1(-eee) - eee
Gofy = tmp1 / (2 * tmp1 + eee - 1.0)
Gofy[eee < 0] = 0.0
@@ -333,7 +245,6 @@ peexp <- function(q, rate = 1, log = FALSE) {
deexp <- function(x, rate = 1, log = FALSE) {
-# This is g(x).
if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
rm(log)
if (any(rate <= 0))
@@ -362,8 +273,223 @@ reexp <- function(n, rate = 1) {
}
-# ======================================================================
-# ======================================================================
+
+dkoenker <- function(x, location = 0, scale = 1, log = FALSE) {
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ zedd <- (x - location) / scale
+ zedd[scale <= 0] <- NaN
+
+ if (log.arg) {
+ log(0.25) - 1.5 * log1p((zedd / 2)^2) - log(scale)
+ } else {
+ 2 / (scale * (4 + zedd^2)^1.5)
+ }
+}
+
+
+pkoenker <- function(q, location = 0, scale = 1, log = FALSE) {
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ zedd <- (q - location) / scale
+ zedd[scale <= 0] <- NaN
+
+ if (log.arg) {
+ -log(2) + log1p(zedd / sqrt(4 + zedd^2))
+ } else {
+ 0.5 * (1 + zedd / sqrt(4 + zedd^2))
+ }
+}
+
+
+
+
+qkoenker <- function(p, location = 0, scale = 1) {
+
+ answer <- -2 * (1 - 2*p) / sqrt(1 - (1 - 2*p)^2)
+ answer[p < 0] <- NaN
+ answer[p > 1] <- NaN
+ answer[p == 0] <- -Inf
+ answer[p == 1] <- +Inf
+
+ answer <- answer * scale + location
+ answer[scale <= 0] <- NaN
+ answer
+}
+
+
+
+rkoenker <- function(n, location = 0, scale = 1) {
+ answer <- qkoenker(runif(n)) * scale + location
+ answer[scale <= 0] <- NaN
+ answer
+}
+
+
+
+
+ koenker <- function(percentile = 50,
+ llocation = "identity", lscale = "loge",
+ elocation = list(), escale = list(),
+ ilocation = NULL, iscale = NULL,
+ method.init = 1,
+ zero = 2)
+{
+
+
+
+
+
+ llocat = llocation
+ elocat = elocation
+ ilocat = ilocation
+
+ if (mode(llocat) != "character" && mode(llocat) != "name")
+ llocat <- as.character(substitute(llocat))
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale <- as.character(substitute(lscale))
+ if (length(ilocat) &&
+ (!is.Numeric(ilocat, allow = 1, positive = TRUE)))
+ stop("bad input for argument 'ilocation'")
+ if (length(iscale) && !is.Numeric(iscale))
+ stop("bad input for argument 'iscale'")
+
+ if (!is.list(elocat)) elocat = list()
+ if (!is.list(escale)) escale = list()
+
+ if (!is.Numeric(percentile, posit = TRUE) ||
+ any(percentile >= 100))
+ stop("bad input for argument 'percentile'")
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+ method.init > 2)
+ stop("'method.init' must be 1 or 2")
+
+ new("vglmff",
+ blurb = c("Koenker distribution\n\n",
+ "Links: ",
+ namesof("location", llocat, earg = elocat, tag = FALSE), ", ",
+ namesof("scale", lscale, earg = escale, tag = FALSE), "\n\n",
+ "Mean: location\n",
+ "Variance: infinite"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (ncol(y <- cbind(y)) != 1)
+ stop("the response must be a vector or one-column matrix")
+
+ predictors.names <- c(
+ namesof("location", .llocat, earg = .elocat, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale, tag = FALSE))
+ if (!length(etastart)) {
+
+ locat.init <- if ( .method.init == 2) {
+ weighted.mean(y, w)
+ } else {
+ median(y)
+ }
+ Scale.init <- if (length( .iscale )) .iscale else
+ diff(quantile(y, prob = c(0.25, 0.75))) / (2 * 1.155) + 1.0e-5
+ locat.init <- rep(locat.init, length = length(y))
+ Scale.init <- rep(Scale.init, length = length(y))
+ etastart <- cbind(theta2eta(locat.init, .llocat, earg = .elocat),
+ theta2eta(Scale.init, .lscale, earg = .escale))
+ }
+ }), list( .llocat = llocat, .lscale = lscale,
+ .ilocat = ilocat, .iscale = iscale,
+ .elocat = elocat, .escale = escale,
+ .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL){
+ Perce <- .percentile
+ locat <- eta2theta(eta[, 1], link = .llocat, earg = .elocat)
+ Scale <- eta2theta(eta[, 2], link = .lscale, earg = .escale)
+ answer <- matrix(locat, nrow(eta), length(Perce))
+ for (ii in 1:length(Perce))
+ answer[, ii] <- qkoenker(Perce[ii] / 100, loc = locat, sc = Scale)
+ dimnames(answer) <- list(dimnames(eta)[[1]],
+ paste(as.character(Perce), "%", sep = ""))
+ answer
+ }, list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale,
+ .percentile = percentile ))),
+ last = eval(substitute(expression({
+ misc$link <- c("location" = .llocat, "scale" = .lscale)
+ misc$earg <- list("location" = .elocat, "scale" = .escale)
+ misc$expected <- TRUE
+ misc$percentile <- .percentile
+ misc$method.init <- .method.init
+
+ ncoly <- ncol(y)
+ for(ii in 1:length( .percentile )) {
+ y.use <- if (ncoly > 1) y[, ii] else y
+ mu <- cbind(mu)
+ extra$percentile[ii] = 100 * weighted.mean(y.use <= mu[, ii], w)
+ }
+ names(extra$percentile) = colnames(mu)
+ }), list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale,
+ .method.init = method.init, .percentile = percentile ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ locat <- eta2theta(eta[, 1], link = .llocat, earg = .elocat)
+ Scale <- eta2theta(eta[, 2], link = .lscale, earg = .escale)
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ sum(w * dkoenker(x = y, location = locat, scale = Scale, log = TRUE))
+ }
+ }, list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale ))),
+ vfamily = c("koenker"),
+ deriv = eval(substitute(expression({
+ locat <- eta2theta(eta[, 1], link = .llocat, earg = .elocat)
+ Scale <- eta2theta(eta[, 2], link = .lscale, earg = .escale)
+ dlocat.deta <- dtheta.deta(locat, link = .llocat, earg = .elocat)
+ dscale.deta <- dtheta.deta(Scale, link = .lscale, earg = .escale)
+
+ zedd <- (y - locat) / Scale
+
+ dl.dlocat <- 3 * zedd / (Scale * (4 + zedd^2))
+ dl.dscale <- 3 * zedd^2 / (Scale * (4 + zedd^2)) - 1 / Scale
+
+ w * cbind(dl.dlocat * dlocat.deta,
+ dl.dscale * dscale.deta)
+ }), list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale ))),
+ weight = eval(substitute(expression({
+ ed2l.dlocat2 <- 0.3 / Scale^2
+ ed2l.dscale2 <- 2.0 / (3 * Scale^2)
+
+ wz <- matrix(-10, n, M) # Diagonal EIM
+ wz[, iam(1, 1, M = M)] <- ed2l.dlocat2 * dlocat.deta^2
+ wz[, iam(2, 2, M = M)] <- ed2l.dscale2 * dscale.deta^2
+
+ w * wz
+ }), list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale ))))
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/R/family.extremes.R b/R/family.extremes.R
index 980924f..da4f622 100644
--- a/R/family.extremes.R
+++ b/R/family.extremes.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -11,23 +12,24 @@
-rgev = function(n, location=0, scale=1, shape=0) {
+rgev <- function(n, location = 0, scale = 1, shape = 0) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
stop("bad input for argument 'n'") else n
if (!is.Numeric(location))
stop("bad input for argument argument 'location'")
- if (!is.Numeric(shape)) stop("bad input for argument argument 'shape'")
+ if (!is.Numeric(shape))
+ stop("bad input for argument argument 'shape'")
ans = numeric(use.n)
- shape = rep(shape, len=use.n); location = rep(location, len=use.n);
- scale = rep(scale, len=use.n)
+ shape = rep(shape, len = use.n); location = rep(location, len = use.n);
+ scale = rep(scale, len = use.n)
scase = abs(shape) < sqrt(.Machine$double.eps)
nscase = sum(scase)
if (use.n - nscase)
ans[!scase] = location[!scase] + scale[!scase] *
- ((-log(runif(use.n-nscase)))^(-shape[!scase]) -1) / shape[!scase]
+ ((-log(runif(use.n-nscase)))^(-shape[!scase]) -1) / shape[!scase]
if (nscase)
ans[scase] = rgumbel(nscase, location[scase], scale[scase])
ans[scale <= 0] = NaN
@@ -36,23 +38,23 @@ rgev = function(n, location=0, scale=1, shape=0) {
-dgev = function(x, location=0, scale=1, shape=0, log = FALSE,
+dgev <- function(x, location = 0, scale = 1, shape = 0, log = FALSE,
tolshape0 = sqrt(.Machine$double.eps),
- oobounds.log = -Inf, giveWarning=FALSE) {
+ oobounds.log = -Inf, giveWarning = FALSE) {
if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
rm(log)
if (oobounds.log > 0)
stop("bad input for argument 'oobounds.log'")
- if (!is.Numeric(tolshape0, allow=1, posit=TRUE))
+ if (!is.Numeric(tolshape0, allow = 1, posit = TRUE))
stop("bad input for argument 'tolshape0'")
use.n = max(length(x), length(location), length(scale), length(shape))
- shape = rep(shape, len=use.n); location = rep(location, len=use.n);
- scale = rep(scale, len=use.n);
- x = rep(x, len=use.n)
+ shape = rep(shape, len = use.n); location = rep(location, len = use.n);
+ scale = rep(scale, len = use.n);
+ x = rep(x, len = use.n)
- logdensity = rep(log(0), len=use.n)
+ logdensity = rep(log(0), len = use.n)
scase = abs(shape) < tolshape0
nscase = sum(scase)
if (use.n - nscase) {
@@ -71,7 +73,7 @@ dgev = function(x, location=0, scale=1, shape=0, log = FALSE,
}
if (nscase) {
logdensity[scase] = dgumbel(x[scase], loc=location[scase],
- sc=scale[scase], log=TRUE)
+ sc=scale[scase], log = TRUE)
}
logdensity[scale <= 0] = NaN
@@ -80,15 +82,18 @@ dgev = function(x, location=0, scale=1, shape=0, log = FALSE,
-pgev = function(q, location=0, scale=1, shape=0) {
- if (!is.Numeric(q)) stop("bad input for argument 'q'")
- if (!is.Numeric(location)) stop("bad input for argument 'location'")
- if (!is.Numeric(shape)) stop("bad input for argument 'shape'")
+pgev <- function(q, location = 0, scale = 1, shape = 0) {
+ if (!is.Numeric(q))
+ stop("bad input for argument 'q'")
+ if (!is.Numeric(location))
+ stop("bad input for argument 'location'")
+ if (!is.Numeric(shape))
+ stop("bad input for argument 'shape'")
use.n = max(length(q), length(location), length(scale), length(shape))
ans = numeric(use.n)
- shape = rep(shape, len=use.n); location = rep(location, len=use.n);
- scale = rep(scale, len=use.n); q = rep(q-location, len=use.n)
+ shape = rep(shape, len = use.n); location = rep(location, len = use.n);
+ scale = rep(scale, len = use.n); q = rep(q-location, len = use.n)
scase = abs(shape) < sqrt(.Machine$double.eps)
nscase = sum(scase)
if (use.n - nscase) {
@@ -103,15 +108,18 @@ pgev = function(q, location=0, scale=1, shape=0) {
-qgev = function(p, location=0, scale=1, shape=0) {
- if (!is.Numeric(p, posit=TRUE) || any(p >= 1)) stop("0 < p < 1 is required")
- if (!is.Numeric(location)) stop("bad input for argument 'location'")
- if (!is.Numeric(shape)) stop("bad input for argument 'shape'")
+qgev <- function(p, location = 0, scale = 1, shape = 0) {
+ if (!is.Numeric(p, posit = TRUE) || any(p >= 1))
+ stop("0 < p < 1 is required")
+ if (!is.Numeric(location))
+ stop("bad input for argument 'location'")
+ if (!is.Numeric(shape))
+ stop("bad input for argument 'shape'")
use.n = max(length(p), length(location), length(scale), length(shape))
ans = numeric(use.n)
- shape = rep(shape, len=use.n); location = rep(location, len=use.n);
- scale = rep(scale, len=use.n); p = rep(p, len=use.n)
+ shape = rep(shape, len = use.n); location = rep(location, len = use.n);
+ scale = rep(scale, len = use.n); p = rep(p, len = use.n)
scase = abs(shape) < sqrt(.Machine$double.eps)
nscase = sum(scase)
if (use.n - nscase) {
@@ -128,25 +136,26 @@ qgev = function(p, location=0, scale=1, shape=0) {
- gev = function(llocation="identity",
- lscale="loge",
- lshape="logoff",
- elocation = list(),
- escale = list(),
- eshape = if (lshape=="logoff") list(offset=0.5) else
- if (lshape=="elogit") list(min=-0.5, max=0.5) else list(),
- percentiles=c(95,99),
- iscale=NULL, ishape=NULL,
- method.init=1, gshape=c(-0.45, 0.45),
- tolshape0=0.001, giveWarning=TRUE,
- zero=3)
+ gev <- function(llocation = "identity",
+ lscale = "loge",
+ lshape = "logoff",
+ elocation = list(),
+ escale = list(),
+ eshape = if (lshape == "logoff") list(offset = 0.5) else
+ if (lshape == "elogit") list(min = -0.5, max = 0.5) else list(),
+ percentiles = c(95, 99),
+ iscale = NULL, ishape = NULL,
+ method.init = 1, gshape = c(-0.45, 0.45),
+ tolshape0 = 0.001, giveWarning = TRUE,
+ zero = 3)
{
if (!is.logical(giveWarning) || length(giveWarning) != 1)
stop("bad input for argument 'giveWarning'")
+
mean = FALSE
- if (length(iscale) && !is.Numeric(iscale, posit=TRUE))
+ if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
stop("bad input for argument 'iscale'")
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
@@ -154,41 +163,43 @@ qgev = function(p, location=0, scale=1, shape=0) {
llocation = as.character(substitute(llocation))
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
+
if (!mean && length(percentiles) &&
- (!is.Numeric(percentiles, posit=TRUE) || max(percentiles) >= 100))
+ (!is.Numeric(percentiles, posit = TRUE) || max(percentiles) >= 100))
stop("bad input for argument 'percentiles'")
- if (!is.Numeric(method.init, allow=1, posit=TRUE, integer=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, posit = TRUE, integer = TRUE) ||
method.init > 2.5)
stop("argument 'method.init' must be 1 or 2")
if (length(ishape) && !is.Numeric(ishape))
stop("bad input for argument 'ishape'")
- if (!is.Numeric(tolshape0, allow=1, posit=TRUE) || tolshape0 > 0.1)
+ if (!is.Numeric(tolshape0, allow = 1, posit = TRUE) || tolshape0 > 0.1)
stop("bad input for argument 'tolshape0'")
if (!is.Numeric(gshape, allow=2) || gshape[1] >= gshape[2])
stop("bad input for argument 'gshape'")
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
+
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
if (!is.list(eshape)) eshape = list()
new("vglmff",
- blurb=c("Generalized Extreme Value Distribution\n",
+ blurb = c("Generalized Extreme Value Distribution\n",
"Links: ",
- namesof("location", link=llocation, earg= elocation), ", ",
- namesof("scale", link=lscale, earg= escale), ", ",
- namesof("shape", link=lshape, earg= eshape)),
+ namesof("location", link = llocation, earg = elocation), ", ",
+ namesof("scale", link = lscale, earg = escale), ", ",
+ namesof("shape", link = lshape, earg = eshape)),
constraints=eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
predictors.names =
- c(namesof("location", .llocation, earg= .elocation, short=TRUE),
- namesof("scale", .lscale, earg= .escale, short=TRUE),
- namesof("shape", .lshape, earg= .eshape, short=TRUE))
+ c(namesof("location", .llocation, earg = .elocation, short = TRUE),
+ namesof("scale", .lscale, earg = .escale, short = TRUE),
+ namesof("shape", .lshape, earg = .eshape, short = TRUE))
y = as.matrix(y)
if (ncol(y) > 1)
- y = -t(apply(-y, 1, sort, na.last=TRUE))
+ y = -t(apply(-y, 1, sort, na.last = TRUE))
r.vec = rowSums(cbind(!is.na(y)))
@@ -198,10 +209,10 @@ qgev = function(p, location=0, scale=1, shape=0) {
extra$percentiles = .percentiles
if (!length(etastart)) {
- init.sig= if (length( .iscale)) rep( .iscale, len=nrow(y)) else NULL
- init.xi = if (length( .ishape)) rep( .ishape, len=nrow(y)) else NULL
+ init.sig= if (length( .iscale)) rep( .iscale, len = nrow(y)) else NULL
+ init.xi = if (length( .ishape)) rep( .ishape, len = nrow(y)) else NULL
eshape = .eshape
- if ( .lshape=="elogit" && length(init.xi) &&
+ if ( .lshape == "elogit" && length(init.xi) &&
(any(init.xi <= eshape$min | init.xi >= eshape$max)))
stop("bad input for argument 'eshape'")
if ( .method.init == 1) {
@@ -211,36 +222,36 @@ qgev = function(p, location=0, scale=1, shape=0) {
est.sigma = !length(init.sig)
gshape = .gshape
temp234 = if (length(init.xi)) init.xi[1] else
- seq(gshape[1], gshape[2], len=12)
+ seq(gshape[1], gshape[2], len = 12)
for(xi.try in temp234) {
xvec = if (abs(xi.try) < .tolshape0) log(nvector) else
(nvector^xi.try - 1) / xi.try
- fit0 = lsfit(x=xvec, y=ynvector, intercept=TRUE)
+ fit0 = lsfit(x=xvec, y=ynvector, intercept = TRUE)
sigmaTry = if (est.sigma)
- rep(fit0$coef["X"], len=nrow(y)) else init.sig
- muTry = rep(fit0$coef["Intercept"], len=nrow(y))
+ rep(fit0$coef["X"], len = nrow(y)) else init.sig
+ muTry = rep(fit0$coef["Intercept"], len = nrow(y))
llTry = egev(giveWarning=
- FALSE)@loglikelihood(mu=NULL, y=y[,1], w=w,
- residuals=FALSE,
- eta=cbind(theta2eta(muTry, .llocation,earg= .elocation),
- theta2eta(sigmaTry, .lscale,earg= .escale),
- theta2eta(xi.try, link= .lshape, earg= .eshape)))
+ FALSE)@loglikelihood(mu = NULL, y=y[,1], w=w,
+ residuals = FALSE,
+ eta=cbind(theta2eta(muTry, .llocation,earg = .elocation),
+ theta2eta(sigmaTry, .lscale,earg = .escale),
+ theta2eta(xi.try, link= .lshape, earg = .eshape)))
if (llTry >= objecFunction) {
if (est.sigma)
init.sig = sigmaTry
- init.mu = rep(muTry, len=nrow(y))
+ init.mu = rep(muTry, len = nrow(y))
objecFunction = llTry
bestxi = xi.try
}
}
if (!length(init.xi))
- init.xi = rep(bestxi, len=nrow(y))
+ init.xi = rep(bestxi, len = nrow(y))
} else {
- init.xi = rep(0.05, len=nrow(y))
+ init.xi = rep(0.05, len = nrow(y))
if (!length(init.sig))
- init.sig = rep(sqrt(6 * var(y[,1]))/pi, len=nrow(y))
+ init.sig = rep(sqrt(6 * var(y[,1]))/pi, len = nrow(y))
EulerM = -digamma(1)
- init.mu = rep(median(y[,1]) - EulerM*init.sig, len=nrow(y))
+ init.mu = rep(median(y[,1]) - EulerM*init.sig, len = nrow(y))
}
bad = ((1 + init.xi*(y-init.mu)/init.sig) <= 0)
@@ -250,98 +261,100 @@ qgev = function(p, location=0, scale=1, shape=0) {
init.xi[bad] = ifelse(y[bad] > init.mu[bad], 0.1, -0.1)
}
- etastart = cbind(theta2eta(init.mu, .llocation, earg= .elocation),
- theta2eta(init.sig, .lscale, earg= .escale),
- theta2eta(init.xi, .lshape, earg= .eshape))
+ etastart = cbind(theta2eta(init.mu, .llocation, earg = .elocation),
+ theta2eta(init.sig, .lscale, earg = .escale),
+ theta2eta(init.xi, .lshape, earg = .eshape))
}
- }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .percentiles=percentiles,
- .elocation = elocation, .escale = escale,
- .eshape= eshape, .tolshape0=tolshape0,
- .method.init=method.init, .giveWarning= giveWarning,
- .iscale=iscale, .ishape=ishape, .gshape=gshape ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- loc = eta2theta(eta[,1], .llocation, earg= .elocation)
- sigma = eta2theta(eta[,2], .lscale, earg= .escale)
- xi = eta2theta(eta[,3], .lshape, earg= .eshape)
- iszero = (abs(xi) < .tolshape0)
+ }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape,
+ .iscale = iscale, .ishape = ishape,
+ .gshape = gshape,
+ .percentiles = percentiles,
+ .tolshape0 = tolshape0,
+ .method.init = method.init, .giveWarning= giveWarning ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ loc = eta2theta(eta[,1], .llocation, earg = .elocation)
+ sigma = eta2theta(eta[,2], .lscale, earg = .escale)
+ xi = eta2theta(eta[,3], .lshape, earg = .eshape)
+ is.zero = (abs(xi) < .tolshape0)
cent = extra$percentiles
lp = length(cent)
fv = matrix(as.numeric(NA), nrow(eta), lp)
if (lp) {
for(ii in 1:lp) {
yp = -log(cent[ii]/100)
- fv[!iszero,ii] = loc[!iszero] - sigma[!iszero] *
- (1 - yp^(-xi[!iszero])) / xi[!iszero]
- fv[iszero,ii] = loc[iszero] - sigma[iszero] * log(yp)
+ fv[!is.zero,ii] = loc[!is.zero] - sigma[!is.zero] *
+ (1 - yp^(-xi[!is.zero])) / xi[!is.zero]
+ fv[is.zero,ii] = loc[is.zero] - sigma[is.zero] * log(yp)
}
dimnames(fv) = list(dimnames(eta)[[1]],
- paste(as.character(cent), "%", sep=""))
+ paste(as.character(cent), "%", sep = ""))
} else {
EulerM = -digamma(1)
fv = loc + sigma * EulerM # When xi=0, is Gumbel
- fv[!iszero] = loc[!iszero] + sigma[!iszero] *
- (gamma(1-xi[!iszero])-1) / xi[!iszero]
+ fv[!is.zero] = loc[!is.zero] + sigma[!is.zero] *
+ (gamma(1-xi[!is.zero])-1) / xi[!is.zero]
fv[xi >= 1] = NA # Mean exists only if xi < 1.
}
fv
- }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .eshape= eshape, .tolshape0=tolshape0 ))),
- last=eval(substitute(expression({
+ }, list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .eshape = eshape, .tolshape0 = tolshape0 ))),
+ last = eval(substitute(expression({
misc$links = c(location = .llocation, scale = .lscale, shape =.lshape)
misc$true.mu = !length( .percentiles) # @fitted is not a true mu
misc$percentiles = .percentiles
- misc$earg= list(location= .elocation, scale= .escale, shape= .eshape)
+ misc$earg = list(location= .elocation, scale= .escale, shape= .eshape)
misc$expected = TRUE
misc$tolshape0 = .tolshape0
- if (ncol(y)==1)
+ if (ncol(y) == 1)
y = as.vector(y)
if (any(xi < -0.5))
warning("some values of the shape parameter are less than -0.5")
- }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation = elocation, .escale = escale, .eshape= eshape,
- .tolshape0=tolshape0, .percentiles=percentiles ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
- mmu = eta2theta(eta[,1], .llocation, earg= .elocation)
- sigma = eta2theta(eta[,2], .lscale, earg= .escale)
- xi = eta2theta(eta[,3], .lshape, earg= .eshape)
- iszero = (abs(xi) < .tolshape0)
+ }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape,
+ .tolshape0 = tolshape0, .percentiles = percentiles ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
+ mmu = eta2theta(eta[,1], .llocation, earg = .elocation)
+ sigma = eta2theta(eta[,2], .lscale, earg = .escale)
+ xi = eta2theta(eta[,3], .lshape, earg = .eshape)
+ is.zero = (abs(xi) < .tolshape0)
zedd = (y-mmu) / sigma
r.vec = rowSums(cbind(!is.na(y)))
A = 1 + xi * (y-mmu)/sigma
ii = 1:nrow(eta)
A1 = A[cbind(ii, r.vec)]
mytolerance = 0 # .Machine$double.eps
- if (any(bad <- (A1 <= mytolerance), na.rm=TRUE)) {
+ if (any(bad <- (A1 <= mytolerance), na.rm = TRUE)) {
cat("There are",sum(bad),"range violations in @loglikelihood\n")
flush.console()
}
- igev = !iszero & !bad
- igum = iszero & !bad
+ igev = !is.zero & !bad
+ igum = is.zero & !bad
pow = 1 + 1/xi[igev]
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
old.answer =
sum(bad) * (-1.0e10) +
sum(w[igum] * (-r.vec[igum]*log(sigma[igum]) -
exp(-zedd[igum,r.vec]) -
- rowSums(cbind(zedd, na.rm=TRUE)))) +
+ rowSums(cbind(zedd, na.rm = TRUE)))) +
sum(w[igev] * (-r.vec[igev]*log(sigma[igev]) -
- pow*rowSums(cbind(log(A[igev])), na.rm=TRUE) -
+ pow*rowSums(cbind(log(A[igev])), na.rm = TRUE) -
A1[igev]^(-1/xi[igev])))
old.answer
}
- }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation = elocation, .escale = escale, .eshape= eshape,
- .giveWarning=giveWarning, .tolshape0=tolshape0 ))),
- vfamily=c("gev", "vextremes"),
- deriv=eval(substitute(expression({
+ }, list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape,
+ .giveWarning = giveWarning, .tolshape0 = tolshape0 ))),
+ vfamily = c("gev", "vextremes"),
+ deriv = eval(substitute(expression({
r.vec = rowSums(cbind(!is.na(y)))
- mmu = eta2theta(eta[,1], .llocation, earg= .elocation)
- sigma = eta2theta(eta[,2], .lscale, earg= .escale)
- xi = eta2theta(eta[,3], .lshape, earg= .eshape)
- iszero = (abs(xi) < .tolshape0)
+ mmu = eta2theta(eta[,1], .llocation, earg = .elocation)
+ sigma = eta2theta(eta[,2], .lscale, earg = .escale)
+ xi = eta2theta(eta[,3], .lshape, earg = .eshape)
+ is.zero = (abs(xi) < .tolshape0)
ii = 1:nrow(eta)
zedd = (y-mmu) / sigma
A = 1 + xi * zedd
@@ -351,33 +364,33 @@ qgev = function(p, location=0, scale=1, shape=0) {
pow = 1 + 1/xi
A1 = A[cbind(ii, r.vec)]
- AAr1 = dA.dmu/(xi * A1^pow) - pow * rowSums(cbind(dA.dmu/A), na.rm=TRUE)
+ AAr1 = dA.dmu/(xi * A1^pow) - pow * rowSums(cbind(dA.dmu/A), na.rm = TRUE)
AAr2 = dA.dsigma[cbind(ii,r.vec)] / (xi * A1^pow) -
- pow * rowSums(cbind(dA.dsigma/A), na.rm=TRUE)
- AAr3 = 1/(xi * A1^pow) - pow * rowSums(cbind(dA.dsigma/A), na.rm=TRUE)
+ pow * rowSums(cbind(dA.dsigma/A), na.rm = TRUE)
+ AAr3 = 1/(xi * A1^pow) - pow * rowSums(cbind(dA.dsigma/A), na.rm = TRUE)
dl.dmu = AAr1
dl.dsi = AAr2 - r.vec/sigma
- dl.dxi = rowSums(cbind(log(A)), na.rm=TRUE)/xi^2 -
- pow * rowSums(cbind(dA.dxi/A), na.rm=TRUE) -
+ dl.dxi = rowSums(cbind(log(A)), na.rm = TRUE)/xi^2 -
+ pow * rowSums(cbind(dA.dxi/A), na.rm = TRUE) -
(log(A1) / xi^2 -
dA.dxi[cbind(ii,r.vec)] / (xi*A1)) * A1^(-1/xi)
- if (any(iszero)) {
+ if (any(is.zero)) {
zorro = c(zedd[cbind(1:n,r.vec)])
- zorro = zorro[iszero]
+ zorro = zorro[is.zero]
ezedd = exp(-zorro)
- dl.dmu[iszero] = (1-ezedd) / sigma[iszero]
- dl.dsi[iszero] = (zorro * (1-ezedd) - 1) / sigma[iszero]
- dl.dxi[iszero] = zorro * ((1 - ezedd) * zorro / 2 - 1)
+ dl.dmu[is.zero] = (1-ezedd) / sigma[is.zero]
+ dl.dsi[is.zero] = (zorro * (1-ezedd) - 1) / sigma[is.zero]
+ dl.dxi[is.zero] = zorro * ((1 - ezedd) * zorro / 2 - 1)
}
- dmu.deta = dtheta.deta(mmu, .llocation, earg= .elocation)
- dsi.deta = dtheta.deta(sigma, .lscale, earg= .escale)
- dxi.deta = dtheta.deta(xi, .lshape, earg= .eshape)
+ dmu.deta = dtheta.deta(mmu, .llocation, earg = .elocation)
+ dsi.deta = dtheta.deta(sigma, .lscale, earg = .escale)
+ dxi.deta = dtheta.deta(xi, .lshape, earg = .eshape)
w * cbind(dl.dmu * dmu.deta, dl.dsi * dsi.deta, dl.dxi * dxi.deta)
- }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation = elocation, .escale = escale, .eshape= eshape,
- .tolshape0=tolshape0 ))),
- weight=eval(substitute(expression({
+ }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape,
+ .tolshape0 = tolshape0 ))),
+ weight = eval(substitute(expression({
kay = -xi
dd = digamma(r.vec-kay+1)
ddd = digamma(r.vec+1) # Unnecessarily evaluated at each iteration
@@ -400,17 +413,17 @@ qgev = function(p, location=0, scale=1, shape=0) {
temp23 - tmp2 - r.vec*k0) / (sigma * k3)
wz[,iam(3,3,M)] = (2*tmp1*(-temp13) + tmp2 + r.vec*k0*temp33)/(k3*kay)
- if (any(iszero)) {
+ if (any(is.zero)) {
if (ncol(y) > 1)
stop("cannot handle xi==0 with a multivariate response")
EulerM = -digamma(1)
- wz[iszero,iam(2,2,M)] = (pi^2/6 + (1-EulerM)^2) / sigma^2
- wz[iszero,iam(3,3,M)] = 2.4236
- wz[iszero,iam(1,2,M)] = (digamma(2) + 2*(EulerM-1)) / sigma^2
- wz[iszero,iam(1,3,M)]= -(trigamma(1)/2 + digamma(1)*
+ wz[is.zero,iam(2,2,M)] = (pi^2/6 + (1-EulerM)^2) / sigma^2
+ wz[is.zero,iam(3,3,M)] = 2.4236
+ wz[is.zero,iam(1,2,M)] = (digamma(2) + 2*(EulerM-1)) / sigma^2
+ wz[is.zero,iam(1,3,M)]= -(trigamma(1)/2 + digamma(1)*
(digamma(1)/2+1))/sigma
- wz[iszero,iam(2,3,M)] = (-dgammadx(2,3)/6 + dgammadx(1,1) +
+ wz[is.zero,iam(2,3,M)] = (-dgammadx(2,3)/6 + dgammadx(1,1) +
2*dgammadx(1,2) + 2*dgammadx(1,3)/3) / sigma
if (FALSE ) {
@@ -433,7 +446,7 @@ qgev = function(p, location=0, scale=1, shape=0) {
-dgammadx = function(x, deriv.arg=1) {
+dgammadx <- function(x, deriv.arg=1) {
if (deriv.arg==0) {
gamma(x)
} else if (deriv.arg == 1) {
@@ -454,66 +467,66 @@ dgammadx = function(x, deriv.arg=1) {
- egev = function(llocation="identity",
- lscale="loge",
- lshape="logoff",
- elocation = list(),
- escale = list(),
- eshape = if (lshape=="logoff") list(offset=0.5) else
- if (lshape=="elogit") list(min=-0.5, max=0.5) else list(),
- percentiles=c(95,99),
- iscale=NULL, ishape=NULL,
- method.init=1, gshape=c(-0.45, 0.45),
- tolshape0=0.001, giveWarning=TRUE,
- zero=3)
+ egev <- function(llocation = "identity",
+ lscale = "loge",
+ lshape = "logoff",
+ elocation = list(),
+ escale = list(),
+ eshape = if (lshape == "logoff") list(offset = 0.5) else
+ if (lshape == "elogit") list(min = -0.5, max = 0.5) else list(),
+ percentiles = c(95, 99),
+ iscale = NULL, ishape = NULL,
+ method.init = 1, gshape = c(-0.45, 0.45),
+ tolshape0 = 0.001, giveWarning = TRUE,
+ zero = 3)
{
- if (!is.logical(giveWarning) || length(giveWarning) != 1)
- stop("bad input for argument 'giveWarning'")
- if (length(iscale) && !is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument 'iscale'")
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale <- as.character(substitute(lscale))
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation <- as.character(substitute(llocation))
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape <- as.character(substitute(lshape))
- if (!is.Numeric(gshape, allow=2) || gshape[1] >= gshape[2])
- stop("bad input for argument 'gshape'")
+ if (!is.logical(giveWarning) || length(giveWarning) != 1)
+ stop("bad input for argument 'giveWarning'")
+ if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
+ stop("bad input for argument 'iscale'")
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale <- as.character(substitute(lscale))
+ if (mode(llocation) != "character" && mode(llocation) != "name")
+ llocation <- as.character(substitute(llocation))
+ if (mode(lshape) != "character" && mode(lshape) != "name")
+ lshape <- as.character(substitute(lshape))
+ if (!is.Numeric(gshape, allow=2) || gshape[1] >= gshape[2])
+ stop("bad input for argument 'gshape'")
if (length(percentiles) &&
- (!is.Numeric(percentiles, posit=TRUE) || max(percentiles) >= 100))
+ (!is.Numeric(percentiles, posit = TRUE) || max(percentiles) >= 100))
stop("bad input for argument 'percentiles'")
- if (!is.Numeric(method.init, allow=1, posit=TRUE, integer=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, posit = TRUE, integer = TRUE) ||
method.init > 2.5)
stop("argument 'method.init' must be 1 or 2")
if (length(ishape) && !is.Numeric(ishape))
stop("bad input for argument 'ishape'")
- if (!is.Numeric(tolshape0, allow=1, posit=TRUE) || tolshape0 > 0.1)
+ if (!is.Numeric(tolshape0, allow = 1, posit = TRUE) || tolshape0 > 0.1)
stop("bad input for argument 'tolshape0'")
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
if (!is.list(eshape)) eshape = list()
new("vglmff",
- blurb=c("Generalized Extreme Value Distribution\n",
+ blurb = c("Generalized Extreme Value Distribution\n",
"Links: ",
- namesof("location", link=llocation, earg= elocation), ", ",
- namesof("scale", link=lscale, earg= escale), ", ",
- namesof("shape", link=lshape, earg= eshape)),
+ namesof("location", link = llocation, earg = elocation), ", ",
+ namesof("scale", link = lscale, earg = escale), ", ",
+ namesof("shape", link = lshape, earg = eshape)),
constraints=eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
predictors.names =
- c(namesof("location", .llocation, earg= .elocation, short=TRUE),
- namesof("scale", .lscale, earg= .escale, short=TRUE),
- namesof("shape", .lshape, earg= .eshape, short=TRUE))
+ c(namesof("location", .llocation, earg = .elocation, short = TRUE),
+ namesof("scale", .lscale, earg = .escale, short = TRUE),
+ namesof("shape", .lshape, earg = .eshape, short = TRUE))
if (ncol(as.matrix(y)) != 1)
stop("response must be a vector or one-column matrix")
if (!length(etastart)) {
- init.sig= if (length( .iscale)) rep( .iscale, len=length(y)) else NULL
- init.xi = if (length( .ishape)) rep( .ishape, len=length(y)) else NULL
+ init.sig= if (length( .iscale)) rep( .iscale, len = length(y)) else NULL
+ init.xi = if (length( .ishape)) rep( .ishape, len = length(y)) else NULL
eshape = .eshape
if ( .lshape == "elogit" && length(init.xi) &&
(any(init.xi <= eshape$min | init.xi >= eshape$max)))
@@ -525,44 +538,44 @@ dgammadx = function(x, deriv.arg=1) {
est.sigma = !length(init.sig)
gshape = .gshape
temp234 = if (length(init.xi)) init.xi[1] else
- seq(gshape[1], gshape[2], len=12)
+ seq(gshape[1], gshape[2], len = 12)
for(xi.try in temp234) {
xvec = if (abs(xi.try) < .tolshape0) log(nvector) else
(nvector^xi.try - 1) / xi.try
- fit0 = lsfit(x=xvec, y=ynvector, intercept=TRUE)
+ fit0 = lsfit(x=xvec, y=ynvector, intercept = TRUE)
if (est.sigma) {
- sigmaTry = rep(fit0$coef["X"], len=length(y))
+ sigmaTry = rep(fit0$coef["X"], len = length(y))
} else {
sigmaTry = init.sig
}
- muTry = rep(fit0$coef["Intercept"], len=length(y))
+ muTry = rep(fit0$coef["Intercept"], len = length(y))
llTry = egev(giveWarning=
- FALSE)@loglikelihood(mu=NULL, y=y, w=w,
- residuals=FALSE,
- eta=cbind(theta2eta(muTry, .llocation, earg= .elocation),
- theta2eta(sigmaTry, .lscale, earg= .escale),
- theta2eta(xi.try, .lshape, earg= .eshape)))
+ FALSE)@loglikelihood(mu = NULL, y=y, w=w,
+ residuals = FALSE,
+ eta=cbind(theta2eta(muTry, .llocation, earg = .elocation),
+ theta2eta(sigmaTry, .lscale, earg = .escale),
+ theta2eta(xi.try, .lshape, earg = .eshape)))
if (llTry >= objecFunction) {
if (est.sigma)
init.sig = sigmaTry
- init.mu = rep(muTry, len=length(y))
+ init.mu = rep(muTry, len = length(y))
objecFunction = llTry
bestxi = xi.try
}
}
if (!length(init.xi))
- init.xi = rep(bestxi, len=length(y))
+ init.xi = rep(bestxi, len = length(y))
} else {
- init.xi = rep(if(length(init.xi)) init.xi else 0.05,
- len=length(y))
+ init.xi = rep(if (length(init.xi)) init.xi else 0.05,
+ len = length(y))
if (!length(init.sig))
- init.sig = rep(sqrt(6*var(y))/pi, len=length(y))
+ init.sig = rep(sqrt(6*var(y))/pi, len = length(y))
EulerM = -digamma(1)
- init.mu = rep(median(y) - EulerM * init.sig, len=length(y))
+ init.mu = rep(median(y) - EulerM * init.sig, len = length(y))
}
bad <- (1 + init.xi*(y-init.mu)/init.sig <= 0)
- if (fred <- sum(bad, na.rm=TRUE)) {
+ if (fred <- sum(bad, na.rm = TRUE)) {
warning(paste(fred, "observations violating boundary",
"constraints while initializing. Taking corrective action."))
init.xi[bad] = ifelse(y[bad] > init.mu[bad], 0.01, -0.01)
@@ -570,106 +583,107 @@ dgammadx = function(x, deriv.arg=1) {
extra$percentiles = .percentiles
- etastart = cbind(theta2eta(init.mu, .llocation, earg= .elocation),
- theta2eta(init.sig, .lscale, earg= .escale),
- theta2eta(init.xi, .lshape, earg= .eshape))
+ etastart = cbind(theta2eta(init.mu, .llocation, earg = .elocation),
+ theta2eta(init.sig, .lscale, earg = .escale),
+ theta2eta(init.xi, .lshape, earg = .eshape))
}
- }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .percentiles=percentiles, .tolshape0=tolshape0,
- .elocation = elocation, .escale = escale, .eshape= eshape,
- .method.init=method.init,
+ }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .percentiles = percentiles, .tolshape0 = tolshape0,
+ .elocation = elocation, .escale = escale, .eshape = eshape,
+ .method.init = method.init,
.giveWarning= giveWarning,
- .iscale=iscale, .ishape=ishape, .gshape=gshape ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- loc <- eta2theta(eta[,1], .llocation, earg= .elocation)
- sigma <- eta2theta(eta[,2], .lscale, earg= .escale)
- xi <- eta2theta(eta[,3], .lshape, earg= .eshape)
- iszero <- (abs(xi) < .tolshape0)
+ .iscale = iscale, .ishape = ishape, .gshape = gshape ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ loc <- eta2theta(eta[,1], .llocation, earg = .elocation)
+ sigma <- eta2theta(eta[,2], .lscale, earg = .escale)
+ xi <- eta2theta(eta[,3], .lshape, earg = .eshape)
+ is.zero <- (abs(xi) < .tolshape0)
cent = extra$percentiles
lp <- length(cent)
fv <- matrix(as.numeric(NA), nrow(eta), lp)
if (lp) {
for(ii in 1:lp) {
yp = -log(cent[ii]/100)
- fv[!iszero,ii] = loc[!iszero] - sigma[!iszero] *
- (1 - yp^(-xi[!iszero])) / xi[!iszero]
- fv[iszero,ii] = loc[iszero] - sigma[iszero] * log(yp)
+ fv[!is.zero,ii] = loc[!is.zero] - sigma[!is.zero] *
+ (1 - yp^(-xi[!is.zero])) / xi[!is.zero]
+ fv[is.zero,ii] = loc[is.zero] - sigma[is.zero] * log(yp)
}
dimnames(fv) = list(dimnames(eta)[[1]],
- paste(as.character(cent), "%", sep=""))
+ paste(as.character(cent), "%", sep = ""))
} else {
EulerM = -digamma(1)
fv = loc + sigma * EulerM # When xi=0, is Gumbel
- fv[!iszero] = loc[!iszero] + sigma[!iszero] *
- (gamma(1-xi[!iszero])-1) / xi[!iszero]
+ fv[!is.zero] = loc[!is.zero] + sigma[!is.zero] *
+ (gamma(1-xi[!is.zero])-1) / xi[!is.zero]
fv[xi >= 1] = NA # Mean exists only if xi < 1.
}
fv
- }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation = elocation, .escale = escale, .eshape= eshape,
- .tolshape0=tolshape0 ))),
- last=eval(substitute(expression({
+ }, list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape,
+ .tolshape0 = tolshape0 ))),
+ last = eval(substitute(expression({
misc$links <- c(location = .llocation, scale = .lscale, shape = .lshape)
misc$true.mu = !length( .percentiles) # @fitted is not a true mu
misc$percentiles <- .percentiles
- misc$earg= list(location= .elocation, scale= .escale, shape= .eshape)
+ misc$earg = list(location= .elocation, scale= .escale, shape= .eshape)
misc$tolshape0 = .tolshape0
misc$expected = TRUE
if (any(xi < -0.5))
warning("some values of the shape parameter are less than -0.5")
- }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation = elocation, .escale = escale, .eshape= eshape,
- .tolshape0=tolshape0, .percentiles=percentiles ))),
- loglikelihood=eval(substitute(
- function(mu,y,w, residuals=FALSE, eta, extra=NULL) {
- mmu <- eta2theta(eta[,1], .llocation, earg= .elocation )
- sigma <- eta2theta(eta[,2], .lscale, earg= .escale )
- xi <- eta2theta(eta[,3], .lshape, earg= .eshape )
-
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape,
+ .tolshape0 = tolshape0, .percentiles = percentiles ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ mmu <- eta2theta(eta[,1], .llocation, earg = .elocation )
+ sigma <- eta2theta(eta[,2], .lscale, earg = .escale )
+ xi <- eta2theta(eta[,3], .lshape, earg = .eshape )
+
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
sum(w * dgev(x=y, location=mmu, scale=sigma, shape=xi,
tolshape0 = .tolshape0,
- log=TRUE, oobounds.log = -1.0e04,
+ log = TRUE, oobounds.log = -1.0e04,
giveWarning= .giveWarning))
}
- }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation = elocation, .escale = escale, .eshape= eshape,
- .giveWarning= giveWarning, .tolshape0=tolshape0 ))),
- vfamily=c("egev", "vextremes"),
- deriv=eval(substitute(expression({
- mmu = eta2theta(eta[,1], .llocation, earg= .elocation)
- sigma = eta2theta(eta[,2], .lscale, earg= .escale )
- xi = eta2theta(eta[,3], .lshape, earg= .eshape)
- iszero <- (abs(xi) < .tolshape0)
+ }, list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape,
+ .giveWarning= giveWarning, .tolshape0 = tolshape0 ))),
+ vfamily = c("egev", "vextremes"),
+ deriv = eval(substitute(expression({
+ mmu = eta2theta(eta[,1], .llocation, earg = .elocation)
+ sigma = eta2theta(eta[,2], .lscale, earg = .escale )
+ xi = eta2theta(eta[,3], .lshape, earg = .eshape)
+ is.zero <- (abs(xi) < .tolshape0)
zedd = (y-mmu) / sigma
A = 1 + xi * zedd
dA.dxi = zedd
dA.dmu = -xi / sigma
dA.dsigma = -xi * zedd / sigma
pow = 1 + 1/xi
- if (any(bad <- A<=0, na.rm=TRUE)) stop(sum(bad, na.rm=TRUE),
+ if (any(bad <- A<=0, na.rm = TRUE)) stop(sum(bad, na.rm = TRUE),
" observations violating boundary constraints in '@deriv'")
AA = 1/(xi*A^pow)- pow/A
dl.dmu = dA.dmu * AA
dl.dsi = dA.dsigma * AA - 1/sigma
dl.dxi = log(A)/xi^2 - pow * dA.dxi / A -
(log(A)/xi^2 - dA.dxi /(xi*A)) * A^(-1/xi)
- if (any(iszero)) {
- ezedd = exp(-zedd[iszero])
- dl.dmu[iszero] = (1-ezedd) / sigma[iszero]
- dl.dsi[iszero] = (zedd[iszero] * (1-ezedd) - 1) / sigma[iszero]
- dl.dxi[iszero] = zedd[iszero] * ((1 - ezedd) * zedd[iszero] / 2 -1)
+ if (any(is.zero)) {
+ ezedd = exp(-zedd[is.zero])
+ dl.dmu[is.zero] = (1-ezedd) / sigma[is.zero]
+ dl.dsi[is.zero] = (zedd[is.zero] * (1-ezedd) - 1) / sigma[is.zero]
+ dl.dxi[is.zero] = zedd[is.zero] * ((1 - ezedd) * zedd[is.zero] / 2 -1)
}
- dmu.deta = dtheta.deta(mmu, .llocation, earg= .elocation)
- dsi.deta = dtheta.deta(sigma, .lscale, earg= .escale )
- dxi.deta = dtheta.deta(xi, .lshape, earg= .eshape)
+ dmu.deta = dtheta.deta(mmu, .llocation, earg = .elocation)
+ dsi.deta = dtheta.deta(sigma, .lscale, earg = .escale )
+ dxi.deta = dtheta.deta(xi, .lshape, earg = .eshape)
w * cbind(dl.dmu * dmu.deta, dl.dsi * dsi.deta, dl.dxi*dxi.deta)
- }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation = elocation, .escale = escale, .eshape= eshape,
- .tolshape0=tolshape0 ))),
- weight=eval(substitute(expression({
+ }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape,
+ .tolshape0 = tolshape0 ))),
+ weight = eval(substitute(expression({
bad <- A <= 0
- if (any(bad, na.rm = TRUE)) stop(sum(bad, na.rm=TRUE),
+ if (any(bad, na.rm = TRUE)) stop(sum(bad, na.rm = TRUE),
" observations violating boundary constraints in '@weight'")
kay = -xi # for the formulae
kay[abs(kay-0.5) < .tolshape0] = 0.501
@@ -686,32 +700,32 @@ dgammadx = function(x, deriv.arg=1) {
wz[,iam(1,3,M)] = -(qq + pp/kay) / (sigma * kay)
wz[,iam(2,3,M)] = (1-EulerM - (1-temp100)/kay - qq -
pp/kay) / (sigma * kay^2)
- if (any(iszero)) {
- wz[iszero,iam(2,2,M)] = (pi^2/6 + (1-EulerM)^2) / sigma^2
- wz[iszero,iam(3,3,M)] = 2.4236
- wz[iszero,iam(1,2,M)] = (digamma(2) + 2*(EulerM-1)) / sigma^2
- wz[iszero,iam(1,3,M)]= -(trigamma(1)/2 + digamma(1)*
- (digamma(1)/2+1))/sigma
- wz[iszero,iam(2,3,M)] = (-dgammadx(2,3)/6 + dgammadx(1,1) +
+ if (any(is.zero)) {
+ wz[is.zero,iam(2,2,M)] = (pi^2/6 + (1-EulerM)^2) / sigma^2
+ wz[is.zero,iam(3,3,M)] <- 2.4236
+ wz[is.zero,iam(1,2,M)] <- (digamma(2) + 2*(EulerM-1)) / sigma^2
+ wz[is.zero,iam(1,3,M)] <- -(trigamma(1)/2 + digamma(1)*
+ (digamma(1)/2+1))/sigma
+ wz[is.zero,iam(2,3,M)] <- (-dgammadx(2,3)/6 + dgammadx(1,1) +
2*dgammadx(1,2) + 2*dgammadx(1,3)/3)/sigma
}
- wz[,iam(1,1,M)] = wz[,iam(1,1,M)] * dmu.deta^2
- wz[,iam(2,2,M)] = wz[,iam(2,2,M)] * dsi.deta^2
- wz[,iam(3,3,M)] = wz[,iam(3,3,M)] * dxi.deta^2
- wz[,iam(1,2,M)] = wz[,iam(1,2,M)] * dmu.deta * dsi.deta
- wz[,iam(1,3,M)] = wz[,iam(1,3,M)] * dmu.deta * (-dxi.deta)
- wz[,iam(2,3,M)] = wz[,iam(2,3,M)] * dsi.deta * (-dxi.deta)
+ wz[,iam(1,1,M)] <- wz[,iam(1,1,M)] * dmu.deta^2
+ wz[,iam(2,2,M)] <- wz[,iam(2,2,M)] * dsi.deta^2
+ wz[,iam(3,3,M)] <- wz[,iam(3,3,M)] * dxi.deta^2
+ wz[,iam(1,2,M)] <- wz[,iam(1,2,M)] * dmu.deta * dsi.deta
+ wz[,iam(1,3,M)] <- wz[,iam(1,3,M)] * dmu.deta * (-dxi.deta)
+ wz[,iam(2,3,M)] <- wz[,iam(2,3,M)] * dsi.deta * (-dxi.deta)
w * wz
- }), list( .eshape= eshape, .tolshape0=tolshape0 ))))
+ }), list( .eshape = eshape, .tolshape0 = tolshape0 ))))
}
-rgumbel = function(n, location=0, scale=1) {
+rgumbel <- function(n, location = 0, scale = 1) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
stop("bad input for argument 'n'") else n
answer = location - scale * log(-log(runif(use.n)))
@@ -719,7 +733,7 @@ rgumbel = function(n, location=0, scale=1) {
answer
}
-dgumbel = function(x, location=0, scale=1, log = FALSE) {
+dgumbel <- function(x, location = 0, scale = 1, log = FALSE) {
if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
rm(log)
@@ -729,7 +743,7 @@ dgumbel = function(x, location=0, scale=1, log = FALSE) {
if (log.arg) logdensity else exp(logdensity)
}
-qgumbel = function(p, location=0, scale=1) {
+qgumbel <- function(p, location = 0, scale = 1) {
answer = location - scale * log(-log(p))
answer[scale <= 0] = NaN
answer[p < 0] = NaN
@@ -739,20 +753,20 @@ qgumbel = function(p, location=0, scale=1) {
answer
}
-pgumbel = function(q, location=0, scale=1) {
+pgumbel <- function(q, location = 0, scale = 1) {
answer = exp(-exp(-(q-location) / scale))
answer[scale <= 0] = NaN
answer
}
- gumbel = function(llocation="identity",
- lscale="loge",
+ gumbel <- function(llocation = "identity",
+ lscale = "loge",
elocation = list(),
escale = list(),
- iscale=NULL,
+ iscale = NULL,
R=NA, percentiles=c(95,99),
- mpv=FALSE, zero=NULL)
+ mpv = FALSE, zero = NULL)
{
if (mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
@@ -761,44 +775,44 @@ pgumbel = function(q, location=0, scale=1) {
if (!is.logical(mpv) || length(mpv) != 1)
stop("bad input for argument 'mpv'")
if (length(percentiles) &&
- (!is.Numeric(percentiles, posit=TRUE) || max(percentiles) >= 100))
+ (!is.Numeric(percentiles, posit = TRUE) || max(percentiles) >= 100))
stop("bad input for argument 'percentiles'")
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
- if (length(iscale) && !is.Numeric(iscale, posit=TRUE))
+ if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
stop("bad input for argument 'iscale'")
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
new("vglmff",
- blurb=c("Gumbel Distribution for Extreme Value Regression\n",
+ blurb = c("Gumbel Distribution for Extreme Value Regression\n",
"Links: ",
- namesof("location", link=llocation, earg= elocation), ", ",
- namesof("scale", link=lscale, earg= escale )),
+ namesof("location", link = llocation, earg = elocation), ", ",
+ namesof("scale", link = lscale, earg = escale )),
constraints=eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
predictors.names =
- c(namesof("location", .llocation, earg= .elocation, short=TRUE),
- namesof("scale", .lscale, earg= .escale , short=TRUE))
+ c(namesof("location", .llocation, earg = .elocation, short = TRUE),
+ namesof("scale", .lscale, earg = .escale , short = TRUE))
y = as.matrix(y)
if (ncol(y) > 1)
- y = -t(apply(-y, 1, sort, na.last=TRUE))
+ y = -t(apply(-y, 1, sort, na.last = TRUE))
r.vec = rowSums(cbind(!is.na(y)))
if (any(r.vec == 0))
stop("There is at least one row of the response containing all NAs")
if (ncol(y) > 1) {
yiri = y[cbind(1:nrow(y), r.vec)]
- sc.init = if (is.Numeric( .iscale, posit=TRUE))
- .iscale else {3 * (rowMeans(y, na.rm=TRUE) - yiri)}
+ sc.init = if (is.Numeric( .iscale, posit = TRUE))
+ .iscale else {3 * (rowMeans(y, na.rm = TRUE) - yiri)}
sc.init = rep(sc.init, length=nrow(y))
sc.init[sc.init <= 0.0001] = 1 # Used to be .iscale
loc.init = yiri + sc.init * log(r.vec)
} else {
- sc.init = if (is.Numeric( .iscale, posit=TRUE))
+ sc.init = if (is.Numeric( .iscale, posit = TRUE))
.iscale else 1.1 * (0.01+sqrt(var(y)*6)) / pi
- sc.init = rep(sc.init, len=n)
+ sc.init = rep(sc.init, len = n)
EulerM = -digamma(1)
loc.init = (y - sc.init * EulerM)
loc.init[loc.init <= 0] = min(y)
@@ -809,14 +823,14 @@ pgumbel = function(q, location=0, scale=1) {
extra$percentiles = .percentiles
if (!length(etastart))
- etastart = cbind(theta2eta(loc.init, .llocation, earg= .elocation),
- theta2eta(sc.init, .lscale, earg= .escale ))
- }), list( .llocation=llocation, .lscale=lscale, .iscale=iscale,
+ etastart = cbind(theta2eta(loc.init, .llocation, earg = .elocation),
+ theta2eta(sc.init, .lscale, earg = .escale ))
+ }), list( .llocation = llocation, .lscale = lscale, .iscale = iscale,
.elocation = elocation, .escale = escale,
- .R=R, .mpv=mpv, .percentiles=percentiles ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- loc = eta2theta(eta[,1], .llocation, earg= .elocation)
- sigma = eta2theta(eta[,2], .lscale, earg= .escale ) # sigma
+ .R=R, .mpv=mpv, .percentiles = percentiles ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ loc = eta2theta(eta[,1], .llocation, earg = .elocation)
+ sigma = eta2theta(eta[,2], .lscale, earg = .escale ) # sigma
Percentiles = extra$percentiles
lp = length(Percentiles) # may be 0
if (lp > 0) {
@@ -830,7 +844,7 @@ pgumbel = function(q, location=0, scale=1) {
}
if (mpv)
mu[,ncol(mu)] = loc - sigma * log(log(2))
- dmn2 = paste(as.character(Percentiles), "%", sep="")
+ dmn2 = paste(as.character(Percentiles), "%", sep = "")
if (mpv)
dmn2 = c(dmn2, "MPV")
dimnames(mu) = list(dimnames(eta)[[1]], dmn2)
@@ -839,23 +853,23 @@ pgumbel = function(q, location=0, scale=1) {
mu = loc + sigma * EulerM
}
mu
- }, list( .llocation=llocation, .lscale=lscale,
+ }, list( .llocation = llocation, .lscale = lscale,
.elocation = elocation, .escale = escale ))),
- last=eval(substitute(expression({
+ last = eval(substitute(expression({
misc$R = .R
misc$links = c(location = .llocation, scale = .lscale)
- misc$earg= list(location= .elocation, scale= .escale )
+ misc$earg = list(location= .elocation, scale= .escale )
misc$mpv = .mpv
misc$true.mu = !length( .percentiles) # @fitted is not a true mu
misc$percentiles = .percentiles
- }), list( .llocation=llocation, .lscale=lscale, .percentiles=percentiles,
+ }), list( .llocation = llocation, .lscale = lscale, .percentiles = percentiles,
.elocation = elocation, .escale = escale,
.mpv=mpv, .R=R ))),
- vfamily=c("gumbel", "vextremes"),
- loglikelihood=eval(substitute(
- function(mu,y,w, residuals=FALSE, eta, extra=NULL) {
- loc = eta2theta(eta[,1], .llocation, earg= .elocation)
- sigma = eta2theta(eta[,2], .lscale, earg= .escale )
+ vfamily = c("gumbel", "vextremes"),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ loc = eta2theta(eta[,1], .llocation, earg = .elocation)
+ sigma = eta2theta(eta[,2], .lscale, earg = .escale )
r.vec = rowSums(cbind(!is.na(y)))
yiri = y[cbind(1:nrow(y),r.vec)]
ans = -r.vec * log(sigma) - exp( -(yiri-loc)/sigma )
@@ -864,36 +878,37 @@ pgumbel = function(q, location=0, scale=1) {
index = (jay <= r.vec)
ans[index] = ans[index] - (y[index,jay]-loc[index]) / sigma[index]
}
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
sum(w * ans)
}
- }, list( .llocation=llocation, .lscale=lscale,
+ }, list( .llocation = llocation, .lscale = lscale,
.elocation = elocation, .escale = escale ))),
- deriv=eval(substitute(expression({
- loc = eta2theta(eta[,1], .llocation, earg= .elocation)
- sigma = eta2theta(eta[,2], .lscale, earg= .escale )
+ deriv = eval(substitute(expression({
+ loc = eta2theta(eta[,1], .llocation, earg = .elocation)
+ sigma = eta2theta(eta[,2], .lscale, earg = .escale )
r.vec = rowSums(cbind(!is.na(y)))
yiri = y[cbind(1:nrow(y),r.vec)]
- yi.bar = rowMeans(y, na.rm=TRUE)
+ yi.bar = rowMeans(y, na.rm = TRUE)
temp2 = (yiri - loc) / sigma
term2 = exp(-temp2)
- dloc.deta = dtheta.deta(loc, .llocation, earg= .elocation)
- dsigma.deta = dtheta.deta(sigma, .lscale, earg= .escale )
+ dloc.deta = dtheta.deta(loc, .llocation, earg = .elocation)
+ dsigma.deta = dtheta.deta(sigma, .lscale, earg = .escale )
dl.dloc = (r.vec - term2) / sigma
- dl.dsigma = (rowSums((y - loc) / sigma, na.rm=TRUE) - r.vec -
+ dl.dsigma = (rowSums((y - loc) / sigma, na.rm = TRUE) - r.vec -
temp2 * term2) / sigma
w * cbind(dl.dloc * dloc.deta, dl.dsigma * dsigma.deta)
- }), list( .llocation=llocation, .lscale=lscale,
+ }), list( .llocation = llocation, .lscale = lscale,
.elocation = elocation, .escale = escale ))),
- weight=eval(substitute(expression({
+ weight = eval(substitute(expression({
temp6 = digamma(r.vec) # , integer=T
temp5 = digamma(1:max(r.vec)) # , integer=T
- temp5 = matrix(temp5, n, max(r.vec), byrow=TRUE)
+ temp5 = matrix(temp5, n, max(r.vec), byrow = TRUE)
temp5[col(temp5) > r.vec] = 0
temp5 = temp5 %*% rep(1, ncol(temp5))
- wz = matrix(as.numeric(NA), n, dimm(M=2)) # 3=dimm(M=2)
+ wz = matrix(as.numeric(NA), n, dimm(M = 2)) # 3=dimm(M = 2)
wz[,iam(1,1,M)] = r.vec / sigma^2
wz[,iam(2,1,M)] = -(1 + r.vec * temp6) / sigma^2
wz[,iam(2,2,M)] = (2*(r.vec+1)*temp6 + r.vec*(trigamma(r.vec) +
@@ -902,21 +917,21 @@ pgumbel = function(q, location=0, scale=1) {
wz[,iam(2,1,M)] = wz[,iam(2,1,M)] * dsigma.deta * dloc.deta
wz[,iam(2,2,M)] = wz[,iam(2,2,M)] * dsigma.deta^2
w * wz
- }), list( .lscale=lscale ))))
+ }), list( .lscale = lscale ))))
}
-rgpd = function(n, location=0, scale=1, shape=0) {
+rgpd <- function(n, location = 0, scale = 1, shape = 0) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
stop("bad input for argument 'n'") else n
if (!is.Numeric(location)) stop("bad input for argument 'location'")
if (!is.Numeric(shape)) stop("bad input for argument 'shape'")
ans = numeric(use.n)
- shape = rep(shape, len=use.n); location = rep(location, len=use.n);
- scale = rep(scale, len=use.n)
+ shape = rep(shape, len = use.n); location = rep(location, len = use.n);
+ scale = rep(scale, len = use.n)
scase = abs(shape) < sqrt(.Machine$double.eps)
nscase = sum(scase)
if (use.n - nscase)
@@ -930,23 +945,23 @@ rgpd = function(n, location=0, scale=1, shape=0) {
-dgpd = function(x, location=0, scale=1, shape=0, log=FALSE,
+dgpd <- function(x, location = 0, scale = 1, shape = 0, log = FALSE,
tolshape0 = sqrt(.Machine$double.eps),
- oobounds.log = -Inf, giveWarning=FALSE) {
+ oobounds.log = -Inf, giveWarning = FALSE) {
if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
rm(log)
if (oobounds.log > 0)
stop("bad input for argument 'oobounds.log'")
- if (!is.Numeric(tolshape0, allow=1, posit=TRUE))
+ if (!is.Numeric(tolshape0, allow = 1, posit = TRUE))
stop("bad input for argument 'tolshape0'")
L = max(length(x), length(location), length(scale), length(shape))
- shape = rep(shape, len=L); location = rep(location, len=L);
- scale = rep(scale, len=L);
- x = rep(x, len=L)
+ shape = rep(shape, len = L); location = rep(location, len = L);
+ scale = rep(scale, len = L);
+ x = rep(x, len = L)
- logdensity = rep(log(0), len=L)
+ logdensity = rep(log(0), len = L)
scase = abs(shape) < tolshape0
nscase = sum(scase)
if (L - nscase) {
@@ -982,16 +997,16 @@ dgpd = function(x, location=0, scale=1, shape=0, log=FALSE,
-pgpd = function(q, location=0, scale=1, shape=0) {
+pgpd <- function(q, location = 0, scale = 1, shape = 0) {
if (!is.Numeric(q)) stop("bad input for argument 'q'")
if (!is.Numeric(location)) stop("bad input for argument 'location'")
if (!is.Numeric(shape)) stop("bad input for argument 'shape'")
use.n = max(length(q), length(location), length(scale), length(shape))
ans = numeric(use.n)
- shape = rep(shape, len=use.n); location = rep(location, len=use.n);
- scale = rep(scale, len=use.n);
- q = rep(q-location, len=use.n) # Note the centering, careful with dgumbel()!
+ shape = rep(shape, len = use.n); location = rep(location, len = use.n);
+ scale = rep(scale, len = use.n);
+ q = rep(q-location, len = use.n) # Note the centering, careful with dgumbel()!
scase = abs(shape) < sqrt(.Machine$double.eps)
nscase = sum(scase)
if (use.n - nscase) {
@@ -1009,12 +1024,12 @@ pgpd = function(q, location=0, scale=1, shape=0) {
ans
}
-qgpd = function(p, location=0, scale=1, shape=0) {
+qgpd <- function(p, location = 0, scale = 1, shape = 0) {
use.n = max(length(p), length(location), length(scale), length(shape))
ans = numeric(use.n)
- shape = rep(shape, len=use.n); location = rep(location, len=use.n);
- scale = rep(scale, len=use.n); p = rep(p, len=use.n)
+ shape = rep(shape, len = use.n); location = rep(location, len = use.n);
+ scale = rep(scale, len = use.n); p = rep(p, len = use.n)
scase = abs(shape) < sqrt(.Machine$double.eps)
nscase = sum(scase)
if (use.n - nscase) {
@@ -1040,23 +1055,23 @@ qgpd = function(p, location=0, scale=1, shape=0) {
- gpd = function(threshold=0,
- lscale="loge",
- lshape="logoff",
+ gpd <- function(threshold=0,
+ lscale = "loge",
+ lshape = "logoff",
escale = list(),
- eshape = if (lshape=="logoff") list(offset=0.5) else
- if (lshape=="elogit") list(min=-0.5, max=0.5) else NULL,
+ eshape = if (lshape == "logoff") list(offset=0.5) else
+ if (lshape == "elogit") list(min=-0.5, max=0.5) else NULL,
percentiles=c(90,95),
- iscale=NULL,
- ishape=NULL,
- tolshape0=0.001, giveWarning=TRUE,
- method.init=1,
+ iscale = NULL,
+ ishape = NULL,
+ tolshape0 = 0.001, giveWarning = TRUE,
+ method.init = 1,
zero=2) {
if (!is.logical(giveWarning) || length(giveWarning) != 1)
stop("bad input for argument 'giveWarning'")
if (!is.Numeric(threshold))
stop("bad input for argument 'threshold'")
- if (!is.Numeric(method.init, allow=1, posit=TRUE, integer=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, posit = TRUE, integer = TRUE) ||
method.init > 2.5)
stop("argument 'method.init' must be 1 or 2")
if (mode(lscale) != "character" && mode(lscale) != "name")
@@ -1064,24 +1079,24 @@ qgpd = function(p, location=0, scale=1, shape=0) {
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
if (length(percentiles) &&
- (!is.Numeric(percentiles, posit=TRUE) || max(percentiles) >= 100))
+ (!is.Numeric(percentiles, posit = TRUE) || max(percentiles) >= 100))
stop("bad input for argument 'percentiles'")
- if (!is.Numeric(tolshape0, allow=1, posit=TRUE) || tolshape0 > 0.1)
+ if (!is.Numeric(tolshape0, allow = 1, posit = TRUE) || tolshape0 > 0.1)
stop("bad input for argument 'tolshape0'")
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(escale)) escale = list()
if (!is.list(eshape)) eshape = list()
new("vglmff",
- blurb=c("Generalized Pareto Distribution\n",
+ blurb = c("Generalized Pareto Distribution\n",
"Links: ",
- namesof("scale", link=lscale, earg= escale ), ", ",
- namesof("shape", link=lshape, earg= eshape)),
+ namesof("scale", link = lscale, earg = escale ), ", ",
+ namesof("shape", link = lshape, earg = eshape)),
constraints=eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(as.matrix(y)) != 1)
stop("response must be a vector or one-column matrix")
Threshold = if (is.Numeric( .threshold)) .threshold else 0
@@ -1091,8 +1106,8 @@ qgpd = function(p, location=0, scale=1, shape=0) {
ystar = y - Threshold # Operate on ystar
extra$threshold = Threshold
predictors.names=
- c(namesof("scale", .lscale, earg= .escale, short=TRUE),
- namesof("shape", .lshape, earg= .eshape, short=TRUE ))
+ c(namesof("scale", .lscale, earg = .escale, short = TRUE),
+ namesof("shape", .lshape, earg = .eshape, short = TRUE ))
if (!length(etastart)) {
meany = mean(ystar)
vary = var(ystar)
@@ -1101,7 +1116,7 @@ qgpd = function(p, location=0, scale=1, shape=0) {
0.5 * (1 - median(ystar)^2 / vary)
}
init.sig = if (length( .iscale)) .iscale else {
- if (.method.init==1) 0.5*meany*(meany^2/vary + 1) else
+ if (.method.init == 1) 0.5*meany*(meany^2/vary + 1) else
abs(1-init.xi) * median(ystar)
}
init.sig[init.sig <= 0] = 0.01 # sigma > 0
@@ -1111,96 +1126,97 @@ qgpd = function(p, location=0, scale=1, shape=0) {
init.sig = rep(init.sig, leng=length(y))
init.xi = rep(init.xi, leng=length(y))
- etastart = cbind(theta2eta(init.sig, .lscale, earg= .escale ),
- theta2eta(init.xi, .lshape, earg= .eshape ))
+ etastart = cbind(theta2eta(init.sig, .lscale, earg = .escale ),
+ theta2eta(init.xi, .lshape, earg = .eshape ))
}
- }), list( .lscale=lscale, .lshape=lshape, .threshold=threshold,
- .iscale=iscale, .ishape=ishape,
- .escale=escale, .eshape=eshape,
- .method.init=method.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- sigma = eta2theta(eta[,1], .lscale, earg= .escale )
- xi = eta2theta(eta[,2], .lshape, earg= .eshape )
+ }), list( .lscale = lscale, .lshape = lshape, .threshold=threshold,
+ .iscale = iscale, .ishape = ishape,
+ .escale = escale, .eshape = eshape,
+ .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ sigma = eta2theta(eta[,1], .lscale, earg = .escale )
+ xi = eta2theta(eta[,2], .lshape, earg = .eshape )
cent = .percentiles
lp = length(cent) # NULL means lp==0 and the mean is returned
Threshold = if (is.Numeric( .threshold)) .threshold else 0
if (lp) {
fv = matrix(as.numeric(NA), nrow(eta), lp)
- iszero = (abs(xi) < .tolshape0)
+ is.zero = (abs(xi) < .tolshape0)
for(ii in 1:lp) {
temp = 1-cent[ii]/100
- fv[!iszero,ii] = Threshold + (temp^(-xi[!iszero]) -1) *
- sigma[!iszero] / xi[!iszero]
- fv[ iszero,ii] = Threshold - sigma[iszero] * log(temp)
+ fv[!is.zero,ii] = Threshold + (temp^(-xi[!is.zero]) -1) *
+ sigma[!is.zero] / xi[!is.zero]
+ fv[ is.zero,ii] = Threshold - sigma[is.zero] * log(temp)
}
dimnames(fv) = list(dimnames(eta)[[1]],
- paste(as.character(.percentiles), "%", sep=""))
+ paste(as.character(.percentiles), "%", sep = ""))
} else {
fv = Threshold + sigma / (1 - xi) # This is the mean, E(Y)
fv[xi >= 1] = NA # Mean exists only if xi < 1.
}
fv
- }, list( .lscale=lscale, .lshape=lshape, .threshold=threshold,
- .escale=escale, .eshape=eshape,
- .tolshape0=tolshape0, .percentiles=percentiles ))),
- last=eval(substitute(expression({
+ }, list( .lscale = lscale, .lshape = lshape, .threshold=threshold,
+ .escale = escale, .eshape = eshape,
+ .tolshape0 = tolshape0, .percentiles = percentiles ))),
+ last = eval(substitute(expression({
misc$links = c(scale = .lscale, shape = .lshape)
misc$true.mu = FALSE # @fitted is not a true mu
- misc$earg= list(scale= .escale , shape= .eshape )
+ misc$earg = list(scale= .escale , shape= .eshape )
misc$percentiles = .percentiles
misc$threshold = if (is.Numeric( .threshold)) .threshold else 0
misc$expected = TRUE
misc$tolshape0 = .tolshape0
if (any(xi < -0.5))
warning("some values of the shape parameter are less than -0.5")
- }), list( .lscale=lscale, .lshape=lshape, .threshold=threshold,
- .escale=escale, .eshape=eshape,
- .tolshape0=tolshape0, .percentiles=percentiles ))),
- loglikelihood=eval(substitute(
- function(mu,y,w, residuals=FALSE, eta, extra=NULL) {
- sigma = eta2theta(eta[,1], .lscale, earg= .escale )
- xi = eta2theta(eta[,2], .lshape, earg= .eshape )
+ }), list( .lscale = lscale, .lshape = lshape, .threshold=threshold,
+ .escale = escale, .eshape = eshape,
+ .tolshape0 = tolshape0, .percentiles = percentiles ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ sigma = eta2theta(eta[,1], .lscale, earg = .escale )
+ xi = eta2theta(eta[,2], .lshape, earg = .eshape )
Threshold = extra$threshold
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
sum(w * dgpd(x=y, location=Threshold, scale=sigma, shape=xi,
tolshape0 = .tolshape0, giveWarning= .giveWarning,
- log=TRUE, oobounds.log = -1.0e04))
+ log = TRUE, oobounds.log = -1.0e04))
}
- }, list( .tolshape0=tolshape0, .giveWarning= giveWarning,
- .escale=escale, .eshape=eshape,
- .lscale=lscale, .lshape=lshape ))),
- vfamily=c("gpd", "vextremes"),
- deriv=eval(substitute(expression({
- sigma = eta2theta(eta[,1], .lscale, earg= .escale )
- xi = eta2theta(eta[,2], .lshape, earg= .eshape )
+ }, list( .tolshape0 = tolshape0, .giveWarning= giveWarning,
+ .escale = escale, .eshape = eshape,
+ .lscale = lscale, .lshape = lshape ))),
+ vfamily = c("gpd", "vextremes"),
+ deriv = eval(substitute(expression({
+ sigma = eta2theta(eta[,1], .lscale, earg = .escale )
+ xi = eta2theta(eta[,2], .lshape, earg = .eshape )
Threshold = extra$threshold
ystar = y - Threshold # Operate on ystar
A = 1 + xi*ystar/sigma
mytolerance = .Machine$double.eps
bad <- (A <= mytolerance)
if (any(bad) && any(w[bad] != 0)) {
- cat(sum(w[bad],na.rm=TRUE), # "; ignoring them"
+ cat(sum(w[bad],na.rm = TRUE), # "; ignoring them"
"observations violating boundary constraints\n")
flush.console()
}
- if (any(iszero <- (abs(xi) < .tolshape0))) {
+ if (any(is.zero <- (abs(xi) < .tolshape0))) {
}
- igpd = !iszero & !bad
- iexp = iszero & !bad
- dl.dxi = dl.dsigma = rep(0, len=length(y))
+ igpd = !is.zero & !bad
+ iexp = is.zero & !bad
+ dl.dxi = dl.dsigma = rep(0, len = length(y))
dl.dsigma[igpd] = ((1 + xi[igpd]) * ystar[igpd] / (sigma[igpd] +
xi[igpd]*ystar[igpd]) - 1) / sigma[igpd]
dl.dxi[igpd] = log(A[igpd])/xi[igpd]^2 - (1 + 1/xi[igpd]) *
ystar[igpd] / (A[igpd] * sigma[igpd])
dl.dxi[iexp] = ystar[iexp] *
(0.5*ystar[iexp]/sigma[iexp] - 1) / sigma[iexp]
- dsigma.deta = dtheta.deta(sigma, .lscale, earg= .escale )
- dxi.deta = dtheta.deta(xi, .lshape, earg= .eshape )
+ dsigma.deta = dtheta.deta(sigma, .lscale, earg = .escale )
+ dxi.deta = dtheta.deta(xi, .lshape, earg = .eshape )
w * cbind(dl.dsigma * dsigma.deta, dl.dxi * dxi.deta)
- }), list( .tolshape0=tolshape0, .lscale=lscale,
- .escale=escale, .eshape=eshape,
- .lshape=lshape ))),
- weight=eval(substitute(expression({
+ }), list( .tolshape0 = tolshape0, .lscale = lscale,
+ .escale = escale, .eshape = eshape,
+ .lshape = lshape ))),
+ weight = eval(substitute(expression({
n <- length(w) # needed!
wz = matrix(as.numeric(NA), n, 3)
wz[,iam(1,1,M)] = 1 / ((1+2*xi) * sigma^2)
@@ -1210,16 +1226,16 @@ qgpd = function(p, location=0, scale=1, shape=0) {
wz[,iam(2,2,M)] = w * wz[,iam(2,2,M)] * dxi.deta^2
wz[,iam(1,2,M)] = w * wz[,iam(1,2,M)] * dsigma.deta * dxi.deta
wz
- }), list( .lscale=lscale ))))
+ }), list( .lscale = lscale ))))
}
-meplot.default = function(y, main="Mean Excess Plot",
+meplot.default <- function(y, main = "Mean Excess Plot",
xlab="Threshold", ylab="Mean Excess", lty=c(2,1:2),
- conf=0.95, col=c("blue","black","blue"), type="l", ...) {
+ conf=0.95, col=c("blue","black","blue"), type = "l", ...) {
if (!is.Numeric(y)) stop("bad input for argument 'y'")
n = length(y)
sy = sort(y)
@@ -1235,13 +1251,13 @@ meplot.default = function(y, main="Mean Excess Plot",
invisible(list(threshold=sy, meanExcess=me))
}
-meplot.vlm = function(object, ...) {
+meplot.vlm <- function(object, ...) {
if (!length(y <- object at y)) stop("y slot is empty")
ans = meplot(as.numeric(y), ...)
invisible(ans)
}
-if(!isGeneric("meplot"))
+if (!isGeneric("meplot"))
setGeneric("meplot", function(object, ...) standardGeneric("meplot"))
setMethod("meplot", "numeric",
@@ -1254,8 +1270,8 @@ setMethod("meplot", "vlm",
-guplot.default = function(y, main="Gumbel Plot",
- xlab="Reduced data", ylab="Observed data", type="p", ...) {
+guplot.default <- function(y, main = "Gumbel Plot",
+ xlab="Reduced data", ylab="Observed data", type = "p", ...) {
if (!is.Numeric(y)) stop("bad input for argument 'y'")
n = length(y)
sy = sort(y)
@@ -1264,13 +1280,13 @@ guplot.default = function(y, main="Gumbel Plot",
invisible(list(x=x, y=sy))
}
-guplot.vlm = function(object, ...) {
+guplot.vlm <- function(object, ...) {
if (!length(y <- object at y)) stop("y slot is empty")
ans = guplot(as.numeric(y), ...)
invisible(ans)
}
-if(!isGeneric("guplot"))
+if (!isGeneric("guplot"))
setGeneric("guplot", function(object, ...) standardGeneric("guplot"))
setMethod("guplot", "numeric",
@@ -1286,13 +1302,13 @@ setMethod("guplot", "vlm",
- egumbel = function(llocation="identity",
- lscale="loge",
+ egumbel <- function(llocation = "identity",
+ lscale = "loge",
elocation = list(),
escale = list(),
- iscale=NULL,
+ iscale = NULL,
R=NA, percentiles=c(95,99),
- mpv=FALSE, zero=NULL)
+ mpv = FALSE, zero = NULL)
{
if (mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
@@ -1301,54 +1317,54 @@ setMethod("guplot", "vlm",
if (!is.logical(mpv) || length(mpv) != 1)
stop("bad input for argument 'mpv'")
if (length(percentiles) &&
- (!is.Numeric(percentiles, posit=TRUE) || max(percentiles) >= 100))
+ (!is.Numeric(percentiles, posit = TRUE) || max(percentiles) >= 100))
stop("bad input for argument 'percentiles'")
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
- if (length(iscale) && !is.Numeric(iscale, posit=TRUE))
+ if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
stop("bad input for argument 'iscale'")
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
new("vglmff",
- blurb=c("Gumbel distribution (univariate response)\n\n",
+ blurb = c("Gumbel distribution (univariate response)\n\n",
"Links: ",
- namesof("location", llocation, earg= elocation, tag= TRUE), ", ",
- namesof("scale", lscale, earg= escale , tag= TRUE), "\n",
+ namesof("location", llocation, earg = elocation, tag = TRUE), ", ",
+ namesof("scale", lscale, earg = escale , tag = TRUE), "\n",
"Mean: location + scale*0.5772..\n",
"Variance: pi^2 * scale^2 / 6"),
constraints=eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
y = cbind(y)
if (ncol(y) > 1)
stop("Use gumbel() to handle multivariate responses")
if (min(y) <= 0)
stop("all response values must be positive")
predictors.names =
- c(namesof("location", .llocation, earg= .elocation, tag= FALSE),
- namesof("scale", .lscale, earg= .escale , tag= FALSE))
+ c(namesof("location", .llocation, earg = .elocation, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale , tag = FALSE))
extra$R = .R
extra$mpv = .mpv
extra$percentiles = .percentiles
if (!length(etastart)) {
- sc.init = if (is.Numeric( .iscale, posit=TRUE))
+ sc.init = if (is.Numeric( .iscale, posit = TRUE))
.iscale else 1.5 * (0.01+sqrt(var(y)*6)) / pi
- sc.init = rep(sc.init, len=n)
+ sc.init = rep(sc.init, len = n)
EulerM = -digamma(1)
loc.init = (y - sc.init * EulerM)
- etastart = cbind(theta2eta(loc.init, .llocation, earg= .elocation),
- theta2eta(sc.init, .lscale, earg= .escale ))
+ etastart = cbind(theta2eta(loc.init, .llocation, earg = .elocation),
+ theta2eta(sc.init, .lscale, earg = .escale ))
}
- }), list( .llocation=llocation, .lscale=lscale, .iscale=iscale,
- .elocation=elocation, .escale=escale,
- .R=R, .mpv=mpv, .percentiles=percentiles ))),
- inverse=eval(substitute( function(eta, extra=NULL) {
- loc = eta2theta(eta[,1], .llocation, earg= .elocation)
- sigma = eta2theta(eta[,2], .lscale, earg= .escale )
+ }), list( .llocation = llocation, .lscale = lscale, .iscale = iscale,
+ .elocation=elocation, .escale = escale,
+ .R=R, .mpv=mpv, .percentiles = percentiles ))),
+ inverse = eval(substitute( function(eta, extra = NULL) {
+ loc = eta2theta(eta[,1], .llocation, earg = .elocation)
+ sigma = eta2theta(eta[,2], .lscale, earg = .escale )
EulerM = -digamma(1)
Percentiles = extra$percentiles
mpv = extra$mpv
@@ -1364,51 +1380,52 @@ setMethod("guplot", "vlm",
}
if (mpv)
mu[,ncol(mu)] = loc - sigma * log(log(2))
- dmn2 = if (lp>=1) paste(as.character(Percentiles), "%", sep="") else NULL
+ dmn2 = if (lp>=1) paste(as.character(Percentiles), "%", sep = "") else NULL
if (mpv)
dmn2 = c(dmn2, "MPV")
dimnames(mu) = list(dimnames(eta)[[1]], dmn2)
mu
- }, list( .llocation=llocation, .lscale=lscale,
- .elocation=elocation, .escale=escale ))),
- last=eval(substitute(expression({
+ }, list( .llocation = llocation, .lscale = lscale,
+ .elocation=elocation, .escale = escale ))),
+ last = eval(substitute(expression({
misc$link = c(location= .llocation, scale = .lscale)
- misc$earg= list(location= .elocation, scale= .escale)
+ misc$earg = list(location= .elocation, scale= .escale)
misc$true.mu = !length( .percentiles) # @fitted is not a true mu
misc$R = .R
misc$mpv = .mpv
misc$percentiles = .percentiles
- }), list( .llocation=llocation, .lscale=lscale, .mpv=mpv,
- .elocation=elocation, .escale=escale,
- .R=R, .percentiles=percentiles ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta,extra=NULL) {
- loc = eta2theta(eta[,1], .llocation, earg= .elocation)
- sc = eta2theta(eta[,2], .lscale, earg= .escale )
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dgumbel(x=y, location=loc, scale=sc, log=TRUE))
+ }), list( .llocation = llocation, .lscale = lscale, .mpv=mpv,
+ .elocation=elocation, .escale = escale,
+ .R=R, .percentiles = percentiles ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta,extra = NULL) {
+ loc = eta2theta(eta[,1], .llocation, earg = .elocation)
+ sc = eta2theta(eta[,2], .lscale, earg = .escale )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * dgumbel(x=y, location = loc, scale=sc, log = TRUE))
}
- }, list( .llocation=llocation, .lscale=lscale,
- .elocation=elocation, .escale=escale ))),
- vfamily="egumbel",
- deriv=eval(substitute(expression({
- loc = eta2theta(eta[,1], .llocation, earg= .elocation)
- sc = eta2theta(eta[,2], .lscale, earg= .escale )
+ }, list( .llocation = llocation, .lscale = lscale,
+ .elocation=elocation, .escale = escale ))),
+ vfamily = "egumbel",
+ deriv = eval(substitute(expression({
+ loc = eta2theta(eta[,1], .llocation, earg = .elocation)
+ sc = eta2theta(eta[,2], .lscale, earg = .escale )
zedd = (y-loc) / sc
temp2 = -expm1(-zedd)
dl.dloc = temp2 / sc
dl.dsc = -1/sc + temp2 * zedd / sc
- dloc.deta = dtheta.deta(loc, .llocation, earg= .elocation)
- dsc.deta = dtheta.deta(sc, .lscale, earg= .escale )
+ dloc.deta = dtheta.deta(loc, .llocation, earg = .elocation)
+ dsc.deta = dtheta.deta(sc, .lscale, earg = .escale )
w * cbind(dl.dloc * dloc.deta, dl.dsc * dsc.deta)
- }), list( .llocation=llocation, .lscale=lscale,
- .elocation=elocation, .escale=escale ))),
+ }), list( .llocation = llocation, .lscale = lscale,
+ .elocation=elocation, .escale = escale ))),
weight=expression({
digamma1 = digamma(1)
ed2l.dsc2 = ((2+digamma1)*digamma1 + trigamma(1) + 1) / sc^2
ed2l.dloc2 = 1 / sc^2
ed2l.dscloc = -(1 + digamma1) / sc^2
- wz = matrix(as.numeric(NA), n, dimm(M=2))
+ wz = matrix(as.numeric(NA), n, dimm(M = 2))
wz[,iam(1,1,M)] = ed2l.dloc2 * dloc.deta^2
wz[,iam(2,2,M)] = ed2l.dsc2 * dsc.deta^2
wz[,iam(1,2,M)] = ed2l.dscloc * dloc.deta * dsc.deta
@@ -1419,11 +1436,11 @@ setMethod("guplot", "vlm",
- cgumbel = function(llocation="identity",
- lscale="loge",
+ cgumbel <- function(llocation = "identity",
+ lscale = "loge",
elocation = list(),
- escale = list(), iscale=NULL,
- mean=TRUE, percentiles=NULL, zero=2)
+ escale = list(), iscale = NULL,
+ mean = TRUE, percentiles = NULL, zero=2)
{
if (mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
@@ -1431,54 +1448,54 @@ setMethod("guplot", "vlm",
lscale = as.character(substitute(lscale))
if (!is.logical(mean) || length(mean) != 1)
stop("mean must be a single logical value")
- if (!mean && (!is.Numeric(percentiles, posit=TRUE) ||
+ if (!mean && (!is.Numeric(percentiles, posit = TRUE) ||
any(percentiles>=100)))
- stop("valid percentiles values must be given when mean=FALSE")
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("valid percentiles values must be given when mean = FALSE")
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
new("vglmff",
- blurb=c("Censored Gumbel distribution\n\n",
+ blurb = c("Censored Gumbel distribution\n\n",
"Links: ",
- namesof("location", llocation, earg= elocation, tag= TRUE), ", ",
- namesof("scale", lscale, earg= escale, tag= TRUE),
+ namesof("location", llocation, earg = elocation, tag = TRUE), ", ",
+ namesof("scale", lscale, earg = escale, tag = TRUE),
"\n",
"Mean: location + scale*0.5772..\n",
"Variance: pi^2 * scale^2 / 6"),
constraints=eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
y = cbind(y)
if (ncol(y) > 1)
stop("Use gumbel.block() to handle multivariate responses")
if (any(y) <= 0)
stop("all response values must be positive")
- if (!length(extra$leftcensored)) extra$leftcensored = rep(FALSE, len=n)
- if (!length(extra$rightcensored)) extra$rightcensored = rep(FALSE, len=n)
+ if (!length(extra$leftcensored)) extra$leftcensored = rep(FALSE, len = n)
+ if (!length(extra$rightcensored)) extra$rightcensored = rep(FALSE, len = n)
if (any(extra$rightcensored & extra$leftcensored))
stop("some observations are both right and left censored!")
predictors.names =
- c(namesof("location", .llocation, earg= .elocation, tag= FALSE),
- namesof("scale", .lscale, earg= .escale , tag= FALSE))
+ c(namesof("location", .llocation, earg = .elocation, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale , tag = FALSE))
if (!length(etastart)) {
- sc.init = if (is.Numeric( .iscale, posit=TRUE))
+ sc.init = if (is.Numeric( .iscale, posit = TRUE))
.iscale else 1.1 * sqrt(var(y) * 6 ) / pi
- sc.init = rep(sc.init, len=n)
+ sc.init = rep(sc.init, len = n)
EulerM = -digamma(1)
loc.init = (y - sc.init * EulerM)
loc.init[loc.init <= 0] = min(y)
- etastart = cbind(theta2eta(loc.init, .llocation, earg= .elocation ),
- theta2eta(sc.init, .lscale, earg= .escale ))
+ etastart = cbind(theta2eta(loc.init, .llocation, earg = .elocation ),
+ theta2eta(sc.init, .lscale, earg = .escale ))
}
- }), list( .lscale=lscale, .iscale=iscale,
+ }), list( .lscale = lscale, .iscale = iscale,
.llocation = llocation,
.elocation = elocation, .escale = escale ))),
- inverse=eval(substitute( function(eta, extra=NULL) {
+ inverse = eval(substitute( function(eta, extra = NULL) {
loc = eta2theta(eta[,1], .llocation)
sc = eta2theta(eta[,2], .lscale)
EulerM = -digamma(1)
@@ -1489,27 +1506,27 @@ setMethod("guplot", "vlm",
ci = -log( .percentiles[ii] / 100)
mu[,ii] = loc - sc * log(ci)
}
- dmn2 = paste(as.character(.percentiles), "%", sep="")
+ dmn2 = paste(as.character(.percentiles), "%", sep = "")
dimnames(mu) <- list(dimnames(eta)[[1]], dmn2)
mu
}
- }, list( .lscale=lscale, .percentiles=percentiles,
+ }, list( .lscale = lscale, .percentiles = percentiles,
.llocation = llocation,
.elocation = elocation, .escale = escale ,
.mean=mean ))),
- last=eval(substitute(expression({
+ last = eval(substitute(expression({
misc$link = c(location= .llocation, scale = .lscale)
- misc$earg= list(location= .elocation, scale= .escale )
+ misc$earg = list(location= .elocation, scale= .escale )
misc$true.mu = .mean # if FALSE then @fitted is not a true mu
misc$percentiles = .percentiles
- }), list( .lscale=lscale, .mean=mean,
+ }), list( .lscale = lscale, .mean=mean,
.llocation = llocation,
.elocation = elocation, .escale = escale ,
- .percentiles=percentiles ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta,extra=NULL) {
- loc = eta2theta(eta[,1], .llocation, earg= .elocation )
- sc = eta2theta(eta[,2], .lscale, earg= .escale )
+ .percentiles = percentiles ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta,extra = NULL) {
+ loc = eta2theta(eta[,1], .llocation, earg = .elocation )
+ sc = eta2theta(eta[,2], .lscale, earg = .escale )
zedd = (y-loc) / sc
cenL = extra$leftcensored
@@ -1519,25 +1536,26 @@ setMethod("guplot", "vlm",
ell1 = -log(sc[cen0]) - zedd[cen0] - exp(-zedd[cen0])
ell2 = log(Fy[cenL])
ell3 = log1p(-Fy[cenU])
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
sum(w[cen0] * ell1) + sum(w[cenL] * ell2) + sum(w[cenU] * ell3)
- }, list( .lscale=lscale,
+ }, list( .lscale = lscale,
.llocation = llocation,
.elocation = elocation, .escale = escale ))),
- vfamily="cgumbel",
- deriv=eval(substitute(expression({
+ vfamily = "cgumbel",
+ deriv = eval(substitute(expression({
cenL = extra$leftcensored
cenU = extra$rightcensored
cen0 = !cenL & !cenU # uncensored obsns
- loc = eta2theta(eta[,1], .llocation, earg= .elocation )
- sc = eta2theta(eta[,2], .lscale, earg= .escale )
+ loc = eta2theta(eta[,1], .llocation, earg = .elocation )
+ sc = eta2theta(eta[,2], .lscale, earg = .escale )
zedd = (y-loc) / sc
temp2 = -expm1(-zedd)
dl.dloc = temp2 / sc
dl.dsc = -1/sc + temp2 * zedd / sc
- dloc.deta = dtheta.deta(loc, .llocation, earg= .elocation )
- dsc.deta = dtheta.deta(sc, .lscale, earg= .escale )
+ dloc.deta = dtheta.deta(loc, .llocation, earg = .elocation )
+ dsc.deta = dtheta.deta(sc, .lscale, earg = .escale )
ezedd = exp(-zedd)
Fy = exp(-ezedd)
@@ -1552,7 +1570,7 @@ setMethod("guplot", "vlm",
dl.dsc[cenU] = -dFy.dsc[cenU] / (1-Fy[cenU])
}
w * cbind(dl.dloc * dloc.deta, dl.dsc * dsc.deta)
- }), list( .lscale=lscale,
+ }), list( .lscale = lscale,
.llocation = llocation,
.elocation = elocation, .escale = escale ))),
weight=expression({
@@ -1563,7 +1581,7 @@ setMethod("guplot", "vlm",
ed2l.dsc2 = ((2+digamma1)*digamma1 + trigamma(1) + 1) / sc^2
ed2l.dloc2 = 1 / sc^2
ed2l.dlocsc = -(1 + digamma1) / sc^2
- wz = matrix(as.numeric(NA), n, dimm(M=2))
+ wz = matrix(as.numeric(NA), n, dimm(M = 2))
wz[,iam(1,1,M)] = A2 * ed2l.dloc2 * dloc.deta^2
wz[,iam(2,2,M)] = A2 * ed2l.dsc2 * dsc.deta^2
wz[,iam(1,2,M)] = A2 * ed2l.dlocsc * dloc.deta * dsc.deta
@@ -1588,14 +1606,14 @@ setMethod("guplot", "vlm",
-dfrechet = function(x, location=0, scale=1, shape, log=FALSE) {
+dfrechet <- function(x, location = 0, scale = 1, shape, log = FALSE) {
if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
rm(log)
L = max(length(x), length(scale), length(shape))
- x = rep(x, len=L); scale = rep(scale, len=L); shape = rep(shape, len=L);
- logdensity = rep(log(0), len=L)
+ x = rep(x, len = L); scale = rep(scale, len = L); shape = rep(shape, len = L);
+ logdensity = rep(log(0), len = L)
xok = (x > location)
rzedd = scale / (x - location)
logdensity[xok] = log(shape[xok]) - (rzedd[xok]^shape[xok]) +
@@ -1605,42 +1623,42 @@ dfrechet = function(x, location=0, scale=1, shape, log=FALSE) {
if (log.arg) logdensity else exp(logdensity)
}
-pfrechet = function(q, location=0, scale=1, shape) {
- if (!is.Numeric(scale, posit=TRUE)) stop("scale must be positive")
- if (!is.Numeric(shape, posit=TRUE)) stop("shape must be positive")
+pfrechet <- function(q, location = 0, scale = 1, shape) {
+ if (!is.Numeric(scale, posit = TRUE)) stop("scale must be positive")
+ if (!is.Numeric(shape, posit = TRUE)) stop("shape must be positive")
rzedd = scale / (q - location)
ans = exp(-(rzedd^shape))
ans[q <= location] = 0
ans
}
-qfrechet = function(p, location=0, scale=1, shape) {
- if (!is.Numeric(p, posit=TRUE) || any(p >= 1)) stop("0 < p < 1 is required")
- if (!is.Numeric(scale, posit=TRUE)) stop("scale must be positive")
- if (!is.Numeric(shape, posit=TRUE)) stop("shape must be positive")
+qfrechet <- function(p, location = 0, scale = 1, shape) {
+ if (!is.Numeric(p, posit = TRUE) || any(p >= 1)) stop("0 < p < 1 is required")
+ if (!is.Numeric(scale, posit = TRUE)) stop("scale must be positive")
+ if (!is.Numeric(shape, posit = TRUE)) stop("shape must be positive")
location + scale * (-log(p))^(-1/shape)
}
-rfrechet = function(n, location=0, scale=1, shape) {
- if (!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE))
+rfrechet <- function(n, location = 0, scale = 1, shape) {
+ if (!is.Numeric(n, posit = TRUE, allow = 1, integ = TRUE))
stop("bad input for argument 'n'")
- if (!is.Numeric(scale, posit=TRUE)) stop("scale must be positive")
- if (!is.Numeric(shape, posit=TRUE)) stop("shape must be positive")
+ if (!is.Numeric(scale, posit = TRUE)) stop("scale must be positive")
+ if (!is.Numeric(shape, posit = TRUE)) stop("shape must be positive")
location + scale * (-log(runif(n)))^(-1/shape)
}
-frechet2.control <- function(save.weight=TRUE, ...)
+frechet2.control <- function(save.weight = TRUE, ...)
{
- list(save.weight=save.weight)
+ list(save.weight = save.weight)
}
- frechet2 = function(location=0,
- lscale="loge",
- lshape="loglog",
+ frechet2 <- function(location = 0,
+ lscale = "loge",
+ lshape = "loglog",
escale = list(),
eshape = list(),
- iscale=NULL, ishape=3,
- zero=NULL)
+ iscale = NULL, ishape=3,
+ zero = NULL)
{
if (!is.Numeric(location))
stop("bad input for argument 'location'")
@@ -1652,69 +1670,70 @@ frechet2.control <- function(save.weight=TRUE, ...)
if (!is.list(eshape)) eshape = list()
new("vglmff",
- blurb=c("2-parameter Frechet Distribution\n",
+ blurb = c("2-parameter Frechet Distribution\n",
"Links: ",
- namesof("scale", link=lscale, earg=escale ), ", ",
- namesof("shape", link=lshape, earg=eshape )),
+ namesof("scale", link = lscale, earg = escale ), ", ",
+ namesof("shape", link = lshape, earg = eshape )),
constraints=eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
predictors.names =
- c(namesof("scale", .lscale, earg=.escale, short=TRUE),
- namesof("shape", .lshape, earg=.eshape, short=TRUE))
- extra$location = rep( .location, len=n) # stored here
+ c(namesof("scale", .lscale, earg = .escale, short = TRUE),
+ namesof("shape", .lshape, earg = .eshape, short = TRUE))
+ extra$location = rep( .location, len = n) # stored here
if (!length(etastart)) {
- # Initial values for limiting case as xi --> 0, r_i==1
+ # Initial values for limiting case as xi --> 0, r_i == 1
locinit = extra$location
if (any(y <= locinit))
stop("initial values for 'location' are out of range")
- shape.init = if (length( .ishape)) rep( .ishape, len=n) else {
- rep(3.0, len=n) # variance exists if shape>2
+ shape.init = if (length( .ishape)) rep( .ishape, len = n) else {
+ rep(3.0, len = n) # variance exists if shape>2
}
- Scale.init = if (length( .iscale)) rep( .iscale, len=n) else {
+ Scale.init = if (length( .iscale)) rep( .iscale, len = n) else {
if (all(shape.init > 1))
abs( (y-locinit+0.001) / (gamma(1-1/shape.init)) ) else
- rep( 1.0, len=n)
+ rep( 1.0, len = n)
}
- etastart = cbind(theta2eta(Scale.init, .lscale, earg=.escale ),
- theta2eta(shape.init, .lshape, earg=.escale ))
+ etastart = cbind(theta2eta(Scale.init, .lscale, earg = .escale ),
+ theta2eta(shape.init, .lshape, earg = .escale ))
}
- }), list( .lscale=lscale, .lshape=lshape,
- .escale = escale, .eshape= eshape,
- .location=location, .iscale=iscale, .ishape=ishape ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape,
+ .location = location, .iscale = iscale, .ishape = ishape ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
loc = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg= .escale )
- shape = eta2theta(eta[,2], .lshape, earg= .eshape )
- ans = rep(as.numeric(NA), len=length(shape))
+ Scale = eta2theta(eta[,1], .lscale, earg = .escale )
+ shape = eta2theta(eta[,2], .lshape, earg = .eshape )
+ ans = rep(as.numeric(NA), len = length(shape))
ok = shape > 1
ans[ok] = loc[ok] + Scale[ok] * gamma(1 - 1/shape[ok])
ans
- }, list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape ))),
- last=eval(substitute(expression({
- misc$links <- c("scale"= .lscale, "shape"= .lshape)
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
+ last = eval(substitute(expression({
+ misc$links <- c("scale" = .lscale, "shape" = .lshape)
misc$expected = FALSE
misc$BFGS = TRUE
- }), list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape ))),
- loglikelihood=eval(substitute(
- function(mu,y,w, residuals=FALSE, eta, extra=NULL) {
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
loc = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg= .escale )
- shape = eta2theta(eta[,2], .lshape, earg= .eshape )
+ Scale = eta2theta(eta[,1], .lscale, earg = .escale )
+ shape = eta2theta(eta[,2], .lshape, earg = .eshape )
rzedd = Scale / (y-loc)
- if (residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * dfrechet(x=y, location=loc, scale=Scale, shape=shape,
- log=TRUE))
- }, list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape ))),
- vfamily=c("frechet2", "vextremes"),
- deriv=eval(substitute(expression({
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(w * dfrechet(x=y, location = loc, scale=Scale, shape=shape,
+ log = TRUE))
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
+ vfamily = c("frechet2", "vextremes"),
+ deriv = eval(substitute(expression({
loc = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg= .escale )
- shape = eta2theta(eta[,2], .lshape, earg= .eshape )
+ Scale = eta2theta(eta[,1], .lscale, earg = .escale )
+ shape = eta2theta(eta[,2], .lshape, earg = .eshape )
rzedd = Scale / (y-loc) # reciprocial of zedd
dl.dloc = (shape+1)/(y-loc) - (shape / (y-loc)) * (rzedd)^shape
dl.dScale = shape * (1-rzedd^shape) / Scale
@@ -1727,40 +1746,40 @@ frechet2.control <- function(save.weight=TRUE, ...)
etanew = eta
}
derivnew = w *
- cbind(dl.dScale * dtheta.deta(Scale, .lscale, earg= .escale ),
- dl.dshape * dtheta.deta(shape, .lshape, earg= .eshape ))
+ cbind(dl.dScale * dtheta.deta(Scale, .lscale, earg = .escale ),
+ dl.dshape * dtheta.deta(shape, .lshape, earg = .eshape ))
derivnew
- }), list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape ))),
- weight=eval(substitute(expression({
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
+ weight = eval(substitute(expression({
if (iter == 1) {
wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
} else {
wzold = wznew
wznew = qnupdate(w=w, wzold=wzold, dderiv=(derivold - derivnew),
- deta=etanew-etaold, M=M,
+ deta=etanew-etaold, M = M,
trace=trace) # weights incorporated in args
}
wznew
- }), list( .lscale=lscale, .lshape=lshape ))))
+ }), list( .lscale = lscale, .lshape = lshape ))))
}
-frechet3.control <- function(save.weight=TRUE, ...)
+frechet3.control <- function(save.weight = TRUE, ...)
{
- list(save.weight=save.weight)
+ list(save.weight = save.weight)
}
- frechet3 = function(anchor=NULL,
- ldifference="loge",
- lscale="loge",
- lshape="loglog",
+ frechet3 <- function(anchor = NULL,
+ ldifference = "loge",
+ lscale = "loge",
+ lshape = "loglog",
edifference = list(),
escale = list(),
eshape = list(),
- ilocation=NULL, iscale=NULL, ishape=3, zero=NULL,
+ ilocation = NULL, iscale = NULL, ishape=3, zero = NULL,
effpos = .Machine$double.eps^0.75)
{
if (mode(ldifference) != "character" && mode(ldifference) != "name")
@@ -1769,86 +1788,87 @@ frechet3.control <- function(save.weight=TRUE, ...)
lscale <- as.character(substitute(lscale))
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape <- as.character(substitute(lshape))
- if (!is.Numeric(ishape, allo=1, posi=TRUE)) stop("bad input for argument 'ishape'")
+ if (!is.Numeric(ishape, allo=1, posi = TRUE)) stop("bad input for argument 'ishape'")
if (!is.Numeric(effpos, allo=1)|| effpos<0) stop("bad input for argument 'effpos'")
if (!is.list(edifference)) edifference = list()
if (!is.list(escale)) escale = list()
if (!is.list(eshape)) eshape = list()
new("vglmff",
- blurb=c("3-parameter Frechet Distribution\n",
+ blurb = c("3-parameter Frechet Distribution\n",
"Links: ",
- namesof("difference", link=ldifference, earg=edifference), ", ",
- namesof("scale", link=lscale, earg=escale), ", ",
- namesof("shape", link=lshape, earg=eshape)),
+ namesof("difference", link = ldifference, earg = edifference), ", ",
+ namesof("scale", link = lscale, earg = escale), ", ",
+ namesof("shape", link = lshape, earg = eshape)),
constraints=eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
predictors.names =
- c(namesof("difference", .ldifference, earg= .edifference, short=TRUE),
- namesof("scale", .lscale, earg= .escale, short=TRUE),
- namesof("shape", .lshape, earg= .eshape, short=TRUE))
- anchorpt = if (is.Numeric( .anchor, allow=1)) .anchor else min(y)
+ c(namesof("difference", .ldifference, earg = .edifference, short = TRUE),
+ namesof("scale", .lscale, earg = .escale, short = TRUE),
+ namesof("shape", .lshape, earg = .eshape, short = TRUE))
+ anchorpt = if (is.Numeric( .anchor, allow = 1)) .anchor else min(y)
if (min(y) < anchorpt) stop("anchor point is too large")
extra$LHSanchor = anchorpt
if (!length(etastart)) {
- locinit = if (length( .ilocation)) rep( .ilocation, len=n) else
- rep(anchorpt - 0.01*diff(range(y)), len=n)
+ locinit = if (length( .ilocation)) rep( .ilocation, len = n) else
+ rep(anchorpt - 0.01*diff(range(y)), len = n)
if (any(y <= locinit))
stop("initial values for 'location' are out of range")
if (any(anchorpt <= locinit))
stop("require anchor point > initial location parameter value")
- shape.init = if (length( .ishape)) rep( .ishape, len=n) else {
- rep(3.0, len=n) # variance exists if shape>2
+ shape.init = if (length( .ishape)) rep( .ishape, len = n) else {
+ rep(3.0, len = n) # variance exists if shape>2
}
- Scale.init = if (length( .iscale)) rep( .iscale, len=n) else {
+ Scale.init = if (length( .iscale)) rep( .iscale, len = n) else {
if (all(shape.init > 1))
abs( (y-locinit+0.001) / (gamma(1-1/shape.init)) ) else
- rep( 1.0, len=n)
+ rep( 1.0, len = n)
}
etastart = cbind(theta2eta(anchorpt - locinit, .ldifference),
theta2eta(Scale.init, .lscale),
theta2eta(shape.init, .lshape))
}
- }), list( .ldifference=ldifference, .lscale=lscale, .lshape=lshape,
- .edifference=edifference, .escale=escale, .eshape=eshape,
+ }), list( .ldifference = ldifference, .lscale = lscale, .lshape = lshape,
+ .edifference = edifference, .escale = escale, .eshape = eshape,
.anchor=anchor,
- .ilocation=ilocation, .iscale=iscale, .ishape=ishape ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- loc = extra$LHSanchor - eta2theta(eta[,1], .ldifference, earg= .edifference)
- Scale = eta2theta(eta[,2], .lscale, earg= .escale )
- shape = eta2theta(eta[,3], .lshape, earg= .eshape )
- ans = rep(as.numeric(NA), len=length(shape))
+ .ilocation = ilocation, .iscale = iscale, .ishape = ishape ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ loc = extra$LHSanchor - eta2theta(eta[,1], .ldifference, earg = .edifference)
+ Scale = eta2theta(eta[,2], .lscale, earg = .escale )
+ shape = eta2theta(eta[,3], .lshape, earg = .eshape )
+ ans = rep(as.numeric(NA), len = length(shape))
ok = shape > 1
ans[ok] = loc[ok] + Scale[ok] * gamma(1 - 1/shape[ok])
ans
- }, list( .ldifference=ldifference, .lscale=lscale, .lshape=lshape,
- .edifference=edifference, .escale=escale, .eshape=eshape ))),
- last=eval(substitute(expression({
- misc$links <- c("difference"= .ldifference, "scale"= .lscale,
- "shape"= .lshape)
+ }, list( .ldifference = ldifference, .lscale = lscale, .lshape = lshape,
+ .edifference = edifference, .escale = escale, .eshape = eshape ))),
+ last = eval(substitute(expression({
+ misc$links <- c("difference" = .ldifference, "scale" = .lscale,
+ "shape" = .lshape)
misc$expected = FALSE
misc$BFGS = TRUE
- }), list( .ldifference=ldifference, .lscale=lscale, .lshape=lshape,
- .edifference=edifference, .escale=escale, .eshape=eshape ))),
- loglikelihood=eval(substitute(
- function(mu,y,w, residuals=FALSE, eta, extra=NULL) {
+ }), list( .ldifference = ldifference, .lscale = lscale, .lshape = lshape,
+ .edifference = edifference, .escale = escale, .eshape = eshape ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
loc = extra$LHSanchor -
- eta2theta(eta[,1], .ldifference, earg= .edifference)
- Scale = eta2theta(eta[,2], .lscale, earg= .escale )
- shape = eta2theta(eta[,3], .lshape, earg= .eshape )
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dfrechet(x=y, location=loc, scale=Scale, shape=shape,
- log=TRUE))
+ eta2theta(eta[,1], .ldifference, earg = .edifference)
+ Scale = eta2theta(eta[,2], .lscale, earg = .escale )
+ shape = eta2theta(eta[,3], .lshape, earg = .eshape )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * dfrechet(x=y, location = loc, scale=Scale, shape=shape,
+ log = TRUE))
}
- }, list( .ldifference=ldifference, .lscale=lscale, .lshape=lshape,
- .edifference=edifference, .escale=escale, .eshape=eshape ))),
- vfamily=c("frechet3", "vextremes"),
- deriv=eval(substitute(expression({
- difference = eta2theta(eta[,1], .ldifference, earg= .edifference )
- Scale = eta2theta(eta[,2], .lscale, earg= .escale )
- shape = eta2theta(eta[,3], .lshape, earg= .eshape )
+ }, list( .ldifference = ldifference, .lscale = lscale, .lshape = lshape,
+ .edifference = edifference, .escale = escale, .eshape = eshape ))),
+ vfamily = c("frechet3", "vextremes"),
+ deriv = eval(substitute(expression({
+ difference = eta2theta(eta[,1], .ldifference, earg = .edifference )
+ Scale = eta2theta(eta[,2], .lscale, earg = .escale )
+ shape = eta2theta(eta[,3], .lshape, earg = .eshape )
loc = extra$LHSanchor - difference
extra$location = loc # Store the location parameter estimate here
rzedd = Scale / (y-loc) # reciprocial of zedd
@@ -1865,56 +1885,56 @@ frechet3.control <- function(save.weight=TRUE, ...)
}
derivnew = w *
cbind(dl.ddiff * dtheta.deta(difference, .ldifference,
- earg= .edifference ),
- dl.dScale * dtheta.deta(Scale, .lscale, earg= .escale ),
- dl.dshape * dtheta.deta(shape, .lshape, earg= .eshape ))
+ earg = .edifference ),
+ dl.dScale * dtheta.deta(Scale, .lscale, earg = .escale ),
+ dl.dshape * dtheta.deta(shape, .lshape, earg = .eshape ))
derivnew
- }), list( .ldifference=ldifference, .lscale=lscale, .lshape=lshape,
- .edifference=edifference, .escale=escale, .eshape=eshape ))),
- weight=eval(substitute(expression({
+ }), list( .ldifference = ldifference, .lscale = lscale, .lshape = lshape,
+ .edifference = edifference, .escale = escale, .eshape = eshape ))),
+ weight = eval(substitute(expression({
if (iter == 1) {
wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
} else {
wzold = wznew
wznew = qnupdate(w=w, wzold=wzold, dderiv=(derivold - derivnew),
- deta=etanew-etaold, M=M, effpos = .effpos,
+ deta=etanew-etaold, M = M, effpos = .effpos,
trace=trace) # weights incorporated in args
}
wznew
- }), list( .effpos=effpos ))))
+ }), list( .effpos = effpos ))))
}
-recnormal1.control <- function(save.weight=TRUE, ...)
+recnormal1.control <- function(save.weight = TRUE, ...)
{
- list(save.weight=save.weight)
+ list(save.weight = save.weight)
}
- recnormal1 = function(lmean="identity", lsd="loge",
- imean=NULL, isd=NULL, method.init=1, zero=NULL)
+ recnormal1 <- function(lmean = "identity", lsd = "loge",
+ imean = NULL, isd = NULL, method.init = 1, zero = NULL)
{
if (mode(lmean) != "character" && mode(lmean) != "name")
lmean = as.character(substitute(lmean))
if (mode(lsd) != "character" && mode(lsd) != "name")
lsd = as.character(substitute(lsd))
- if (!is.Numeric(method.init, allow=1, integ=TRUE, positi=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, positi = TRUE) ||
method.init > 3.5)
stop("argument 'method.init' must be 1 or 2 or 3")
new("vglmff",
- blurb=c("Upper record values from a univariate normal distribution\n\n",
+ blurb = c("Upper record values from a univariate normal distribution\n\n",
"Links: ",
- namesof("mean", lmean, tag= TRUE), "; ",
- namesof("sd", lsd, tag= TRUE),
+ namesof("mean", lmean, tag = TRUE), "; ",
+ namesof("sd", lsd, tag = TRUE),
"\n",
"Variance: sd^2"),
constraints=eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
- predictors.names = c(namesof("mean", .lmean, tag= FALSE),
- namesof("sd", .lsd, tag= FALSE))
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ predictors.names = c(namesof("mean", .lmean, tag = FALSE),
+ namesof("sd", .lsd, tag = FALSE))
if (ncol(y <- cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
if (any(diff(y) <= 0))
@@ -1922,37 +1942,38 @@ recnormal1.control <- function(save.weight=TRUE, ...)
if (any(w != 1))
warning("weights should have unit values only")
if (!length(etastart)) {
- mean.init = if (length( .imean)) rep( .imean, len=n) else {
+ mean.init = if (length( .imean)) rep( .imean, len = n) else {
if (.lmean == "loge") pmax(1/1024, min(y)) else min(y)}
- sd.init = if (length( .isd)) rep( .isd, len=n) else {
+ sd.init = if (length( .isd)) rep( .isd, len = n) else {
if (.method.init == 1) 1*(sd(y)) else
if (.method.init == 2) 5*(sd(y)) else
.5*(sd(y))
}
- etastart = cbind(theta2eta(rep(mean.init, len=n), .lmean),
- theta2eta(rep(sd.init, len=n), .lsd))
+ etastart = cbind(theta2eta(rep(mean.init, len = n), .lmean),
+ theta2eta(rep(sd.init, len = n), .lsd))
}
- }), list( .lmean=lmean, .lsd=lsd, .imean=imean, .isd=isd,
- .method.init=method.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+ }), list( .lmean = lmean, .lsd = lsd, .imean = imean, .isd = isd,
+ .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
eta2theta(eta[,1], .lmean)
- }, list( .lmean=lmean ))),
- last=eval(substitute(expression({
- misc$link = c("mu"= .lmean, "sd"= .lsd)
+ }, list( .lmean = lmean ))),
+ last = eval(substitute(expression({
+ misc$link = c("mu" = .lmean, "sd" = .lsd)
misc$expected = FALSE
- }), list( .lmean=lmean, .lsd=lsd ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ }), list( .lmean = lmean, .lsd = lsd ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra = NULL) {
sd = eta2theta(eta[,2], .lsd)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
zedd = (y - mu) / sd
NN = nrow(eta)
sum(w * (-log(sd) - 0.5 * zedd^2)) -
- sum(w[-NN] * pnorm(zedd[-NN], lower.tail=FALSE, log.p=TRUE))
+ sum(w[-NN] * pnorm(zedd[-NN], lower.tail = FALSE, log.p = TRUE))
}
- }, list( .lsd=lsd ))),
- vfamily=c("recnormal1"),
- deriv=eval(substitute(expression({
+ }, list( .lsd = lsd ))),
+ vfamily = c("recnormal1"),
+ deriv = eval(substitute(expression({
NN = nrow(eta)
mymu = eta2theta(eta[,1], .lmean)
sd = eta2theta(eta[,2], .lsd)
@@ -1973,14 +1994,14 @@ recnormal1.control <- function(save.weight=TRUE, ...)
}
derivnew = w * cbind(dl.dmu * dmu.deta, dl.dsd * dsd.deta)
derivnew
- }), list( .lmean=lmean, .lsd=lsd ))),
+ }), list( .lmean = lmean, .lsd = lsd ))),
weight=expression({
if (iter == 1) {
wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
} else {
wzold = wznew
wznew = qnupdate(w=w, wzold=wzold, dderiv=(derivold - derivnew),
- deta=etanew-etaold, M=M,
+ deta=etanew-etaold, M = M,
trace=trace) # weights incorporated in args
}
wznew
@@ -1989,28 +2010,29 @@ recnormal1.control <- function(save.weight=TRUE, ...)
-recexp1.control <- function(save.weight=TRUE, ...)
+recexp1.control <- function(save.weight = TRUE, ...)
{
- list(save.weight=save.weight)
+ list(save.weight = save.weight)
}
- recexp1 = function(lrate="loge", irate=NULL, method.init=1)
+ recexp1 <- function(lrate = "loge", irate = NULL, method.init = 1)
{
if (mode(lrate) != "character" && mode(lrate) != "name")
lrate = as.character(substitute(lrate))
- if (!is.Numeric(method.init, allow=1, integ=TRUE, positi=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, positi = TRUE) ||
method.init > 3.5)
stop("argument 'method.init' must be 1 or 2 or 3")
new("vglmff",
- blurb=c("Upper record values from a 1-parameter exponential distribution\n\n",
+ blurb = c("Upper record values from a ",
+ "1-parameter exponential distribution\n\n",
"Links: ",
- namesof("rate", lrate, tag= TRUE),
+ namesof("rate", lrate, tag = TRUE),
"\n",
"Variance: 1/rate^2"),
- initialize=eval(substitute(expression({
- predictors.names = c(namesof("rate", .lrate, tag= FALSE))
+ initialize = eval(substitute(expression({
+ predictors.names = c(namesof("rate", .lrate, tag = FALSE))
if (ncol(y <- cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
if (any(diff(y) <= 0))
@@ -2018,39 +2040,40 @@ recexp1.control <- function(save.weight=TRUE, ...)
if (any(w != 1))
warning("weights should have unit values only")
if (!length(etastart)) {
- rate.init = if (length( .irate)) rep( .irate, len=n) else {
+ rate.init = if (length( .irate)) rep( .irate, len = n) else {
init.rate =
if (.method.init == 1) length(y) / y[length(y),1] else
if (.method.init == 2) 1/mean(y) else 1/median(y)
if (.lrate == "loge") pmax(1/1024, init.rate) else init.rate}
- etastart = cbind(theta2eta(rep(rate.init, len=n), .lrate))
+ etastart = cbind(theta2eta(rep(rate.init, len = n), .lrate))
}
- }), list( .lrate=lrate, .irate=irate, .method.init=method.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+ }), list( .lrate = lrate, .irate = irate, .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
eta2theta(eta, .lrate)
- }, list( .lrate=lrate ))),
- last=eval(substitute(expression({
- misc$link = c("rate"= .lrate)
+ }, list( .lrate = lrate ))),
+ last = eval(substitute(expression({
+ misc$link = c("rate" = .lrate)
misc$expected = TRUE
- }), list( .lrate=lrate ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ }), list( .lrate = lrate ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra = NULL) {
rate = eta2theta(eta, .lrate)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
NN = length(eta)
y = cbind(y)
sum(w * log(rate)) - w[NN] * rate[NN] * y[NN,1]
}
- }, list( .lrate=lrate ))),
- vfamily=c("recexp1"),
- deriv=eval(substitute(expression({
+ }, list( .lrate = lrate ))),
+ vfamily = c("recexp1"),
+ deriv = eval(substitute(expression({
NN = length(eta)
rate = c(eta2theta(eta, .lrate))
dl.drate = 1 / rate
dl.drate[NN] = 1/ rate[NN] - y[NN,1]
drate.deta = dtheta.deta(rate, .lrate)
w * cbind(dl.drate * drate.deta)
- }), list( .lrate=lrate ))),
+ }), list( .lrate = lrate ))),
weight=expression({
ed2l.drate2 = -1 / rate^2
wz = -w * drate.deta^2 * ed2l.drate2
@@ -2066,80 +2089,82 @@ recexp1.control <- function(save.weight=TRUE, ...)
- poissonp = function(ostatistic, dimension=2, link="loge", earg=list(),
- idensity=NULL, method.init=1) {
- if (!is.Numeric(ostatistic, posit=TRUE, allow=1, integ=TRUE))
+ poissonp <- function(ostatistic, dimension = 2,
+ link = "loge", earg = list(),
+ idensity = NULL, method.init = 1) {
+ if (!is.Numeric(ostatistic, posit = TRUE, allow = 1, integ = TRUE))
stop("argument 'ostatistic' must be a single positive integer")
- if (!is.Numeric(dimension, posit=TRUE, allow=1, integ=TRUE) ||
+ if (!is.Numeric(dimension, posit = TRUE, allow = 1, integ = TRUE) ||
dimension > 3)
stop("argument 'dimension' must be 2 or 3")
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
- if (!is.Numeric(method.init, allow=1, posit=TRUE, integer=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, posit = TRUE, integer = TRUE) ||
method.init > 2.5)
stop("argument 'method.init' must be 1 or 2")
- if (length(idensity) && !is.Numeric(idensity, posit=TRUE))
+ if (length(idensity) && !is.Numeric(idensity, posit = TRUE))
stop("bad input for argument 'idensity'")
new("vglmff",
- blurb=c(if(dimension==2)
+ blurb = c(if (dimension == 2)
"Poisson-points-on-a-plane distances distribution\n" else
"Poisson-points-on-a-volume distances distribution\n",
"Link: ",
- namesof("density", link, earg=earg), "\n\n",
- if (dimension==2)
+ namesof("density", link, earg = earg), "\n\n",
+ if (dimension == 2)
"Mean: gamma(s+0.5) / (gamma(s) * sqrt(density * pi))" else
"Mean: gamma(s+1/3) / (gamma(s) * (4*density*pi/3)^(1/3))"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
if (any(y <= 0))
stop("response must contain positive values only")
- predictors.names = namesof("density", .link, earg=.earg, tag=FALSE)
+ predictors.names = namesof("density", .link, earg = .earg, tag = FALSE)
if (!length(etastart)) {
use.this = if ( .method.init == 1) median(y) + 1/8 else
weighted.mean(y,w)
if ( .dimension == 2) {
- myratio = exp(lgamma( .ostatistic +0.5) - lgamma( .ostatistic))
- density.init = if (is.Numeric( .idensity))
- rep( .idensity, len=n) else
- rep(myratio^2 / (pi * use.this^2), len=n)
- etastart = theta2eta(density.init, .link, earg= .earg)
+ myratio = exp(lgamma( .ostatistic +0.5) - lgamma( .ostatistic ))
+ density.init = if (is.Numeric( .idensity ))
+ rep( .idensity, len = n) else
+ rep(myratio^2 / (pi * use.this^2), len = n)
+ etastart = theta2eta(density.init, .link, earg = .earg)
} else {
- myratio = exp(lgamma( .ostatistic +1/3) - lgamma( .ostatistic))
- density.init = if (is.Numeric( .idensity))
- rep( .idensity, len=n) else
- rep(3 * myratio^3 / (4 * pi * use.this^3), len=n)
- etastart = theta2eta(density.init, .link, earg= .earg)
+ myratio = exp(lgamma( .ostatistic +1/3) - lgamma( .ostatistic ))
+ density.init = if (is.Numeric( .idensity ))
+ rep( .idensity, len = n) else
+ rep(3 * myratio^3 / (4 * pi * use.this^3), len = n)
+ etastart = theta2eta(density.init, .link, earg = .earg)
}
}
- }), list( .link=link, .earg=earg, .ostatistic=ostatistic,
- .dimension=dimension, .method.init=method.init,
- .idensity=idensity ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- density = eta2theta(eta, .link, earg= .earg)
+ }), list( .link = link, .earg = earg, .ostatistic = ostatistic,
+ .dimension = dimension, .method.init = method.init,
+ .idensity = idensity ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ density = eta2theta(eta, .link, earg = .earg)
if ( .dimension == 2) {
myratio = exp(lgamma( .ostatistic +0.5) - lgamma( .ostatistic ))
myratio / sqrt(density * pi)
} else {
myratio = exp(lgamma( .ostatistic +1/3) - lgamma( .ostatistic))
- myratio / (4*density * pi/3)^(1/3)
+ myratio / (4 * density * pi/3)^(1/3)
}
- }, list( .link=link, .earg=earg, .ostatistic=ostatistic,
- .dimension=dimension ))),
- last=eval(substitute(expression({
- misc$link = c("density"= .link)
- misc$earg = list("density"= .earg)
+ }, list( .link = link, .earg = earg, .ostatistic = ostatistic,
+ .dimension = dimension ))),
+ last = eval(substitute(expression({
+ misc$link = c("density" = .link)
+ misc$earg = list("density" = .earg)
misc$expected = TRUE
misc$ostatistic = .ostatistic
misc$dimension = .dimension
- }), list( .link=link, .earg=earg, .ostatistic=ostatistic,
- .dimension=dimension ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- density = eta2theta(eta, .link, earg= .earg)
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ }), list( .link = link, .earg = earg, .ostatistic = ostatistic,
+ .dimension = dimension ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra = NULL) {
+ density = eta2theta(eta, .link, earg = .earg)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
if ( .dimension == 2)
sum(w * (log(2) + .ostatistic * log(pi * density) -
lgamma( .ostatistic) + (2* .ostatistic-1) * log(y) -
@@ -2147,26 +2172,26 @@ recexp1.control <- function(save.weight=TRUE, ...)
sum(w * (log(3) + .ostatistic * log(4*pi * density/3) -
lgamma( .ostatistic) + (3* .ostatistic-1) * log(y) -
(4/3) * density * pi * y^3))
- }, list( .link=link, .earg=earg, .ostatistic=ostatistic,
- .dimension=dimension ))),
- vfamily=c("poissonp"),
- deriv=eval(substitute(expression({
- density = eta2theta(eta, .link, earg= .earg)
+ }, list( .link = link, .earg = earg, .ostatistic = ostatistic,
+ .dimension = dimension ))),
+ vfamily = c("poissonp"),
+ deriv = eval(substitute(expression({
+ density = eta2theta(eta, .link, earg = .earg)
if ( .dimension == 2) {
dl.ddensity = .ostatistic / density - pi * y^2
} else {
dl.ddensity = .ostatistic / density - (4/3) * pi * y^3
}
- ddensity.deta = dtheta.deta(density, .link, earg= .earg)
+ ddensity.deta = dtheta.deta(density, .link, earg = .earg)
w * dl.ddensity * ddensity.deta
- }), list( .link=link, .earg=earg, .ostatistic=ostatistic,
- .dimension=dimension ))),
- weight=eval(substitute(expression({
+ }), list( .link = link, .earg = earg, .ostatistic = ostatistic,
+ .dimension = dimension ))),
+ weight = eval(substitute(expression({
ed2l.ddensity2 = .ostatistic / density^2
wz = ddensity.deta^2 * ed2l.ddensity2
w * wz
- }), list( .link=link, .earg=earg, .ostatistic=ostatistic,
- .dimension=dimension ))))
+ }), list( .link = link, .earg = earg, .ostatistic = ostatistic,
+ .dimension = dimension ))))
}
diff --git a/R/family.fishing.R b/R/family.fishing.R
index 4fd0be2..6fd3209 100644
--- a/R/family.fishing.R
+++ b/R/family.fishing.R
@@ -1,16 +1,14 @@
-# "family.fishing.q"
-# Last modified: 01/12/08, 02/12/08
+# These functions are
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
+
-# These functions are Copyright (C) 2009-2010 T. W. Yee All rights reserved.
-# ====================================================================
-# 20081201
DeLury = function(catch, effort,
type=c("DeLury","Leslie"),
ricker=FALSE) {
-# 20081202; this function has been checked not ok
type = match.arg(type, c("DeLury","Leslie"))[1]
if (!is.logical(ricker)) stop("bad input for 'ricker'")
if ((LLL <- Lcatch <- length(catch)) != (Leffort <- length(effort)))
@@ -47,33 +45,77 @@ DeLury = function(catch, effort,
-# ======================================================================
-# 20081201
-# Transferred over from my own files and then modified here.
-# length is in metres
-wffc.P1 = function(length, min.eligible=0.18)
- ifelse(length >= min.eligible, 100 + 20 * ceiling(100*length), 0)
-wffc.P1star = function(length, min.eligible=0.18)
- ifelse(length >= min.eligible, 100 + 2000 * length, 0)
-# This was in the original mss. but problem is P2 does not return an integer
-#wffc.P2 = function(y, min.eligible=0.18)
-# P1(y) + ifelse(y >= min.eligible, 0.7*ceiling(100*(y-min.eligible))^2, 0)
-#wffc.P2star = function(y, min.eligible=0.18)
-# P1star(y) + ifelse(y >= min.eligible, 7000 * (y-min.eligible)^2, 0)
-# 7/6/08; This returns an integer
-wffc.P2 = function(length, min.eligible=0.18)
- wffc.P1(length) +
+wffc.P1 = function(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
+ ifelse(length >= min.eligible, c1 + (ppm/100) *
+ ceiling( signif(100*length, dig = 8) ), 0)
+wffc.P1star = function(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
+ ifelse(length >= min.eligible, c1 + ppm * length, 0)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+wffc.P2 = function(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
+ wffc.P1(length, c1 = c1, min.eligible = min.eligible, ppm = ppm) +
ifelse(length >= min.eligible,
ceiling(100*(length-min.eligible))^2, 0)
-wffc.P2star = function(length, min.eligible=0.18)
- wffc.P1star(length) +
+wffc.P2star = function(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
+ wffc.P1star(length, c1 = c1, min.eligible = min.eligible, ppm = ppm) +
ifelse(length >= min.eligible, 10000 * (length-min.eligible)^2, 0)
-# ======================================================================
+
+
+
+wffc.P3 = function(length, c1 = 100, min.eligible = 0.18, ppm = 2000) {
+
+ temp1 = floor((ceiling(100*length)/100) / min.eligible) # zz not sure
+ temp1 = floor(length / min.eligible)
+ ans = ifelse(temp1 >= 1, c1, length * 0) # Handles NAs
+ ans = ans + ifelse(temp1 >= 1, ppm * (ceiling(100*length)/100), 0)
+ maxtemp1 = max(temp1, na.rm = TRUE)
+ if (maxtemp1 > 1)
+ for (ii in 2:maxtemp1) {
+ ans = ans +
+ ifelse(ii < temp1, min.eligible * (ii-1) * ppm, 0) +
+ ifelse(ii == temp1, (ceiling(100*length)/100 -
+ ii*min.eligible) * (ii-1) * ppm, 0)
+ }
+ ans
+}
+
+
+
+wffc.P3star = function(length, c1 = 100, min.eligible = 0.18, ppm = 2000) {
+ temp1 = floor(length / min.eligible)
+ ans = ifelse(temp1 >= 1, c1, length * 0) # Handles NAs
+ ans = ans + ifelse(temp1 >= 1, length * ppm, 0)
+ maxtemp1 = max(temp1, na.rm = TRUE)
+ if (maxtemp1 > 1)
+ for (ii in 2:maxtemp1) {
+ ans = ans + ifelse(ii < temp1, min.eligible * (ii-1) * ppm, 0) +
+ ifelse(ii == temp1, (length - ii*min.eligible) *
+ (ii-1) * ppm, 0)
+ }
+ ans
+}
+
+
+
+
diff --git a/R/family.functions.R b/R/family.functions.R
index 9927f88..0c76de7 100644
--- a/R/family.functions.R
+++ b/R/family.functions.R
@@ -1,14 +1,15 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
fill =
-fill1 = fill2 = fill3 =
-function(x, values=0, ncolx=ncol(x)) {
+fill1 = fill2 = fill3 =
+function(x, values = 0, ncolx = ncol(x)) {
x = as.matrix(x)
- matrix(values, nrow=nrow(x), ncol=ncolx, byrow=TRUE)
+ matrix(values, nrow = nrow(x), ncol = ncolx, byrow = TRUE)
}
@@ -24,11 +25,11 @@ remove.arg <- function(string)
nc <- nchar(string)
bits <- substring(string, 1:nc, 1:nc)
- b1 <- (1:nc)[bits=="("]
+ b1 <- (1:nc)[bits == "("]
b1 <- if (length(b1)) b1[1]-1 else nc
- if (b1==0)
+ if (b1 == 0)
return("")
- string <- paste(bits[1:b1], collapse="")
+ string <- paste(bits[1:b1], collapse = "")
string
}
@@ -36,7 +37,7 @@ remove.arg <- function(string)
add.arg <- function(string, arg.string)
{
- if (arg.string=="")
+ if (arg.string == "")
return(string)
nc <- nchar(string)
lastc <- substring(string, nc, nc)
@@ -44,11 +45,11 @@ add.arg <- function(string, arg.string)
{
if (substring(string, nc-1, nc-1) == "(")
{
- paste(substring(string, 1, nc-2), "(", arg.string, ")", sep="")
+ paste(substring(string, 1, nc-2), "(", arg.string, ")", sep = "")
} else
- paste(substring(string, 1, nc-1), ", ", arg.string, ")", sep="")
+ paste(substring(string, 1, nc-1), ", ", arg.string, ")", sep = "")
} else
- paste(string, "(", arg.string, ")", sep="")
+ paste(string, "(", arg.string, ")", sep = "")
}
@@ -57,21 +58,21 @@ get.arg <- function(string)
nc <- nchar(string)
bits <- substring(string, 1:nc, 1:nc)
- b1 <- (1:nc)[bits=="("]
- b2 <- (1:nc)[bits==")"]
- b1 <- if (length(b1)) min(b1) else return("") # stop("no \"(\" in string")
- b2 <- if (length(b2)) max(b2) else return("") # stop("no \")\" in string")
- if (b2-b1==1) "" else paste(bits[(1+b1):(b2-1)], collapse="")
+ b1 <- (1:nc)[bits == "("]
+ b2 <- (1:nc)[bits == ")"]
+ b1 <- if (length(b1)) min(b1) else return("") # stop('no "(" in string')
+ b2 <- if (length(b2)) max(b2) else return("") # stop('no ")" in string')
+ if (b2-b1 == 1) "" else paste(bits[(1+b1):(b2-1)], collapse = "")
}
ei <- function(i,n)
- cbind(as.numeric((1:n)==i))
+ cbind(as.numeric((1:n) == i))
ei = function(i, n)
- diag(n)[,i,drop=FALSE]
+ diag(n)[,i,drop = FALSE]
eij = function(i, n) {
temp = matrix(0, n, 1)
@@ -89,7 +90,7 @@ dneg.binomial <- function(x, k, prob)
}
-tapplymat1 <- function(mat, function.arg=c("cumsum", "diff", "cumprod"))
+tapplymat1 <- function(mat, function.arg = c("cumsum", "diff", "cumprod"))
{
@@ -98,56 +99,56 @@ tapplymat1 <- function(mat, function.arg=c("cumsum", "diff", "cumprod"))
function.arg <- match.arg(function.arg, c("cumsum", "diff", "cumprod"))[1]
type <- switch(function.arg,
- cumsum=1,
- diff=2,
- cumprod=3,
+ cumsum = 1,
+ diff = 2,
+ cumprod = 3,
stop("function.arg not matched"))
if (!is.matrix(mat))
mat <- as.matrix(mat)
nr <- nrow(mat)
nc <- ncol(mat)
- fred <- dotC(name="tapplymat1", mat=as.double(mat),
+ fred <- dotC(name = "tapplymat1", mat = as.double(mat),
as.integer(nr), as.integer(nc), as.integer(type))
dim(fred$mat) <- c(nr, nc)
dimnames(fred$mat) <- dimnames(mat)
switch(function.arg,
- cumsum=fred$mat,
- diff=fred$mat[,-1,drop=FALSE],
- cumprod=fred$mat)
+ cumsum = fred$mat,
+ diff = fred$mat[,-1,drop = FALSE],
+ cumprod = fred$mat)
}
-matrix.power <- function(wz, M, power, fast=TRUE)
+matrix.power <- function(wz, M, power, fast = TRUE)
{
n <- nrow(wz)
- index <- iam(NA, NA, M, both=TRUE, diag=TRUE)
+ index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
dimm.value <- if (is.matrix(wz)) ncol(wz) else 1
if (dimm.value > M*(M+1)/2)
stop("too many columns")
- if (M == 1 || dimm.value==M) {
+ if (M == 1 || dimm.value == M) {
WW <- wz^power # May contain NAs
return(t(WW))
}
if (fast) {
- k <- veigen(t(wz), M=M) # matrix.arg)
+ k <- veigen(t(wz), M = M) # matrix.arg)
evals <- k$values # M x n
evects <- k$vectors # M x M x n
} else {
stop("sorry, cannot handle matrix-band form yet")
- k <- unlist(apply(wz,3,eigen), use.names=FALSE)
+ k <- unlist(apply(wz,3,eigen), use.names = FALSE)
dim(k) <- c(M,M+1,n)
- evals <- k[,1,,drop=TRUE] # M x n
- evects <- k[,-1,,drop=TRUE] # M x M x n
+ evals <- k[,1,,drop = TRUE] # M x n
+ evects <- k[,-1,,drop = TRUE] # M x M x n
}
temp <- evals^power # Some values may be NAs
@@ -156,14 +157,14 @@ matrix.power <- function(wz, M, power, fast=TRUE)
index <- as.vector( matrix(1, 1, M) %*% is.na(temp) )
- index <- index == 0
+ index <- (index == 0)
if (!all(index)) {
warning(paste("Some weight matrices have negative",
"eigenvalues. They\nwill be assigned NAs"))
temp[,!index] <- 1
}
- WW <- mux55(evects, temp, M=M)
+ WW <- mux55(evects, temp, M = M)
WW[,!index] <- NA
WW
}
@@ -174,30 +175,30 @@ rss.vgam <- function(z, wz, M)
{
- if (M==1)
+ if (M == 1)
return(sum(c(wz) * c(z^2)))
- wz.z <- mux22(t(wz), z, M, as.mat=TRUE) # else mux2(wz, z)
+ wz.z <- mux22(t(wz), z, M, as.mat = TRUE) # else mux2(wz, z)
ans <- sum(wz.z * z)
ans
}
-wweighted.mean <- function(y, w = NULL, matrix.arg=TRUE)
+wweighted.mean <- function(y, w = NULL, matrix.arg = TRUE)
{
if (!matrix.arg)
stop("currently, matrix.arg must be TRUE")
y <- as.matrix(y)
M <- ncol(y)
n <- nrow(y)
- if (M==1) {
+ if (M == 1) {
if (missing(w)) mean(y) else sum(w * y)/sum(w)
} else {
if (missing(w)) y %*% rep(1, n) else {
- numer <- mux22(t(w), y, M, as.matrix=TRUE) # matrix.arg=matrix.arg,
+ numer <- mux22(t(w), y, M, as.matrix = TRUE) # matrix.arg = matrix.arg,
numer <- t(numer) %*% rep(1, n)
denom <- t(w) %*% rep(1, n)
denom <- matrix(denom, 1, length(denom))
if (matrix.arg)
- denom <- m2adefault(denom, M=M)[,,1]
+ denom <- m2adefault(denom, M = M)[,,1]
c(solve(denom, numer))
}
}
@@ -211,10 +212,10 @@ veigen <- function(x, M)
n <- ncol(x)
- index <- iam(NA, NA, M, both=TRUE, diag=TRUE)
+ index <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
dimm.value <- nrow(x) # usually M or M(M+1)/2
- z <- dotFortran(name="veigen",
+ z <- dotFortran(name = "veigen",
as.integer(M),
as.integer(n),
as.double(x),
@@ -223,7 +224,7 @@ veigen <- function(x, M)
vectors = double(M*M*n),
double(M),
double(M),
- wk=double(M*M),
+ wk = double(M*M),
as.integer(index$row), as.integer(index$col),
as.integer(dimm.value),
error.code = integer(1))
@@ -231,32 +232,34 @@ veigen <- function(x, M)
if (z$error.code)
stop("eigen algorithm (rs) returned error code ", z$error.code)
ord <- M:1
- dim(z$values) <- c(M,n)
- z$values <- z$values[ord,,drop=FALSE]
- dim(z$vectors) <- c(M,M,n)
- z$vectors <- z$vectors[,ord,,drop=FALSE]
- return(list(values = z$values, vectors = z$vectors))
+ dim(z$values) <- c(M, n)
+ z$values <- z$values[ord,,drop = FALSE]
+ dim(z$vectors) <- c(M, M, n)
+ z$vectors <- z$vectors[, ord, , drop = FALSE]
+ return(list(values = z$values,
+ vectors = z$vectors))
}
-ima <- function(j,k,M)
+ima <- function(j, k, M)
{
- if (length(M)>1 || M<=0 || j<=0 || k<=0 || j>M || k>M)
+ if (length(M) > 1 || M <= 0 || j <= 0 || k <= 0 ||
+ j > M || k > M)
stop("input wrong")
m <- diag(M)
- m[col(m)<=row(m)] <- 1:(M*(M+1)/2)
- if (j>=k) m[j,k] else m[k,j]
+ m[col(m) <= row(m)] <- 1:(M*(M+1)/2)
+ if (j >= k) m[j,k] else m[k,j]
}
-checkwz <- function(wz, M, trace=FALSE, wzepsilon=.Machine$double.eps^0.75) {
+checkwz <- function(wz, M, trace = FALSE, wzepsilon = .Machine$double.eps^0.75) {
if (wzepsilon > 0.5) warning("'wzepsilon' is probably too large")
if (!is.matrix(wz)) wz = as.matrix(wz)
- if ((temp <- sum(wz[,1:M,drop=FALSE] < wzepsilon)))
+ if ((temp <- sum(wz[,1:M,drop = FALSE] < wzepsilon)))
warning(paste(temp, "elements replaced by", signif(wzepsilon, 5)))
wz[,1:M] = pmax(wzepsilon, wz[,1:M])
wz
diff --git a/R/family.genetic.R b/R/family.genetic.R
index 246b28e..9b73f24 100644
--- a/R/family.genetic.R
+++ b/R/family.genetic.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -12,79 +13,86 @@
- G1G2G3 = function(link="logit", earg = list(), ip1=NULL, ip2=NULL, iF=NULL)
+ G1G2G3 = function(link = "logit", earg = list(),
+ ip1 = NULL, ip2 = NULL, iF = NULL)
{
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
new("vglmff",
- blurb=c("G1-G2-G3 phenotype\n\n",
+ blurb = c("G1-G2-G3 phenotype\n\n",
"Links: ",
- namesof("p1", link, earg= earg), ", ",
- namesof("p2", link, earg= earg), ", ",
- namesof("f", link, earg= earg, tag=FALSE)),
- deviance=Deviance.categorical.data.vgam,
- initialize=eval(substitute(expression({
+ namesof("p1", link, earg = earg), ", ",
+ namesof("p2", link, earg = earg), ", ",
+ namesof("f", link, earg = earg, tag = FALSE)),
+ deviance = Deviance.categorical.data.vgam,
+ initialize = eval(substitute(expression({
delete.zero.colns = FALSE
+
eval(process.categorical.data.vgam)
ok.col.ny = c("G1G1","G1G2","G1G3","G2G2","G2G3","G3G3")
if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
setequal(ok.col.ny, col.ny)) {
if (!all(ok.col.ny == col.ny))
- stop("the columns of the response matrix should have names ",
- "(output of colnames()) ordered as ",
+ stop("the columns of the response matrix should have ",
+ "names (output of colnames()) ordered as ",
"c('G1G1','G1G2','G1G3','G2G2','G2G3','G3G3')")
}
- predictors.names = c(namesof("p1", .link, earg= .earg, tag=FALSE),
- namesof("p2", .link, earg= .earg, tag=FALSE),
- namesof("f", .link, earg= .earg, tag=FALSE))
+ predictors.names =
+ c(namesof("p1", .link, earg = .earg, tag = FALSE),
+ namesof("p2", .link, earg = .earg, tag = FALSE),
+ namesof("f", .link, earg = .earg, tag = FALSE))
if (is.null(etastart)) {
- p1 = if (is.numeric(.ip1)) rep(.ip1, n) else
- sqrt(mustart[,1])
- f = if (is.numeric(.iF)) rep(.iF, n) else
- rep(0.01, n) # close to zero
- p2 = if (is.numeric(.ip2)) rep(.ip2, n) else
- mustart[,2] / (sqrt(mustart[,1]) * 2)
+ p1 = if (is.numeric( .ip1 )) rep( .ip1 , len = n) else
+ sqrt(mustart[,1])
+ f = if (is.numeric( .iF )) rep( .iF , len = n) else
+ rep(0.01, len = n) # close to zero
+ p2 = if (is.numeric( .ip2 )) rep( .ip2 , len = n) else
+ mustart[,2] / (sqrt(mustart[,1]) * 2)
if (any(p1 <= 0) || any(p1 >= 1))
stop("bad initial value for 'p1'")
if (any(p2 <= 0) || any(p2 >= 1))
stop("bad initial value for 'p2'")
- etastart = cbind(theta2eta(p1, .link, earg= .earg),
- theta2eta(p2, .link, earg= .earg),
- theta2eta(f, .link, earg= .earg))
+ etastart = cbind(theta2eta(p1, .link, earg = .earg),
+ theta2eta(p2, .link, earg = .earg),
+ theta2eta(f, .link, earg = .earg))
+ mustart <- NULL # Since etastart has been computed.
+
}
- }), list( .link=link, .ip1=ip1, .ip2=ip2, .iF=iF, .earg=earg ))),
- inverse=eval(substitute(function(eta, extra=NULL){
- p1 = eta2theta(eta[,1], link=.link, earg= .earg)
- p2 = eta2theta(eta[,2], link=.link, earg= .earg)
- p3 = 1-p1-p2
- f = eta2theta(eta[,3], link=.link, earg= .earg)
- cbind("G1G1"=f*p1+(1-f)*p1^2,
- "G1G2"=2*p1*p2*(1-f),
- "G1G3"=2*p1*p3*(1-f),
- "G2G2"=f*p2+(1-f)*p2^2,
- "G2G3"=2*p2*p3*(1-f),
- "G3G3"=f*p3+(1-f)*p3^2)
- }, list( .link=link, .earg=earg ))),
- last=eval(substitute(expression({
- misc$link = c(p1= .link, p2= .link, f= .link)
- misc$earg = list(p1= .earg, p2= .earg, f= .earg )
- }), list( .link=link, .earg=earg ))),
- loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ }), list( .link = link, .ip1 = ip1, .ip2 = ip2, .iF = iF,
+ .earg = earg ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ p1 = eta2theta(eta[,1], link = .link, earg = .earg)
+ p2 = eta2theta(eta[,2], link = .link, earg = .earg)
+ f = eta2theta(eta[,3], link = .link, earg = .earg)
+ p3 = 1 - p1 - p2
+ cbind("G1G1" = f*p1+(1-f)*p1^2,
+ "G1G2" = 2*p1*p2*(1-f),
+ "G1G3" = 2*p1*p3*(1-f),
+ "G2G2" = f*p2+(1-f)*p2^2,
+ "G2G3" = 2*p2*p3*(1-f),
+ "G3G3" = f*p3+(1-f)*p3^2)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = c(p1 = .link, p2 = .link, f = .link)
+ misc$earg = list(p1 = .earg, p2 = .earg, f = .earg )
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log = TRUE, docheck = FALSE))
},
- vfamily=c("G1G2G3", "vgenetic"),
- deriv=eval(substitute(expression({
- p1 = eta2theta(eta[,1], link=.link, earg= .earg)
- p2 = eta2theta(eta[,2], link=.link, earg= .earg)
+ vfamily = c("G1G2G3", "vgenetic"),
+ deriv = eval(substitute(expression({
+ p1 = eta2theta(eta[,1], link = .link, earg = .earg)
+ p2 = eta2theta(eta[,2], link = .link, earg = .earg)
p3 = 1-p1-p2
- f = eta2theta(eta[,3], link=.link, earg= .earg)
- dP1 = cbind(f + 2*p1*(1-f), 2*(1-f)*p2, 2*(1-f)*(1-p2-2*p1), 0,
- -2*(1-f)*p2, -f - 2*p3*(1-f))
+ f = eta2theta(eta[,3], link = .link, earg = .earg)
+ dP1 = cbind(f + 2*p1*(1-f), 2*(1-f)*p2, 2*(1-f)*(1-p2-2*p1),
+ 0, -2*(1-f)*p2, -f - 2*p3*(1-f))
dP2 = cbind(0, 2*p1*(1-f), -2*(1-f)*p1, f+2*p2*(1-f),
2*(1-f)*(1-p1-2*p2), -f - 2*p3*(1-f))
dP3 = cbind(p1*(1-p1), -2*p1*p2, -2*p1*p3, p2*(1-p2), -2*p2*p3,
@@ -92,28 +100,29 @@
dl1 = rowSums(y * dP1 / mu)
dl2 = rowSums(y * dP2 / mu)
dl3 = rowSums(y * dP3 / mu)
- dPP.deta = dtheta.deta(cbind(p1,p2,f), link=.link, earg= .earg)
- w * cbind(dPP.deta[,1] * dl1, dPP.deta[,2] * dl2,
+ dPP.deta = dtheta.deta(cbind(p1, p2, f), link = .link, earg = .earg)
+ w * cbind(dPP.deta[,1] * dl1,
+ dPP.deta[,2] * dl2,
dPP.deta[,3] * dl3)
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
dPP = array(c(dP1,dP2,dP3), c(n,6,3))
wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
for(i1 in 1:M)
for(i2 in i1:M) {
index = iam(i1,i2,M)
- wz[,index] = rowSums(dPP[,,i1,drop=TRUE] *
- dPP[,,i2,drop=TRUE] / mu) *
- dPP.deta[,i1] * dPP.deta[,i2]
+ wz[,index] = rowSums(dPP[, , i1, drop = TRUE] *
+ dPP[, , i2, drop = TRUE] / mu) *
+ dPP.deta[, i1] * dPP.deta[, i2]
}
w * wz
- }), list( .link=link, .earg=earg ))))
+ }), list( .link = link, .earg = earg ))))
}
- AAaa.nohw = function(link="logit", earg = list(), ipA=NULL, iF=NULL)
+ AAaa.nohw = function(link = "logit", earg = list(), ipA = NULL, iF = NULL)
{
if (mode(link) != "character" && mode(link) != "name")
@@ -121,12 +130,12 @@
if (!is.list(earg)) earg = list()
new("vglmff",
- blurb=c("AA-Aa-aa phenotype (without Hardy-Weinberg assumption)\n\n",
+ blurb = c("AA-Aa-aa phenotype (without Hardy-Weinberg assumption)\n\n",
"Links: ",
- namesof("pA", link, earg= earg), ", ",
- namesof("f", "identity", tag=FALSE)),
- deviance=Deviance.categorical.data.vgam,
- initialize=eval(substitute(expression({
+ namesof("pA", link, earg = earg), ", ",
+ namesof("f", "identity", tag = FALSE)),
+ deviance = Deviance.categorical.data.vgam,
+ initialize = eval(substitute(expression({
delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
@@ -138,85 +147,93 @@
"(output of colnames()) ordered as c('AA','Aa','aa')")
}
- predictors.names = c(namesof("pA", .link, earg= .earg, tag=FALSE),
- namesof("f", "identity", tag=FALSE))
+ predictors.names =
+ c(namesof("pA", .link, earg = .earg, tag = FALSE),
+ namesof("f", "identity", earg = list(), tag = FALSE))
if (is.null(etastart)) {
- pA = if (is.numeric(.ipA)) rep(.ipA, n) else
- c(sqrt(mustart[,1] - mustart[,2]/2))
- f = if (is.numeric(.iF)) rep(.iF, n) else
- rep(0.01, n) # 1- mustart[,2]/(2*pA*(1-pA))
+ pA = if (is.numeric( .ipA )) rep( .ipA , len = n) else
+ c(sqrt(mustart[, 1] - mustart[, 2] / 2))
+ f = if (is.numeric( .iF )) rep( .iF , len = n) else
+ rep(0.01, len = n) # 1- mustart[,2]/(2*pA*(1-pA))
if (any(pA <= 0) || any(pA >= 1))
stop("bad initial value for 'pA'")
- etastart = cbind(theta2eta(pA, .link, earg= .earg),
- theta2eta(f, "identity"))
+ etastart = cbind(theta2eta(pA, .link, earg = .earg),
+ theta2eta(f, "identity"))
+ mustart <- NULL # Since etastart has been computed.
}
- }), list( .link=link, .ipA=ipA, .iF=iF, .earg=earg ))),
- inverse=eval(substitute(function(eta, extra=NULL){
- pA = eta2theta(eta[,1], link=.link, earg= .earg)
- f = eta2theta(eta[,2], link="identity")
- cbind(AA=pA^2+pA*(1-pA)*f,
- Aa=2*pA*(1-pA)*(1-f),
- aa=(1-pA)^2 + pA*(1-pA)*f)
- }, list( .link=link, .earg=earg ))),
- last=eval(substitute(expression({
- misc$link = c(pA= .link, f= "identity")
- misc$earg = list(pA= .earg, f= list() )
- }), list( .link=link, .earg=earg ))),
- link=eval(substitute(function(mu, extra=NULL){
+ }), list( .link = link, .ipA=ipA, .iF = iF, .earg = earg ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ pA = eta2theta(eta[,1], link = .link, earg = .earg)
+ f = eta2theta(eta[,2], link = "identity", earg = list())
+ cbind(AA = pA^2+pA*(1-pA)*f,
+ Aa = 2*pA*(1-pA)*(1-f),
+ aa = (1-pA)^2 + pA*(1-pA)*f)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = c(pA = .link, f = "identity")
+ misc$earg = list(pA = .earg, f = list() )
+ }), list( .link = link, .earg = earg ))),
+ link = eval(substitute(function(mu, extra = NULL) {
pA = sqrt(mu[,1] - mu[,2]/2)
- f = 1- mu[,2]/(2*pA*(1-pA))
- cbind(theta2eta(pA, .link, earg= .earg),
+ f = 1 - mu[,2] / (2*pA*(1-pA))
+ cbind(theta2eta(pA, .link, earg = .earg),
theta2eta(f, "identity"))
- }, list( .link=link, .earg=earg ))),
- loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ }, list( .link = link, .earg = earg ))),
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log = TRUE, docheck = FALSE))
},
- vfamily=c("AAaa.nohw", "vgenetic"),
- deriv=eval(substitute(expression({
- pA = eta2theta(eta[,1], link=.link, earg= .earg)
- f = eta2theta(eta[,2], link="identity")
- dP1 = cbind(f + 2*pA*(1-f), 2*(1-f)*(1-2*pA), -2*(1-pA) +f*(1-2*pA))
- dP2 = cbind(pA*(1-pA), -2*pA*(1-pA), pA*(1-pA))
+ vfamily = c("AAaa.nohw", "vgenetic"),
+ deriv = eval(substitute(expression({
+ pA = eta2theta(eta[,1], link = .link, earg = .earg)
+ f = eta2theta(eta[,2], link = "identity")
+ dP1 = cbind(f + 2*pA*(1-f),
+ 2*(1-f)*(1-2*pA),
+ -2*(1-pA) +f*(1-2*pA))
+ dP2 = cbind(pA*(1-pA),
+ -2*pA*(1-pA),
+ pA*(1-pA))
dl1 = rowSums(y * dP1 / mu)
dl2 = rowSums(y * dP2 / mu)
- dPP.deta = dtheta.deta(pA, link=.link, earg= .earg)
- w * cbind(dPP.deta * dl1, dl2)
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
- dPP = array(c(dP1,dP2), c(n,3,2))
- dPP.deta = cbind(dtheta.deta(pA, link=.link, earg= .earg),
- dtheta.deta(f, link="identity"))
+ dPP.deta = dtheta.deta(pA, link = .link, earg = .earg)
+ w * cbind(dPP.deta * dl1,
+ dl2)
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ dPP = array(c(dP1, dP2), c(n, 3, 2))
+ dPP.deta = cbind(dtheta.deta(pA, link = .link, earg = .earg),
+ dtheta.deta(f, link = "identity"))
wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
for(i1 in 1:M)
for(i2 in i1:M) {
index = iam(i1,i2,M)
- wz[,index] = rowSums(dPP[,,i1,drop=TRUE] *
- dPP[,,i2,drop=TRUE] / mu) *
- dPP.deta[,i1] * dPP.deta[,i2]
+ wz[,index] = rowSums(dPP[,,i1,drop = TRUE] *
+ dPP[,,i2,drop = TRUE] / mu) *
+ dPP.deta[,i1] * dPP.deta[,i2]
}
w * wz
- }), list( .link=link, .earg=earg ))))
+ }), list( .link = link, .earg = earg ))))
}
- AB.Ab.aB.ab2 = function(link="logit", earg = list(), init.p=NULL)
+ AB.Ab.aB.ab2 = function(link = "logit", earg = list(), init.p = NULL)
{
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
new("vglmff",
- blurb=c("AB-Ab-aB-ab2 phenotype\n\n",
+ blurb = c("AB-Ab-aB-ab2 phenotype\n\n",
"Links: ",
- namesof("p", link, earg= earg)),
- deviance=Deviance.categorical.data.vgam,
- initialize=eval(substitute(expression({
+ namesof("p", link, earg = earg)),
+ deviance = Deviance.categorical.data.vgam,
+ initialize = eval(substitute(expression({
delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
- predictors.names = namesof("p", .link, earg= .earg, tag=FALSE)
+ predictors.names = namesof("p", .link, earg = .earg, tag = FALSE)
ok.col.ny = c("AB","Ab","aB","ab")
if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
@@ -228,58 +245,64 @@
if (is.null(etastart)) {
p.init = if (is.numeric(.init.p)) rep(.init.p, n) else
- c(1 - 2 * sqrt(mustart[,4]))
- etastart = theta2eta(p.init, .link, earg= .earg)
+ c(1 - 2 * sqrt(mustart[,4]))
+ etastart = theta2eta(p.init, .link, earg = .earg)
}
- }), list( .link=link, .init.p=init.p, .earg=earg ))),
- inverse=eval(substitute(function(eta,extra=NULL){
- p = eta2theta(eta, link=.link, earg= .earg)
- cbind("AB"=(2+(1-p)^2),
- "Ab"=(1-(1-p)^2),
- "aB"=(1-(1-p)^2),
- "ab"=(1-p)^2) / 4
- }, list( .link=link, .earg=earg ) )),
- last=eval(substitute(expression({
+ }), list( .link = link, .init.p=init.p, .earg = earg ))),
+ inverse = eval(substitute(function(eta,extra = NULL) {
+ p = eta2theta(eta, link = .link, earg = .earg)
+ cbind("AB" = (2+(1-p)^2),
+ "Ab" = (1-(1-p)^2),
+ "aB" = (1-(1-p)^2),
+ "ab" = (1-p)^2) / 4
+ }, list( .link = link, .earg = earg ) )),
+ last = eval(substitute(expression({
misc$link = c(p = .link)
misc$earg = list(p= .earg )
- }), list( .link=link, .earg=earg ) )),
- link=eval(substitute(function(mu, extra=NULL){
+ }), list( .link = link, .earg = earg ) )),
+ link = eval(substitute(function(mu, extra = NULL) {
p = 1 - 2 * sqrt(mu[,4])
- theta2eta(p, .link, earg= .earg)
- }, list( .link=link, .earg=earg ) )),
- loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ theta2eta(p, .link, earg = .earg)
+ }, list( .link = link, .earg = earg ) )),
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu,
+ log = TRUE, docheck = FALSE))
},
- vfamily=c("AB.Ab.aB.ab2", "vgenetic"),
- deriv=eval(substitute(expression({
- pp = eta2theta(eta, link=.link, earg= .earg)
- dP1 = cbind(-0.5*(1-pp), 0.5*(1-pp), 0.5*(1-pp), -0.5*(1-pp))
+ vfamily = c("AB.Ab.aB.ab2", "vgenetic"),
+ deriv = eval(substitute(expression({
+ pp = eta2theta(eta, link = .link, earg = .earg)
+ dP1 = cbind(-0.5*(1-pp),
+ 0.5*(1-pp),
+ 0.5*(1-pp),
+ -0.5*(1-pp))
dl1 = rowSums(y * dP1 / mu)
- dPP.deta = dtheta.deta(pp, link=.link, earg= .earg)
+ dPP.deta = dtheta.deta(pp, link = .link, earg = .earg)
w * dPP.deta * dl1
- }), list( .link=link, .earg=earg ) )),
- weight=eval(substitute(expression({
+ }), list( .link = link, .earg = earg ) )),
+ weight = eval(substitute(expression({
wz = rowSums(dP1 * dP1 / mu) * dPP.deta^2
w * wz
- }), list( .link=link, .earg=earg ) )))
+ }), list( .link = link, .earg = earg ) )))
}
- A1A2A3 = function(link="logit", earg = list(), ip1=NULL, ip2=NULL)
+ A1A2A3 = function(link = "logit", earg = list(), ip1 = NULL, ip2 = NULL)
{
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
new("vglmff",
- blurb=c("A1A2A3 Allele System (A1A1, A1A2, A2A2, A1A3, A2A3, A3A3)\n\n",
+ blurb = c("A1A2A3 Allele System ",
+ "(A1A1, A1A2, A2A2, A1A3, A2A3, A3A3)\n\n",
"Links: ",
- namesof("p1", link, earg= earg), ", ",
- namesof("p2", link, earg= earg, tag=FALSE)),
- deviance=Deviance.categorical.data.vgam,
- initialize=eval(substitute(expression({
+ namesof("p1", link, earg = earg), ", ",
+ namesof("p2", link, earg = earg, tag = FALSE)),
+ deviance = Deviance.categorical.data.vgam,
+ initialize = eval(substitute(expression({
delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
@@ -292,50 +315,58 @@
"c('A1A1','A1A2','A2A2','A1A3','A2A3','A3A3')")
}
- predictors.names = c(namesof("pA", .link, earg= .earg, tag=FALSE),
- namesof("pB", .link, earg= .earg, tag=FALSE))
+ predictors.names =
+ c(namesof("pA", .link, earg = .earg, tag = FALSE),
+ namesof("pB", .link, earg = .earg, tag = FALSE))
if (is.null(etastart)) {
p1 = if (is.numeric(.ip1)) rep(.ip1, n) else
c(sqrt(mustart[,1]))
p2 = if (is.numeric(.ip2)) rep(.ip2, n) else
c(sqrt(mustart[,3]))
- etastart = cbind(theta2eta(p1, .link, earg= .earg),
- theta2eta(p2, .link, earg= .earg))
+ etastart = cbind(theta2eta(p1, .link, earg = .earg),
+ theta2eta(p2, .link, earg = .earg))
}
- }), list( .link=link, .ip1=ip1, .ip2=ip2, .earg=earg ))),
- inverse=eval(substitute(function(eta, extra=NULL){
- p1 = eta2theta(eta[,1], link=.link, earg= .earg)
- p2 = eta2theta(eta[,2], link=.link, earg= .earg)
+ }), list( .link = link, .ip1 = ip1, .ip2 = ip2, .earg = earg ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ p1 = eta2theta(eta[,1], link = .link, earg = .earg)
+ p2 = eta2theta(eta[,2], link = .link, earg = .earg)
qq = 1-p1-p2
- cbind(A1A1=p1*p1, A1A2=2*p1*p2, A2A2=p2*p2, A1A3=2*p1*qq,
- A2A3=2*p2*qq, A3A3=qq*qq)
- }, list( .link=link, .earg=earg ))),
- last=eval(substitute(expression({
+ cbind(A1A1 = p1*p1,
+ A1A2 = 2*p1*p2,
+ A2A2 = p2*p2,
+ A1A3 = 2*p1*qq,
+ A2A3 = 2*p2*qq,
+ A3A3 = qq*qq)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
misc$link = c(p1= .link, p2= .link)
misc$earg = list(p1= .earg, p2= .earg )
- }), list( .link=link, .earg=earg ))),
- link=eval(substitute(function(mu, extra=NULL){
+ }), list( .link = link, .earg = earg ))),
+ link = eval(substitute(function(mu, extra = NULL) {
p1 = sqrt(mu[,1])
p2 = sqrt(mu[,3])
- qq = 1-p1-p2
- cbind(theta2eta(p1, .link, earg= .earg),
- theta2eta(p2, .link, earg= .earg))
- }, list( .link=link, .earg=earg ))),
- loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ qq = 1 - p1 - p2
+ cbind(theta2eta(p1, .link, earg = .earg),
+ theta2eta(p2, .link, earg = .earg))
+ }, list( .link = link, .earg = earg ))),
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu,
+ log = TRUE, docheck = FALSE))
},
- vfamily=c("A1A2A3", "vgenetic"),
- deriv=eval(substitute(expression({
- p1 = eta2theta(eta[,1], link=.link, earg= .earg)
- p2 = eta2theta(eta[,2], link=.link, earg= .earg)
+ vfamily = c("A1A2A3", "vgenetic"),
+ deriv = eval(substitute(expression({
+ p1 = eta2theta(eta[,1], link = .link, earg = .earg)
+ p2 = eta2theta(eta[,2], link = .link, earg = .earg)
dl.dp1 = (2*y[,1]+y[,2]+y[,4])/p1 - (2*y[,6]+y[,4]+y[,5])/(1-p1-p2)
dl.dp2 = (2*y[,3]+y[,2]+y[,5])/p2 - (2*y[,6]+y[,4]+y[,5])/(1-p1-p2)
- dp1.deta = dtheta.deta(p1, link=.link, earg= .earg)
- dp2.deta = dtheta.deta(p2, link=.link, earg= .earg)
- w * cbind(dl.dp1 * dp1.deta, dl.dp2 * dp2.deta)
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
+ dp1.deta = dtheta.deta(p1, link = .link, earg = .earg)
+ dp2.deta = dtheta.deta(p2, link = .link, earg = .earg)
+ w * cbind(dl.dp1 * dp1.deta,
+ dl.dp2 * dp2.deta)
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
qq = 1-p1-p2
wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
ed2l.dp12 = 2 * (1/p1 + 1/qq)
@@ -345,42 +376,43 @@
wz[,iam(2,2,M)] = dp2.deta^2 * ed2l.dp22
wz[,iam(1,2,M)] = ed2l.dp1dp2 * dp1.deta * dp2.deta
w * wz
- }), list( .link=link, .earg=earg ))))
+ }), list( .link = link, .earg = earg ))))
}
- MNSs = function(link="logit", earg = list(), imS=NULL, ims=NULL, inS=NULL)
+ MNSs = function(link = "logit", earg = list(),
+ imS = NULL, ims = NULL, inS = NULL)
{
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
new("vglmff",
- blurb=c("MNSs Blood Group System (MS-Ms-MNS-MNs-NS-Ns phenotype)\n\n",
+ blurb = c("MNSs Blood Group System (MS-Ms-MNS-MNs-NS-Ns phenotype)\n\n",
"Links: ",
- namesof("mS", link, earg= earg), ", ",
- namesof("ms", link, earg= earg), ", ",
- namesof("nS", link, earg= earg, tag=FALSE)),
- deviance=Deviance.categorical.data.vgam,
- initialize=eval(substitute(expression({
+ namesof("mS", link, earg = earg), ", ",
+ namesof("ms", link, earg = earg), ", ",
+ namesof("nS", link, earg = earg, tag = FALSE)),
+ deviance = Deviance.categorical.data.vgam,
+ initialize = eval(substitute(expression({
delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
ok.col.ny = c("MS","Ms","MNS","MNs","NS","Ns")
if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
- setequal(ok.col.ny, col.ny)) {
+ setequal(ok.col.ny, col.ny)) {
if (!all(ok.col.ny == col.ny))
- stop("the columns of the response matrix should have names ",
- "(output of colnames()) ordered as ",
+ stop("the columns of the response matrix should have ",
+ "names (output of colnames()) ordered as ",
"c('MS','Ms','MNS','MNs','NS','Ns')")
}
predictors.names <-
- c(namesof("mS", .link, earg= .earg, tag=FALSE),
- namesof("ms", .link, earg= .earg, tag=FALSE),
- namesof("nS", .link, earg= .earg, tag=FALSE))
+ c(namesof("mS", .link, earg = .earg, tag = FALSE),
+ namesof("ms", .link, earg = .earg, tag = FALSE),
+ namesof("nS", .link, earg = .earg, tag = FALSE))
if (is.null(etastart)) {
ms = if (is.numeric(.ims)) rep(.ims, n) else
c(sqrt(mustart[,2]))
@@ -389,41 +421,46 @@
c(-ns + sqrt(ns^2 + mustart[,5])) # Solve a quadratic eqn
mS = if (is.numeric(.imS)) rep(.imS, n) else
1-ns-ms-nS
- etastart = cbind(theta2eta(mS, .link, earg= .earg),
- theta2eta(ms, .link, earg= .earg),
- theta2eta(nS, .link, earg= .earg))
+ etastart = cbind(theta2eta(mS, .link, earg = .earg),
+ theta2eta(ms, .link, earg = .earg),
+ theta2eta(nS, .link, earg = .earg))
}
- }), list( .link=link, .imS=imS, .ims=ims, .inS=inS, .earg=earg ))),
- inverse=eval(substitute(function(eta, extra=NULL){
- mS = eta2theta(eta[,1], link=.link, earg= .earg)
- ms = eta2theta(eta[,2], link=.link, earg= .earg)
- nS = eta2theta(eta[,3], link=.link, earg= .earg)
- ns = 1-mS-ms-nS
- cbind(MS=mS^2+2*mS*ms, Ms=ms^2, MNS=2*(mS*nS+ms*nS+mS*ns),
- MNs=2*ms*ns, NS=nS^2 + 2*nS*ns, Ns=ns^2)
- }, list( .link=link, .earg=earg ))),
- last=eval(substitute(expression({
- misc$link = c(mS= .link, ms= .link, nS= .link)
- misc$earg = list(mS= .earg, ms= .earg, nS= .earg )
- }), list( .link=link, .earg=earg ))),
- link=eval(substitute(function(mu, extra=NULL){
+ }), list( .link = link, .imS = imS, .ims = ims, .inS = inS, .earg = earg ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ mS = eta2theta(eta[,1], link = .link, earg = .earg)
+ ms = eta2theta(eta[,2], link = .link, earg = .earg)
+ nS = eta2theta(eta[,3], link = .link, earg = .earg)
+ ns = 1 - mS - ms - nS
+ cbind(MS = mS^2 + 2*mS*ms,
+ Ms = ms^2,
+ MNS = 2*(mS*nS + ms*nS + mS*ns),
+ MNs = 2*ms*ns,
+ NS = nS^2 + 2*nS*ns,
+ Ns = ns^2)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = c(mS = .link, ms = .link, nS = .link)
+ misc$earg = list(mS = .earg, ms = .earg, nS = .earg )
+ }), list( .link = link, .earg = earg ))),
+ link = eval(substitute(function(mu, extra = NULL) {
ms = sqrt(mu[,2])
ns = sqrt(mu[,6])
- nS = c(-nS + sqrt(nS^2 + mu[,5]))
- mS = 1-ns-ms-nS
- cbind(theta2eta(mS, .link, earg= .earg),
- theta2eta(ms, .link, earg= .earg),
- theta2eta(nS, .link, earg= .earg))
- }, list( .link=link, .earg=earg ))),
- loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ nS = c(-ns + sqrt(ns^2 + mu[,5]))
+ mS = 1 - ns - ms - nS
+ cbind(theta2eta(mS, .link, earg = .earg),
+ theta2eta(ms, .link, earg = .earg),
+ theta2eta(nS, .link, earg = .earg))
+ }, list( .link = link, .earg = earg ))),
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log = TRUE, docheck = FALSE))
},
- vfamily=c("MNSs", "vgenetic"),
- deriv=eval(substitute(expression({
- mS = eta2theta(eta[,1], link=.link, earg= .earg)
- ms = eta2theta(eta[,2], link=.link, earg= .earg)
- nS = eta2theta(eta[,3], link=.link, earg= .earg)
+ vfamily = c("MNSs", "vgenetic"),
+ deriv = eval(substitute(expression({
+ mS = eta2theta(eta[,1], link = .link, earg = .earg)
+ ms = eta2theta(eta[,2], link = .link, earg = .earg)
+ nS = eta2theta(eta[,3], link = .link, earg = .earg)
ns = 1-mS-ms-nS
dP1 = cbind(2*(mS+ms), 0, 2*(nS+ns-mS), -2*ms, -2*nS, -2*ns)
dP2 = cbind(2*mS, 2*ms, 2*(nS-mS), 2*(ns-ms), -2*nS, -2*ns)
@@ -431,21 +468,21 @@
dl1 = rowSums(y * dP1 / mu)
dl2 = rowSums(y * dP2 / mu)
dl3 = rowSums(y * dP3 / mu)
- dPP.deta = dtheta.deta(cbind(mS,ms,nS), link=.link, earg= .earg)
+ dPP.deta = dtheta.deta(cbind(mS, ms, nS), link = .link, earg = .earg)
w * dPP.deta * cbind(dl1, dl2, dl3)
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
dPP = array(c(dP1,dP2,dP3), c(n,6,3))
wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
for(i1 in 1:M)
for(i2 in i1:M) {
index = iam(i1,i2,M)
- wz[,index] = rowSums(dPP[,,i1,drop=TRUE] *
- dPP[,,i2,drop=TRUE] / mu) *
+ wz[,index] = rowSums(dPP[,,i1,drop = TRUE] *
+ dPP[,,i2,drop = TRUE] / mu) *
dPP.deta[,i1] * dPP.deta[,i2]
}
w * wz
- }), list( .link=link, .earg=earg ))))
+ }), list( .link = link, .earg = earg ))))
}
@@ -453,19 +490,19 @@
- ABO = function(link="logit", earg = list(), ipA=NULL, ipO=NULL)
+ ABO = function(link = "logit", earg = list(), ipA = NULL, ipO = NULL)
{
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
new("vglmff",
- blurb=c("ABO Blood Group System (A-B-AB-O phenotype)\n\n",
+ blurb = c("ABO Blood Group System (A-B-AB-O phenotype)\n\n",
"Links: ",
- namesof("pA", link, earg= earg), ", ",
- namesof("pB", link, earg= earg, tag=FALSE)),
- deviance=Deviance.categorical.data.vgam,
- initialize=eval(substitute(expression({
+ namesof("pA", link, earg = earg), ", ",
+ namesof("pB", link, earg = earg, tag = FALSE)),
+ deviance = Deviance.categorical.data.vgam,
+ initialize = eval(substitute(expression({
delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
@@ -478,85 +515,97 @@
}
predictors.names <-
- c(namesof("pA", .link, earg= .earg, tag=FALSE),
- namesof("pB", .link, earg= .earg, tag=FALSE))
+ c(namesof("pA", .link, earg = .earg, tag = FALSE),
+ namesof("pB", .link, earg = .earg, tag = FALSE))
+
if (is.null(etastart)) {
- pO = if (is.Numeric( .ipO)) rep( .ipO, n) else c(sqrt(mustart[,4]))
- pA = if (is.Numeric( .ipA)) rep( .ipA, n) else
- c(1-sqrt(mustart[,2]+mustart[,4]))
- pB = 1-pA-pO
- etastart = cbind(theta2eta(pA, .link, earg= .earg),
- theta2eta(pB, .link, earg= .earg))
+ pO = if (is.Numeric( .ipO )) rep( .ipO , len = n) else
+ c(sqrt(mustart[,4]))
+ pA = if (is.Numeric( .ipA )) rep( .ipA , len = n) else
+ c(1 - sqrt(mustart[,2] + mustart[,4]))
+ pB = 1 - pA - pO
+ etastart = cbind(theta2eta(pA, .link, earg = .earg),
+ theta2eta(pB, .link, earg = .earg))
}
- }), list( .link=link, .ipO=ipO, .ipA=ipA, .earg=earg ))),
- inverse=eval(substitute(function(eta, extra=NULL){
- pA = eta2theta(eta[,1], link=.link, earg= .earg)
- pB = eta2theta(eta[,2], link=.link, earg= .earg)
- pO = 1-pA-pB
- cbind(A=pA*(pA+2*pO), B=pB*(pB+2*pO), AB=2*pA*pB, O=pO*pO)
- }, list( .link=link, .earg=earg ))),
- last=eval(substitute(expression({
+ }), list( .link = link, .ipO = ipO, .ipA = ipA, .earg = earg ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ pA = eta2theta(eta[,1], link = .link, earg = .earg)
+ pB = eta2theta(eta[,2], link = .link, earg = .earg)
+ pO = 1 - pA - pB
+ cbind(A = pA*(pA+2*pO),
+ B = pB*(pB+2*pO),
+ AB = 2*pA*pB,
+ O = pO*pO)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
misc$link = c(pA = .link, pB = .link)
misc$earg = list(pA = .earg, pB = .earg )
- }), list( .link=link, .earg=earg ))),
- link=eval(substitute(function(mu, extra=NULL){
+ }), list( .link = link, .earg = earg ))),
+ link = eval(substitute(function(mu, extra = NULL) {
pO = sqrt(mu[,4])
p1 = ( (1-pO)+sqrt((1-pO)^2 + 2*mu[,3]) )/2
p2 = ( (1-pO)-sqrt((1-pO)^2 + 2*mu[,3]) )/2
- index = p2 >= 0 & p2 <= 1
+ index = (p2 >= 0) & (p2 <= 1)
pA = p1
pA[index] = p2[index]
- pB = 1-pA-pO
- cbind(theta2eta(pA, .link, earg= .earg),
- theta2eta(pB, .link, earg= .earg))
- }, list( .link=link, .earg=earg ))),
- loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ pB = abs(1-pA-pO)
+ cbind(theta2eta(pA, .link, earg = .earg),
+ theta2eta(pB, .link, earg = .earg))
+ }, list( .link = link, .earg = earg ))),
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log = TRUE,
+ docheck = FALSE))
},
- vfamily=c("ABO", "vgenetic"),
- deriv=eval(substitute(expression({
- p = eta2theta(eta[,1], link=.link, earg= .earg)
- q = eta2theta(eta[,2], link=.link, earg= .earg)
- r = 1-p-q
- pbar = 2*r+p
- qbar = 2*r+q
+ vfamily = c("ABO", "vgenetic"),
+ deriv = eval(substitute(expression({
+ ppp = eta2theta(eta[,1], link = .link, earg = .earg)
+ qqq = eta2theta(eta[,2], link = .link, earg = .earg)
+ rrr = 1-ppp-qqq
+
+
+ pbar = 2*rrr+ppp
+ qbar = 2*rrr+qqq
na = y[,1]
nb = y[,2]
nab = y[,3]
no = y[,4]
- dl.dp = (na+nab)/p - na/pbar - 2*nb/qbar - 2*no/r
- dl.dq = (nb+nab)/q - 2*na/pbar - nb/qbar - 2*no/r
- dp.deta = dtheta.deta(p, link=.link, earg= .earg)
- dq.deta = dtheta.deta(q, link=.link, earg= .earg)
- w * cbind(dl.dp * dp.deta, dl.dq * dq.deta)
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
+
+ dl.dp = (na+nab)/ppp - na/pbar - 2*nb/qbar - 2*no/rrr
+ dl.dq = (nb+nab)/qqq - 2*na/pbar - nb/qbar - 2*no/rrr
+ dp.deta = dtheta.deta(ppp, link = .link, earg = .earg)
+ dq.deta = dtheta.deta(qqq, link = .link, earg = .earg)
+
+ w * cbind(dl.dp * dp.deta,
+ dl.dq * dq.deta)
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
- ed2l.dp2 = (1 + 2/p + 4*q/qbar + p/pbar)
- ed2l.dq2 = (1 + 2/q + 4*p/pbar + q/qbar)
- ed2l.dpdq = 2 * (1 + q/qbar + p/pbar)
+ ed2l.dp2 = (1 + 2/ppp + 4*qqq/qbar + ppp/pbar)
+ ed2l.dq2 = (1 + 2/qqq + 4*ppp/pbar + qqq/qbar)
+ ed2l.dpdq = 2 * (1 + qqq/qbar + ppp/pbar)
wz[,iam(1,1,M)] = dp.deta^2 * ed2l.dp2
wz[,iam(2,2,M)] = dq.deta^2 * ed2l.dq2
wz[,iam(1,2,M)] = ed2l.dpdq * dp.deta * dq.deta
w * wz
- }), list( .link=link, .earg=earg ))))
+ }), list( .link = link, .earg = earg ))))
}
- AB.Ab.aB.ab = function(link="logit", earg = list(), init.p=NULL)
+ AB.Ab.aB.ab = function(link = "logit", earg = list(), init.p = NULL)
{
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
new("vglmff",
- blurb=c("AB-Ab-aB-ab phenotype\n\n",
- "Links: ", namesof("p", link, earg= earg, tag=TRUE)),
- deviance=Deviance.categorical.data.vgam,
- initialize=eval(substitute(expression({
+ blurb = c("AB-Ab-aB-ab phenotype\n\n",
+ "Links: ", namesof("p", link, earg = earg, tag = TRUE)),
+ deviance = Deviance.categorical.data.vgam,
+ initialize = eval(substitute(expression({
delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
@@ -568,62 +617,66 @@
"(output of colnames()) ordered as c('AB','Ab','aB','ab')")
}
- predictors.names = namesof("p", .link, earg= .earg, tag=FALSE)
+ predictors.names = namesof("p", .link, earg = .earg, tag = FALSE)
if (is.null(etastart)) {
- p = if (is.numeric(.init.p)) rep(.init.p,n) else
- c(sqrt(4*mustart[,4]))
- etastart = cbind(theta2eta(p, .link, earg= .earg))
+ p = if (is.numeric( .init.p )) rep(.init.p, len = n) else
+ c(sqrt(4 * mustart[, 4]))
+ etastart = cbind(theta2eta(p, .link, earg = .earg))
}
- }), list( .link=link, .init.p=init.p, .earg=earg ))),
- inverse=eval(substitute(function(eta,extra=NULL){
- p = eta2theta(eta, link=.link, earg= .earg)
- pp4 = p*p/4
- cbind(AB=0.5+pp4, Ab=0.25-pp4, aB=0.25-pp4, ab=pp4)
- }, list( .link=link, .earg=earg ))),
- last=eval(substitute(expression({
+ }), list( .link = link, .init.p=init.p, .earg = earg ))),
+ inverse = eval(substitute(function(eta,extra = NULL) {
+ p = eta2theta(eta, link = .link, earg = .earg)
+ pp4 = p * p / 4
+ cbind(AB = 0.5 + pp4,
+ Ab = 0.25 - pp4,
+ aB = 0.25 - pp4,
+ ab = pp4)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
misc$link = c(p = .link)
misc$earg = list(p= .earg )
- }), list( .link=link, .earg=earg ))),
- link=eval(substitute(function(mu, extra=NULL){
+ }), list( .link = link, .earg = earg ))),
+ link = eval(substitute(function(mu, extra = NULL) {
p = sqrt(4* mu[,4])
- theta2eta(p, .link, earg= .earg)
- }, list( .link=link, .earg=earg ))),
- loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ theta2eta(p, .link, earg = .earg)
+ }, list( .link = link, .earg = earg ))),
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu, log = TRUE, docheck = FALSE))
},
- vfamily=c("AB.Ab.aB.ab", "vgenetic"),
- deriv=eval(substitute(expression({
- pp = eta2theta(eta, link=.link, earg= .earg)
+ vfamily = c("AB.Ab.aB.ab", "vgenetic"),
+ deriv = eval(substitute(expression({
+ pp = eta2theta(eta, link = .link, earg = .earg)
p2 = pp*pp
nAB = w*y[,1]
nAb = w*y[,2]
naB = w*y[,3]
nab = w*y[,4]
dl.dp = 8 * pp * (nAB/(2+p2) - (nAb+naB)/(1-p2) + nab/p2)
- dp.deta = dtheta.deta(pp, link=.link, earg= .earg)
+ dp.deta = dtheta.deta(pp, link = .link, earg = .earg)
dl.dp * dp.deta
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
ed2l.dp2 = 4 * w * p2 * (1/(2+p2) + 2/(1-p2) + 1/p2)
wz = cbind((dp.deta^2) * ed2l.dp2)
wz
- }), list( .link=link, .earg=earg ))))
+ }), list( .link = link, .earg = earg ))))
}
- AA.Aa.aa = function(link="logit", earg = list(), init.pA=NULL)
+ AA.Aa.aa = function(link = "logit", earg = list(), init.pA = NULL)
{
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
new("vglmff",
- blurb=c("AA-Aa-aa phenotype\n\n",
- "Links: ", namesof("pA", link, earg= earg)),
- deviance=Deviance.categorical.data.vgam,
- initialize=eval(substitute(expression({
+ blurb = c("AA-Aa-aa phenotype\n\n",
+ "Links: ", namesof("pA", link, earg = earg)),
+ deviance = Deviance.categorical.data.vgam,
+ initialize = eval(substitute(expression({
delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
@@ -635,45 +688,49 @@
"(output of colnames()) ordered as c('AA','Aa','aa')")
}
- predictors.names = namesof("pA", .link, earg= .earg, tag=FALSE)
+ predictors.names = namesof("pA", .link, earg = .earg, tag = FALSE)
if (is.null(etastart)) {
pA = if (is.numeric(.init.pA)) rep(.init.pA, n) else
c(sqrt(mustart[,1]))
- etastart = cbind(theta2eta(pA, .link, earg= .earg))
+ etastart = cbind(theta2eta(pA, .link, earg = .earg))
}
- }), list( .link=link, .init.pA=init.pA, .earg=earg ))),
- inverse=eval(substitute(function(eta,extra=NULL){
- pA = eta2theta(eta, link=.link, earg= .earg)
+ }), list( .link = link, .init.pA=init.pA, .earg = earg ))),
+ inverse = eval(substitute(function(eta,extra = NULL) {
+ pA = eta2theta(eta, link = .link, earg = .earg)
pp = pA*pA
- cbind(AA=pp, Aa=2*pA*(1-pA), aa=(1-pA)^2)
- }, list( .link=link, .earg=earg ))),
- last=eval(substitute(expression({
+ cbind(AA = pp,
+ Aa = 2*pA*(1-pA),
+ aa = (1-pA)^2)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
misc$link = c("pA" = .link)
misc$earg = list("pA" = .earg )
- }), list( .link=link, .earg=earg ))),
- link=eval(substitute(function(mu, extra=NULL){
+ }), list( .link = link, .earg = earg ))),
+ link = eval(substitute(function(mu, extra = NULL) {
pA = sqrt(mu[,1])
- theta2eta(pA, .link, earg= .earg)
- }, list( .link=link, .earg=earg ))),
- loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x=w*y, size=w, prob=mu, log=TRUE, docheck=FALSE))
+ theta2eta(pA, .link, earg = .earg)
+ }, list( .link = link, .earg = earg ))),
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x=w*y, size=w, prob=mu,
+ log = TRUE, docheck = FALSE))
},
- vfamily=c("AA.Aa.aa", "vgenetic"),
- deriv=eval(substitute(expression({
- pA = eta2theta(eta, link=.link, earg= .earg)
+ vfamily = c("AA.Aa.aa", "vgenetic"),
+ deriv = eval(substitute(expression({
+ pA = eta2theta(eta, link = .link, earg = .earg)
nAA = w*y[,1]
nAa = w*y[,2]
naa = w*y[,3]
dl.dpA = (2*nAA+nAa)/pA - (nAa+2*naa)/(1-pA)
- dpA.deta = dtheta.deta(pA, link=.link, earg= .earg)
+ dpA.deta = dtheta.deta(pA, link = .link, earg = .earg)
dl.dpA * dpA.deta
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
d2l.dp2 = (2*nAA+nAa)/pA^2 + (nAa+2*naa)/(1-pA)^2
wz = cbind((dpA.deta^2) * d2l.dp2)
wz
- }), list( .link=link, .earg=earg ))))
+ }), list( .link = link, .earg = earg ))))
}
diff --git a/R/family.glmgam.R b/R/family.glmgam.R
index 05c51ca..63baeea 100644
--- a/R/family.glmgam.R
+++ b/R/family.glmgam.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -40,55 +41,71 @@
w=w, residuals = residuals,
eta=eta, extra=extra)
},
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ zero = .zero)
+ }, list( .zero = zero ))),
initialize = eval(substitute(expression({
assign("CQO.FastAlgorithm",
- ( .link == "logit" || .link == "cloglog"), envir = VGAMenv)
+ ( .link == "logit" || .link == "cloglog"),
+ envir = VGAM:::VGAMenv)
assign("modelno", if ( .link == "logit") 1 else
- if ( .link=="cloglog") 4 else NULL, envir = VGAMenv)
- if (.mv) {
+ if ( .link == "cloglog") 4 else NULL,
+ envir = VGAM:::VGAMenv)
+ if ( .mv ) {
y = as.matrix(y)
M = ncol(y)
if (!all(y == 0 | y == 1))
stop("response must contain 0's and 1's only")
dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
dn2 = if (length(dn2)) {
- paste("E[", dn2, "]", sep="")
+ paste("E[", dn2, "]", sep = "")
} else {
- paste("mu", 1:M, sep="")
+ paste("mu", 1:M, sep = "")
}
predictors.names = namesof(if (M > 1) dn2 else
"mu", .link, earg = .earg, short = TRUE)
- mustart = (0.5 + w * y) / (1 + w)
+ if (!length(mustart) && !length(etastart))
+ mustart = (0.5 + w * y) / (1 + w)
} else {
- NCOL = function (x)
- if (is.array(x) && length(dim(x)) > 1 ||
- is.data.frame(x)) ncol(x) else as.integer(1)
+ if (!all(w == 1))
+ extra$orig.w = w
+
+
+ NCOL = function (x) if (is.array(x) && length(dim(x)) > 1 ||
+ is.data.frame(x)) ncol(x) else as.integer(1)
if (NCOL(y) == 1) {
- if (is.factor(y)) y = y != levels(y)[1]
+ if (is.factor(y)) y = (y != levels(y)[1])
nvec = rep(1, n)
y[w == 0] <- 0
- if (!all(y >= 0 & y <= 1))
- stop("response values 'y' must satisfy 0 <= y <= 1")
- mustart = (0.5 + w * y) / (1 + w)
- no.successes = w * y
- if (any(abs(no.successes - round(no.successes)) > 0.001))
+ if (!all(y == 0 || y == 1))
+ stop("response values 'y' must be 0 or 1")
+ if (!length(mustart) && !length(etastart))
+ mustart = (0.5 + w * y) / (1 + w)
+
+
+ no.successes = y
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
stop("Number of successes must be integer-valued")
} else if (NCOL(y) == 2) {
- if (!all(w == 1))
- extra$orig.w = w
-
- if (any(abs(y - round(y)) > 0.001))
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(y - round(y)) > 1.0e-8))
stop("Count data must be integer-valued")
+ y = round(y)
nvec = y[,1] + y[,2]
y = ifelse(nvec > 0, y[,1] / nvec, 0)
w = w * nvec
- mustart = (0.5 + nvec * y) / (1 + nvec)
+ if (!length(mustart) && !length(etastart))
+ mustart = (0.5 + nvec * y) / (1 + nvec)
} else {
- stop("for the binomial family, 'y' must be a ",
+ stop("for the binomialff family, response 'y' must be a ",
"vector of 0 and 1's\n",
- "or a vector of proportions and 'weight' specified,\n",
+ "or a factor (first level = fail, other levels = success),\n",
"or a 2-column matrix where col 1 is the no. of ",
"successes and col 2 is the no. of failures")
}
@@ -100,16 +117,16 @@
mu
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- if (exists("CQO.FastAlgorithm", envir = VGAMenv))
- rm("CQO.FastAlgorithm", envir = VGAMenv)
- if (exists("modelno", envir = VGAMenv))
- rm("modelno", envir = VGAMenv)
+ if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
+ rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
+ if (exists("modelno", envir = VGAM:::VGAMenv))
+ rm("modelno", envir = VGAM:::VGAMenv)
dpar <- .dispersion
if (!dpar) {
temp87 = (y-mu)^2 * wz / (dtheta.deta(mu, link = .link,
earg = .earg )^2) # w cancel
if (.mv && ! .onedpar) {
- dpar = rep(as.numeric(NA), len=M)
+ dpar = rep(as.numeric(NA), len = M)
temp87 = cbind(temp87)
nrow.mu = if (is.matrix(mu)) nrow(mu) else length(mu)
for(ii in 1:M)
@@ -145,7 +162,9 @@
nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
- smallno = 1.0e4 * .Machine$double.eps
+
+ smallno = 1.0e6 * .Machine$double.eps
+ smallno = sqrt(.Machine$double.eps)
if (max(abs(ycounts - round(ycounts))) > smallno)
warning("converting 'ycounts' to integer in @loglikelihood")
ycounts = round(ycounts)
@@ -192,7 +211,7 @@
- gammaff = function(link="nreciprocal", earg = list(), dispersion=0)
+ gammaff = function(link = "nreciprocal", earg = list(), dispersion=0)
{
estimated.dispersion <- dispersion == 0
if (mode(link )!= "character" && mode(link )!= "name")
@@ -214,9 +233,9 @@
M = if (is.matrix(y)) ncol(y) else 1
dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
dn2 = if (length(dn2)) {
- paste("E[", dn2, "]", sep="")
+ paste("E[", dn2, "]", sep = "")
} else {
- paste("mu", 1:M, sep="")
+ paste("mu", 1:M, sep = "")
}
predictors.names = namesof(if (M > 1) dn2 else "mu", .link,
earg=.earg, short = TRUE)
@@ -233,7 +252,7 @@
temp = w * dmu.deta^2
dpar = sum(w * (y-mu)^2 * wz / temp) / (length(mu) - ncol(x))
} else {
- dpar = rep(0, len=M)
+ dpar = rep(0, len = M)
for(spp in 1:M) {
temp = w * dmu.deta[,spp]^2
dpar[spp] = sum(w * (y[,spp]-mu[,spp])^2 * wz[,spp]/temp) /
@@ -245,7 +264,7 @@
misc$default.dispersion <- 0
misc$estimated.dispersion <- .estimated.dispersion
misc$link = rep( .link, length=M)
- names(misc$link) = if (M > 1) paste("mu", 1:M, sep="") else "mu"
+ names(misc$link) = if (M > 1) paste("mu", 1:M, sep = "") else "mu"
misc$earg = vector("list", M)
names(misc$earg) = names(misc$link)
@@ -261,7 +280,7 @@
vfamily = "gammaff",
deriv = eval(substitute(expression({
dl.dmu = (y-mu) / mu^2
- dmu.deta = dtheta.deta(theta=mu, link = .link, earg=.earg)
+ dmu.deta = dtheta.deta(theta = mu, link = .link, earg=.earg)
w * dl.dmu * dmu.deta
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
@@ -272,7 +291,7 @@
- inverse.gaussianff = function(link="natural.ig", dispersion=0)
+ inverse.gaussianff = function(link = "natural.ig", dispersion=0)
{
estimated.dispersion <- dispersion==0
warning("@deviance() not finished")
@@ -313,7 +332,7 @@
misc$default.dispersion <- 0
misc$estimated.dispersion <- .estimated.dispersion
misc$link = rep( .link, length=M)
- names(misc$link) = if (M > 1) paste("mu", 1:M, sep="") else "mu"
+ names(misc$link) = if (M > 1) paste("mu", 1:M, sep = "") else "mu"
}), list( .dispersion = dispersion,
.estimated.dispersion = estimated.dispersion,
.link = link ))),
@@ -323,7 +342,7 @@
vfamily = "inverse.gaussianff",
deriv = eval(substitute(expression({
dl.dmu <- (y-mu) / mu^3
- dmu.deta <- dtheta.deta(theta=mu, link = .link)
+ dmu.deta <- dtheta.deta(theta = mu, link = .link)
w * dl.dmu * dmu.deta
}), list( .link = link ))),
weight = eval(substitute(expression({
@@ -335,17 +354,18 @@
-dinv.gaussian = function(x, mu, lambda, log=FALSE) {
+dinv.gaussian = function(x, mu, lambda, log = FALSE) {
if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
rm(log)
LLL = max(length(x), length(mu), length(lambda))
- x = rep(x, len=LLL); mu = rep(mu, len=LLL); lambda = rep(lambda, len=LLL)
- logdensity = rep(log(0), len=LLL)
+ x = rep(x, len = LLL); mu = rep(mu, len = LLL); lambda = rep(lambda, len = LLL)
+ logdensity = rep(log(0), len = LLL)
xok = (x > 0)
logdensity[xok] = 0.5 * log(lambda[xok] / (2 * pi * x[xok]^3)) -
- lambda[xok] * (x[xok]-mu[xok])^2 / (2*mu[xok]^2 * x[xok])
+ lambda[xok] *
+ (x[xok]-mu[xok])^2 / (2*mu[xok]^2 * x[xok])
logdensity[mu <= 0] = NaN
logdensity[lambda <= 0] = NaN
if (log.arg) logdensity else exp(logdensity)
@@ -353,11 +373,11 @@ dinv.gaussian = function(x, mu, lambda, log=FALSE) {
pinv.gaussian = function(q, mu, lambda) {
- if (any(mu <=0)) stop("mu must be positive")
- if (any(lambda <=0)) stop("lambda must be positive")
+ if (any(mu <= 0)) stop("mu must be positive")
+ if (any(lambda <= 0)) stop("lambda must be positive")
ans = q
- mu = rep(mu, len=length(q))
- lambda = rep(lambda, len=length(q))
+ mu = rep(mu, len = length(q))
+ lambda = rep(lambda, len = length(q))
ans[q <= 0] = 0
bb = q > 0
ans[bb] = pnorm(sqrt(lambda[bb]/q[bb])*(q[bb]/mu[bb]-1)) +
@@ -369,11 +389,11 @@ pinv.gaussian = function(q, mu, lambda) {
rinv.gaussian = function(n, mu, lambda) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ if (!is.Numeric(n, integ=TRUE, allow = 1, posit = TRUE))
stop("bad input for argument 'n'") else n
- mu = rep(mu, len=use.n); lambda = rep(lambda, len=use.n)
+ mu = rep(mu, len = use.n); lambda = rep(lambda, len = use.n)
u = runif(use.n)
- Z = rnorm(use.n)^2 # rchisq(use.n, df=1)
+ Z = rnorm(use.n)^2 # rchisq(use.n, df = 1)
phi = lambda / mu
y1 = 1 - 0.5 * (sqrt(Z^2 + 4*phi*Z) - Z) / phi
ans = mu * ifelse((1+y1)*u > 1, 1/y1, y1)
@@ -384,11 +404,24 @@ rinv.gaussian = function(n, mu, lambda) {
- inv.gaussianff = function(lmu="loge", llambda="loge",
- emu=list(), elambda=list(),
- ilambda=1,
+
+
+
+
+
+
+
+
+ inv.gaussianff = function(lmu = "loge", llambda = "loge",
+ emu = list(), elambda = list(),
+ method.init = 1,
+ ilambda = 1,
+ shrinkage.init = 0.99,
zero = NULL)
{
+
+
+
if (mode(lmu) != "character" && mode(lmu) != "name")
lmu <- as.character(substitute(lmu))
if (mode(llambda) != "character" && mode(llambda) != "name")
@@ -396,9 +429,17 @@ rinv.gaussian = function(n, mu, lambda) {
if (!is.list(emu)) emu = list()
if (!is.list(elambda)) elambda = list()
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+ method.init > 3)
+ stop("argument 'method.init' must be 1 or 2 or 3")
+ if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
+ shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
+
+
new("vglmff",
blurb = c("Inverse Gaussian distribution\n\n",
- "f(y) = sqrt(lambda/(2*pi*y^3)) * exp(-lambda*(y-mu)^2/(2*mu^2*y)), y&lambda>0",
+ "f(y) = sqrt(lambda/(2*pi*y^3)) * ",
+ "exp(-lambda*(y-mu)^2/(2*mu^2*y)); y, mu and lambda > 0",
"Link: ", namesof("mu", lmu, earg= emu), ", ",
namesof("lambda", llambda, earg= elambda), "\n",
"Mean: ", "mu\n",
@@ -409,67 +450,97 @@ rinv.gaussian = function(n, mu, lambda) {
initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
+
if (any(y <= 0)) stop("Require the response to have positive values")
+
predictors.names =
- c(namesof("mu", .lmu, earg = .emu, short= TRUE),
- namesof("lambda", .llambda, earg = .elambda, short= TRUE))
+ c(namesof("mu", .lmu, earg = .emu, short= TRUE),
+ namesof("lambda", .llambda, earg = .elambda, short= TRUE))
+
if (!length(etastart)) {
- initmu = y + 1/8
- initlambda = rep(if (length( .ilambda)) .ilambda else 1, len=n)
+ init.mu =
+ if ( .method.init == 3) {
+ 0 * y + 1.1 * median(y) + 1/8
+ } else if ( .method.init == 2) {
+ use.this = weighted.mean(y, w)
+ (1 - .sinit) * y + .sinit * use.this
+ } else {
+ 0 * y + weighted.mean(y, w) + 1/8
+ }
+
+ init.lambda = rep(if (length( .ilambda )) .ilambda else 1.0,
+ len = n)
+
etastart = cbind(
- theta2eta(initmu, link=.lmu, earg = .emu),
- theta2eta(initlambda, link=.llambda, earg = .elambda))
+ theta2eta(init.mu, link = .lmu, earg = .emu),
+ theta2eta(init.lambda, link = .llambda, earg = .elambda))
}
- }), list( .lmu=lmu, .llambda=llambda,
- .emu=emu, .elambda=elambda,
- .ilambda=ilambda ))),
+ }), list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda,
+ .sinit = shrinkage.init,
+ .method.init = method.init, .ilambda = ilambda ))),
inverse = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[,1], link=.lmu, earg = .emu)
- }, list( .lmu=lmu, .emu=emu, .elambda=elambda ))),
+ eta2theta(eta[,1], link = .lmu, earg = .emu)
+ }, list( .lmu = lmu, .emu = emu, .elambda = elambda ))),
last = eval(substitute(expression({
- misc$link = c(mu = .lmu, lambda = .llambda)
+ misc$link = c(mu = .lmu, lambda = .llambda)
misc$earg = list(mu = .emu, lambda = .elambda)
- }), list( .lmu=lmu, .llambda=llambda, .emu=emu, .elambda=elambda ))),
+ misc$method.init = .method.init
+ misc$shrinkage.init = .sinit
+ }), list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda,
+ .sinit = shrinkage.init,
+ .method.init = method.init ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- lambda <- eta2theta(eta[,2], link=.llambda, earg = .elambda)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(w * dinv.gaussian(x=y, mu=mu, lambda=lambda, log=TRUE))
- }
- }, list( .llambda=llambda, .emu=emu, .elambda=elambda ))),
+ lambda <- eta2theta(eta[,2], link = .llambda, earg = .elambda)
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(w * dinv.gaussian(x=y, mu = mu, lambda = lambda, log = TRUE))
+ }
+ }, list( .llambda = llambda, .emu = emu,
+ .elambda = elambda ))),
vfamily = "inv.gaussianff",
deriv = eval(substitute(expression({
- lambda <- eta2theta(eta[,2], link=.llambda, earg = .elambda)
- dl.dmu = lambda * (y-mu) / mu^3
- dl.dlambda <- 0.5 / lambda - (y-mu)^2 / (2 * mu^2 * y)
- dmu.deta <- dtheta.deta(theta=mu, link=.lmu, earg = .emu)
- dlambda.deta <- dtheta.deta(theta=lambda, link=.llambda, earg = .elambda)
- w * cbind(dl.dmu * dmu.deta, dl.dlambda * dlambda.deta)
- }), list( .lmu=lmu, .llambda=llambda, .emu=emu, .elambda=elambda ))),
+ mymu <- eta2theta(eta[,1], link = .lmu, earg = .emu)
+ lambda <- eta2theta(eta[,2], link = .llambda, earg = .elambda)
+ dmu.deta <- dtheta.deta(theta = mymu, link = .lmu, earg = .emu)
+ dlambda.deta <- dtheta.deta(theta = lambda, link = .llambda,
+ earg = .elambda)
+
+ dl.dmu = lambda * (y - mymu) / mymu^3
+ dl.dlambda <- 0.5 / lambda - (y-mymu)^2 / (2 * mymu^2 * y)
+ w * cbind(dl.dmu * dmu.deta,
+ dl.dlambda * dlambda.deta)
+ }), list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda ))),
weight = eval(substitute(expression({
- d2l.dmu2 = lambda / mu^3
+ d2l.dmu2 = lambda / mymu^3
d2l.dlambda2 = 0.5 / (lambda^2)
- w * cbind(dmu.deta^2 * d2l.dmu2, dlambda.deta^2 * d2l.dlambda2)
- }), list( .lmu=lmu, .llambda=llambda, .emu=emu, .elambda=elambda ))))
+ wz <- cbind(dmu.deta^2 * d2l.dmu2,
+ dlambda.deta^2 * d2l.dlambda2)
+ w * wz
+ }), list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda ))))
}
- poissonff = function(link="loge", earg = list(),
- dispersion = 1, onedpar=FALSE,
- imu=NULL, method.init=1,
- parallel=FALSE, zero = NULL)
+
+ poissonff <- function(link = "loge", earg = list(),
+ dispersion = 1, onedpar = FALSE,
+ imu=NULL, method.init = 1,
+ parallel = FALSE, zero = NULL)
{
estimated.dispersion <- dispersion==0
if (mode(link )!= "character" && mode(link )!= "name")
link <- as.character(substitute(link))
if (!is.list(earg)) earg = list()
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ=TRUE, posit = TRUE) ||
method.init > 3)
- stop("'method.init' must be 1 or 2 or 3")
- if (length(imu) && !is.Numeric(imu, posit=TRUE))
+ stop("argument 'method.init' must be 1 or 2 or 3")
+ if (length(imu) && !is.Numeric(imu, posit = TRUE))
stop("bad input for argument 'imu'")
new("vglmff",
@@ -487,16 +558,20 @@ rinv.gaussian = function(n, mu, lambda) {
if (residuals) sign(y - mu) * sqrt(2 * abs(devi) * w) else
2 * sum(w * devi)
},
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ zero = .zero)
+ }, list( .zero = zero ))),
initialize = eval(substitute(expression({
y = as.matrix(y)
M = ncoly = ncol(y)
- assign("CQO.FastAlgorithm", ( .link == "loge"), envir = VGAMenv)
+ assign("CQO.FastAlgorithm", ( .link == "loge"), envir = VGAM:::VGAMenv)
dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
dn2 = if (length(dn2)) {
- paste("E[", dn2, "]", sep="")
+ paste("E[", dn2, "]", sep = "")
} else {
- paste("mu", 1:M, sep="")
+ paste("mu", 1:M, sep = "")
}
predictors.names = namesof(if (M > 1) dn2 else "mu", .link,
earg = .earg, short = TRUE)
@@ -510,7 +585,7 @@ rinv.gaussian = function(n, mu, lambda) {
mu.init[,iii] = median(y[,iii]) + 1/8
}
}
- if (length(.imu))
+ if (length( .imu ))
mu.init = matrix( .imu, n, ncoly, byrow=TRUE)
etastart <- theta2eta(mu.init, link = .link, earg = .earg)
}
@@ -521,14 +596,14 @@ rinv.gaussian = function(n, mu, lambda) {
mu
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- if (exists("CQO.FastAlgorithm", envir = VGAMenv))
- rm("CQO.FastAlgorithm", envir = VGAMenv)
+ if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
+ rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
dpar <- .dispersion
if (!dpar) {
temp87 = (y-mu)^2 *
wz / (dtheta.deta(mu, link = .link, earg = .earg)^2) # w cancel
if (M > 1 && ! .onedpar) {
- dpar = rep(as.numeric(NA), len=M)
+ dpar = rep(as.numeric(NA), len = M)
temp87 = cbind(temp87)
nrow.mu = if (is.matrix(mu)) nrow(mu) else length(mu)
for(ii in 1:M)
@@ -552,12 +627,12 @@ rinv.gaussian = function(n, mu, lambda) {
}), list( .dispersion = dispersion, .method.init=method.init,
.estimated.dispersion = estimated.dispersion,
.onedpar = onedpar, .link = link, .earg = earg ))),
- link = eval(substitute(function(mu, extra = NULL) {
+ link = eval(substitute( function(mu, extra = NULL) {
theta2eta(mu, link = .link, earg = .earg)
}, list( .link = link, .earg = earg ))),
loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
if (residuals) w*(y/mu - 1) else {
- sum(w * dpois(x=y, lambda=mu, log=TRUE))
+ sum(w * dpois(x=y, lambda=mu, log = TRUE))
}
},
vfamily = "poissonff",
@@ -567,7 +642,7 @@ rinv.gaussian = function(n, mu, lambda) {
} else {
lambda <- mu
dl.dlambda <- (y-lambda) / lambda
- dlambda.deta <- dtheta.deta(theta=lambda, link = .link, earg = .earg)
+ dlambda.deta <- dtheta.deta(theta = lambda, link = .link, earg = .earg)
w * dl.dlambda * dlambda.deta
}
}), list( .link = link, .earg = earg ))),
@@ -578,7 +653,7 @@ rinv.gaussian = function(n, mu, lambda) {
w * tmp600
} else {
d2l.dlambda2 = 1 / lambda
- d2lambda.deta2=d2theta.deta2(theta=lambda,link= .link,earg = .earg)
+ d2lambda.deta2=d2theta.deta2(theta = lambda,link= .link,earg = .earg)
w * dlambda.deta^2 * d2l.dlambda2
}
}), list( .link = link, .earg = earg ))))
@@ -614,9 +689,9 @@ poissonqn.control <- function(save.weight=TRUE, ...)
}
- poissonqn = function(link="loge", earg = list(),
- dispersion = 1, onedpar=FALSE,
- parallel=FALSE, zero = NULL,
+ poissonqn = function(link = "loge", earg = list(),
+ dispersion = 1, onedpar = FALSE,
+ parallel = FALSE, zero = NULL,
wwts=c("expected","observed","qn"))
{
estimated.dispersion <- dispersion==0
@@ -646,9 +721,9 @@ poissonqn.control <- function(save.weight=TRUE, ...)
M = if (is.matrix(y)) ncol(y) else 1
dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
dn2 = if (length(dn2)) {
- paste("E[", dn2, "]", sep="")
+ paste("E[", dn2, "]", sep = "")
} else {
- paste("mu", 1:M, sep="")
+ paste("mu", 1:M, sep = "")
}
predictors.names = namesof(if (M > 1) dn2 else "mu", .link,
earg = .earg, short = TRUE)
@@ -666,7 +741,7 @@ poissonqn.control <- function(save.weight=TRUE, ...)
if (!dpar) {
temp87= (y-mu)^2 * wz/(dtheta.deta(mu, link = .link, earg = .earg)^2)
if (M > 1 && ! .onedpar) {
- dpar = rep(as.numeric(NA), len=M)
+ dpar = rep(as.numeric(NA), len = M)
temp87 = cbind(temp87)
nrow.mu = if (is.matrix(mu)) nrow(mu) else length(mu)
for(i in 1:M)
@@ -697,7 +772,7 @@ poissonqn.control <- function(save.weight=TRUE, ...)
.earg = earg ))),
loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
if (residuals) w*(y/mu - 1) else {
- sum(w * dpois(x=y, lambda=mu, log=TRUE))
+ sum(w * dpois(x=y, lambda=mu, log = TRUE))
}
},
vfamily = "poissonqn",
@@ -716,7 +791,7 @@ poissonqn.control <- function(save.weight=TRUE, ...)
} else {
lambda <- mu
dl.dlambda <- (y-lambda) / lambda
- dlambda.deta <- dtheta.deta(theta=lambda, link = .link, earg = .earg)
+ dlambda.deta <- dtheta.deta(theta = lambda, link = .link, earg = .earg)
w * dl.dlambda * dlambda.deta
}
derivnew
@@ -758,8 +833,8 @@ poissonqn.control <- function(save.weight=TRUE, ...)
- dexppoisson = function(lmean="loge", emean=list(),
- ldispersion="logit", edispersion=list(),
+ dexppoisson = function(lmean = "loge", emean = list(),
+ ldispersion = "logit", edispersion = list(),
idispersion=0.8,
zero = NULL)
{
@@ -767,13 +842,13 @@ poissonqn.control <- function(save.weight=TRUE, ...)
lmean = as.character(substitute(lmean))
if (mode(ldispersion)!= "character" && mode(ldispersion)!= "name")
ldispersion = as.character(substitute(ldispersion))
- if (!is.Numeric(idispersion, posit=TRUE))
+ if (!is.Numeric(idispersion, posit = TRUE))
stop("bad input for 'idispersion'")
if (!is.list(emean)) emean = list()
if (!is.list(edispersion)) edispersion = list()
new("vglmff",
- blurb = c("Double Exponential Poisson distribution\n\n",
+ blurb = c("Double exponential Poisson distribution\n\n",
"Link: ",
namesof("mean", lmean, earg= emean), ", ",
namesof("dispersion", lmean, earg= edispersion), "\n",
@@ -788,7 +863,7 @@ poissonqn.control <- function(save.weight=TRUE, ...)
M = if (is.matrix(y)) ncol(y) else 1
dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
dn2 = if (length(dn2)) {
- paste("E[", dn2, "]", sep="")
+ paste("E[", dn2, "]", sep = "")
} else {
"mu"
}
@@ -799,7 +874,7 @@ poissonqn.control <- function(save.weight=TRUE, ...)
init.mu = pmax(y, 1/8)
if (!length(etastart))
etastart = cbind(theta2eta(init.mu, link = .lmean,earg= .emean),
- theta2eta(rep( .idispersion, len=n),
+ theta2eta(rep( .idispersion, len = n),
link = .ldispersion, earg = .edispersion))
}), list( .lmean = lmean, .emean = emean,
.ldispersion = ldispersion, .edispersion = edispersion,
@@ -830,8 +905,8 @@ poissonqn.control <- function(save.weight=TRUE, ...)
Disper = eta2theta(eta[,2], link = .ldispersion, earg = .edispersion)
dl.dlambda = Disper * (y / lambda - 1)
dl.dDisper = y * log(lambda) + y - lambda + 0.5 / Disper
- dlambda.deta = dtheta.deta(theta=lambda, link = .lmean, earg = .emean)
- dDisper.deta = dtheta.deta(theta=Disper, link = .ldispersion,
+ dlambda.deta = dtheta.deta(theta = lambda, link = .lmean, earg = .emean)
+ dDisper.deta = dtheta.deta(theta = Disper, link = .ldispersion,
earg = .edispersion)
w * cbind(dl.dlambda * dlambda.deta,
dl.dDisper * dDisper.deta)
@@ -849,8 +924,8 @@ poissonqn.control <- function(save.weight=TRUE, ...)
- dexpbinomial = function(lmean="logit", ldispersion="logit",
- emean=list(), edispersion=list(),
+ dexpbinomial = function(lmean = "logit", ldispersion = "logit",
+ emean = list(), edispersion = list(),
idispersion=0.25,
zero=2)
{
@@ -858,7 +933,7 @@ poissonqn.control <- function(save.weight=TRUE, ...)
lmean = as.character(substitute(lmean))
if (mode(ldispersion)!= "character" && mode(ldispersion)!= "name")
ldispersion = as.character(substitute(ldispersion))
- if (!is.Numeric(idispersion, posit=TRUE))
+ if (!is.Numeric(idispersion, posit = TRUE))
stop("bad input for 'idispersion'")
if (!is.list(emean)) emean = list()
if (!is.list(edispersion)) edispersion = list()
@@ -885,28 +960,43 @@ poissonqn.control <- function(save.weight=TRUE, ...)
is.data.frame(x)) ncol(x) else as.integer(1)
if (NCOL(y) == 1) {
- if (is.factor(y)) y = y != levels(y)[1]
+
+
+ if (is.factor(y)) y = (y != levels(y)[1])
nvec = rep(1, n)
- if (!all(y >= 0 & y <= 1))
- stop("response values must be in [0, 1]")
- init.mu = (0.5 + w * y) / (1 + w)
- no.successes = w * y
- if (any(abs(no.successes - round(no.successes)) > 0.001))
+ y[w == 0] <- 0
+ if (!all(y == 0 || y == 1))
+ stop("response values 'y' must be 0 or 1")
+ init.mu =
+ mustart = (0.5 + w * y) / (1 + w)
+
+
+ no.successes = y
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
stop("Number of successes must be integer-valued")
} else if (NCOL(y) == 2) {
- if (any(abs(y - round(y)) > 0.001))
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(y - round(y)) > 1.0e-8))
stop("Count data must be integer-valued")
+ y = round(y)
nvec = y[,1] + y[,2]
y = ifelse(nvec > 0, y[,1] / nvec, 0)
w = w * nvec
- init.mu = (0.5 + nvec * y) / (1 + nvec)
+ init.mu =
+ mustart = (0.5 + nvec * y) / (1 + nvec)
} else
- stop("Response not of the right form")
-
+ stop("for the dexpbinomial family, response 'y' must be a ",
+ "vector of 0 and 1's\n",
+ "or a factor (first level = fail, other levels = success),\n",
+ "or a 2-column matrix where col 1 is the no. of ",
+ "successes and col 2 is the no. of failures")
dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
dn2 = if (length(dn2)) {
- paste("E[", dn2, "]", sep="")
+ paste("E[", dn2, "]", sep = "")
} else {
"mu"
}
@@ -916,7 +1006,7 @@ poissonqn.control <- function(save.weight=TRUE, ...)
earg = .edispersion, short = TRUE))
if (!length(etastart))
etastart = cbind(theta2eta(init.mu, link = .lmean,earg= .emean),
- theta2eta(rep( .idispersion, len=n),
+ theta2eta(rep( .idispersion, len = n),
link = .ldispersion, earg = .edispersion))
}), list( .lmean = lmean, .emean = emean,
.ldispersion = ldispersion, .edispersion = edispersion,
@@ -937,11 +1027,14 @@ poissonqn.control <- function(save.weight=TRUE, ...)
Disper = eta2theta(eta[,2], link = .ldispersion, earg = .edispersion)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
+
+
+
temp1 = y * log(ifelse(y > 0, y, 1)) # y*log(y)
temp2 = (1.0-y) * log1p(ifelse(y < 1, -y, 0)) # (1-y)*log(1-y)
sum(0.5 * log(Disper) + w * (y * Disper * log(prob) +
(1-y) * Disper * log1p(-prob) +
- temp1 * (1-Disper) + temp2 * (1-Disper)))
+ temp1 * (1-Disper) + temp2 * (1 - Disper)))
}
}, list( .lmean = lmean, .emean = emean,
.ldispersion = ldispersion, .edispersion = edispersion ))),
@@ -957,7 +1050,7 @@ poissonqn.control <- function(save.weight=TRUE, ...)
dl.dDisper = 0.5 / Disper + w * (y * log(prob) +
(1-y)*log1p(-prob) - temp1 - temp2)
dprob.deta = dtheta.deta(theta=prob, link = .lmean, earg = .emean)
- dDisper.deta = dtheta.deta(theta=Disper, link = .ldispersion,
+ dDisper.deta = dtheta.deta(theta = Disper, link = .ldispersion,
earg = .edispersion)
cbind(dl.dprob * dprob.deta,
dl.dDisper * dDisper.deta)
@@ -981,7 +1074,7 @@ poissonqn.control <- function(save.weight=TRUE, ...)
if (mode(link )!= "character" && mode(link )!= "name")
link <- as.character(substitute(link))
if (!is.list(earg)) earg = list()
- if (!is.Numeric(smallno, positive=TRUE, allow=1) || smallno > 1e-4)
+ if (!is.Numeric(smallno, positive=TRUE, allow = 1) || smallno > 1e-4)
stop("bad input for 'smallno'")
if (is.logical(parallel) && !parallel)
stop("'parallel' must be TRUE")
@@ -1003,7 +1096,7 @@ poissonqn.control <- function(save.weight=TRUE, ...)
specialCM = list(a = vector("list", M-1))
for(ii in 1:(M-1)) {
- specialCM[[1]][[ii]] = (constraints[[extra$mvar]])[,1+ii,drop=FALSE]
+ specialCM[[1]][[ii]] = (constraints[[extra$mvar]])[,1+ii,drop = FALSE]
}
names(specialCM) = extra$mvar
}), list( .parallel = parallel ))),
@@ -1039,7 +1132,7 @@ poissonqn.control <- function(save.weight=TRUE, ...)
temp1 = attr(x, "assign")
if (colnames(x)[1] != "(Intercept)") stop("x must have an intercept")
M = CCC = length(temp1[[mvar]]) + (colnames(x)[1] == "(Intercept)")
- temp9 = x[,temp1[[mvar]],drop=FALSE]
+ temp9 = x[,temp1[[mvar]],drop = FALSE]
temp9 = temp9 * matrix(2:CCC, n, CCC-1, byrow=TRUE)
temp9 = apply(temp9, 1, max)
temp9[temp9 == 0] = 1
@@ -1050,7 +1143,7 @@ poissonqn.control <- function(save.weight=TRUE, ...)
extra$index9 = temp9
predictors.names = namesof("mu", .link, earg = .earg, short = TRUE)
- predictors.names = rep(predictors.names, len=M)
+ predictors.names = rep(predictors.names, len = M)
}), list( .link = link, .earg = earg, .mvar = mvar ))),
inverse = eval(substitute(function(eta, extra = NULL) {
mu = eta2theta(eta, link = .link, earg = .earg)
@@ -1059,7 +1152,7 @@ poissonqn.control <- function(save.weight=TRUE, ...)
last = eval(substitute(expression({
misc$link = rep( .link, length=M)
names(misc$link) = if (M > 1) paste("mu(matched set ",
- 1:M, ")", sep="") else "mu"
+ 1:M, ")", sep = "") else "mu"
misc$earg = vector("list", M)
names(misc$earg) = names(misc$link)
for(ii in 1:M) misc$earg[[ii]] = .earg
@@ -1078,7 +1171,7 @@ poissonqn.control <- function(save.weight=TRUE, ...)
nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
- smallno = 1.0e4 * .Machine$double.eps
+ smallno = 1.0e6 * .Machine$double.eps
if (max(abs(ycounts - round(ycounts))) > smallno)
warning("converting 'ycounts' to integer in @loglikelihood")
ycounts = round(ycounts)
@@ -1154,7 +1247,7 @@ mypool = function(x, index) {
"Link: ", namesof("mu[,j]", link, earg = earg)),
constraints = eval(substitute(expression({
constraints <- cm.vgam(matrix(1,M,1), x, .parallel, constraints,
- intercept.apply=FALSE)
+ intercept.apply = FALSE)
}), list( .parallel = parallel ))),
initialize = eval(substitute(expression({
if (colnames(x)[1] == "(Intercept)")
diff --git a/R/family.loglin.R b/R/family.loglin.R
index 9e328a8..512e4bf 100644
--- a/R/family.loglin.R
+++ b/R/family.loglin.R
@@ -1,71 +1,74 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
-loglinb2 <- function(exchangeable=FALSE, zero=NULL)
+loglinb2 <- function(exchangeable = FALSE, zero = NULL)
{
new("vglmff",
- blurb=c("Log-linear model for binary data\n\n",
+ blurb = c("Log-linear model for binary data\n\n",
"Links: ",
"Identity: u1, u2, u12",
"\n"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints <- cm.vgam(matrix(c(1,1,0, 0,0,1), 3, 2), x,
- .exchangeable, constraints, intercept.apply=TRUE)
+ .exchangeable, constraints,
+ intercept.apply = TRUE)
constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list(.exchangeable=exchangeable, .zero=zero))),
- initialize=expression({
+ }), list( .exchangeable = exchangeable, .zero = zero ))),
+ initialize = expression({
y <- as.matrix(y)
predictors.names <- c("u1", "u2", "u12")
if (ncol(y) != 2)
stop("ncol(y) must be = 2")
- if (is.null(mustart)) {
+ if (length(mustart) + length(etastart) == 0) {
mustart <- matrix(as.numeric(NA), nrow(y), 4)
mustart[,1] <- weighted.mean((1-y[,1])*(1-y[,2]), w)
mustart[,2] <- weighted.mean((1-y[,1])*y[,2], w)
mustart[,3] <- weighted.mean(y[,1]*(1-y[,2]), w)
mustart[,4] <- weighted.mean(y[,1]*y[,2], w)
- if (any(mustart==0))
+ if (any(mustart == 0))
stop("some combinations of the response not realized")
}
}),
- inverse= function(eta, extra=NULL) {
+ inverse = function(eta, extra = NULL) {
u1 <- eta[,1]
u2 <- eta[,2]
u12 <- eta[,3]
denom <- 1 + exp(u1) + exp(u2) + exp(u1 + u2 + u12)
- cbind("00"=1/denom,
- "01"=exp(u2) / denom,
- "10"=exp(u1) / denom,
- "11"=exp(u1+u2+u12) / denom)
+ cbind("00" = 1/denom,
+ "01" = exp(u2) / denom,
+ "10" = exp(u1) / denom,
+ "11" = exp(u1+u2+u12) / denom)
},
- last=expression({
+ last = expression({
misc$link = c("u1" = "identity", "u2" = "identity", "u12" = "identity")
misc$earg = list(u1=list(), u2=list(), u12=list())
}),
- link= function(mu, extra=NULL) {
+ link = function(mu, extra = NULL) {
u0 <- log(mu[,1])
u2 <- log(mu[,2]) - u0
u1 <- log(mu[,3]) - u0
u12 <- log(mu[,4]) - u0 - u1 - u2
cbind(u1, u2, u12)
},
- loglikelihood=function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
+ loglikelihood = function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
u1 <- eta[,1]
u2 <- eta[,2]
u12 <- eta[,3]
denom <- 1 + exp(u1) + exp(u2) + exp(u1 + u2 + u12)
u0 <- -log(denom)
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else
sum(w*(u0 + u1*y[,1] + u2*y[,2] + u12*y[,1]*y[,2]))
},
vfamily=c("loglinb2"),
- deriv=expression({
+ deriv = expression({
u1 <- eta[,1]
u2 <- eta[,2]
u12 <- eta[,3]
@@ -77,7 +80,7 @@ loglinb2 <- function(exchangeable=FALSE, zero=NULL)
du0.du2 + y[,2],
du0.du12 + y[,1]*y[,2])
}),
- weight=expression({
+ weight = expression({
d2u0.du1.2 <- -(exp(u1) + exp(u1 + u2 + u12)) * (1+exp(u2)) / denom^2
d2u0.du22 <- -(exp(u2) + exp(u1 + u2 + u12)) * (1+exp(u1)) / denom^2
d2u0.du122 <- -exp(u1 + u2 + u12) * (1+exp(u1)+exp(u2)) / denom^2
@@ -97,20 +100,21 @@ loglinb2 <- function(exchangeable=FALSE, zero=NULL)
}
-loglinb3 <- function(exchangeable=FALSE, zero=NULL)
+loglinb3 <- function(exchangeable = FALSE, zero = NULL)
{
new("vglmff",
- blurb=c("Log-linear model for trivariate binary data\n\n",
+ blurb = c("Log-linear model for trivariate binary data\n\n",
"Links: ",
"Identity: u1, u2, u3, u12, u13, u23",
"\n"),
- constraints=eval(substitute(expression({
- constraints <- cm.vgam(matrix(c(1,1,1,0,0,0, 0,0,0,1,1,1), 6, 2), x,
- .exchangeable, constraints, intercept.apply=TRUE)
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list(.exchangeable=exchangeable, .zero=zero))),
- initialize=expression({
+ constraints = eval(substitute(expression({
+ constraints = cm.vgam(matrix(c(1,1,1,0,0,0, 0,0,0,1,1,1), 6, 2), x,
+ .exchangeable, constraints,
+ intercept.apply = TRUE)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .exchangeable = exchangeable, .zero = zero ))),
+ initialize = expression({
y <- as.matrix(y)
predictors.names <- c("u1", "u2", "u3", "u12", "u13", "u23")
if (ncol(y) != 3)
@@ -128,14 +132,17 @@ loglinb3 <- function(exchangeable=FALSE, zero=NULL)
})
extra$deriv.expression <- expression({
allterms <- exp(u1+u2+u3+u12+u13+u23)
- A1 <- exp(u1) + exp(u1 + u2 + u12) + exp(u1 + u3 + u13) + allterms
- A2 <- exp(u2) + exp(u1 + u2 + u12) + exp(u2 + u3 + u23) + allterms
- A3 <- exp(u3) + exp(u3 + u2 + u23) + exp(u1 + u3 + u13) + allterms
+ A1 <- exp(u1) + exp(u1 + u2 + u12) + exp(u1 + u3 + u13) +
+ allterms
+ A2 <- exp(u2) + exp(u1 + u2 + u12) + exp(u2 + u3 + u23) +
+ allterms
+ A3 <- exp(u3) + exp(u3 + u2 + u23) + exp(u1 + u3 + u13) +
+ allterms
A12 <- exp(u1 + u2 + u12) + allterms
A13 <- exp(u1 + u3 + u13) + allterms
A23 <- exp(u2 + u3 + u23) + allterms
})
- if (!length(mustart)) {
+ if (length(mustart) + length(etastart) == 0) {
mustart <- matrix(as.numeric(NA), nrow(y), 2^3)
mustart[,1] <- weighted.mean((1-y[,1])*(1-y[,2])*(1-y[,3]), w)
mustart[,2] <- weighted.mean((1-y[,1])*(1-y[,2])*y[,3], w)
@@ -145,46 +152,47 @@ loglinb3 <- function(exchangeable=FALSE, zero=NULL)
mustart[,6] <- weighted.mean(y[,1]*(1-y[,2])*y[,3], w)
mustart[,7] <- weighted.mean(y[,1]*y[,2]*(1-y[,3]), w)
mustart[,8] <- weighted.mean(y[,1]*y[,2]*y[,3], w)
- if (any(mustart==0))
+ if (any(mustart == 0))
stop("some combinations of the response not realized")
}
}),
- inverse= function(eta, extra=NULL) {
+ inverse= function(eta, extra = NULL) {
eval(extra$my.expression)
- cbind("000"=1,
- "001"=exp(u3),
- "010"=exp(u2),
- "011"=exp(u2+u3+u23),
- "100"=exp(u1),
- "101"=exp(u1+u3+u13),
- "110"=exp(u1+u2+u12),
- "111"=exp(u1+u2+u3+u12+u13+u23)) / denom
+ cbind("000" = 1,
+ "001" = exp(u3),
+ "010" = exp(u2),
+ "011" = exp(u2+u3+u23),
+ "100" = exp(u1),
+ "101" = exp(u1+u3+u13),
+ "110" = exp(u1+u2+u12),
+ "111" = exp(u1+u2+u3+u12+u13+u23)) / denom
},
- last=expression({
+ last = expression({
misc$link = rep("identity", length=M)
names(misc$link) = predictors.names
misc$earg = list(u1=list(), u2=list(), u3=list(),
u12=list(), u13=list(), u23=list())
}),
- link= function(mu, extra=NULL) {
- u0 <- log(mu[,1])
- u3 <- log(mu[,2]) - u0
- u2 <- log(mu[,3]) - u0
+ link = function(mu, extra = NULL) {
+ u0 <- log(mu[,1])
+ u3 <- log(mu[,2]) - u0
+ u2 <- log(mu[,3]) - u0
u23 <- log(mu[,4]) - u0 - u2 - u3
- u1 <- log(mu[,5]) - u0
+ u1 <- log(mu[,5]) - u0
u13 <- log(mu[,6]) - u0 - u1 - u3
u12 <- log(mu[,7]) - u0 - u1 - u2
cbind(u1, u2, u3, u12, u13, u23)
},
- loglikelihood=function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
+ loglikelihood = function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
eval(extra$my.expression)
u0 <- -log(denom)
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else
sum(w*(u0 + u1*y[,1] + u2*y[,2] + u3*y[,3] +u12*y[,1]*y[,2] +
u13*y[,1]*y[,3] + u23*y[,2]*y[,3]))
},
vfamily=c("loglinb3"),
- deriv=expression({
+ deriv = expression({
eval(extra$my.expression)
eval(extra$deriv.expression)
w * cbind(-A1/denom + y[,1],
@@ -194,7 +202,7 @@ loglinb3 <- function(exchangeable=FALSE, zero=NULL)
-A13/denom + y[,1]*y[,3],
-A23/denom + y[,2]*y[,3])
}),
- weight=expression({
+ weight = expression({
u0 <- -log(denom)
dA2.du1 <- exp(u1 + u2 + u12) + allterms
dA3.du1 <- exp(u1 + u3 + u13) + allterms
diff --git a/R/family.math.R b/R/family.math.R
new file mode 100644
index 0000000..a893a78
--- /dev/null
+++ b/R/family.math.R
@@ -0,0 +1,67 @@
+# These functions are
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+
+
+
+
+
+
+
+
+
+
+lambertW <- function(x, tolerance = 1.0e-10, maxit = 50) {
+ if (any(Im(x) != 0.0))
+ stop("argument 'x' must be real, not complex!")
+
+ ans = x
+ ans[!is.na(x) & x < -exp(-1)] = NA
+ ans[!is.na(x) & x >= -exp(-1)] = log1p(x[!is.na(x) & x >= -exp(-1)])
+ ans[!is.na(x) & x >= 0 ] = sqrt(x[!is.na(x) & x >= 0 ]) / 2
+
+ cutpt = 3.0
+ if (any(myTF <- !is.na(x) & x > cutpt)) {
+ L1 = log(x[!is.na(x) & x > cutpt]) # log(as.complex(x))
+ L2 = log(L1) # log(as.complex(L1))
+ wzinit = L1 - L2 +
+ (L2 +
+ (L2*( -2 + L2)/(2) +
+ (L2*( 6 + L2*(-9 + L2* 2)) / (6) +
+ L2*(-12 + L2*(36 + L2*(-22 + L2*3))) / (12*L1)) / L1) / L1) / L1
+
+ ans[myTF] = wzinit
+ }
+
+ for (ii in 1:maxit) {
+ exp1 = exp(ans)
+ exp2 = ans * exp1
+ delta = (exp2 - x) / (exp2 + exp1 -
+ ((ans + 2) * (exp2 - x) / (2 * (ans + 1.0))))
+ ans = ans - delta
+ if (all(is.na(delta) ||
+ max(abs(delta), na.rm = TRUE) < tolerance)) break
+ if (ii == maxit)
+ warning("did not converge")
+ }
+ ans[x == Inf] = Inf
+ ans
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/R/family.mixture.R b/R/family.mixture.R
index 0ee8604..b1e1aa1 100644
--- a/R/family.mixture.R
+++ b/R/family.mixture.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -130,7 +131,8 @@ mix2normal1 = function(lphi="logit",
sd2 = eta2theta(eta[,5], link= .lsd, earg= .esd2)
f1 = dnorm(y, mean=mu1, sd=sd1)
f2 = dnorm(y, mean=mu2, sd=sd2)
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
sum(w * log(phi*f1 + (1-phi)*f2))
}, list(.lphi=lphi, .lmu=lmu,
.ephi=ephi, .emu1=emu1, .emu2=emu2, .esd1=esd1, .esd2=esd2,
@@ -282,7 +284,8 @@ mix2poisson = function(lphi="logit", llambda="loge",
lambda2 = eta2theta(eta[,3], link= .llambda, earg= .el2)
f1 = dpois(y, lam=lambda1)
f2 = dpois(y, lam=lambda2)
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
sum(w * log(phi*f1 + (1-phi)*f2))
}, list(.lphi=lphi, .llambda=llambda,
.ephi=ephi, .el1=el1, .el2=el2 ))),
@@ -436,7 +439,8 @@ mix2exp = function(lphi="logit", llambda="loge",
lambda2 = eta2theta(eta[,3], link= .llambda, earg= .el2)
f1 = dexp(y, rate=lambda1)
f2 = dexp(y, rate=lambda2)
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
sum(w * log(phi*f1 + (1-phi)*f2))
}, list(.lphi=lphi, .llambda=llambda,
.ephi=ephi, .el1=el1, .el2=el2 ))),
diff --git a/R/family.nonlinear.R b/R/family.nonlinear.R
index db3415d..6f7ec3c 100644
--- a/R/family.nonlinear.R
+++ b/R/family.nonlinear.R
@@ -1,5 +1,11 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+
+
@@ -15,16 +21,102 @@ vnonlinear.control <- function(save.weight = TRUE, ...)
}
+
+
+subset_lohi <- function(xvec, yvec,
+ prob.x = c(0.15, 0.85),
+ type = c("median", "wtmean", "unwtmean"),
+ wtvec = rep(1, len = length(xvec))) {
+
+
+ if (!is.Numeric(prob.x, allow = 2))
+ stop("argument 'prob.x' must be numeric and of length two")
+
+ min.q <- quantile(xvec, probs = prob.x[1] )
+ max.q <- quantile(xvec, probs = prob.x[2] )
+
+ if(mode(type) != "character" && mode(type) != "name")
+ type <- as.character(substitute(type))
+ type <- match.arg(type, c("median", "wtmean", "unwtmean"))[1]
+
+
+ if (type == "median") {
+ y1bar <- median(yvec[xvec < min.q])
+ x1bar <- median(xvec[xvec < min.q])
+ y2bar <- median(yvec[xvec > max.q])
+ x2bar <- median(xvec[xvec > max.q])
+ }
+ if (type == "wtmean") {
+ y1bar <- weighted.mean(yvec[xvec < min.q], w = wtvec[xvec < min.q])
+ x1bar <- weighted.mean(xvec[xvec < min.q], w = wtvec[xvec < min.q])
+ y2bar <- weighted.mean(yvec[xvec > max.q], w = wtvec[xvec > max.q])
+ x2bar <- weighted.mean(xvec[xvec > max.q], w = wtvec[xvec > max.q])
+ }
+ if (type == "unwtmean") {
+ y1bar <- mean(yvec[xvec < min.q])
+ x1bar <- mean(xvec[xvec < min.q])
+ y2bar <- mean(yvec[xvec > max.q])
+ x2bar <- mean(xvec[xvec > max.q])
+ }
+
+ if (x1bar >= x2bar)
+ stop("cannot find two distinct x values; try decreasing the first ",
+ "value of argument 'prob.x' and increasing the second value")
+
+ list(x1bar = x1bar,
+ y1bar = y1bar,
+ x2bar = x2bar,
+ y2bar = y2bar,
+ slopeUp = (y2bar > y1bar))
+}
+
+
+
+micmen.control <- function(save.weight = TRUE, ...)
+{
+ list(save.weight = save.weight)
+}
+
+
+
+
+
micmen <- function(rpar = 0.001, divisor = 10,
init1 = NULL, init2 = NULL,
+ method.init = 1,
+ oim = TRUE,
link1 = "identity", link2 = "identity",
+ firstDeriv = c("nsimEIM", "rpar"),
earg1 = list(), earg2 = list(),
+ prob.x = c(0.15, 0.85),
+ nsimEIM = 500,
dispersion = 0, zero = NULL)
{
- estimated.dispersion <- dispersion == 0
+
+
+
+ firstDeriv <- match.arg(firstDeriv, c("nsimEIM", "rpar"))[1]
+
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE))
+ stop("argument 'method.init' must be integer")
+ if (method.init > 3)
+ stop("argument 'method.init' must be 1, 2, or 3")
+ if (!is.Numeric(prob.x, allow = 2))
+ stop("argument 'prob.x' must be numeric and of length two")
+ if (!is.logical(oim) || length(oim) != 1)
+ stop("argument 'oim' must be single logical")
+
+ stopifnot(nsimEIM > 10, length(nsimEIM)==1, nsimEIM==round(nsimEIM))
+
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+ method.init > 3)
+ stop("'method.init' must be 1 or 2 or 3")
+
+
+ estimated.dispersion <- (dispersion == 0)
if (mode(link1) != "character" && mode(link1) != "name")
link1 <- as.character(substitute(link1))
@@ -43,13 +135,13 @@ micmen <- function(rpar = 0.001, divisor = 10,
"Variance: constant"),
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M = 2)
- }), list(.zero = zero))),
+ }), list( .zero = zero))),
deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
M <- if (is.matrix(y)) ncol(y) else 1
if (residuals) {
- if (M > 1) NULL else (y-mu) * sqrt(w)
+ if (M > 1) NULL else (y - mu) * sqrt(w)
} else
- rss.vgam(y-mu, w, M = M)
+ rss.vgam(y - mu, w, M = M)
},
initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
@@ -69,13 +161,31 @@ micmen <- function(rpar = 0.001, divisor = 10,
if (length(mustart) || length(coefstart))
stop("cannot handle 'mustart' or 'coefstart'")
+
if (!length(etastart)) {
- index <- (1:n)[Xm2 > quantile(Xm2, prob = 0.85)]
- init1 <- median(y[index])
- init2 <- median(init1 * Xm2 / y - Xm2)
+ if ( .method.init == 3 ) {
+ index0 <- (1:n)[Xm2 <= quantile(Xm2, prob = .prob.x[2] )]
+ init1 <- median(y[index0])
+ init2 <- median(init1 * Xm2 / y - Xm2)
+ }
+ if ( .method.init == 1 || .method.init == 2) {
+ mysubset <- subset_lohi(Xm2, y, prob.x = .prob.x,
+ type = ifelse( .method.init == 1, "median", "wtmean"),
+ wtvec = w)
+
+ mat.x <- with(mysubset, cbind(c(x1bar, x2bar), -c(y1bar, y2bar)))
+ theta.temp <- with(mysubset,
+ solve(mat.x, c(x1bar * y1bar, x2bar * y2bar)))
+ init1 <- theta.temp[1]
+ init2 <- theta.temp[2]
- if (length(.init1)) init1 = .init1
- if (length(.init2)) init2 = .init2
+
+
+ }
+
+
+ if (length( .init1 )) init1 <- .init1
+ if (length( .init2 )) init2 <- .init2
etastart <- cbind(
rep(theta2eta(init1, .link1, earg = .earg1), len = n),
@@ -84,86 +194,425 @@ micmen <- function(rpar = 0.001, divisor = 10,
stop("cannot handle 'etastart' or 'mustart'")
}
- }), list(.init1 = init1, .init2 = init2,
- .earg1 = earg1, .earg2 = earg2,
- .link1 = link1, .link2 = link2))),
+ }), list( .init1 = init1, .link1 = link1, .earg1 = earg1,
+ .init2 = init2, .link2 = link2, .earg2 = earg2,
+ .method.init = method.init,
+ .prob.x = prob.x ))),
inverse = eval(substitute(function(eta, extra = NULL) {
theta1 <- eta2theta(eta[,1], .link1, earg = .earg1)
theta2 <- eta2theta(eta[,2], .link2, earg = .earg2)
theta1 * extra$Xm2 / (theta2 + extra$Xm2)
- }, list(.link1 = link1, .link2 = link2,
- .earg1 = earg1, .earg2 = earg2 ))),
+ }, list( .link1 = link1, .earg1 = earg1,
+ .link2 = link2, .earg2 = earg2))),
last = eval(substitute(expression({
- misc$link <- c(theta1 = .link1, theta2 = .link2)
- misc$earg <- list(theta1 = .earg1, theta2 = .earg2 )
- misc$rpar <- rpar
- fit$df.residual <- n - rank # Not nrow_X_vlm - rank
- fit$df.total <- n # Not nrow_X_vlm
-
- extra$Xm2 <- NULL # Regressor is in control$regressor
- dpar <- .dispersion
- if (!dpar) {
- dpar <- sum(w * (y-mu)^2) / (n - ncol_X_vlm)
- }
- misc$dispersion <- dpar
- misc$default.dispersion <- 0
- misc$estimated.dispersion <- .estimated.dispersion
- }), list(.link1 = link1, .link2 = link2, .dispersion = dispersion,
- .earg1 = earg1, .earg2 = earg2,
- .estimated.dispersion = estimated.dispersion))),
+ misc$link <- c(theta1 = .link1, theta2 = .link2)
+ misc$earg <- list(theta1 = .earg1, theta2 = .earg2 )
+ misc$rpar <- rpar
+ fit$df.residual <- n - rank # Not nrow_X_vlm - rank
+ fit$df.total <- n # Not nrow_X_vlm
+
+ extra$Xm2 <- NULL # Regressor is in control$regressor
+ dpar <- .dispersion
+ if (!dpar) {
+ dpar <- sum(w * (y - mu)^2) / (n - ncol_X_vlm)
+ }
+ misc$dispersion <- dpar
+ misc$default.dispersion <- 0
+ misc$estimated.dispersion <- .estimated.dispersion
+ misc$method.init <- .method.init
+ misc$nsimEIM <- .nsimEIM
+ misc$firstDeriv <- .firstDeriv
+ misc$oim <- .oim
+ misc$rpar <- rpar
+ misc$orig.rpar <- .rpar
+ }), list( .link1 = link1, .earg1 = earg1,
+ .link2 = link2, .earg2 = earg2,
+ .dispersion = dispersion,
+ .method.init = method.init,
+ .firstDeriv = firstDeriv,
+ .oim = oim, .rpar = rpar,
+ .nsimEIM = nsimEIM,
+ .estimated.dispersion = estimated.dispersion ))),
summary.dispersion = FALSE,
- vfamily = c("micmen","vnonlinear"),
+ vfamily = c("micmen", "vnonlinear"),
deriv = eval(substitute(expression({
- if (iter > 1) {
- rpar <- max(rpar / .divisor, 1000 * .Machine$double.eps)
- } else {
- rpar <- .rpar
- d3 <- deriv3(~ theta1 * Xm2 / (theta2 + Xm2),
- c("theta1", "theta2"), hessian = FALSE)
- }
-
theta1 <- eta2theta(eta[,1], .link1, earg = .earg1)
theta2 <- eta2theta(eta[,2], .link2, earg = .earg2)
+ dthetas.detas <- cbind(dtheta.deta(theta1, .link1, earg = .earg1),
+ dtheta.deta(theta2, .link2, earg = .earg2))
- if (TRUE) {
- dmus.dthetas <- attr(eval(d3), "gradient")
+ rpar <- if ( .firstDeriv == "rpar") {
+ if (iter > 1) {
+ max(rpar / .divisor, 1000 * .Machine$double.eps)
+ } else {
+ d3 <- deriv3(~ theta1 * Xm2 / (theta2 + Xm2),
+ c("theta1", "theta2"), hessian = FALSE)
+ .rpar
+ }
} else {
- dmu.dtheta1 <- Xm2 / (theta2 + Xm2)
- dmu.dtheta2 <- -theta1 * Xm2 / (Xm2 + theta2)^2
- dmus.dthetas <- cbind(dmu.dtheta1, dmu.dtheta2)
+ .rpar
}
- dthetas.detas <- cbind(dtheta.deta(theta1, .link1, earg = .earg1),
- dtheta.deta(theta2, .link2, earg = .earg2))
+ dmus.dthetas <- if (FALSE) {
+ attr(eval(d3), "gradient")
+ } else {
+ dmu.dtheta1 <- Xm2 / (theta2 + Xm2)
+ dmu.dtheta2 <- -theta1 * Xm2 / (Xm2 + theta2)^2
+ cbind(dmu.dtheta1, dmu.dtheta2)
+ }
- if (TRUE) {
- index <- iam(NA, NA, M = M, both = TRUE)
- temp200809 <- dmus.dthetas * dthetas.detas
- if (M > 1)
- temp200809[,2:M] <- temp200809[,2:M] + sqrt(rpar)
- w * (y-mu) * temp200809
+ myderiv <- if ( .firstDeriv == "rpar") {
+ if (TRUE) {
+ index <- iam(NA, NA, M = M, both = TRUE)
+ temp200809 <- dmus.dthetas * dthetas.detas
+ if (M > 1)
+ temp200809[, 2:M] <- temp200809[, 2:M] + sqrt(rpar)
+ w * (y - mu) * temp200809
+ } else {
+ w * (y - mu) *
+ cbind(dmus.dthetas[,1] * dthetas.detas[,1],
+ dmus.dthetas[,2] * dthetas.detas[,2] + sqrt(rpar))
+ }
} else {
- w * (y-mu) *
- cbind(dmus.dthetas[,1] * dthetas.detas[,1],
- dmus.dthetas[,2] * dthetas.detas[,2] + sqrt(rpar))
+ temp20101111 <- dmus.dthetas * dthetas.detas
+ w * (y - mu) * temp20101111
}
- }), list( .link1 = link1, .link2 = link2, .rpar = rpar,
- .earg1 = earg1, .earg2 = earg2,
- .divisor = divisor))),
+
+ myderiv
+ }), list( .link1 = link1, .earg1 = earg1,
+ .link2 = link2, .earg2 = earg2,
+ .firstDeriv = firstDeriv,
+ .rpar = rpar, .divisor = divisor ))),
weight = eval(substitute(expression({
- if (TRUE) {
+ if ( .oim ) {
+ wz <- matrix(0, n, dimm(M))
+ wz[, iam(1, 1, M)] <- Xm2
+ wz[, iam(1, 2, M)] <- y - 2 * mu
+ wz[, iam(2, 2, M)] <- theta1 * (3 * mu - 2 * y) / (theta2 + Xm2)
+ wz <- wz * Xm2 / (theta2 + Xm2)^2
+ }
+
+
+ if ( .firstDeriv == "rpar") {
+ if (FALSE) {
wz <- dmus.dthetas[,index$row] * dmus.dthetas[,index$col] *
- dthetas.detas[,index$row] * dthetas.detas[,index$col]
+ dthetas.detas[,index$row] * dthetas.detas[,index$col]
if (M > 1)
- wz[,2:M] <- wz[,2:M] + rpar
+ wz[, 2:M] <- wz[, 2:M] + rpar
} else {
wz <- cbind(( dmus.dthetas[,1] * dthetas.detas[,1])^2,
( dmus.dthetas[,2] * dthetas.detas[,2])^2 + rpar,
dmus.dthetas[,1] * dmus.dthetas[,2] *
dthetas.detas[,1] * dthetas.detas[,2])
}
+ } else {
+ run.varcov <- 0
+ index0 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+
+ mysigma <- 1
+
+ for (ii in 1:( .nsimEIM )) {
+ ysim <- theta1 * Xm2 / (theta2 + Xm2) + rnorm(n, sd = mysigma)
+ temp3 <- (ysim - mu) * dmus.dthetas * dthetas.detas
+ run.varcov <- run.varcov +
+ temp3[, index0$row.index] * temp3[, index0$col.index]
+ }
+ run.varcov <- run.varcov / .nsimEIM
+
+ wz <- if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
+
+ }
+
+ w * wz
+ }), list( .link1 = link1, .link2 = link2,
+ .firstDeriv = firstDeriv,
+ .nsimEIM = nsimEIM, .oim = oim ))))
+}
+
+
+
+
+
+
+
+
+skira.control <- function(save.weight = TRUE, ...)
+{
+ list(save.weight = save.weight)
+}
+
+
+
+ skira <- function(rpar = 0.1, divisor = 10,
+ init1 = NULL, init2 = NULL,
+ link1 = "identity", link2 = "identity",
+ earg1 = list(),
+ earg2 = list(),
+ method.init = 1,
+ oim = TRUE,
+ prob.x = c(0.15, 0.85),
+ smallno = 1.0e-3,
+ nsimEIM = 500,
+ firstDeriv = c("nsimEIM", "rpar"),
+ dispersion = 0, zero = NULL)
+{
+
+ firstDeriv <- match.arg(firstDeriv, c("nsimEIM", "rpar"))[1]
+
+ if (!is.Numeric(prob.x, allow = 2))
+ stop("argument 'prob.x' must be numeric and of length two")
+
+ estimated.dispersion <- dispersion == 0
+ if (mode(link1) != "character" && mode(link1) != "name")
+ link1 <- as.character(substitute(link1))
+ if (mode(link2) != "character" && mode(link2) != "name")
+ link2 <- as.character(substitute(link2))
+
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE))
+ stop("argument 'method.init' must be integer")
+ if (method.init > 5)
+ stop("argument 'method.init' must be 1, 2, 3, 4 or 5")
+ if (!is.list(earg1))
+ earg1 = list()
+ if (!is.list(earg2))
+ earg2 = list()
+
+ stopifnot(nsimEIM > 10, length(nsimEIM)==1, nsimEIM==round(nsimEIM))
+
+ new("vglmff",
+ blurb = c("Shinozaki-Kira regression model\n",
+ "Y_i = 1 / (theta1 + theta2 * u_i) + e_i\n\n",
+ "Links: ",
+ namesof("theta1", link1, earg = earg1), ", ",
+ namesof("theta2", link2, earg = earg2)),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M = 2)
+ }), list( .zero = zero ))),
+ deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ M <- if (is.matrix(y))
+ ncol(y) else 1
+ if (residuals) {
+ if (M > 1) NULL else (y - mu) * sqrt(w)
+ } else rss.vgam(y - mu, w, M = M)
+ },
+ initialize = eval(substitute(expression({
+
+ warning("20101105; need to fix a bug in the signs of initial vals")
+
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ if (!length(Xm2)) stop("regressor not found")
+ if (ncol(as.matrix(Xm2)) != 1)
+ stop("regressor not found or is not a vector. ",
+ "Use the 'form2' argument without an intercept")
+ Xm2 <- as.vector(Xm2)
+ extra$Xm2 <- Xm2
+
+ predictors.names <-
+ c(namesof("theta1", .link1, earg = .earg1, tag = FALSE),
+ namesof("theta2", .link2, earg = .earg2, tag = FALSE))
+
+ if (length(mustart) || length(coefstart))
+ stop("cannot handle 'mustart' or 'coefstart'")
+
+ if (!length(etastart)) {
+
+
+ min.q <- quantile(Xm2, probs = .prob.x[1] )
+ max.q <- quantile(Xm2, probs = .prob.x[2] )
+ if ( .method.init == 3 || .method.init == 2 ) {
+
+ mysubset <- subset_lohi(Xm2, y, prob.x = .prob.x,
+ type = ifelse( .method.init == 2, "median", "wtmean"),
+ wtvec = w)
+
+ mat.x <- with(mysubset, cbind(c(1, 1),
+ c(x1bar, x2bar)) * c(y1bar, y2bar))
+ theta.temp <- solve(mat.x, c(1, 1))
+ init1 <- theta.temp[1]
+ init2 <- theta.temp[2]
+ } else if ( .method.init == 1 ) {
+ yy <- as.vector( y[(Xm2 > min.q) & (Xm2 < max.q)])
+ xx <- as.vector(Xm2[(Xm2 > min.q) & (Xm2 < max.q)])
+ ww <- as.vector( w[(Xm2 > min.q) & (Xm2 < max.q)])
+ yy[ abs(yy) < .smallno ] <- .smallno *
+ sign(yy[ abs(yy) < .smallno ])
+
+ wt.temp <- (yy^4) * ww
+ wt.temp.max <- median(wt.temp) * 100
+ wt.temp[wt.temp > wt.temp.max] <- wt.temp.max
+
+ mylm.wfit <- lm.wfit(x = cbind(1, xx), y = 1 / yy, w = wt.temp)
+ init1 <- mylm.wfit$coef[1]
+ init2 <- mylm.wfit$coef[2]
+ } else if (( .method.init == 4) || ( .method.init == 5)) {
+
+ tempfit <- if ( .method.init == 4 ) {
+ fitted(loess(y ~ Xm2))
+ } else {
+ fitted(smooth.spline(Xm2, y, w = w, df = 2.0))
+ }
+
+ mysubset <- subset_lohi(Xm2, y, prob.x = .prob.x,
+ type = "wtmean", wtvec = w)
+
+
+ mat.x <- with(mysubset, cbind(c(1, 1),
+ c(x1bar, x2bar)) * c(y1bar, y2bar))
+ theta.temp <- solve(mat.x, c(1, 1))
+ init1 <- theta.temp[1]
+ init2 <- theta.temp[2]
+ } else {
+ stop("argument 'method.init' unmatched")
+ }
+
+ mu <- 1 / (init1 + init2 * Xm2)
+
+
+
+ matplot(Xm2, cbind(y, mu), col = c("blue", "green"),
+ main = "Initial values in green")
+ if ( .method.init == 1 ) {
+ points(Xm2, 1 / (init1 + init2 * Xm2), col = "green")
+ } else {
+ with(mysubset,
+ points(c(x1bar, x2bar), c(y1bar, y2bar), col = "red", pch = "+", cex = 2))
+ }
+
+
+ if (length( .init1 )) init1 <- .init1
+ if (length( .init2 )) init2 <- .init2
+ etastart <- cbind(
+ rep(theta2eta(init1, .link1, earg = .earg1), len = n),
+ rep(theta2eta(init2, .link2, earg = .earg2), len = n))
+ } else {
+ stop("cannot handle 'etastart' or 'mustart'")
+ }
+ }), list( .init1 = init1, .link1 = link1, .earg1 = earg1,
+ .init2 = init2, .link2 = link2, .earg2 = earg2,
+ .smallno = smallno, .prob.x = prob.x,
+ .nsimEIM = nsimEIM,
+ .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ theta1 <- eta2theta(eta[, 1], .link1, earg = .earg1)
+ theta2 <- eta2theta(eta[, 2], .link2, earg = .earg2)
+ 1 / (theta1 + theta2 * extra$Xm2)
+ }, list( .link1 = link1, .earg1 = earg1,
+ .link2 = link2, .earg2 = earg2 ))),
+ last = eval(substitute(expression({
+ misc$link <- c(theta1 = .link1, theta2 = .link2)
+ misc$earg <- list(theta1 = .earg1, theta2 = .earg2)
+ misc$rpar <- rpar
+ misc$orig.rpar <- .rpar
+ fit$df.residual <- n - rank
+ fit$df.total <- n
+ dpar <- .dispersion
+ if (!dpar) {
+ dpar <- sum(w * (y - mu)^2) / (n - ncol_X_vlm)
+ }
+ misc$dispersion <- dpar
+ misc$default.dispersion <- 0
+ misc$estimated.dispersion <- .estimated.dispersion
+ misc$method.init <- .method.init
+ misc$nsimEIM <- .nsimEIM
+ misc$firstDeriv <- .firstDeriv
+ misc$oim <- .oim
+ }), list( .link1 = link1, .earg1 = earg1,
+ .link2 = link2, .earg2 = earg2,
+ .dispersion = dispersion, .rpar = rpar,
+ .method.init = method.init, .nsimEIM = nsimEIM,
+ .firstDeriv = firstDeriv, .oim = oim,
+ .estimated.dispersion = estimated.dispersion ))),
+ summary.dispersion = FALSE,
+ vfamily = c("skira", "vnonlinear"),
+ deriv = eval(substitute(expression({
+ rpar <- if ( .firstDeriv == "rpar") {
+ if (iter > 1) {
+ max(rpar / .divisor, 1000 * .Machine$double.eps)
+ } else {
+ d3 <- deriv3( ~ 1 / (theta1 + theta2 * Xm2),
+ c("theta1", "theta2"), hessian = FALSE)
+ .rpar
+ }
+ } else {
+ .rpar
+ }
+
+ theta1 <- eta2theta(eta[, 1], .link1, earg = .earg1)
+ theta2 <- eta2theta(eta[, 2], .link2, earg = .earg2)
+ dthetas.detas <- cbind(dtheta.deta(theta1, .link1, earg = .earg1),
+ dtheta.deta(theta2, .link2, earg = .earg2))
+
+ dmus.dthetas <- if (FALSE) {
+ attr(eval(d3), "gradient")
+ } else {
+ dmu.dtheta1 <- -1 / (theta1 + theta2 * Xm2)^2
+ dmu.dtheta2 <- -Xm2 / (theta1 + theta2 * Xm2)^2
+ cbind(dmu.dtheta1, dmu.dtheta2)
+ }
+
+
+ myderiv <- if ( .firstDeriv == "nsimEIM") {
+ w * (y - mu) * dmus.dthetas * dthetas.detas
+ } else {
+ w * (y - mu) *
+ cbind(dmus.dthetas[, 1] * dthetas.detas[, 1],
+ dmus.dthetas[, 2] * dthetas.detas[, 2] + sqrt(rpar))
+ }
+ myderiv
+ }), list( .link1 = link1, .earg1 = earg1,
+ .link2 = link2, .earg2 = earg2,
+ .firstDeriv = firstDeriv,
+ .rpar = rpar, .divisor = divisor ))),
+ weight = eval(substitute(expression({
+ if ( .firstDeriv == "rpar") {
+ if (FALSE) {
+ index5 <- iam(NA, NA, M = M, both = TRUE)
+ wz <- dmus.dthetas[, index5$row] *
+ dmus.dthetas[, index5$col] *
+ dthetas.detas[, index5$row] *
+ dthetas.detas[, index5$col]
+
+ if (M > 1) wz[, -(1:M)] <- wz[, -(1:M)] / 100
+ } else {
+ wz <- cbind((dmus.dthetas[, 1] * dthetas.detas[, 1])^2,
+ (dmus.dthetas[, 2] * dthetas.detas[, 2])^2 + rpar,
+ dmus.dthetas[, 1] * dmus.dthetas[, 2] *
+ dthetas.detas[, 1] * dthetas.detas[, 2])
+ }
+ } else {
+ run.varcov <- 0
+ index0 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+
+ mysigma <- sqrt( median( (y - mu)^2 ) ) / 100
+ mysigma <- 1
+
+ for(ii in 1:( .nsimEIM )) {
+ ysim <- 1 / (theta1 + theta2 * Xm2) + rnorm(n, sd = mysigma)
+ temp3 <- (ysim - mu) * dmus.dthetas * dthetas.detas
+ run.varcov <- run.varcov +
+ temp3[, index0$row.index] * temp3[, index0$col.index]
+ }
+ run.varcov <- run.varcov / .nsimEIM
+
+ wz <- if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
+ }
+
+
w * wz
- }), list( .link1 = link1, .link2 = link2 ))))
+ }), list( .link1 = link1, .link2 = link2,
+ .firstDeriv = firstDeriv,
+ .nsimEIM = nsimEIM, .oim = oim ))))
}
+
+
+
+
+
+
diff --git a/R/family.normal.R b/R/family.normal.R
index aab25b1..25e1f67 100644
--- a/R/family.normal.R
+++ b/R/family.normal.R
@@ -1,5 +1,10 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+
@@ -27,36 +32,36 @@ VGAM.weights.function = function(w, M, n) {
- gaussianff = function(dispersion=0, parallel=FALSE, zero=NULL)
+ gaussianff = function(dispersion=0, parallel = FALSE, zero = NULL)
{
- if (!is.Numeric(dispersion, allow=1) || dispersion < 0)
+ if (!is.Numeric(dispersion, allow = 1) || dispersion < 0)
stop("bad input for argument 'dispersion'")
- estimated.dispersion = dispersion==0
+ estimated.dispersion = dispersion == 0
new("vglmff",
- blurb=c("Vector linear/additive model\n",
+ blurb = c("Vector linear/additive model\n",
"Links: identity for Y1,...,YM"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints)
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .parallel=parallel, .zero=zero ))),
- deviance=function(mu, y, w, residuals= FALSE, eta, extra=NULL) {
+ }), list( .parallel = parallel, .zero = zero ))),
+ deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
M = if (is.matrix(y)) ncol(y) else 1
n = if (is.matrix(y)) nrow(y) else length(y)
wz = VGAM.weights.function(w=w, M=M, n=n)
if (residuals) {
if (M > 1) {
U <- vchol(wz, M=M, n=n)
- temp = mux22(U, y-mu, M=M, upper=TRUE, as.matrix=TRUE)
+ temp = mux22(U, y-mu, M=M, upper = TRUE, as.matrix = TRUE)
dimnames(temp) = dimnames(y)
temp
} else (y-mu) * sqrt(wz)
} else
rss.vgam(y-mu, wz=wz, M=M)
},
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (is.R())
- assign("CQO.FastAlgorithm", TRUE, envir = VGAMenv) else
+ assign("CQO.FastAlgorithm", TRUE, envir = VGAM:::VGAMenv) else
CQO.FastAlgorithm <<- TRUE
if (any(function.name == c("cqo","cao")) &&
(length( .zero ) || (is.logical( .parallel ) && .parallel )))
@@ -65,12 +70,12 @@ VGAM.weights.function = function(w, M, n) {
M = if (is.matrix(y)) ncol(y) else 1
dy = dimnames(y)
predictors.names = if (!is.null(dy[[2]])) dy[[2]] else
- paste("Y",1:M,sep="")
+ paste("Y", 1:M, sep = "")
if (!length(etastart))
etastart = 0 * y
- }), list( .parallel=parallel, .zero=zero ))),
- inverse=function(eta, extra=NULL) eta,
- last=eval(substitute(expression({
+ }), list( .parallel = parallel, .zero = zero ))),
+ inverse = function(eta, extra = NULL) eta,
+ last = eval(substitute(expression({
dy = dimnames(y)
if (!is.null(dy[[2]]))
dimnames(fit$fitted.values) = dy
@@ -79,7 +84,7 @@ VGAM.weights.function = function(w, M, n) {
wz = VGAM.weights.function(w=w, M=M, n=n)
temp = rss.vgam(y-mu, wz=wz, M=M)
dpar = temp / (length(y) -
- (if(is.numeric(ncol(X_vlm_save))) ncol(X_vlm_save) else 0))
+ (if(is.numeric(ncol(X_vlm_save))) ncol(X_vlm_save) else 0))
}
misc$dispersion = dpar
misc$default.dispersion = 0
@@ -88,26 +93,26 @@ VGAM.weights.function = function(w, M, n) {
names(misc$link) = predictors.names
if (is.R()) {
- if (exists("CQO.FastAlgorithm", envir = VGAMenv))
- rm("CQO.FastAlgorithm", envir = VGAMenv)
+ if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
+ rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
} else {
- while(exists("CQO.FastAlgorithm"))
+ while (exists("CQO.FastAlgorithm"))
remove("CQO.FastAlgorithm")
}
}), list( .dispersion=dispersion,
.estimated.dispersion=estimated.dispersion ))),
- loglikelihood= function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ loglikelihood = function(mu,y,w,residuals = FALSE, eta, extra = NULL) {
M = if (is.matrix(y)) ncol(y) else 1
n = if (is.matrix(y)) nrow(y) else length(y)
wz = VGAM.weights.function(w=w, M=M, n=n)
temp = rss.vgam(y-mu, wz=wz, M=M)
-0.5 * temp
},
- link=function(mu, extra=NULL) mu,
- vfamily="gaussianff",
+ link = function(mu, extra = NULL) mu,
+ vfamily = "gaussianff",
deriv=expression({
wz = VGAM.weights.function(w=w, M=M, n=n)
- mux22(cc=t(wz), xmat=y-mu, M=M, as.mat=TRUE)
+ mux22(cc=t(wz), xmat=y-mu, M=M, as.mat = TRUE)
}),
weight= expression({
wz
@@ -120,7 +125,7 @@ VGAM.weights.function = function(w, M, n) {
-dposnorm = function(x, mean=0, sd=1, log=FALSE) {
+dposnorm = function(x, mean = 0, sd = 1, log = FALSE) {
log.arg = log
rm(log)
if (!is.logical(log.arg) || length(log.arg)!=1)
@@ -129,47 +134,47 @@ dposnorm = function(x, mean=0, sd=1, log=FALSE) {
x = rep(x, len=L); mean = rep(mean, len=L); sd = rep(sd, len=L);
if (log.arg) {
- ifelse(x < 0, log(0), dnorm(x, m=mean, sd=sd, log=TRUE) -
- pnorm(mean/sd, log=TRUE))
+ ifelse(x < 0, log(0), dnorm(x, m=mean, sd = sd, log = TRUE) -
+ pnorm(mean/sd, log = TRUE))
} else {
- ifelse(x < 0, 0, dnorm(x=x, me=mean, sd=sd) / pnorm(mean/sd))
+ ifelse(x < 0, 0, dnorm(x=x, me=mean, sd = sd) / pnorm(mean/sd))
}
}
-pposnorm = function(q, mean=0, sd=1) {
+pposnorm = function(q, mean = 0, sd = 1) {
L = max(length(q), length(mean), length(sd))
q = rep(q, len=L); mean = rep(mean, len=L); sd = rep(sd, len=L);
- ifelse(q < 0, 0, (pnorm(q=q, mean=mean, sd=sd) -
- pnorm(q=0, mean=mean, sd=sd)) / pnorm(q= mean/sd))
+ ifelse(q < 0, 0, (pnorm(q, mean = mean, sd = sd) -
+ pnorm(0, mean = mean, sd = sd)) / pnorm(q = mean/sd))
}
-qposnorm = function(p, mean=0, sd=1) {
- if (!is.Numeric(p, posit=TRUE) || max(p) >= 1)
+qposnorm = function(p, mean = 0, sd = 1) {
+ if (!is.Numeric(p, posit = TRUE) || max(p) >= 1)
stop("bad input for argument 'p'")
- qnorm(p=p+(1-p)*pnorm(0, mean=mean, sd=sd), mean=mean, sd=sd)
+ qnorm(p=p+(1-p)*pnorm(0, mean = mean, sd = sd), mean = mean, sd = sd)
}
-rposnorm = function(n, mean=0, sd=1) {
- if (!is.Numeric(n, integ=TRUE, posit=TRUE))
+rposnorm = function(n, mean = 0, sd = 1) {
+ if (!is.Numeric(n, integ = TRUE, posit = TRUE))
stop("bad input for argument 'n'")
mean = rep(mean, length=n)
sd = rep(sd, length=n)
- qnorm(p=runif(n, min=pnorm(0, m=mean, sd=sd)), m=mean, sd=sd)
+ qnorm(p=runif(n, min=pnorm(0, m=mean, sd = sd)), m=mean, sd = sd)
}
- posnormal1.control <- function(save.weight=TRUE, ...) {
+ posnormal1.control <- function(save.weight = TRUE, ...) {
list(save.weight=save.weight)
}
- posnormal1 = function(lmean="identity", lsd="loge",
- emean=list(), esd=list(),
- imean=NULL, isd=NULL,
- nsimEIM=100, zero=NULL)
+ posnormal1 = function(lmean = "identity", lsd = "loge",
+ emean = list(), esd = list(),
+ imean = NULL, isd = NULL,
+ nsimEIM=100, zero = NULL)
{
warning("this VGAM family function is not working properly yet")
@@ -177,82 +182,84 @@ rposnorm = function(n, mean=0, sd=1) {
lmean = as.character(substitute(lmean))
if (mode(lsd) != "character" && mode(lsd) != "name")
lsd = as.character(substitute(lsd))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
- if (length(isd) && !is.Numeric(isd, posit=TRUE))
+ if (length(isd) && !is.Numeric(isd, posit = TRUE))
stop("bad input for argument 'isd'")
if (!is.list(emean)) emean = list()
if (!is.list(esd)) esd = list()
if (length(nsimEIM))
- if (!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 10)
+ if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 10)
stop("'nsimEIM' should be an integer greater than 10")
new("vglmff",
- blurb=c("Positive (univariate) normal distribution\n\n",
+ blurb = c("Positive (univariate) normal distribution\n\n",
"Links: ",
- namesof("mean", lmean, earg= emean, tag= TRUE), "; ",
- namesof("sd", lsd, earg= esd, tag= TRUE)),
- constraints=eval(substitute(expression({
+ namesof("mean", lmean, earg = emean, tag = TRUE), "; ",
+ namesof("sd", lsd, earg = esd, tag = TRUE)),
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(y <- cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
if (min(y) <= 0)
stop("response must be positive")
- predictors.names = c(namesof("mean", .lmean, earg= .emean, tag= FALSE),
- namesof("sd", .lsd, earg= .esd, tag= FALSE))
+ predictors.names =
+ c(namesof("mean", .lmean, earg = .emean, tag = FALSE),
+ namesof("sd", .lsd, earg = .esd, tag = FALSE))
if (!length(etastart)) {
init.me = if (length( .imean)) rep( .imean, len=n) else NULL
init.sd = if (length( .isd )) rep( .isd , len=n) else NULL
if (!length(init.me)) init.me = rep(quantile(y, probs=0.40), len=n)
if (!length(init.sd)) init.sd = rep(sd(y)*1.2, len=n)
- etastart = cbind(theta2eta(init.me, .lmean, earg= .emean),
- theta2eta(init.sd, .lsd, earg= .esd))
+ etastart = cbind(theta2eta(init.me, .lmean, earg = .emean),
+ theta2eta(init.sd, .lsd, earg = .esd))
}
- }), list( .lmean=lmean, .lsd=lsd, .imean=imean, .isd=isd,
- .emean=emean, .esd=esd ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- mymu = eta2theta(eta[,1], .lmean, earg= .emean)
- mysd = eta2theta(eta[,2], .lsd, earg= .esd)
+ }), list( .lmean = lmean, .lsd = lsd, .imean = imean, .isd = isd,
+ .emean = emean, .esd = esd ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ mymu = eta2theta(eta[,1], .lmean, earg = .emean)
+ mysd = eta2theta(eta[,2], .lsd, earg = .esd)
mymu + mysd * dnorm(-mymu/mysd) / pnorm(mymu/mysd)
- }, list( .lmean=lmean, .lsd=lsd, .emean=emean, .esd=esd ))),
- last=eval(substitute(expression({
- misc$link = c("mean"= .lmean, "sd"= .lsd)
+ }, list( .lmean = lmean, .lsd = lsd, .emean = emean, .esd = esd ))),
+ last = eval(substitute(expression({
+ misc$link = c("mean"= .lmean, "sd"= .lsd)
misc$earg = list("mean"= .emean, "sd"= .esd )
misc$expected = TRUE
misc$nsimEIM = .nsimEIM
- }), list( .lmean=lmean, .lsd=lsd, .emean=emean, .esd=esd,
- .nsimEIM=nsimEIM ))),
+ }), list( .lmean = lmean, .lsd = lsd, .emean = emean, .esd = esd,
+ .nsimEIM = nsimEIM ))),
loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- mymu = eta2theta(eta[,1], .lmean, earg= .emean)
- mysd = eta2theta(eta[,2], .lsd, earg= .esd)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ function(mu,y,w,residuals = FALSE, eta, extra = NULL) {
+ mymu = eta2theta(eta[,1], .lmean, earg = .emean)
+ mysd = eta2theta(eta[,2], .lsd, earg = .esd)
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
- sum(w * dposnorm(x=y, m=mymu, sd=mysd, log=TRUE))
+ sum(w * dposnorm(x=y, m=mymu, sd = mysd, log = TRUE))
}
- }, list( .lmean=lmean, .lsd=lsd, .emean=emean, .esd=esd ))),
+ }, list( .lmean = lmean, .lsd = lsd, .emean = emean, .esd = esd ))),
vfamily=c("posnormal1"),
- deriv=eval(substitute(expression({
- mymu = eta2theta(eta[,1], .lmean, earg= .emean)
- mysd = eta2theta(eta[,2], .lsd, earg= .esd)
+ deriv = eval(substitute(expression({
+ mymu = eta2theta(eta[,1], .lmean, earg = .emean)
+ mysd = eta2theta(eta[,2], .lsd, earg = .esd)
zedd = (y-mymu) / mysd
temp7 = dnorm(-mymu/mysd)
temp8 = pnorm(mymu/mysd) * mysd
dl.dmu = zedd / mysd^2 - temp7 / temp8
dl.dsd = (mymu*temp7/temp8 + zedd^3 / mysd - 1) / mysd
- dmu.deta = dtheta.deta(mymu, .lmean, earg= .emean)
- dsd.deta = dtheta.deta(mysd, .lsd, earg= .esd)
+ dmu.deta = dtheta.deta(mymu, .lmean, earg = .emean)
+ dsd.deta = dtheta.deta(mysd, .lsd, earg = .esd)
dthetas.detas = cbind(dmu.deta, dsd.deta)
w * dthetas.detas * cbind(dl.dmu, dl.dsd)
- }), list( .lmean=lmean, .lsd=lsd, .emean=emean, .esd=esd ))),
- weight=eval(substitute(expression({
+ }), list( .lmean = lmean, .lsd = lsd, .emean = emean, .esd = esd ))),
+ weight = eval(substitute(expression({
run.varcov = 0
- ind1 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
+ ind1 = iam(NA, NA, M=M, both = TRUE, diag = TRUE)
if (length( .nsimEIM )) {
for(ii in 1:( .nsimEIM )) {
- ysim <- rposnorm(n, m=mymu, sd=mysd)
+ ysim <- rposnorm(n, m=mymu, sd = mysd)
zedd = (ysim-mymu) / mysd
temp7 = dnorm(-mymu/mysd)
temp8 = pnorm(mymu/mysd) * mysd
@@ -262,11 +269,11 @@ rposnorm = function(n, mean=0, sd=1) {
rm(ysim)
temp3 = matrix(c(dl.dmu, dl.dsd), n, 2)
run.varcov = ((ii-1) * run.varcov +
- temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
+ temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
}
wz = if (intercept.only)
matrix(colMeans(run.varcov),
- n, ncol(run.varcov), byrow=TRUE) else run.varcov
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
wz = w * matrix(wz, n, dimm(M))
@@ -283,80 +290,84 @@ rposnorm = function(n, mean=0, sd=1) {
wz = w * wz
}
wz
- }), list( .lmean=lmean, .lsd=lsd, .emean=emean, .esd=esd,
- .nsimEIM=nsimEIM ))))
+ }), list( .lmean = lmean, .lsd = lsd, .emean = emean, .esd = esd,
+ .nsimEIM = nsimEIM ))))
}
-dbetanorm = function(x, shape1, shape2, mean=0, sd=1, log=FALSE) {
+dbetanorm = function(x, shape1, shape2, mean = 0, sd = 1, log = FALSE) {
log.arg = log
rm(log)
if (!is.logical(log.arg) || length(log.arg)!=1)
stop("bad input for argument 'log'")
ans =
if (is.R() && log.arg) {
- dnorm(x=x, mean=mean, sd=sd, log=TRUE) +
- (shape1-1) * pnorm(q=x, mean=mean, sd=sd, log=TRUE) +
- (shape2-1) * pnorm(q=x, mean=mean, sd=sd, lower=FALSE, log=TRUE) -
+ dnorm(x=x, mean = mean, sd = sd, log = TRUE) +
+ (shape1-1) * pnorm(q = x, mean = mean, sd = sd, log = TRUE) +
+ (shape2-1) * pnorm(q = x, mean = mean, sd = sd, log = TRUE,
+ lower = FALSE) -
lbeta(shape1, shape2)
} else {
- dnorm(x=x, mean=mean, sd=sd) *
- pnorm(q=x, mean=mean, sd=sd)^(shape1-1) *
- pnorm(q=x, mean=mean, sd=sd, lower=FALSE)^(shape2-1) / beta(shape1, shape2)
+ dnorm(x=x, mean = mean, sd = sd) *
+ pnorm(q=x, mean = mean, sd = sd)^(shape1-1) *
+ pnorm(q=x, mean = mean, sd = sd,
+ lower = FALSE)^(shape2-1) / beta(shape1, shape2)
}
if (!is.R() && log.arg) ans = log(ans)
ans
}
-pbetanorm = function(q, shape1, shape2, mean=0, sd=1,
- lower.tail=TRUE, log.p=FALSE) {
- pbeta(q=pnorm(q=q, mean=mean, sd=sd), shape1=shape1, shape2=shape2,
- lower.tail = lower.tail, log.p = log.p)
+pbetanorm = function(q, shape1, shape2, mean = 0, sd = 1,
+ lower.tail = TRUE, log.p = FALSE) {
+ pbeta(q=pnorm(q=q, mean = mean, sd = sd),
+ shape1=shape1, shape2=shape2,
+ lower.tail = lower.tail, log.p = log.p)
}
-qbetanorm = function(p, shape1, shape2, mean=0, sd=1) {
- if (!is.Numeric(p, posit=TRUE) || max(p) >= 1)
+qbetanorm = function(p, shape1, shape2, mean = 0, sd = 1) {
+ if (!is.Numeric(p, posit = TRUE) || max(p) >= 1)
stop("bad input for argument 'p'")
- qnorm(p=qbeta(p=p, shape1=shape1, shape2=shape2), mean=mean, sd=sd)
+ qnorm(p=qbeta(p=p, shape1=shape1, shape2=shape2), mean = mean, sd = sd)
}
-rbetanorm = function(n, shape1, shape2, mean=0, sd=1) {
- if (!is.Numeric(n, integ=TRUE, posit=TRUE))
+rbetanorm = function(n, shape1, shape2, mean = 0, sd = 1) {
+ if (!is.Numeric(n, integ = TRUE, posit = TRUE))
stop("bad input for argument 'n'")
- qnorm(p=qbeta(p=runif(n), shape1=shape1, shape2=shape2), mean=mean, sd=sd)
+ qnorm(p=qbeta(p=runif(n), shape1=shape1, shape2=shape2),
+ mean = mean, sd = sd)
}
-dtikuv = function(x, d, mean=0, sigma=1, log = FALSE) {
+dtikuv = function(x, d, mean = 0, sigma=1, log = FALSE) {
if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
rm(log)
- if (!is.Numeric(d, allow=1) || max(d) >= 2)
+ if (!is.Numeric(d, allow = 1) || max(d) >= 2)
stop("bad input for argument 'd'")
L = max(length(x), length(mean), length(sigma))
x = rep(x, len=L); mean = rep(mean, len=L); sigma = rep(sigma, len=L);
hh = 2 - d
KK = 1 / (1 + 1/hh + 0.75/hh^2)
if (log.arg) {
- dnorm(x=x, mean=mean, sd=sigma, log=TRUE) + log(KK) +
+ dnorm(x=x, mean = mean, sd = sigma, log = TRUE) + log(KK) +
2 * log1p(((x-mean)/sigma)^2 / (2*hh))
} else {
- dnorm(x=x, mean=mean, sd=sigma) * KK *
+ dnorm(x=x, mean = mean, sd = sigma) * KK *
(1 + ((x-mean)/sigma)^2 / (2*hh))^2
}
}
-ptikuv = function(q, d, mean=0, sigma=1) {
- if (!is.Numeric(d, allow=1) || max(d) >= 2)
+ptikuv = function(q, d, mean = 0, sigma=1) {
+ if (!is.Numeric(d, allow = 1) || max(d) >= 2)
stop("bad input for argument 'd'")
L = max(length(q), length(mean), length(sigma))
q = rep(q, len=L); mean = rep(mean, len=L); sigma = rep(sigma, len=L);
@@ -372,16 +383,16 @@ ptikuv = function(q, d, mean=0, sigma=1) {
}
if (any(rhs <- q > mean)) {
ans[rhs] = 1.0 - Recall(q=(2*mean[rhs]-q[rhs]), d=d,
- mean=mean[rhs], sigma=sigma[rhs])
+ mean = mean[rhs], sigma=sigma[rhs])
}
ans
}
-qtikuv = function(p, d, mean=0, sigma=1, ...) {
- if (!is.Numeric(p, posit=TRUE) || max(p) >= 1)
+qtikuv = function(p, d, mean = 0, sigma=1, ...) {
+ if (!is.Numeric(p, posit = TRUE) || max(p) >= 1)
stop("bad input for argument 'p'")
- if (!is.Numeric(d, allow=1) || max(d) >= 2)
+ if (!is.Numeric(d, allow = 1) || max(d) >= 2)
stop("bad input for argument 'd'")
if (!is.Numeric(mean))
stop("bad input for argument 'mean'")
@@ -390,33 +401,34 @@ qtikuv = function(p, d, mean=0, sigma=1, ...) {
L = max(length(p), length(mean), length(sigma))
p = rep(p, len=L); mean = rep(mean, len=L); sigma = rep(sigma, len=L);
ans = rep(0.0, len=L)
- myfun = function(x, d, mean=0, sigma=1, p)
- ptikuv(q=x, d=d, mean=mean, sigma=sigma) - p
+ myfun = function(x, d, mean = 0, sigma=1, p)
+ ptikuv(q=x, d=d, mean = mean, sigma=sigma) - p
for(i in 1:L) {
Lower = ifelse(p[i] <= 0.5, mean[i] - 3 * sigma[i], mean[i])
- while(ptikuv(q=Lower, d=d, mean=mean[i], sigma=sigma[i]) > p[i])
+ while (ptikuv(q=Lower, d=d, mean = mean[i], sigma=sigma[i]) > p[i])
Lower = Lower - sigma[i]
Upper = ifelse(p[i] >= 0.5, mean[i] + 3 * sigma[i], mean[i])
- while(ptikuv(q=Upper, d=d, mean=mean[i], sigma=sigma[i]) < p[i])
+ while (ptikuv(q=Upper, d=d, mean = mean[i], sigma=sigma[i]) < p[i])
Upper = Upper + sigma[i]
- ans[i] = uniroot(f=myfun, lower=Lower, upper=Upper,
- d=d, mean=mean[i], sigma=sigma[i], p=p[i], ...)$root
+ ans[i] = uniroot(f=myfun, lower=Lower, upper=Upper, d=d, p=p[i],
+ mean = mean[i], sigma=sigma[i], ...)$root
}
ans
}
-rtikuv = function(n, d, mean=0, sigma=1, Smallno=1.0e-6) {
- if (!is.Numeric(n, posit=TRUE, integ=TRUE))
+rtikuv = function(n, d, mean = 0, sigma=1, Smallno=1.0e-6) {
+ if (!is.Numeric(n, posit = TRUE, integ = TRUE))
stop("bad input for argument 'n'")
- if (!is.Numeric(d, allow=1) || max(d) >= 2)
+ if (!is.Numeric(d, allow = 1) || max(d) >= 2)
stop("bad input for argument 'd'")
- if (!is.Numeric(mean, allow=1))
+ if (!is.Numeric(mean, allow = 1))
stop("bad input for argument 'mean'")
- if (!is.Numeric(sigma, allow=1))
+ if (!is.Numeric(sigma, allow = 1))
stop("bad input for argument 'sigma'")
- if (!is.Numeric(Smallno, posit=TRUE, allow=1) || Smallno > 0.01 ||
- Smallno < 2 * .Machine$double.eps)
+ if (!is.Numeric(Smallno, posit = TRUE, allow = 1) ||
+ Smallno > 0.01 ||
+ Smallno < 2 * .Machine$double.eps)
stop("bad input for argument 'Smallno'")
ans = rep(0.0, len=n)
@@ -424,14 +436,15 @@ rtikuv = function(n, d, mean=0, sigma=1, Smallno=1.0e-6) {
hh = 2 - d
KK = 1 / (1 + 1/hh + 0.75/hh^2)
ymax = ifelse(hh < 2,
- dtikuv(x=mean + sigma*sqrt(4 - 2*hh), d=d, m=mean, s=sigma),
+ dtikuv(x=mean + sigma*sqrt(4 - 2*hh),
+ d=d, m=mean, s=sigma),
KK / (sqrt(2 * pi) * sigma))
- while(ptr2 < n) {
+ while (ptr2 < n) {
Lower = mean - 5 * sigma
- while(ptikuv(q=Lower, d=d, mean=mean, sigma=sigma) > Smallno)
+ while (ptikuv(q=Lower, d=d, mean = mean, sigma=sigma) > Smallno)
Lower = Lower - sigma
Upper = mean + 5 * sigma
- while(ptikuv(q=Upper, d=d, mean=mean, sigma=sigma) < 1-Smallno)
+ while (ptikuv(q=Upper, d=d, mean = mean, sigma=sigma) < 1-Smallno)
Upper = Upper + sigma
x = runif(2*n, min=Lower, max=Upper)
index = runif(2*n, max=ymax) < dtikuv(x,d=d,m=mean,s=sigma)
@@ -448,39 +461,39 @@ rtikuv = function(n, d, mean=0, sigma=1, Smallno=1.0e-6) {
- tikuv = function(d, lmean="identity", lsigma="loge",
- emean=list(), esigma=list(),
- isigma=NULL, zero=2)
+ tikuv = function(d, lmean = "identity", lsigma="loge",
+ emean = list(), esigma=list(),
+ isigma = NULL, zero=2)
{
if (mode(lmean) != "character" && mode(lmean) != "name")
lmean = as.character(substitute(lmean))
if (mode(lsigma) != "character" && mode(lsigma) != "name")
lsigma = as.character(substitute(lsigma))
- if (length(zero) && (!is.Numeric(zero, integer=TRUE, posit=TRUE) ||
+ if (length(zero) && (!is.Numeric(zero, integer = TRUE, posit = TRUE) ||
max(zero) > 2))
stop("bad input for argument 'zero'")
- if (!is.Numeric(d, allow=1) || max(d) >= 2)
+ if (!is.Numeric(d, allow = 1) || max(d) >= 2)
stop("bad input for argument 'd'")
if (!is.list(emean)) emean = list()
if (!is.list(esigma)) esigma = list()
new("vglmff",
- blurb=c("Short-tailed symmetric [Tiku and Vaughan (1999)] distribution\n",
+ blurb = c("Short-tailed symmetric [Tiku and Vaughan (1999)] distribution\n",
"Link: ",
- namesof("mean", lmean, earg= emean), ", ",
- namesof("sigma", lsigma, earg= esigma),
+ namesof("mean", lmean, earg = emean), ", ",
+ namesof("sigma", lsigma, earg = esigma),
"\n",
"\n",
"Mean: mean"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("the response must be a vector or one-column matrix")
predictors.names =
- c(namesof("mean", .lmean, earg= .emean, tag= FALSE),
- namesof("sigma", .lsigma, earg= .esigma, tag= FALSE))
+ c(namesof("mean", .lmean, earg = .emean, tag = FALSE),
+ namesof("sigma", .lsigma, earg = .esigma, tag = FALSE))
if (!length(etastart)) {
sigma.init = if (length(.isigma)) rep(.isigma, length=n) else {
hh = 2 - .d
@@ -489,46 +502,48 @@ rtikuv = function(n, d, mean=0, sigma=1, Smallno=1.0e-6) {
rep(sqrt(var(y) / (KK*K2)), len=n)
}
mean.init = rep(weighted.mean(y, w), len=n)
- etastart = cbind(theta2eta(mean.init, .lmean, earg= .emean),
- theta2eta(sigma.init, .lsigma, earg= .esigma))
+ etastart = cbind(theta2eta(mean.init, .lmean, earg = .emean),
+ theta2eta(sigma.init, .lsigma, earg = .esigma))
}
- }),list( .lmean=lmean, .lsigma=lsigma, .isigma=isigma, .d=d,
- .emean=emean, .esigma=esigma ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- eta2theta(eta[,1], .lmean, earg= .emean)
- }, list( .lmean=lmean,
- .emean=emean, .esigma=esigma ))),
- last=eval(substitute(expression({
+ }),list( .lmean = lmean, .lsigma=lsigma, .isigma=isigma, .d=d,
+ .emean = emean, .esigma=esigma ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[,1], .lmean, earg = .emean)
+ }, list( .lmean = lmean,
+ .emean = emean, .esigma=esigma ))),
+ last = eval(substitute(expression({
misc$link = c("mean"= .lmean, "sigma"= .lsigma)
misc$earg = list("mean"= .emean, "sigma"= .esigma )
misc$expected = TRUE
misc$d = .d
- }), list( .lmean=lmean, .lsigma=lsigma, .d=d,
- .emean=emean, .esigma=esigma ))),
+ }), list( .lmean = lmean, .lsigma=lsigma, .d=d,
+ .emean = emean, .esigma=esigma ))),
loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- mymu = eta2theta(eta[,1], .lmean, earg= .emean)
- sigma = eta2theta(eta[,2], .lsigma, earg= .esigma)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dtikuv(x=y, d= .d, mean=mymu, sigma=sigma, log = TRUE))
+ function(mu,y,w,residuals = FALSE, eta, extra = NULL) {
+ mymu = eta2theta(eta[,1], .lmean, earg = .emean)
+ sigma = eta2theta(eta[,2], .lsigma, earg = .esigma)
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(w * dtikuv(x=y, d= .d, mean = mymu, sigma=sigma, log = TRUE))
}
- }, list( .lmean=lmean, .lsigma=lsigma, .d=d,
- .emean=emean, .esigma=esigma ))),
+ }, list( .lmean = lmean, .lsigma=lsigma, .d=d,
+ .emean = emean, .esigma=esigma ))),
vfamily=c("tikuv"),
- deriv=eval(substitute(expression({
- mymu = eta2theta(eta[,1], .lmean, earg= .emean)
- sigma = eta2theta(eta[,2], .lsigma, earg= .esigma)
- dmu.deta = dtheta.deta(mymu, .lmean, earg= .emean)
- dsigma.deta = dtheta.deta(sigma, .lsigma, earg= .esigma)
+ deriv = eval(substitute(expression({
+ mymu = eta2theta(eta[,1], .lmean, earg = .emean)
+ sigma = eta2theta(eta[,2], .lsigma, earg = .esigma)
+ dmu.deta = dtheta.deta(mymu, .lmean, earg = .emean)
+ dsigma.deta = dtheta.deta(sigma, .lsigma, earg = .esigma)
zedd = (y - mymu) / sigma
hh = 2 - .d
gzedd = zedd / (1 + 0.5*zedd^2 / hh)
dl.dmu = zedd / sigma - 2 * gzedd / (hh*sigma)
dl.dsigma = (zedd^2 - 1 - 2 * zedd * gzedd / hh) / sigma
- w * cbind(dl.dmu * dmu.deta, dl.dsigma * dsigma.deta)
- }), list( .lmean=lmean, .lsigma=lsigma, .d=d,
- .emean=emean, .esigma=esigma ))),
- weight=eval(substitute(expression({
+ w * cbind(dl.dmu * dmu.deta,
+ dl.dsigma * dsigma.deta)
+ }), list( .lmean = lmean, .lsigma=lsigma, .d=d,
+ .emean = emean, .esigma=esigma ))),
+ weight = eval(substitute(expression({
ayy = 1 / (2*hh)
Dnos = 1 - (2/hh) * (1 - ayy) / (1 + 2*ayy + 3*ayy^2)
Dstar = -1 + 3 * (1 + 2*ayy + 11*ayy^2) / (1 + 2*ayy + 3*ayy^2)
@@ -538,14 +553,14 @@ rtikuv = function(n, d, mean=0, sigma=1, Smallno=1.0e-6) {
wz[,iam(1,1,M)] = ed2l.dmymu2 * dmu.deta^2
wz[,iam(2,2,M)] = ed2l.dnu2 * dsigma.deta^2
w * wz
- }), list( .lmean=lmean, .lsigma=lsigma,
- .emean=emean, .esigma=esigma ))))
+ }), list( .lmean = lmean, .lsigma=lsigma,
+ .emean = emean, .esigma=esigma ))))
}
-dfnorm = function(x, mean=0, sd=1, a1=1, a2=1) {
- if (!is.Numeric(a1, posit=TRUE) || !is.Numeric(a2, posit=TRUE))
+dfnorm = function(x, mean = 0, sd = 1, a1=1, a2=1) {
+ if (!is.Numeric(a1, posit = TRUE) || !is.Numeric(a2, posit = TRUE))
stop("bad input for arguments 'a1' and 'a2'")
if (any(a1 <= 0 | a2 <= 0))
stop("arguments 'a1' and 'a2' must have positive values only")
@@ -556,8 +571,8 @@ dfnorm = function(x, mean=0, sd=1, a1=1, a2=1) {
ans
}
-pfnorm = function(q, mean=0, sd=1, a1=1, a2=1) {
- if (!is.Numeric(a1, posit=TRUE) || !is.Numeric(a2, posit=TRUE))
+pfnorm = function(q, mean = 0, sd = 1, a1=1, a2=1) {
+ if (!is.Numeric(a1, posit = TRUE) || !is.Numeric(a2, posit = TRUE))
stop("bad input for arguments 'a1' and 'a2'")
if (any(a1 <= 0 | a2 <= 0))
stop("arguments 'a1' and 'a2' must have positive values only")
@@ -566,10 +581,10 @@ pfnorm = function(q, mean=0, sd=1, a1=1, a2=1) {
ifelse(q < 0, 0, pnorm(q=q/(a1*sd) - mean/sd) - pnorm(q=-q/(a2*sd) - mean/sd))
}
-qfnorm = function(p, mean=0, sd=1, a1=1, a2=1, ...) {
- if (!is.Numeric(p, posit=TRUE) || max(p) >= 1)
+qfnorm = function(p, mean = 0, sd = 1, a1=1, a2=1, ...) {
+ if (!is.Numeric(p, posit = TRUE) || max(p) >= 1)
stop("bad input for argument 'p'")
- if (!is.Numeric(a1, posit=TRUE) || !is.Numeric(a2, posit=TRUE))
+ if (!is.Numeric(a1, posit = TRUE) || !is.Numeric(a2, posit = TRUE))
stop("bad input for arguments 'a1' and 'a2'")
if (any(a1 <= 0 | a2 <= 0))
stop("arguments 'a1' and 'a2' must have positive values only")
@@ -578,43 +593,46 @@ qfnorm = function(p, mean=0, sd=1, a1=1, a2=1, ...) {
p = rep(p, len=L); mean = rep(mean, len=L); sd = rep(sd, len=L);
a1 = rep(a1, len=L); a2 = rep(a2, len=L);
ans = rep(0.0, len=L)
- myfun = function(x, mean=0, sd=1, a1=1, a2=2, p)
- pfnorm(q=x, mean=mean, sd=sd, a1=a1, a2=a2) - p
+ myfun = function(x, mean = 0, sd = 1, a1=1, a2=2, p)
+ pfnorm(q=x, mean = mean, sd = sd, a1 = a1, a2 = a2) - p
for(i in 1:L) {
mytheta = mean[i]/sd[i]
EY = sd[i] * ((a1[i]+a2[i]) * (mytheta * pnorm(mytheta) + dnorm(mytheta)) -
a2[i] * mytheta)
Upper = 2 * EY
- while(pfnorm(q=Upper, mean=mean[i], sd=sd[i], a1=a1[i], a2=a2[i]) < p[i])
+ while (pfnorm(q=Upper, mean = mean[i], sd = sd[i],
+ a1 = a1[i], a2 = a2[i]) < p[i])
Upper = Upper + sd[i]
- ans[i] = uniroot(f=myfun, lower=0, upper=Upper,
- mean=mean[i], sd=sd[i], a1=a1[i], a2=a2[i], p=p[i], ...)$root
+ ans[i] = uniroot(f=myfun, lower=0, upper=Upper, mean = mean[i],
+ sd = sd[i], a1 = a1[i], a2 = a2[i],
+ p=p[i], ...)$root
}
ans
}
-rfnorm = function(n, mean=0, sd=1, a1=1, a2=1) {
- if (!is.Numeric(n, integ=TRUE, posit=TRUE))
+rfnorm = function(n, mean = 0, sd = 1, a1=1, a2=1) {
+ if (!is.Numeric(n, integ = TRUE, posit = TRUE))
stop("bad input for argument 'n'")
- if (!is.Numeric(a1, posit=TRUE) || !is.Numeric(a2, posit=TRUE))
+ if (!is.Numeric(a1, posit = TRUE) || !is.Numeric(a2, posit = TRUE))
stop("bad input for arguments 'a1' and 'a2'")
if (any(a1 <= 0 | a2 <= 0))
stop("arguments 'a1' and 'a2' must have positive values only")
- X = rnorm(n, mean=mean, sd=sd)
+ X = rnorm(n, mean = mean, sd = sd)
pmax(a1 * X, -a2*X)
}
- fnormal1 = function(lmean="identity", lsd="loge", emean=list(), esd=list(),
- imean=NULL, isd=NULL, a1=1, a2=1, nsimEIM=500,
- method.init=1, zero=NULL)
+ fnormal1 = function(lmean = "identity", lsd = "loge",
+ emean = list(), esd = list(),
+ imean = NULL, isd = NULL, a1=1, a2=1,
+ nsimEIM=500, method.init = 1, zero = NULL)
{
- if (!is.Numeric(a1, posit=TRUE, allow=1) ||
- !is.Numeric(a2, posit=TRUE, allow=1))
+ if (!is.Numeric(a1, posit = TRUE, allow = 1) ||
+ !is.Numeric(a2, posit = TRUE, allow = 1))
stop("bad input for arguments 'a1' and 'a2'")
if (any(a1 <= 0 | a2 <= 0))
stop("arguments 'a1' and 'a2' must each be a positive value")
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
method.init > 2)
stop("'method.init' must be 1 or 2")
@@ -622,27 +640,29 @@ rfnorm = function(n, mean=0, sd=1, a1=1, a2=1) {
lmean = as.character(substitute(lmean))
if (mode(lsd) != "character" && mode(lsd) != "name")
lsd = as.character(substitute(lsd))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(emean)) emean = list()
if (!is.list(esd)) esd = list()
- if (!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 10)
+ if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 10)
stop("'nsimEIM' should be an integer greater than 10")
if (length(imean) && !is.Numeric(imean))
stop("bad input for 'imean'")
- if (length(isd) && !is.Numeric(isd, posit=TRUE))
+ if (length(isd) && !is.Numeric(isd, posit = TRUE))
stop("bad input for 'isd'")
new("vglmff",
- blurb=c("(Generalized) folded univariate normal distribution\n\n",
+ blurb = c("(Generalized) folded univariate normal distribution\n\n",
"Link: ",
- namesof("mean", lmean, earg=emean, tag= TRUE), "; ",
- namesof("sd", lsd, earg=esd, tag= TRUE)),
- initialize=eval(substitute(expression({
- predictors.names = c(namesof("mean", .lmean, earg=.emean, tag=FALSE),
- namesof("sd", .lsd, earg=.esd, tag=FALSE))
+ namesof("mean", lmean, earg = emean, tag = TRUE), "; ",
+ namesof("sd", lsd, earg = esd, tag = TRUE)),
+ initialize = eval(substitute(expression({
+ predictors.names =
+ c(namesof("mean", .lmean, earg = .emean, tag = FALSE),
+ namesof("sd", .lsd, earg = .esd, tag = FALSE))
if ((ncol(y <- cbind(y)) != 1) || any(y <= 0))
- stop("response must be a vector or a one-column matrix with positive values")
+ stop("response must be a vector or a one-column ",
+ "matrix with positive values")
if (!length(etastart)) {
junk = if (is.R()) lm.wfit(x=x, y=y, w=w) else
lm.wfit(x=x, y=y, w=w, method="qr")
@@ -650,7 +670,8 @@ rfnorm = function(n, mean=0, sd=1, a1=1, a2=1) {
if (FALSE) {
if ((ncol(cbind(w)) != 1) || any(w != round(w)))
- stop("'weights' must be a vector or a one-column matrix with integer values")
+ stop("'weights' must be a vector or a one-column matrix ",
+ "with integer values")
m1d = meany = weighted.mean(y, w)
m2d = weighted.mean(y^2, w)
stddev = sqrt( sum(w * junk$resid^2) / junk$df.residual )
@@ -669,20 +690,21 @@ rfnorm = function(n, mean=0, sd=1, a1=1, a2=1) {
{if( .method.init == 1) median(y) else meany}, len=n)
sd.init = rep(if(length( .isd)) .isd else
{if( .method.init == 1) stddev else 1.2*sd(y)}, len=n)
- etastart = cbind(theta2eta(mean.init, .lmean, earg= .emean),
- theta2eta(sd.init, .lsd, earg= .esd))
+ etastart = cbind(theta2eta(mean.init, .lmean, earg = .emean),
+ theta2eta(sd.init, .lsd, earg = .esd))
}
- }), list( .lmean=lmean, .lsd=lsd, .emean=emean, .esd=esd,
- .imean=imean, .isd=isd, .a1=a1, .a2=a2,
- .method.init=method.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- mymu = eta2theta(eta[,1], .lmean, earg= .emean)
- mysd = eta2theta(eta[,2], .lsd, earg= .esd)
+ }), list( .lmean = lmean, .lsd = lsd, .emean = emean, .esd = esd,
+ .imean = imean, .isd = isd, .a1 = a1, .a2 = a2,
+ .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ mymu = eta2theta(eta[,1], .lmean, earg = .emean)
+ mysd = eta2theta(eta[,2], .lsd, earg = .esd)
mytheta = mymu/mysd
mysd * (( .a1+ .a2) * (mytheta * pnorm(mytheta) +
dnorm(mytheta)) - .a2 * mytheta)
- }, list( .lmean=lmean, .lsd=lsd, .emean=emean, .esd=esd, .a1=a1, .a2=a2 ))),
- last=eval(substitute(expression({
+ }, list( .lmean = lmean, .lsd = lsd,
+ .emean = emean, .esd = esd, .a1 = a1, .a2 = a2 ))),
+ last = eval(substitute(expression({
misc$link = c("mu"= .lmean, "sd"= .lsd)
misc$earg = list("mu"= .emean, "sd"= .esd)
misc$expected = TRUE
@@ -691,25 +713,29 @@ rfnorm = function(n, mean=0, sd=1, a1=1, a2=1) {
misc$method.init = .method.init
misc$a1 = .a1
misc$a2 = .a2
- }), list( .lmean=lmean, .lsd=lsd, .emean=emean, .esd=esd,
- .method.init=method.init, .nsimEIM=nsimEIM, .a1=a1, .a2=a2 ))),
+ }), list( .lmean = lmean, .lsd = lsd,
+ .emean = emean, .esd = esd,
+ .method.init = method.init, .nsimEIM = nsimEIM,
+ .a1 = a1, .a2 = a2 ))),
loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- mymu = eta2theta(eta[,1], .lmean, earg= .emean)
- mysd = eta2theta(eta[,2], .lsd, earg= .esd)
+ function(mu,y,w,residuals = FALSE, eta, extra = NULL) {
+ mymu = eta2theta(eta[,1], .lmean, earg = .emean)
+ mysd = eta2theta(eta[,2], .lsd, earg = .esd)
a1vec = .a1
a2vec = .a2
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
sum(w*log(dnorm(x=y/(a1vec*mysd) - mymu/mysd)/(a1vec*mysd) +
dnorm(x=y/(a2vec*mysd) + mymu/mysd)/(a2vec*mysd)))
}
- }, list( .lmean=lmean, .lsd=lsd, .emean=emean, .esd=esd, .a1=a1, .a2=a2 ))),
+ }, list( .lmean = lmean, .lsd = lsd,
+ .emean = emean, .esd = esd, .a1 = a1, .a2 = a2 ))),
vfamily=c("fnormal1"),
- deriv=eval(substitute(expression({
- mymu = eta2theta(eta[,1], .lmean, earg= .emean)
- mysd = eta2theta(eta[,2], .lsd, earg= .esd)
- dmu.deta = dtheta.deta(mymu, .lmean, earg= .emean)
- dsd.deta = dtheta.deta(mysd, .lsd, earg= .esd)
+ deriv = eval(substitute(expression({
+ mymu = eta2theta(eta[,1], .lmean, earg = .emean)
+ mysd = eta2theta(eta[,2], .lsd, earg = .esd)
+ dmu.deta = dtheta.deta(mymu, .lmean, earg = .emean)
+ dsd.deta = dtheta.deta(mysd, .lsd, earg = .esd)
a1vec = .a1
a2vec = .a2
d3 = deriv3(~ log((exp(-0.5*(y/(a1vec*mysd) - mymu/mysd)^2)/a1vec +
@@ -720,14 +746,18 @@ rfnorm = function(n, mean=0, sd=1, a1=1, a2=1) {
dl.dthetas = attr(eval.d3, "gradient") # == cbind(dl.dmu, dl.dsd)
dtheta.detas = cbind(dmu.deta, dsd.deta)
w * dtheta.detas * dl.dthetas
- }), list( .lmean=lmean, .lsd=lsd, .emean=emean, .esd=esd, .a1=a1, .a2=a2 ))),
- weight=eval(substitute(expression({
- de3 = deriv3(~ log((exp(-0.5*(ysim/(a1vec*mysd) - mymu/mysd)^2)/a1vec +
- exp(-0.5*(ysim/(a2vec*mysd) + mymu/mysd)^2)/a2vec)/(mysd*sqrt(2*pi))),
+ }), list( .lmean = lmean, .lsd = lsd, .emean = emean, .esd = esd,
+ .a1 = a1, .a2 = a2 ))),
+ weight = eval(substitute(expression({
+ de3 = deriv3(~ log((exp(-0.5*(ysim/(a1vec*mysd) -
+ mymu/mysd)^2)/a1vec +
+ exp(-0.5*(ysim/(a2vec*mysd) +
+ mymu/mysd)^2)/a2vec)/(mysd*sqrt(2*pi))),
name=c("mymu","mysd"), hessian= TRUE)
run.mean = 0
for(ii in 1:( .nsimEIM )) {
- ysim = rfnorm(n=n, mean=mymu, sd=mysd, a1= a1vec, a2= a2vec)
+ ysim = rfnorm(n=n, mean = mymu, sd = mysd,
+ a1 = a1vec, a2 = a2vec)
eval.de3 = eval(de3)
d2l.dthetas2 = attr(eval.de3, "hessian")
rm(ysim)
@@ -741,51 +771,52 @@ rfnorm = function(n, mean=0, sd=1, a1=1, a2=1) {
}
wz = if (intercept.only)
- matrix(colMeans(run.mean), n, dimm(M), byrow=TRUE) else run.mean
+ matrix(colMeans(run.mean), n, dimm(M), byrow = TRUE) else
+ run.mean
- index0 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
+ index0 = iam(NA, NA, M=M, both = TRUE, diag = TRUE)
wz = wz * dtheta.detas[,index0$row] * dtheta.detas[,index0$col]
w * wz
- }), list( .nsimEIM=nsimEIM, .a1=a1, .a2=a2 ))))
+ }), list( .nsimEIM = nsimEIM, .a1 = a1, .a2 = a2 ))))
}
+# Control function for lqnorm(): returns the list of control settings
+# used during fitting (currently only 'trace', which reports iterations).
-lqnorm.control = function(trace=TRUE, ...)
+lqnorm.control = function(trace = TRUE, ...)
{
    list(trace=trace)
}
+# VGAM family function: fits a model by minimizing the q-norm of the
+# residuals, sum(w * |y - mu|^qpower), with mu linked to eta via 'link'.
+# Requires qpower > 1 so that the second-derivative (weight) term exists.
-lqnorm = function(qpower=2, link="identity", earg=list(),
-                method.init=1, imu=NULL, shrinkage.init=0.95)
+lqnorm = function(qpower = 2, link = "identity", earg = list(),
+                  method.init = 1, imu = NULL, shrinkage.init = 0.95)
{
    if (mode(link) != "character" && mode(link) != "name")
        link = as.character(substitute(link))
+    # NOTE(review): 'eerg' below looks like a typo for 'earg'; as written the
+    # assignment is discarded and a non-list 'earg' passes through unchanged.
    if (!is.list(earg)) eerg = list()
-    if (!is.Numeric(qpower, allow=1) || qpower <= 1)
+    if (!is.Numeric(qpower, allow = 1) || qpower <= 1)
        stop("bad input for argument 'qpower'")
-    if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
-       method.init > 3)
+    if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+        method.init > 3)
        stop("'method.init' must be 1 or 2 or 3")
-    if (!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
+    if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
       shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")

    new("vglmff",
-    blurb=c("Minimizing the q-norm of residuals\n",
+    blurb = c("Minimizing the q-norm of residuals\n",
           "Links: ",
-           namesof("Y1", link, earg=earg, tag= TRUE)),
-    initialize=eval(substitute(expression({
+           namesof("Y1", link, earg = earg, tag = TRUE)),
+    initialize = eval(substitute(expression({
+        # Single-response only.  Initial mu comes from 'imu' if supplied,
+        # otherwise from the weighted mean or a shrinkage blend of the
+        # weighted mean and the raw response (method.init 1..3).
        M = if (is.matrix(y)) ncol(y) else 1
        if (M != 1)
            stop("response must be a vector or a one-column matrix")
        dy = dimnames(y)
        predictors.names = if (!is.null(dy[[2]])) dy[[2]] else
            paste("mu", 1:M, sep="")
-        predictors.names = namesof(predictors.names, link= .link,
-                                   earg= .earg, short=TRUE)
+        predictors.names = namesof(predictors.names, link = .link,
+                                   earg = .earg, short = TRUE)
        if (!length(etastart)) {
            meany = weighted.mean(y, w)
            mean.init = rep(if(length( .imu)) .imu else
@@ -793,15 +824,15 @@ lqnorm = function(qpower=2, link="identity", earg=list(),
                if ( .method.init == 1) meany else
                    .sinit * meany + (1 - .sinit) * y
                }, len=n)
-            etastart = theta2eta(mean.init, link= .link, earg= .earg)
+            etastart = theta2eta(mean.init, link = .link, earg = .earg)
        }
-    }), list( .method.init=method.init, .imu=imu, .sinit=shrinkage.init,
-              .link=link, .earg=earg ))),
-    inverse=eval(substitute(function(eta, extra=NULL) {
-        mu = eta2theta(eta, link= .link, earg= .earg)
+    }), list( .method.init = method.init, .imu=imu,
+              .sinit=shrinkage.init, .link = link, .earg = earg ))),
+    inverse = eval(substitute(function(eta, extra = NULL) {
+        mu = eta2theta(eta, link = .link, earg = .earg)
        mu
-    }, list( .link=link, .earg=earg ))),
-    last=eval(substitute(expression({
+    }, list( .link = link, .earg = earg ))),
+    last = eval(substitute(expression({
        dy = dimnames(y)
        if (!is.null(dy[[2]]))
            dimnames(fit$fitted.values) = dy
@@ -811,28 +842,707 @@ lqnorm = function(qpower=2, link="identity", earg=list(),
        misc$qpower = .qpower
        misc$method.init = .method.init
+        # Record the achieved objective: the weighted q-norm of residuals.
        misc$objectiveFunction = sum( w * (abs(y - mu))^(.qpower) )
-    }), list( .qpower=qpower,
-              .link=link, .earg=earg,
-              .method.init=method.init ))),
-    link=eval(substitute(function(mu, extra=NULL) {
-        theta2eta(mu, link= .link, earg=.earg)
-    }, list( .link=link, .earg=earg ))),
-    vfamily="lqnorm",
-    deriv=eval(substitute(expression({
-        dmu.deta = dtheta.deta(theta=mu, link=.link, earg= .earg )
+    }), list( .qpower = qpower,
+              .link = link, .earg = earg,
+              .method.init = method.init ))),
+    link = eval(substitute(function(mu, extra = NULL) {
+        theta2eta(mu, link = .link, earg = .earg)
+    }, list( .link = link, .earg = earg ))),
+    vfamily = "lqnorm",
+    deriv = eval(substitute(expression({
+        # First derivative of -|y - mu|^q w.r.t. eta:
+        # q * |r|^(q-1) * sign(r) * dmu/deta, with r = y - mu.
+        dmu.deta = dtheta.deta(theta=mu, link = .link, earg = .earg )
        myresid = y - mu
        signresid = sign(myresid)
        temp2 = (abs(myresid))^(.qpower-1)
        .qpower * w * temp2 * signresid * dmu.deta
-    }), list( .qpower=qpower, .link=link, .earg=earg ))),
-    weight=eval(substitute(expression({
+    }), list( .qpower = qpower, .link = link, .earg = earg ))),
+    weight = eval(substitute(expression({
+        # Working weight: q * (q-1) * |r|^(q-2) * (dmu/deta)^2.
        temp3 = (abs(myresid))^(.qpower-2)
        wz = .qpower * (.qpower - 1) * w * temp3 * dmu.deta^2
        wz
-    }), list( .qpower=qpower, .link=link, .earg=earg ))))
+    }), list( .qpower = qpower, .link = link, .earg = earg ))))
}
+
+
+
+ tobit <- function(Lower = 0, Upper = Inf, lmu = "identity",
+                   lsd = "loge", emu = list(), esd=list(),
+                   method.init = 1, zero = 2) {
+
+
+  # VGAM family function for the Tobit model: normal regression whose
+  # response is censored below at 'Lower' and/or above at 'Upper'.
+  # The two linear predictors model mu and sd; 'zero' selects which
+  # etas are modelled as intercept-only (default: sd).
+  if (mode(lmu) != "character" && mode(lmu) != "name")
+    lmu = as.character(substitute(lmu))
+  if (mode(lsd) != "character" && mode(lsd) != "name")
+    lsd = as.character(substitute(lsd))
+  if (!is.Numeric(method.init, allow = 1, integer = TRUE, posi = TRUE) ||
+     method.init > 2)
+    stop("'method.init' must be 1 or 2")
+  if (length(Lower) != 1 || length(Upper) != 1 ||
+     !is.numeric(Lower) || !is.numeric(Upper) || Lower >= Upper)
+    stop("Lower and Upper must have length 1 and ",
+         "be numeric with Lower < Upper")
+  if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+    stop("bad input for argument 'zero'")
+  if (!is.list(emu)) emu = list()
+  if (!is.list(esd)) esd = list()
+
+  new("vglmff",
+  blurb = c("Tobit model\n\n",
+            "Links: ", namesof("mu", lmu, earg = emu, tag = TRUE), "; ",
+            namesof("sd", lsd, earg = esd, tag = TRUE), "\n",
+            "Conditional variance: sd^2"),
+  constraints = eval(substitute(expression({
+    constraints = cm.zero.vgam(constraints, x, .zero, M)
+  }), list( .zero = zero ))),
+  initialize = eval(substitute(expression({
+    y = cbind(y)
+    if (ncol(y)!=1)stop("the response must be a vector or a 1-column matrix")
+    # Flag censored rows; values exactly on a bound count as censored.
+    extra$censoredL = (y <= .Lower)
+    extra$censoredU = (y >= .Upper)
+    # Out-of-range responses are clamped to the bounds (with a warning).
+    if (min(y) < .Lower) {
+      warning("replacing response values less than the value ",
+              .Lower, " by ", .Lower)
+      y[y < .Lower] = .Lower
+    }
+    if (max(y) > .Upper) {
+      warning("replacing response values greater than the value ",
+              .Upper, " by ", .Upper)
+      y[y > .Upper] = .Upper
+    }
+    predictors.names = c(namesof("mu", .lmu, earg = .emu, tag = FALSE),
+                         namesof("sd", .lsd, earg = .esd, tag = FALSE))
+    if (!length(etastart)) {
+      # Initial sd from a weighted LS fit; with method.init == 1 the
+      # censored rows are excluded from that fit, and their initial mu
+      # is taken from the regression's predicted values.
+      anyc = extra$censoredL | extra$censoredU
+      i11 = if ( .method.init == 1) anyc else FALSE # can be all data
+      junk = lm.wfit(x=cbind(x[!i11,]), y=y[!i11], w=w[!i11])
+      sd.y.est = sqrt( sum(w[!i11] * junk$resid^2) / junk$df.residual )
+      etastart = cbind(mu=y, rep(theta2eta(sd.y.est, .lsd, earg = .esd),
+                       length = n))
+      if (any(anyc)) etastart[anyc,1] = x[anyc,, drop = FALSE] %*% junk$coeff
+    }
+  }), list( .Lower=Lower, .Upper=Upper, .lmu = lmu, .lsd=lsd,
+            .emu = emu, .esd = esd, .method.init = method.init ))),
+  inverse = eval(substitute( function(eta, extra = NULL) {
+    eta2theta(eta[,1], .lmu, earg = .emu)
+  }, list( .lmu = lmu, .emu = emu ))),
+  last = eval(substitute(expression({
+    misc$link = c("mu" = .lmu, "sd" = .lsd)
+    misc$earg = list("mu" = .emu, "sd" = .esd)
+    misc$expected = TRUE
+    misc$Lower = .Lower
+    misc$Upper = .Upper
+  }), list( .lmu = lmu, .lsd=lsd,
+            .emu = emu, .esd = esd,
+            .Lower=Lower, .Upper=Upper ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    # Censored-normal log-likelihood in three parts: the normal
+    # log-density for uncensored rows, log P(Y <= Lower) for the
+    # left-censored, and log P(Y >= Upper) for the right-censored.
+    cenL = extra$censoredL
+    cenU = extra$censoredU
+    cen0 = !cenL & !cenU   # uncensored obsns
+    mum = eta2theta(eta[,1], .lmu, earg = .emu)
+    sd = eta2theta(eta[,2], .lsd, earg = .esd)
+    ell1 = -log(sd[cen0]) - 0.5 * ((y[cen0] - mum[cen0])/sd[cen0])^2
+    ell2 = log1p(-pnorm((mum[cenL] - .Lower)/sd[cenL]))
+    ell3 = log1p(-pnorm(( .Upper - mum[cenU])/sd[cenU]))
+    if (residuals) stop("loglikelihood residuals not ",
+                        "implemented yet") else
+    sum(w[cen0] * ell1) + sum(w[cenL] * ell2) + sum(w[cenU] * ell3)
+  }, list( .lmu = lmu, .lsd=lsd,
+           .emu = emu, .esd = esd,
+           .Lower=Lower, .Upper=Upper ))),
+  vfamily = c("tobit"),
+  deriv = eval(substitute(expression({
+    # Score vector: start with the uncensored normal score for every row,
+    # then overwrite the censored rows with their inverse-Mills-ratio
+    # (phi / (1 - Phi)) contributions.
+    cenL = extra$censoredL
+    cenU = extra$censoredU
+    cen0 = !cenL & !cenU   # uncensored obsns
+    mum = eta2theta(eta[,1], .lmu, earg = .emu)
+    sd = eta2theta(eta[,2], .lsd, earg = .esd)
+    dl.dmu = (y-mum) / sd^2
+    dl.dsd = (((y-mum)/sd)^2 - 1) / sd
+    dmu.deta = dtheta.deta(mum, .lmu, earg = .emu)
+    dsd.deta = dtheta.deta(sd, .lsd, earg = .esd)
+    if (any(cenL)) {
+      mumL = mum - .Lower
+      temp21L = mumL[cenL] / sd[cenL]
+      PhiL = pnorm(temp21L)
+      phiL = dnorm(temp21L)
+      fred21 = phiL / (1 - PhiL)
+      dl.dmu[cenL] = -fred21 / sd[cenL]
+      dl.dsd[cenL] = mumL[cenL] * fred21 / sd[cenL]^2
+      rm(fred21)
+    }
+    if (any(cenU)) {
+      mumU = .Upper - mum
+      temp21U = mumU[cenU] / sd[cenU]
+      PhiU = pnorm(temp21U)
+      phiU = dnorm(temp21U)
+      fred21 = phiU / (1 - PhiU)
+      dl.dmu[cenU] = fred21 / sd[cenU]   # Negated
+      dl.dsd[cenU] = mumU[cenU] * fred21 / sd[cenU]^2
+      rm(fred21)
+    }
+    w * cbind(dl.dmu * dmu.deta, dl.dsd * dsd.deta)
+  }), list( .lmu = lmu, .lsd=lsd,
+            .emu = emu, .esd = esd,
+            .Lower=Lower, .Upper=Upper ))),
+  weight = eval(substitute(expression({
+    # Expected information, accumulated as probability-weighted pieces:
+    # A1 = P(left-censored), A3 = P(right-censored), A2 = the remainder.
+    # Non-finite entries arise when a bound is infinite (phi/Phi terms
+    # become 0/0) and are zeroed out before being added in.
+    A1 = 1 - pnorm((mum - .Lower) / sd)   # Lower
+    A3 = 1 - pnorm(( .Upper - mum) / sd)  # Upper
+    A2 = 1 - A1 - A3                      # Middle; uncensored
+    wz = matrix(0, n, 3)
+    wz[,iam(1,1,M)] = A2 * 1 / sd^2  # ed2l.dmu2
+    wz[,iam(2,2,M)] = A2 * 2 / sd^2  # ed2l.dsd2
+    mumL = mum - .Lower
+    temp21L = mumL / sd
+    PhiL = pnorm(temp21L)
+    phiL = dnorm(temp21L)
+    temp31L = ((1-PhiL) * sd)^2
+    wz.cenL11 = phiL * (phiL - (1-PhiL)*temp21L) / temp31L
+    wz.cenL22 = mumL * phiL * ((1-PhiL) * (2 - temp21L^2) +
+                mumL * phiL / sd) / (sd * temp31L)
+    wz.cenL12 = phiL * ((1-PhiL)*(temp21L^2 - 1) - temp21L*phiL) / temp31L
+    wz.cenL11[!is.finite(wz.cenL11)] = 0
+    wz.cenL22[!is.finite(wz.cenL22)] = 0
+    wz.cenL12[!is.finite(wz.cenL12)] = 0
+    wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + A1 * wz.cenL11
+    wz[,iam(2,2,M)] = wz[,iam(2,2,M)] + A1 * wz.cenL22
+    wz[,iam(1,2,M)] = A1 * wz.cenL12
+    mumU = .Upper - mum    # often Inf
+    temp21U = mumU / sd    # often Inf
+    PhiU = pnorm(temp21U)  # often 1
+    phiU = dnorm(temp21U)  # often 0
+    temp31U = ((1-PhiU) * sd)^2 # often 0
+    tmp8 = (1-PhiU)*temp21U
+    wzcenU11 = phiU * (phiU - tmp8) / temp31U
+    tmp9 = (1-PhiU) * (2 - temp21U^2)
+    wzcenU22 = mumU * phiU * (tmp9 + mumU * phiU / sd) / (sd * temp31U)
+    wzcenU12 = -phiU * ((1-PhiU)*(temp21U^2 - 1) - temp21U*phiU) / temp31U
+    wzcenU11[!is.finite(wzcenU11)] = 0  # Needed when .Upper==Inf
+    wzcenU22[!is.finite(wzcenU22)] = 0  # Needed when .Upper==Inf
+    wzcenU12[!is.finite(wzcenU12)] = 0  # Needed when .Upper==Inf
+    wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + A3 * wzcenU11
+    wz[,iam(2,2,M)] = wz[,iam(2,2,M)] + A3 * wzcenU22
+    wz[,iam(1,2,M)] = wz[,iam(1,2,M)] + A3 * wzcenU12
+    # Convert to the eta scale and apply the prior weights.
+    wz[,iam(1,1,M)] = w * wz[,iam(1,1,M)] * dmu.deta^2
+    wz[,iam(2,2,M)] = w * wz[,iam(2,2,M)] * dsd.deta^2
+    wz[,iam(1,2,M)] = w * wz[,iam(1,2,M)] * dmu.deta * dsd.deta
+    wz
+  }), list( .lmu = lmu, .Lower=Lower, .Upper=Upper, .lsd=lsd ))))
}
+ normal1 <- function(lmean = "identity", lsd = "loge",
+                     emean = list(), esd = list(),
+                     method.init = 1,
+                     zero = -2)
+{
+
+  # VGAM family function: univariate normal distribution, supporting a
+  # multi-column response.  Each response column gets its own (mean, sd)
+  # pair, so there are Musual = 2 etas per response, interleaved as
+  # mean1, sd1, mean2, sd2, ...
+  lsdev <- lsd
+  esdev <- esd
+
+  if (mode(lmean) != "character" && mode(lmean) != "name")
+    lmean <- as.character(substitute(lmean))
+  if (mode(lsdev) != "character" && mode(lsdev) != "name")
+    lsdev <- as.character(substitute(lsdev))
+  if (length(zero) &&
+      !is.Numeric(zero, integer = TRUE))
+    stop("bad input for argument 'zero'")
+  if (!is.list(emean)) emean <- list()
+  if (!is.list(esdev)) esdev <- list()
+  if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+     method.init > 3)
+    stop("'method.init' must be 1 or 2 or 3")
+
+
+  new("vglmff",
+  blurb = c("Univariate normal distribution\n\n",
+            "Links:    ",
+            namesof("mean", lmean, earg = emean, tag = TRUE), "; ",
+            namesof("sd", lsdev, earg = esdev, tag = TRUE), "\n",
+            "Variance: sd^2"),
+  constraints = eval(substitute(expression({
+    dotzero <- .zero
+    Musual <- 2
+    eval(negzero.expression)
+  }), list( .zero = zero ))),
+  infos = eval(substitute(function(...) {
+    list(Musual = 2,
+         zero = .zero)
+  }, list( .zero = zero ))),
+  initialize = eval(substitute(expression({
+    y <- cbind(y)
+
+    ncoly <- ncol(y)
+    Musual <- 2
+    extra$ncoly <- ncoly
+    extra$Musual <- Musual
+    M <- Musual * ncoly
+
+
+    mynames1 <- paste("mean", if (ncoly > 1) 1:ncoly else "", sep = "")
+    mynames2 <- paste("sd",   if (ncoly > 1) 1:ncoly else "", sep = "")
+    predictors.names <-
+        c(namesof(mynames1, .lmean, earg = .emean, tag = FALSE),
+          namesof(mynames2, .lsdev, earg = .esdev, tag = FALSE))
+    predictors.names <- predictors.names[interleave.VGAM(M, M = ncoly)]
+
+    if (!length(etastart)) {
+      # Per-column initial values.  Mean: median / weighted mean / mean
+      # of an lm fit (method.init 1/2/3); for a "loge" mean-link, a
+      # positive floor of the response is used instead.  Sd: moment
+      # estimate, residual SD of the lm fit, or mean absolute deviation.
+      sdev.init <- mean.init <- matrix(0, n, ncoly)
+      for (jay in 1:ncoly) {
+        jfit <- lm.wfit(x = x, y = y[, jay], w = w)
+        mean.init[, jay] <- if ( .lmean == "loge")
+                            pmax(1/1024, y[, jay]) else
+          if( .method.init == 1) median(y[, jay]) else
+          if( .method.init == 2) weighted.mean(y[, jay], w = w) else
+                                 mean(jfit$fitted)
+        sdev.init[, jay] <-
+          if( .method.init == 1)
+            sqrt( sum(w * (y[, jay] - mean.init[, jay])^2) / sum(w) ) else
+          if( .method.init == 2) {
+            if (jfit$df.resid > 0)
+              sqrt( sum(w * jfit$resid^2) / jfit$df.resid ) else
+              sqrt( sum(w * jfit$resid^2) / sum(w) )
+          } else {
+            sqrt( sum(w * abs(y[, jay] - mean.init[, jay])) / sum(w) )
+          }
+
+      }
+      etastart <- cbind(theta2eta(mean.init, .lmean, earg = .emean),
+                        theta2eta(sdev.init, .lsdev, earg = .esdev))
+      etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+    }
+  }), list( .lmean = lmean, .lsdev = lsdev,
+            .emean = emean, .esdev = esdev,
+            .method.init = method.init ))),
+  inverse = eval(substitute(function(eta, extra = NULL) {
+    # Fitted values are the means (the odd-numbered etas).
+    ncoly <- extra$ncoly
+    eta2theta(eta[, 2*(1:ncoly) - 1], .lmean, earg = .emean)
+  }, list( .lmean = lmean, .emean = emean, .esdev = esdev ))),
+  last = eval(substitute(expression({
+    Musual <- extra$Musual
+    misc$link <- c(rep( .lmean, length = ncoly),
+                   rep( .lsdev, length = ncoly))
+    temp.names <- c(mynames1, mynames2)
+    temp.names <- temp.names[interleave.VGAM(Musual * ncoly, M = Musual)]
+    names(misc$link) <- temp.names
+    misc$earg <- vector("list", Musual * ncoly)
+    names(misc$earg) <- temp.names
+    for(ii in 1:ncoly) {
+      misc$earg[[Musual*ii-1]] <- .emean
+      misc$earg[[Musual*ii  ]] <- .esdev
+    }
+
+    misc$Musual <- Musual
+    misc$expected <- TRUE
+    misc$method.init <- .method.init
+  }), list( .lmean = lmean, .lsdev = lsdev,
+            .emean = emean, .esdev = esdev,
+            .method.init = method.init ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    ncoly <- extra$ncoly
+    sdev <- eta2theta(eta[, 2*(1:ncoly)    ], .lsdev, earg = .esdev)
+    if (residuals) stop("loglikelihood residuals not ",
+                        "implemented yet") else {
+      sum(w * dnorm(y, m = mu, sd = sdev, log = TRUE))
+    }
+  }, list( .lsdev = lsdev,
+           .esdev = esdev ))),
+  vfamily = c("normal1"),
+  deriv = eval(substitute(expression({
+    # Normal scores w.r.t. (mean, sd), chained to the eta scale and
+    # re-interleaved to match the predictor ordering.
+    ncoly <- extra$ncoly
+
+    mymu <- eta2theta(eta[, 2*(1:ncoly) - 1], .lmean, earg = .emean)
+    sdev <- eta2theta(eta[, 2*(1:ncoly)    ], .lsdev, earg = .esdev)
+    dl.dmu <- (y - mymu) / sdev^2
+    dl.dsd <- -1 / sdev + (y - mymu)^2 / sdev^3
+    dmu.deta <- dtheta.deta(mymu, .lmean, earg = .emean)
+    dsd.deta <- dtheta.deta(sdev, .lsdev, earg = .esdev)
+
+    ans <- w * cbind(dl.dmu * dmu.deta,
+                     dl.dsd * dsd.deta)
+    ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+    ans
+  }), list( .lmean = lmean, .lsdev = lsdev,
+            .emean = emean, .esdev = esdev ))),
+  weight = expression({
+    # Expected information is diagonal: E[-d2l/dmu2] = 1/sd^2 and
+    # E[-d2l/dsd2] = 2/sd^2 per response column.
+    wz <- matrix(as.numeric(NA), n, M)  # diag matrix; y is one-column too
+    ed2l.dmu2 <- -1 / sdev^2
+    ed2l.dsd2 <- -2 / sdev^2
+    wz[, 2*(1:ncoly) - 1] <- -ed2l.dmu2 * dmu.deta^2
+    wz[, 2*(1:ncoly)    ] <- -ed2l.dsd2 * dsd.deta^2
+    w * wz
+  }))
+}
+
+
+
+
+
+ lognormal <- function(lmeanlog = "identity", lsdlog = "loge",
+                       emeanlog = list(), esdlog = list(),
+                       zero = 2)
+{
+
+  # VGAM family function for the two-parameter lognormal distribution.
+  # Parameters are meanlog and sdlog (as in dlnorm()); the fitted value
+  # returned by 'inverse' is the mean, exp(meanlog + sdlog^2 / 2).
+  # 'zero' (1 or 2) makes the corresponding eta intercept-only.
+  if (mode(lmeanlog) != "character" && mode(lmeanlog) != "name")
+    lmeanlog = as.character(substitute(lmeanlog))
+  if (mode(lsdlog) != "character" && mode(lsdlog) != "name")
+    lsdlog = as.character(substitute(lsdlog))
+  if (length(zero) && (!is.Numeric(zero, integer = TRUE, posit = TRUE) ||
+     zero > 2))
+    stop("bad input for argument 'zero'")
+  if (!is.list(emeanlog)) emeanlog = list()
+  if (!is.list(esdlog)) esdlog = list()
+
+  new("vglmff",
+  blurb = c("Two-parameter (univariate) lognormal distribution\n\n",
+            "Links:    ",
+            namesof("meanlog", lmeanlog, earg = emeanlog, tag = TRUE), ", ",
+            namesof("sdlog", lsdlog, earg = esdlog, tag = TRUE)),
+  constraints = eval(substitute(expression({
+    constraints = cm.zero.vgam(constraints, x, .zero, M)
+  }), list( .zero = zero ))),
+  initialize = eval(substitute(expression({
+    if (ncol(cbind(y)) != 1)
+      stop("response must be a vector or a one-column matrix")
+    if (min(y) <= 0) stop("response must be positive")
+
+    predictors.names =
+      c(namesof("meanlog", .lmeanlog, earg = .emeanlog, tag = FALSE),
+        namesof("sdlog", .lsdlog, earg = .esdlog, tag = FALSE))
+
+    if (!length(etastart)) {
+      # Initialize from a weighted regression on log(y): the residual SD
+      # estimates sdlog, and log(median(y)) estimates meanlog.
+      junk = lm.wfit(x=x, y=log(y), w=w)
+      sdlog.y.est = sqrt( sum(w * junk$resid^2) / junk$df.residual )
+      etastart = cbind(
+        meanlog = rep(theta2eta(log(median(y)), .lmeanlog,
+                                earg = .emeanlog), length = n),
+        sdlog = rep(theta2eta(sdlog.y.est, .lsdlog,
+                              earg = .esdlog), length = n))
+    }
+  }), list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
+            .emeanlog = emeanlog, .esdlog = esdlog ))),
+  inverse = eval(substitute(function(eta, extra = NULL) {
+    mulog = eta2theta(eta[,1], .lmeanlog, earg = .emeanlog)
+    sdlog = eta2theta(eta[,2], .lsdlog, earg = .esdlog)
+    exp(mulog + 0.5 * sdlog^2)
+  }, list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
+           .emeanlog = emeanlog, .esdlog = esdlog ))),
+  last = eval(substitute(expression({
+    misc$link = c("meanlog" = .lmeanlog, "sdlog" = .lsdlog)
+    misc$earg = list("meanlog" = .emeanlog, "sdlog" = .esdlog)
+    misc$expected = TRUE
+  }), list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
+            .emeanlog = emeanlog, .esdlog = esdlog ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    mulog = eta2theta(eta[,1], .lmeanlog, earg = .emeanlog)
+    sdlog = eta2theta(eta[,2], .lsdlog, earg = .esdlog)
+    if (residuals) stop("loglikelihood residuals not ",
+                        "implemented yet") else {
+      sum(w * dlnorm(y, meanlog = mulog, sdlog = sdlog, log = TRUE))
+    }
+  }, list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
+           .emeanlog = emeanlog, .esdlog = esdlog ))),
+  vfamily = c("lognormal"),
+  deriv = eval(substitute(expression({
+    mulog = eta2theta(eta[,1], .lmeanlog, earg = .emeanlog)
+    sdlog = eta2theta(eta[,2], .lsdlog, earg = .esdlog)
+    dmulog.deta = dtheta.deta(mulog, .lmeanlog, earg = .emeanlog)
+    dsdlog.deta = dtheta.deta(sdlog, .lsdlog, earg = .esdlog)
+
+    # Scores of the lognormal log-density w.r.t. (meanlog, sdlog).
+    dl.dmulog = (log(y) - mulog) / sdlog^2
+    dl.dsdlog = -1 / sdlog + (log(y) - mulog)^2 / sdlog^3
+
+    w * cbind(dl.dmulog * dmulog.deta,
+              dl.dsdlog * dsdlog.deta)
+  }), list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
+            .emeanlog = emeanlog, .esdlog = esdlog ))),
+  weight = expression({
+    # Expected information is diagonal: 1/sdlog^2 for meanlog and
+    # 2/sdlog^2 for sdlog.
+    wz = matrix(as.numeric(NA), n, 2) # Diagonal!
+    ed2l.dmulog2 = 1 / sdlog^2
+    ed2l.dsdlog2 = 2 * ed2l.dmulog2
+    wz[,iam(1,1,M)] = ed2l.dmulog2 * dmulog.deta^2
+    wz[,iam(2,2,M)] = ed2l.dsdlog2 * dsdlog.deta^2
+
+    wz = w * wz
+    wz
+  }))
+}
+
+
+
+
+
+
+ lognormal3 <- function(lmeanlog = "identity", lsdlog = "loge",
+                        emeanlog = list(), esdlog = list(),
+                        powers.try = (-3):3,
+                        delta = NULL, zero = 2)
+{
+
+  # VGAM family function for the three-parameter lognormal distribution:
+  # log(y - lambda) ~ N(meanlog, sdlog^2), with a lower threshold lambda
+  # estimated on the identity link.  If 'delta' is given, lambda is
+  # initialized as min(y) - delta; otherwise candidate offsets
+  # 10^powers.try are screened via a Shapiro-Wilk normality test.
+  if (length(delta) && !is.Numeric(delta, positive = TRUE))
+    stop("bad input for argument 'delta'")
+  if (mode(lmeanlog) != "character" && mode(lmeanlog) != "name")
+    lmeanlog = as.character(substitute(lmeanlog))
+  if (mode(lsdlog) != "character" && mode(lsdlog) != "name")
+    lsdlog = as.character(substitute(lsdlog))
+  if (length(zero) && (!is.Numeric(zero, integer = TRUE, posit = TRUE) ||
+     zero > 3))
+    stop("bad input for argument 'zero'")
+  if (!is.list(emeanlog)) emeanlog = list()
+  if (!is.list(esdlog)) esdlog = list()
+
+  new("vglmff",
+  blurb = c("Three-parameter (univariate) lognormal distribution\n\n",
+            "Links:    ",
+            namesof("meanlog", lmeanlog, earg = emeanlog, tag = TRUE),
+            "; ", namesof("sdlog", lsdlog, earg = esdlog, tag = TRUE),
+            "; ", namesof("lambda", "identity", earg = list(), tag = TRUE)),
+  constraints = eval(substitute(expression({
+    constraints = cm.zero.vgam(constraints, x, .zero, M)
+  }), list( .zero = zero ))),
+  initialize = eval(substitute(expression({
+    if (ncol(cbind(y)) != 1)
+      stop("response must be a vector or a one-column matrix")
+    predictors.names =
+      c(namesof("meanlog", .lmeanlog, earg = .emeanlog, tag = FALSE),
+        namesof("sdlog", .lsdlog, earg = .esdlog, tag = FALSE),
+        "lambda")
+
+    if (!length(etastart)) {
+      miny = min(y)
+      if (length( .delta)) {
+        lambda.init = rep(miny- .delta, length = n)
+      } else {
+        # Pick the offset whose log-shifted response looks most normal
+        # (largest Shapiro-Wilk p-value, on a subsample of up to 5000).
+        pvalue.vec = NULL
+        powers.try = .powers.try
+        for(delta in 10^powers.try) {
+          pvalue.vec = c(pvalue.vec,
+                         shapiro.test(sample(log(y-miny+delta),
+                         size=min(5000, length(y ))))$p.value)
+        }
+        index.lambda=(1:length(powers.try))[pvalue.vec==max(pvalue.vec)]
+        lambda.init = miny - 10^powers.try[index.lambda]
+      }
+      # Given lambda, fall back to the two-parameter initialization on
+      # the shifted response.
+      junk = lm.wfit(x=x, y=log(y-lambda.init), w=w)
+      sdlog.y.est = sqrt( sum(w * junk$resid^2) / junk$df.residual )
+      etastart = cbind(mu = log(median(y - lambda.init)),
+        sdlog=rep(theta2eta(sdlog.y.est, .lsdlog, earg = .esdlog), length = n),
+        lambda = lambda.init)
+    }
+  }), list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
+            .emeanlog = emeanlog, .esdlog = esdlog,
+            .delta = delta, .powers.try=powers.try ))),
+  inverse = eval(substitute(function(eta, extra = NULL) {
+    mymu = eta2theta(eta[,1], .lmeanlog, earg = .emeanlog)
+    sdlog = eta2theta(eta[,2], .lsdlog, earg = .esdlog)
+    lambda = eta2theta(eta[,3], "identity", earg = list())
+    lambda + exp(mymu + 0.5 * sdlog^2)
+  }, list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
+           .emeanlog = emeanlog, .esdlog = esdlog ))),
+  last = eval(substitute(expression({
+    misc$link = c("meanlog" = .lmeanlog,
+                  "sdlog" = .lsdlog,
+                  "lambda" = "identity")
+    misc$earg = list("meanlog" = .emeanlog,
+                     "sdlog" = .esdlog,
+                     "lambda" = list())
+    misc$expected = TRUE
+  }), list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
+            .emeanlog = emeanlog, .esdlog = esdlog ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    mymu = eta2theta(eta[,1], .lmeanlog, earg = .emeanlog)
+    sdlog = eta2theta(eta[,2], .lsdlog, earg = .esdlog)
+    lambda = eta2theta(eta[,3], "identity", earg = list())
+    if (any(y < lambda))
+      warning("bad 'y'")
+    if (residuals) stop("loglikelihood residuals not ",
+                        "implemented yet") else {
+      sum(w*dlnorm(y-lambda, meanlog=mymu, sdlog = sdlog, log = TRUE))
+    }
+  }, list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
+           .emeanlog = emeanlog, .esdlog = esdlog ))),
+  vfamily = c("lognormal3"),
+  deriv = eval(substitute(expression({
+    # Scores of the shifted-lognormal log-density w.r.t.
+    # (meanlog, sdlog, lambda), chained to the eta scale.
+    mymu = eta2theta(eta[,1], .lmeanlog, earg = .emeanlog)
+    sdlog = eta2theta(eta[,2], .lsdlog, earg = .esdlog)
+    lambda = eta2theta(eta[,3], "identity", earg = list())
+    if (any(y < lambda))
+      warning("bad 'y'")
+    dl.dmymu = (log(y-lambda)-mymu) / sdlog^2
+    dl.dsdlog = -1/sdlog + (log(y-lambda)-mymu)^2 / sdlog^3
+    dl.dlambda = (1 + (log(y-lambda)-mymu) / sdlog^2) / (y-lambda)
+    dmymu.deta = dtheta.deta(mymu, .lmeanlog, earg = .emeanlog)
+    dsdlog.deta = dtheta.deta(sdlog, .lsdlog, earg = .esdlog)
+    dlambda.deta = dtheta.deta(lambda, "identity", earg = list())
+    w * cbind(dl.dmymu * dmymu.deta,
+              dl.dsdlog * dsdlog.deta,
+              dl.dlambda * dlambda.deta)
+  }), list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
+            .emeanlog = emeanlog, .esdlog = esdlog ))),
+  weight = expression({
+    # Expected information; the lambda parameter correlates with both
+    # meanlog and sdlog, so off-diagonal (1,3) and (2,3) entries appear.
+    wz = matrix(0, n, dimm(M))
+    ed2l.dmymu2 = 1 / sdlog^2
+    ed2l.dsdlog = 2 / sdlog^2
+    temp9 = exp(-mymu+sdlog^2 / 2)
+    ed2l.dlambda2 = exp(2*(-mymu+sdlog^2)) * (1+sdlog^2) / sdlog^2
+    wz[,iam(1,1,M)] = ed2l.dmymu2 * dmymu.deta^2
+    wz[,iam(2,2,M)] = ed2l.dsdlog * dsdlog.deta^2
+    wz[,iam(3,3,M)] = ed2l.dlambda2 * dlambda.deta^2
+    wz[,iam(1,3,M)] = temp9 * dmymu.deta * dlambda.deta / sdlog^2
+    wz[,iam(2,3,M)] = -2 * temp9 / sdlog * dsdlog.deta * dlambda.deta
+    wz = w * wz
+    wz
+  }))
+}
+
+
+
+
+
+dsnorm = function(x, location=0, scale=1, shape=0, log = FALSE) {
+
+  # Density of the skew-normal distribution:
+  #   f(x) = (2/scale) * dnorm(z) * pnorm(shape * z),  z = (x-location)/scale.
+  # Assembled on the log scale for numerical stability; exponentiated
+  # only when log = FALSE.
+  use.log <- log
+  if (!is.logical(use.log))
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  if (!is.Numeric(scale, posit = TRUE))
+    stop("bad input for argument 'scale'")
+
+  std <- (x - location) / scale
+  log.density <- log(2) - log(scale) +
+                 dnorm(std, log = TRUE) +
+                 pnorm(shape * std, log.p = TRUE)
+
+  if (use.log) log.density else exp(log.density)
+}
+
+
+
+rsnorm = function(n, location=0, scale=1, shape=0) {
+
+  # Random generation for the skew-normal distribution via the
+  # two-normal representation: with rho = shape / sqrt(1 + shape^2) and
+  # (u0, v) iid N(0,1), the combination rho*u0 + sqrt(1-rho^2)*v,
+  # reflected to share the sign of u0, is skew-normal(shape).
+  if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
+    stop("bad input for argument 'n'")
+  if (!is.Numeric(scale, posit = TRUE))
+    stop("bad input for argument 'scale'")
+  if (!is.Numeric(shape)) stop("bad input for argument 'shape'")
+
+  corr.coef <- shape / sqrt(1 + shape^2)
+  first.draw <- rnorm(n)
+  second.draw <- rnorm(n)
+  combo <- corr.coef * first.draw + sqrt(1 - corr.coef^2) * second.draw
+  signed <- ifelse(first.draw >= 0, combo, -combo)
+  location + scale * signed
+}
+
+
+
+
+ skewnormal1 = function(lshape = "identity", earg = list(), ishape = NULL,
+                        nsimEIM = NULL)
+{
+  # VGAM family function: one-parameter skew-normal distribution, with
+  # location fixed at 0 and scale at 1; only the shape parameter is
+  # modelled.  Mean = shape * sqrt(2 / (pi * (1 + shape^2))).
+  # If 'nsimEIM' is supplied the EIM is estimated by simulation,
+  # otherwise the observed information is used.
+  if (mode(lshape) != "character" && mode(lshape) != "name")
+    lshape = as.character(substitute(lshape))
+  if (!is.list(earg)) earg = list()
+  if (length(nsimEIM) &&
+     (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 10))
+    stop("argument 'nsimEIM' should be an integer greater than 10")
+
+  new("vglmff",
+  blurb = c("1-parameter Skew-normal distribution\n\n",
+            "Link:     ",
+            namesof("shape", lshape, earg = earg), "\n",
+            "Mean:     shape * sqrt(2 / (pi * (1+shape^2 )))\n",
+            "Variance: 1-mu^2"),
+  infos = eval(substitute(function(...) {
+    list(Musual = 1,
+         nsimEIM = .nsimEIM)
+  }, list( .nsimEIM = nsimEIM ))),
+  initialize = eval(substitute(expression({
+    y = cbind(y)
+    if (ncol(y) != 1)
+      stop("response must be a vector or a one-column matrix")
+    predictors.names =
+      namesof("shape", .lshape, earg = .earg, tag = FALSE)
+    if (!length(etastart)) {
+      # Moment-based initial shape: invert mean = shape*sqrt(2/(pi*(1+shape^2)))
+      # where |y| permits it; otherwise keep y itself as a rough start.
+      init.shape = if (length( .ishape)) rep( .ishape, len = n) else {
+        temp = y
+        index = abs(y) < sqrt(2/pi)-0.01
+        temp[!index] = y[!index]
+        temp[index] = sign(y[index])/sqrt(2/(pi*y[index]*y[index])-1)
+        temp
+      }
+      etastart = matrix(init.shape, n, ncol(y))
+    }
+  }), list( .lshape = lshape, .earg = earg, .ishape = ishape ))),
+  inverse = eval(substitute(function(eta, extra = NULL) {
+    alpha = eta2theta(eta, .lshape, earg = .earg)
+    alpha * sqrt(2/(pi * (1+alpha^2 )))
+  }, list( .earg = earg, .lshape = lshape ))),
+  last = eval(substitute(expression({
+    misc$link = c(shape = .lshape)
+    misc$earg = list(shape = .earg )
+    misc$nsimEIM = .nsimEIM
+    # Expected information only when the simulated EIM is used.
+    misc$expected = (length( .nsimEIM ) > 0)
+  }), list( .earg = earg, .lshape = lshape, .nsimEIM = nsimEIM ))),
+  link = eval(substitute(function(mu, extra = NULL) {
+    alpha = mu / sqrt(2/pi - mu^2)
+    theta2eta(alpha, .lshape, earg = .earg)
+  }, list( .earg = earg, .lshape = lshape ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    alpha = eta2theta(eta, .lshape, earg = .earg)
+    if (residuals) stop("loglikelihood residuals not ",
+                        "implemented yet") else {
+      sum(w * dsnorm(x=y, location=0, scale=1, shape=alpha, log = TRUE))
+    }
+  }, list( .earg = earg, .lshape = lshape ))),
+  vfamily = c("skewnormal1"),
+  deriv = eval(substitute(expression({
+    # Score: dl/dshape = y * dnorm(y*shape) / pnorm(y*shape).
+    alpha = eta2theta(eta, .lshape, earg = .earg)
+    zedd = y*alpha
+    tmp76 = pnorm(zedd)
+    tmp86 = dnorm(zedd)
+    dl.dshape = tmp86 * y / tmp76
+    dshape.deta = dtheta.deta(alpha, .lshape, earg = .earg)
+    w * dl.dshape * dshape.deta
+  }), list( .earg = earg, .lshape = lshape ))),
+  weight = eval(substitute(expression({
+    if ( length( .nsimEIM )) {
+      # Simulated EIM: average the second derivative over nsimEIM draws
+      # from the fitted skew-normal, using a running mean.
+      run.mean = 0
+      for(ii in 1:( .nsimEIM)) {
+        ysim = rsnorm(n, location = 0, scale = 1, shape = alpha)
+        zedd = ysim*alpha
+        tmp76 = pnorm(zedd)
+        tmp86 = dnorm(zedd)
+        d2l.dshape2 = -ysim*ysim*tmp86*(tmp76*zedd+tmp86)/tmp76^2
+        rm(ysim)
+        run.mean = ((ii-1) * run.mean + d2l.dshape2) / ii
+      }
+      if (intercept.only)
+        run.mean = mean(run.mean)
+      wz = -w * (dshape.deta^2) * run.mean
+    } else {
+      # Observed information fallback (quantities from the deriv slot).
+      d2shape.deta2 = d2theta.deta2(alpha, .lshape, earg = .earg)
+      d2l.dshape2 = -y*y * tmp86 * (tmp76 * zedd + tmp86) / tmp76^2
+      wz = -(dshape.deta^2) * d2l.dshape2 - d2shape.deta2 * dl.dshape
+      wz = w * wz
+    }
+    wz
+  }), list( .earg = earg, .lshape = lshape, .nsimEIM = nsimEIM ))))
+}
+
+
+
+
+
+
+
+
+
diff --git a/R/family.positive.R b/R/family.positive.R
index 217b2b8..a085dec 100644
--- a/R/family.positive.R
+++ b/R/family.positive.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -7,7 +8,7 @@
-dposnegbin = function(x, size, prob=NULL, munb=NULL, log=FALSE) {
+dposnegbin = function(x, size, prob = NULL, munb = NULL, log = FALSE) {
if (length(munb)) {
if (length(prob))
stop("'prob' and 'munb' both specified")
@@ -35,7 +36,7 @@ dposnegbin = function(x, size, prob=NULL, munb=NULL, log=FALSE) {
}
-pposnegbin = function(q, size, prob=NULL, munb=NULL) {
+pposnegbin = function(q, size, prob = NULL, munb = NULL) {
if (length(munb)) {
if (length(prob))
stop("'prob' and 'munb' both specified")
@@ -50,22 +51,22 @@ pposnegbin = function(q, size, prob=NULL, munb=NULL) {
}
-qposnegbin = function(p, size, prob=NULL, munb=NULL) {
+qposnegbin = function(p, size, prob = NULL, munb = NULL) {
if (length(munb)) {
if (length(prob))
stop("'prob' and 'munb' both specified")
prob <- size/(size + munb)
}
- if (!is.Numeric(p, posit=TRUE) || any(p >= 1))
+ if (!is.Numeric(p, posit = TRUE) || any(p >= 1))
stop("bad input for argument 'p'")
qnbinom(p * pnbinom(q=p*0, size=size, prob=prob, lower.tail = FALSE) +
dnbinom(x=p*0, size=size, prob=prob), size=size, prob=prob)
}
-rposnegbin = function(n, size, prob=NULL, munb=NULL) {
+rposnegbin = function(n, size, prob = NULL, munb = NULL) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
stop("bad input for argument 'n'") else n
if (length(munb)) {
@@ -94,47 +95,53 @@ rposnegbin = function(n, size, prob=NULL, munb=NULL) {
posnegbinomial = function(lmunb = "loge", lk = "loge",
emunb =list(), ek = list(),
ik = NULL, zero = -2, cutoff = 0.995,
- shrinkage.init=0.95, method.init=1)
+ shrinkage.init = 0.95, method.init = 1)
{
- if (!is.Numeric(cutoff, allow=1) || cutoff<0.8 || cutoff>=1)
+
+
+ lkayy <- lk
+ ekayy <- ek
+ ikayy <- ik
+
+ if (!is.Numeric(cutoff, allow = 1) || cutoff<0.8 || cutoff>=1)
stop("range error in the argument 'cutoff'")
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
method.init > 2) stop("argument 'method.init' must be 1 or 2")
- if (length(ik) && !is.Numeric(ik, posit=TRUE))
+ if (length(ikayy) && !is.Numeric(ikayy, posit = TRUE))
stop("bad input for argument 'ik'")
- if (!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
+ if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
if (mode(lmunb) != "character" && mode(lmunb) != "name")
lmunb = as.character(substitute(lmunb))
- if (mode(lk) != "character" && mode(lk) != "name")
- lk = as.character(substitute(lk))
+ if (mode(lkayy) != "character" && mode(lkayy) != "name")
+ lkayy = as.character(substitute(lkayy))
if (!is.list(emunb)) emunb = list()
if (!is.list(ek)) ek = list()
new("vglmff",
- blurb=c("Positive-negative binomial distribution\n\n",
- "Links: ",
- namesof("munb", lmunb, earg= emunb ), ", ",
- namesof("k", lk, earg= ek ), "\n",
- "Mean: munb / (1 - (k/(k+munb))^k)"),
- constraints=eval(substitute(expression({
- temp752 = .zero
- if (length(temp752) && all(temp752 == -2))
- temp752 = 2*(1:ncol(y))
- constraints = cm.zero.vgam(constraints, x, temp752, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ blurb = c("Positive-negative binomial distribution\n\n",
+ "Links: ",
+ namesof("munb", lmunb, earg = emunb ), ", ",
+ namesof("k", lkayy, earg = ek ), "\n",
+ "Mean: munb / (1 - (k/(k+munb))^k)"),
+ constraints = eval(substitute(expression({
+
+ dotzero = .zero
+ Musual = 2
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (any(y==0)) stop("there are zero values in the response")
y = as.matrix(y)
M = 2 * ncol(y)
extra$NOS = NOS = ncoly = ncol(y) # Number of species
predictors.names = c(
- namesof(if(NOS==1) "munb" else paste("munb", 1:NOS, sep=""),
- .lmunb, earg= .emunb, tag= FALSE),
- namesof(if(NOS==1) "k" else paste("k", 1:NOS, sep=""),
- .lk, earg= .ek, tag= FALSE))
- predictors.names = predictors.names[interleave.VGAM(M, M=2)]
+ namesof(if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = ""),
+ .lmunb, earg = .emunb, tag = FALSE),
+ namesof(if (NOS == 1) "k" else paste("k", 1:NOS, sep = ""),
+ .lkayy, earg = .ekayy, tag = FALSE))
+ predictors.names = predictors.names[interleave.VGAM(M, M = 2)]
if (!length(etastart)) {
mu.init = y
for(iii in 1:ncol(y)) {
@@ -143,16 +150,17 @@ rposnegbin = function(n, size, prob=NULL, munb=NULL) {
} else {
median(y[,iii])
}
- mu.init[,iii] = (1- .sinit) * y[,iii] + .sinit * use.this
+ mu.init[,iii] = (1 - .sinit) * y[,iii] + .sinit * use.this
}
- if ( is.Numeric( .ik )) {
- kmat0 = matrix( .ik, nr=n, nc=NOS, byrow=TRUE)
+ if ( is.Numeric( .ikayy )) {
+ kmat0 = matrix( .ikayy, nr = n, nc = NOS, byrow = TRUE)
} else {
posnegbinomial.Loglikfun =
function(kmat, y, x, w, extraargs) {
munb = extraargs
- sum(w * dposnegbin(x=y, size=kmat, munb=munb, log=TRUE))
+ sum(w * dposnegbin(x = y, size = kmat, munb = munb,
+ log = TRUE))
}
k.grid = 2^((-6):6)
kmat0 = matrix(0, nr=n, nc=NOS)
@@ -164,57 +172,67 @@ rposnegbin = function(n, size, prob=NULL, munb=NULL) {
}
}
p00 = (kmat0 / (kmat0 + mu.init))^kmat0
- etastart = cbind(theta2eta(mu.init*(1-p00), .lmunb, earg= .emunb ),
- theta2eta(kmat0, .lk, earg= .ek ))
- etastart = etastart[,interleave.VGAM(M, M=2),drop=FALSE]
+ etastart =
+ cbind(theta2eta(mu.init*(1-p00), .lmunb, earg = .emunb ),
+ theta2eta(kmat0, .lkayy, earg = .ekayy ))
+ etastart = etastart[,interleave.VGAM(M, M = 2), drop = FALSE]
}
- }), list( .lmunb=lmunb, .lk=lk, .ik=ik,
- .emunb=emunb, .ek=ek,
- .sinit=shrinkage.init,
- .method.init=method.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+ }), list( .lmunb = lmunb, .lkayy = lkayy, .ikayy = ikayy,
+ .emunb = emunb, .ekayy = ekayy,
+ .sinit = shrinkage.init,
+ .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
NOS = ncol(eta) / 2
- munb = eta2theta(eta[,2*(1:NOS)-1,drop=FALSE], .lmunb, earg= .emunb )
- kmat = eta2theta(eta[,2*(1:NOS),drop=FALSE], .lk, earg= .ek )
+ munb = eta2theta(eta[,2*(1:NOS)-1, drop = FALSE],
+ .lmunb, earg = .emunb )
+ kmat = eta2theta(eta[,2*(1:NOS), drop = FALSE],
+ .lkayy, earg = .ekayy )
p0 = (kmat / (kmat + munb))^kmat
munb / (1 - p0)
- }, list( .lk=lk, .lmunb=lmunb,
- .emunb=emunb, .ek=ek ))),
- last=eval(substitute(expression({
- temp0303 = c(rep( .lmunb, length=NOS), rep( .lk, length=NOS))
- names(temp0303) = c(if(NOS==1) "munb" else paste("munb", 1:NOS, sep=""),
- if (NOS==1) "k" else paste("k", 1:NOS, sep=""))
- temp0303 = temp0303[interleave.VGAM(M, M=2)]
+ }, list( .lkayy = lkayy, .lmunb = lmunb,
+ .ekayy = ekayy, .emunb = emunb ))),
+ last = eval(substitute(expression({
+ temp0303 = c(rep( .lmunb, length = NOS),
+ rep( .lkayy, length = NOS))
+ names(temp0303) =
+ c(if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = ""),
+ if (NOS == 1) "k" else paste("k", 1:NOS, sep = ""))
+ temp0303 = temp0303[interleave.VGAM(M, M = 2)]
misc$link = temp0303 # Already named
misc$earg = vector("list", 2*NOS)
names(misc$earg) = names(misc$link)
for(ii in 1:NOS) {
misc$earg[[2*ii-1]] = .emunb
- misc$earg[[2*ii ]] = .ek
+ misc$earg[[2*ii ]] = .ekayy
}
misc$cutoff = .cutoff
misc$method.init = .method.init
- }), list( .lmunb=lmunb, .lk=lk, .cutoff=cutoff,
- .emunb=emunb, .ek=ek,
- .method.init=method.init ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ }), list( .lmunb = lmunb, .lkayy = lkayy,
+ .emunb = emunb, .ekayy = ekayy,
+ .cutoff = cutoff, .method.init = method.init ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE,eta, extra = NULL) {
NOS = ncol(eta) / 2
- munb = eta2theta(eta[,2*(1:NOS)-1,drop=FALSE], .lmunb, earg= .emunb )
- kmat = eta2theta(eta[,2*(1:NOS),drop=FALSE], .lk, earg= .ek )
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dposnegbin(x=y, size=kmat, munb=munb, log=TRUE))
+ munb = eta2theta(eta[,2*(1:NOS)-1, drop = FALSE],
+ .lmunb, earg = .emunb )
+ kmat = eta2theta(eta[,2*(1:NOS), drop = FALSE],
+ .lkayy, earg = .ekayy )
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dposnegbin(x = y, size = kmat, munb = munb, log = TRUE))
}
- }, list( .lmunb=lmunb, .lk=lk,
- .emunb=emunb, .ek=ek ))),
- vfamily=c("posnegbinomial"),
- deriv=eval(substitute(expression({
- NOS= extra$NOS
- munb = eta2theta(eta[,2*(1:NOS)-1,drop=FALSE], .lmunb, earg= .emunb )
- kmat = eta2theta(eta[,2*(1:NOS),drop=FALSE], .lk, earg= .ek )
- d3 = deriv3(~ -log(1 - (kmat. /(kmat. + munb. ))^kmat. ),
- c("munb.", "kmat."), hessian= TRUE) # Extra term
+ }, list( .lmunb = lmunb, .lkayy = lkayy,
+ .emunb = emunb, .ekayy = ekayy ))),
+ vfamily = c("posnegbinomial"),
+ deriv = eval(substitute(expression({
+ NOS = extra$NOS
+ munb = eta2theta(eta[,2*(1:NOS)-1, drop = FALSE],
+ .lmunb, earg = .emunb )
+ kmat = eta2theta(eta[,2*(1:NOS), drop = FALSE],
+ .lkayy, earg = .ekayy )
+ d3 = deriv3(~ -log(1 - (kmat. / (kmat. + munb. ))^kmat. ),
+ c("munb.", "kmat."), hessian = TRUE) # Extra term
dl0.dthetas = array(NA, c(n, NOS, 2))
d2l0.dthetas2 = array(NA, c(n, NOS, 3)) # matrix-band format
for(spp. in 1:NOS) {
@@ -230,52 +248,54 @@ rposnegbin = function(n, size, prob=NULL, munb=NULL) {
NOS = ncol(eta) / 2
dl.dmunb = y/munb - (y+kmat)/(kmat+munb) + dl0.dthetas[,,1]
- dl.dk = digamma(y+kmat) - digamma(kmat) - (y+kmat)/(munb+kmat) + 1 +
- log(kmat/(kmat+munb)) + dl0.dthetas[,,2]
- dmunb.deta = dtheta.deta(munb, .lmunb, earg= .emunb )
- dk.deta = dtheta.deta(kmat, .lk, earg= .ek )
- myderiv = w * cbind(dl.dmunb * dmunb.deta, dl.dk * dk.deta)
- myderiv[,interleave.VGAM(M, M=2)]
- }), list( .lmunb=lmunb, .lk=lk,
- .emunb=emunb, .ek=ek ))),
- weight=eval(substitute(expression({
+ dl.dkayy = digamma(y+kmat) - digamma(kmat) -
+ (y+kmat) / (munb+kmat) + 1 +
+ log(kmat /(kmat+munb)) + dl0.dthetas[,,2]
+ dmunb.deta = dtheta.deta(munb, .lmunb, earg = .emunb )
+ dkayy.deta = dtheta.deta(kmat, .lkayy, earg = .ekayy )
+ myderiv = w * cbind(dl.dmunb * dmunb.deta,
+ dl.dkayy * dkayy.deta)
+ myderiv[, interleave.VGAM(M, M = 2)]
+ }), list( .lmunb = lmunb, .lkayy = lkayy,
+ .emunb = emunb, .ekayy = ekayy ))),
+ weight = eval(substitute(expression({
wz = matrix(0, n, 4*NOS-1) # wz is no longer 'diagonal'
p0 = (kmat / (kmat + munb))^kmat
- ed2l.dmunb2 = (1/munb - (munb + kmat*(1-p0))/(munb+kmat)^2) / (1-p0) -
+ ed2l.dmunb2 = (1/munb -
+ (munb + kmat*(1-p0))/(munb+kmat)^2) / (1-p0) -
d2l0.dthetas2[,,1]
- fred = dotFortran(name="enbin8",
- ans=double(n*NOS),
- as.double(kmat),
- as.double(kmat/(munb+kmat)),
- as.double(.cutoff),
- as.integer(n), ok=as.integer(1), as.integer(NOS),
- sumpdf=double(1), macheps=as.double(.Machine$double.eps))
+ fred = dotFortran(name="enbin8", ans=double(n*NOS),
+ as.double(kmat), as.double(kmat/(munb+kmat)),
+ as.double(.cutoff), as.integer(n),
+ ok=as.integer(1), as.integer(NOS), sumpdf=double(1),
+ macheps = as.double(.Machine$double.eps))
if (fred$ok != 1)
stop("error in Fortran subroutine exnbin")
dim(fred$ans) = c(n, NOS)
ed2l.dk2 = -fred$ans/(1-p0) - 1/kmat + 1/(kmat+munb) -
munb * p0 / ((1-p0)*(munb+kmat)^2) - d2l0.dthetas2[,,2]
wz[,2*(1:NOS)-1] = dmunb.deta^2 * ed2l.dmunb2
- wz[,2*(1:NOS)] = dk.deta^2 * ed2l.dk2
- wz[,2*NOS+2*(1:NOS)-1] = -d2l0.dthetas2[,,3] * dmunb.deta * dk.deta
+ wz[,2*(1:NOS) ] = dkayy.deta^2 * ed2l.dk2
+ wz[,2*NOS+2*(1:NOS)-1] = -d2l0.dthetas2[,,3] *
+ dmunb.deta * dkayy.deta
w * wz
- }), list( .cutoff=cutoff ))))
+ }), list( .cutoff = cutoff ))))
}
-dpospois = function(x, lambda, log=FALSE) {
+dpospois = function(x, lambda, log = FALSE) {
if (!is.logical(log.arg <- log)) stop("bad input for 'log'")
rm(log)
- if (!is.Numeric(lambda, posit=TRUE))
+ if (!is.Numeric(lambda, posit = TRUE))
stop("bad input for argument 'lambda'")
L = max(length(x), length(lambda))
x = rep(x, len=L); lambda = rep(lambda, len=L);
ans = if (log.arg) {
- ifelse(x == 0, log(0.0), dpois(x, lambda, log=TRUE) -
+ ifelse(x == 0, log(0.0), dpois(x, lambda, log = TRUE) -
log1p(-exp(-lambda)))
} else {
ifelse(x == 0, 0, -dpois(x, lambda) / expm1(-lambda))
@@ -285,7 +305,7 @@ dpospois = function(x, lambda, log=FALSE) {
ppospois = function(q, lambda) {
- if (!is.Numeric(lambda, posit=TRUE))
+ if (!is.Numeric(lambda, posit = TRUE))
stop("bad input for argument 'lambda'")
L = max(length(q), length(lambda))
q = rep(q, len=L); lambda = rep(lambda, len=L);
@@ -293,9 +313,9 @@ ppospois = function(q, lambda) {
}
qpospois = function(p, lambda) {
- if (!is.Numeric(lambda, posit=TRUE))
+ if (!is.Numeric(lambda, posit = TRUE))
stop("bad input for argument 'lambda'")
- if (!is.Numeric(p, posit=TRUE) || any(p >= 1))
+ if (!is.Numeric(p, posit = TRUE) || any(p >= 1))
stop("bad input for argument 'p'")
qpois(p * (-expm1(-lambda)) + exp(-lambda), lambda)
}
@@ -303,7 +323,7 @@ qpospois = function(p, lambda) {
rpospois = function(n, lambda) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
stop("bad input for argument 'n'") else n
if (any(lambda == 0))
@@ -322,159 +342,184 @@ rpospois = function(n, lambda) {
- pospoisson = function(link="loge", earg=list(), expected=TRUE,
- ilambda=NULL, method.init=1)
+ pospoisson = function(link = "loge", earg = list(), expected = TRUE,
+ ilambda = NULL, method.init = 1)
{
- if (!missing(link))
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
- if (!is.logical(expected) || length(expected) != 1)
- stop("bad input for argument 'expected'")
- if (length( ilambda) && !is.Numeric(ilambda, posit=TRUE))
- stop("bad input for argument 'ilambda'")
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 3) stop("argument 'method.init' must be 1 or 2 or 3")
- new("vglmff",
- blurb=c("Positive-Poisson distribution\n\n",
- "Links: ",
- namesof("lambda", link, earg= earg, tag=FALSE),
- "\n"),
- initialize=eval(substitute(expression({
- y = as.matrix(y)
- if (any(y < 1))
- stop("all y values must be in 1,2,3,...")
- if (any(y != round(y )))
- stop("the response must be integer-valued")
-
- predictors.names = namesof(if(ncol(y) == 1) "lambda"
- else paste("lambda", 1:ncol(y), sep=""), .link,
- earg= .earg, tag=FALSE)
- if ( .method.init == 1) {
- lambda.init = apply(y, 2, median) + 1/8
- lambda.init = matrix(lambda.init, n, ncol(y), byrow=TRUE)
- } else if ( .method.init == 2) {
- lambda.init = apply(y, 2, weighted.mean, w=w) + 1/8
- lambda.init = matrix(lambda.init, n, ncol(y), byrow=TRUE)
- } else {
- lambda.init = -y / expm1(-y)
- }
- if (length( .ilambda))
- lambda.init = lambda.init * 0 + .ilambda
- if (!length(etastart))
- etastart = theta2eta(lambda.init, .link, earg= .earg)
- }), list( .link = link, .earg = earg,
- .ilambda = ilambda, .method.init = method.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- lambda = eta2theta(eta, .link, earg = .earg )
- -lambda / expm1(-lambda)
- }, list( .link=link, .earg= earg ))),
- last=eval(substitute(expression({
- misc$expected = .expected
- misc$link = rep( .link, len=M)
- names(misc$link) = if (M == 1) "lambda" else paste("lambda", 1:M, sep="")
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for(ii in 1:M)
- misc$earg[[ii]] = .earg
- }), list( .link=link, .earg= earg, .expected=expected ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals=FALSE, eta,extra=NULL) {
- lambda = eta2theta(eta, .link, earg= .earg )
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dpospois(x = y, lambda = lambda, log = TRUE))
- }
- }, list( .link = link, .earg = earg ))),
- vfamily=c("pospoisson"),
- deriv=eval(substitute(expression({
- lambda = eta2theta(eta, .link, earg= .earg )
- temp6 = expm1(lambda)
- dl.dlambda = y/lambda - 1 - 1 / temp6
- dlambda.deta = dtheta.deta(lambda, .link, earg = .earg )
- w * dl.dlambda * dlambda.deta
- }), list( .link = link, .earg = earg ))),
- weight=eval(substitute(expression({
- if ( .expected ) {
- ed2l.dlambda2 = (temp6 + 1) * (1/lambda - 1/temp6) / temp6
- wz = (dlambda.deta^2) * ed2l.dlambda2
- } else {
- d2l.dlambda2 = y / lambda^2 - (temp6 + 1) / temp6^2
- d2lambda.deta2 = d2theta.deta2(lambda, .link, earg = .earg)
- wz = (dlambda.deta^2) * d2l.dlambda2 - dl.dlambda * d2lambda.deta2
- }
- w * wz
- }), list( .link = link, .earg = earg, .expected = expected ))))
+ if (!missing(link))
+ link <- as.character(substitute(link))
+ if (!is.list(earg)) earg <- list()
+
+ if (!is.logical(expected) || length(expected) != 1)
+ stop("bad input for argument 'expected'")
+ if (length( ilambda) && !is.Numeric(ilambda, posit = TRUE))
+ stop("bad input for argument 'ilambda'")
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+ method.init > 3) stop("argument 'method.init' must be 1 or 2 or 3")
+
+ new("vglmff",
+ blurb = c("Positive-Poisson distribution\n\n",
+ "Links: ",
+ namesof("lambda", link, earg = earg, tag = FALSE)),
+ initialize = eval(substitute(expression({
+ y <- as.matrix(y)
+
+ if (any(y < 1))
+ stop("all y values must be in 1,2,3,...")
+ if (any(y != round(y )))
+ stop("the response must be integer-valued")
+
+ predictors.names <- namesof(
+ paste("lambda", if (ncol(y) > 1) 1:ncol(y) else "", sep = ""),
+ .link, earg = .earg, tag = FALSE)
+
+ if ( .method.init == 1) {
+ lambda.init <- apply(y, 2, median) + 1/8
+ lambda.init <- matrix(lambda.init, n, ncol(y), byrow = TRUE)
+ } else if ( .method.init == 2) {
+ lambda.init <- apply(y, 2, weighted.mean, w=w) + 1/8
+ lambda.init <- matrix(lambda.init, n, ncol(y), byrow = TRUE)
+ } else {
+ lambda.init <- -y / expm1(-y)
+ }
+ if (length( .ilambda))
+ lambda.init <- lambda.init * 0 + .ilambda
+ if (!length(etastart))
+ etastart <- theta2eta(lambda.init, .link, earg = .earg)
+ }), list( .link = link, .earg = earg,
+ .ilambda = ilambda, .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ lambda <- eta2theta(eta, .link, earg = .earg )
+ -lambda / expm1(-lambda)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$expected <- .expected
+ misc$link <- rep( .link, len = M)
+ names(misc$link) <- if (M == 1) "lambda" else
+ paste("lambda", 1:M, sep = "")
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for(ii in 1:M)
+ misc$earg[[ii]] <- .earg
+ }), list( .link = link, .earg = earg, .expected = expected ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ lambda <- eta2theta(eta, .link, earg = .earg )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ sum(w * dpospois(x = y, lambda = lambda, log = TRUE))
+ }
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("pospoisson"),
+ deriv = eval(substitute(expression({
+ lambda <- eta2theta(eta, .link, earg = .earg )
+ temp6 <- expm1(lambda)
+ dl.dlambda <- y / lambda - 1 - 1 / temp6
+ dlambda.deta <- dtheta.deta(lambda, .link, earg = .earg )
+ w * dl.dlambda * dlambda.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ if ( .expected ) {
+ ed2l.dlambda2 <- (temp6 + 1) * (1/lambda - 1/temp6) / temp6
+ wz <- (dlambda.deta^2) * ed2l.dlambda2
+ } else {
+ d2l.dlambda2 <- y / lambda^2 - (temp6 + 1) / temp6^2
+ d2lambda.deta2 <- d2theta.deta2(lambda, .link, earg = .earg)
+ wz <- (dlambda.deta^2) * d2l.dlambda2 - dl.dlambda * d2lambda.deta2
+ }
+ w * wz
+ }), list( .link = link, .earg = earg, .expected = expected ))))
}
- posbinomial = function(link="logit", earg=list()) {
+ posbinomial = function(link = "logit", earg = list()) {
if (!missing(link))
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
new("vglmff",
- blurb=c("Positive-Binomial distribution\n\n",
- "Links: ",
- namesof("p", link, earg= earg, tag=FALSE), "\n"),
- initialize=eval(substitute(expression({
- eval(binomialff(link= .link)@initialize)
- yint = round(y*w)
- if (max(abs(yint - y*w)) > 0.0001)
- warning("rounding y*w to an integer")
- if (any(y <= 0))
- stop("the response must only contain positive values")
- predictors.names = namesof("p", .link, earg= .earg , tag=FALSE)
- if(length(extra)) extra$w = w else extra = list(w=w)
- if (!length(etastart))
- etastart = cbind(theta2eta(mustart, .link, earg= .earg ))
- }), list( .link = link, .earg=earg ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- mymu = eta2theta(eta, .link, earg= .earg )
- mymu / (1-(1-mymu)^(extra$w))
+ blurb = c("Positive-binomial distribution\n\n",
+ "Links: ",
+ namesof("prob", link, earg = earg, tag = FALSE), "\n"),
+ initialize = eval(substitute(expression({
+
+ mustart.orig = mustart
+ eval(binomialff(link = .link, earg = .earg )@initialize)
+ predictors.names = namesof("prob", .link, earg = .earg , tag = FALSE)
+ if (length(extra)) extra$w = w else extra = list(w = w)
+ if (!length(etastart)) {
+ mustart.use = if (length(mustart.orig)) mustart.orig else mustart
+ etastart = cbind(theta2eta(mustart.use, .link, earg = .earg ))
+ }
+ mustart = NULL
+ }), list( .link = link, .earg = earg ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ w = extra$w
+ mymu = eta2theta(eta, .link, earg = .earg )
+ nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+ mymu / (1-(1-mymu)^(nvec))
},
- list(.link=link, .earg=earg ))),
- last=eval(substitute(expression({
+ list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
extra$w = NULL # Kill it off
- misc$link = c(p = .link)
- misc$earg = list(p = .earg )
- }), list( .link=link, .earg=earg ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
- yint = round(y * w)
- mymu = eta2theta(eta, .link, earg= .earg )
- if (max(abs(w - round(w))) > 0.0001) {
- warning("rounding w to an integer")
- w = round(w)
- }
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dposbinom(x=yint, size=w, prob=mymu, log=TRUE))
+ misc$link = c(prob = .link)
+ misc$earg = list(prob = .earg )
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+
+ ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+ use.orig.w = if (is.numeric(extra$orig.w)) extra$orig.w else 1
+ mymu = eta2theta(eta, .link, earg = .earg )
+
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+
+ sum(use.orig.w * dposbinom(x = ycounts, size = nvec,
+ prob = mymu, log = TRUE))
}
- }, list( .link=link, .earg=earg ))),
- vfamily=c("posbinomial"),
- deriv=eval(substitute(expression({
- yint = round(y * w)
- mymu = eta2theta(eta, .link, earg= .earg )
- dl.dmymu = yint / mymu - (w-yint) / (1-mymu) -
- w * (1-mymu)^(w-1) / (1-(1-mymu)^w)
- dmymu.deta = dtheta.deta(mymu, .link, earg= .earg )
- dl.dmymu * dmymu.deta
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
- temp = 1 - (1-mymu)^w
- temp2 = (1-mymu)^2
- ed2l.dmymu2 = -w/(mymu*temp) - w/temp2 + w*mymu/(temp2*temp) +
- w * (w-1) * (1-mymu)^(w-2) /temp +
- w^2 * temp2^(w-1) / temp^2
- wz = -(dmymu.deta^2) * ed2l.dmymu2
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("posbinomial"),
+ deriv = eval(substitute(expression({
+ use.orig.w = if (is.numeric(extra$orig.w)) extra$orig.w else
+ rep(1, n)
+ nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+
+ mymu = eta2theta(eta, .link, earg = .earg )
+ dl.dmymu = y / mymu - (1 - y) / (1 - mymu) -
+ ((1 - mymu)^(nvec-1)) / (1 - (1 - mymu)^nvec)
+
+ dmymu.deta = dtheta.deta(mymu, .link, earg = .earg )
+ w * dl.dmymu * dmymu.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ temp1 = 1 - (1-mymu)^nvec
+ temp2 = (1-mymu)^2
+ ed2l.dmymu2 = -1/(mymu*temp1) - 1/temp2 + mymu/(temp2*temp1) +
+ (nvec-1) * (1-mymu)^(nvec-2) / temp1 +
+ nvec * (temp2^(nvec-1)) / temp1^2
+ wz = -w * (dmymu.deta^2) * ed2l.dmymu2
wz
- }), list( .link=link, .earg=earg ))))
+ }), list( .link = link, .earg = earg ))))
}
+
+
+
+
+
+
dposbinom = function(x, size, prob, log = FALSE) {
log.arg = log
rm(log)
@@ -484,7 +529,7 @@ dposbinom = function(x, size, prob, log = FALSE) {
answer = NaN * x
is0 <- (x == 0)
ok2 <- prob > 0 & prob <= 1 & size == round(size) & size > 0
- answer = dbinom(x=x, size=size, prob=prob, log=TRUE) -
+ answer = dbinom(x = x, size = size, prob = prob, log = TRUE) -
log1p(-dbinom(x=0*x, size=size, prob=prob))
answer[!ok2] = NaN
if (log.arg) {
@@ -497,7 +542,7 @@ dposbinom = function(x, size, prob, log = FALSE) {
}
pposbinom = function(q, size, prob, lower.tail = TRUE, log.p = FALSE) {
- if (!is.Numeric(prob, positive=TRUE))
+ if (!is.Numeric(prob, positive = TRUE))
stop("no zero or non-numeric values allowed for argument 'prob'")
L = max(length(q), length(size), length(prob))
q = rep(q, len=L); size = rep(size, len=L); prob = rep(prob, len=L);
@@ -506,9 +551,9 @@ pposbinom = function(q, size, prob, lower.tail = TRUE, log.p = FALSE) {
}
qposbinom = function(p, size, prob, lower.tail = TRUE, log.p = FALSE) {
- if (!is.Numeric(prob, positive=TRUE))
+ if (!is.Numeric(prob, positive = TRUE))
stop("no zero or non-numeric values allowed for argument 'prob'")
- if (!is.Numeric(p, posit=TRUE) || any(p >= 1))
+ if (!is.Numeric(p, posit = TRUE) || any(p >= 1))
stop("bad input for argument 'p'")
qbinom(p=p * (1 - (1-prob)^size) + (1-prob)^size, size=size, prob=prob,
lower.tail=lower.tail, log.p=log.p)
@@ -516,7 +561,7 @@ qposbinom = function(p, size, prob, lower.tail = TRUE, log.p = FALSE) {
rposbinom = function(n, size, prob) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
stop("bad input for argument 'n'") else n
if (any(prob == 0))
diff --git a/R/family.qreg.R b/R/family.qreg.R
index e4a98ea..615cab2 100644
--- a/R/family.qreg.R
+++ b/R/family.qreg.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -17,19 +18,19 @@
lms.bcn.control <-
lms.bcg.control <-
-lms.yjn.control <- function(trace=TRUE, ...)
+lms.yjn.control <- function(trace = TRUE, ...)
list(trace=trace)
- lms.bcn <- function(percentiles=c(25,50,75),
- zero=c(1,3),
- llambda="identity",
- lmu="identity",
- lsigma="loge",
- elambda=list(), emu=list(), esigma=list(),
+ lms.bcn <- function(percentiles = c(25,50,75),
+ zero = c(1,3),
+ llambda = "identity",
+ lmu = "identity",
+ lsigma = "loge",
+ elambda = list(), emu=list(), esigma = list(),
dfmu.init=4,
dfsigma.init=2,
ilambda=1,
@@ -52,12 +53,12 @@ lms.yjn.control <- function(trace=TRUE, ...)
stop("bad input for argument 'expectiles'")
new("vglmff",
- blurb=c("LMS ", if (expectiles) "Expectile" else "Quantile",
+ blurb = c("LMS ", if (expectiles) "Expectile" else "Quantile",
" Regression (Box-Cox transformation to normality)\n",
"Links: ",
- namesof("lambda", link=llambda, earg= elambda), ", ",
- namesof("mu", link=lmu, earg= emu), ", ",
- namesof("sigma", link=lsigma, earg= esigma)),
+ namesof("lambda", link=llambda, earg = elambda), ", ",
+ namesof("mu", link=lmu, earg = emu), ", ",
+ namesof("sigma", link=lsigma, earg = esigma)),
constraints=eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list(.zero=zero))),
@@ -68,50 +69,52 @@ lms.yjn.control <- function(trace=TRUE, ...)
stop("negative responses not allowed")
predictors.names =
- c(namesof("lambda", .llambda, earg= .elambda, short= TRUE),
- namesof("mu", .lmu, earg= .emu, short= TRUE),
- namesof("sigma", .lsigma, earg= .esigma, short= TRUE))
+ c(namesof("lambda", .llambda, earg = .elambda, short= TRUE),
+ namesof("mu", .lmu, earg = .emu, short= TRUE),
+ namesof("sigma", .lsigma, earg = .esigma, short= TRUE))
if (!length(etastart)) {
- fit500=vsmooth.spline(x=x[,min(ncol(x),2)],y=y,w=w, df= .dfmu.init)
- fv.init = c(predict(fit500, x=x[,min(ncol(x),2)])$y)
+ Fit5 = vsmooth.spline(x = x[, min(ncol(x), 2)],
+ y = y, w = w, df = .dfmu.init)
+ fv.init = c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
lambda.init = if (is.Numeric( .ilambda)) .ilambda else 1.0
sigma.init = if (is.null(.isigma)) {
myratio = ((y/fv.init)^lambda.init - 1) / lambda.init
if (is.Numeric( .dfsigma.init)) {
- fit600 = vsmooth.spline(x=x[,min(ncol(x),2)], y=myratio^2,
- w=w, df= .dfsigma.init)
- sqrt(c(abs(predict(fit600, x=x[,min(ncol(x),2)])$y)))
+ fit600 = vsmooth.spline(x = x[, min(ncol(x), 2)],
+ y = myratio^2,
+ w = w, df = .dfsigma.init)
+ sqrt(c(abs(predict(fit600, x = x[, min(ncol(x), 2)])$y)))
} else
sqrt(var(myratio))
} else .isigma
- etastart = cbind(theta2eta(lambda.init, .llambda, earg= .elambda),
- theta2eta(fv.init, .lmu, earg= .emu),
- theta2eta(sigma.init, .lsigma, earg= .esigma))
+ etastart = cbind(theta2eta(lambda.init, .llambda, earg = .elambda),
+ theta2eta(fv.init, .lmu, earg = .emu),
+ theta2eta(sigma.init, .lsigma, earg = .esigma))
}
- }), list( .llambda=llambda, .lmu=lmu, .lsigma=lsigma,
- .elambda=elambda, .emu=emu, .esigma=esigma,
- .dfmu.init=dfmu.init,
- .dfsigma.init=dfsigma.init,
- .ilambda=ilambda, .isigma=isigma ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- eta[,1] = eta2theta(eta[,1], .llambda, earg= .elambda)
- eta[,2] = eta2theta(eta[,2], .lmu, earg= .emu)
- eta[,3] = eta2theta(eta[,3], .lsigma, earg= .esigma)
+ }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma,
+ .dfmu.init = dfmu.init,
+ .dfsigma.init = dfsigma.init,
+ .ilambda = ilambda, .isigma = isigma ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ eta[,1] = eta2theta(eta[,1], .llambda, earg = .elambda)
+ eta[,2] = eta2theta(eta[,2], .lmu, earg = .emu)
+ eta[,3] = eta2theta(eta[,3], .lsigma, earg = .esigma)
if ( .expectiles ) {
explot.lms.bcn(percentiles= .percentiles, eta=eta)
} else {
qtplot.lms.bcn(percentiles= .percentiles, eta=eta)
}
- }, list( .llambda=llambda, .lmu=lmu, .lsigma=lsigma,
- .elambda=elambda, .emu=emu, .esigma=esigma,
+ }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma,
.percentiles=percentiles, .expectiles = expectiles ))),
- last=eval(substitute(expression({
+ last = eval(substitute(expression({
misc$percentiles = .percentiles
- misc$links = c(lambda = .llambda, mu = .lmu, sigma = .lsigma)
+ misc$links = c(lambda = .llambda, mu = .lmu, sigma = .lsigma)
misc$earg = list(lambda = .elambda, mu = .emu, sigma = .esigma)
misc$true.mu = FALSE # $fitted is not a true mu
misc$expectiles = .expectiles
@@ -119,44 +122,45 @@ lms.yjn.control <- function(trace=TRUE, ...)
post$cdf = cdf.lms.bcn(y, eta0=matrix(c(lambda,mymu,sigma),
ncol=3, dimnames=list(dimnames(x)[[1]], NULL)))
}
- }), list( .llambda=llambda, .lmu=lmu, .lsigma=lsigma,
- .elambda=elambda, .emu=emu, .esigma=esigma,
+ }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma,
.percentiles=percentiles, .expectiles = expectiles ))),
loglikelihood=eval(substitute(
- function(mu,y,w, residuals= FALSE, eta, extra=NULL) {
- lambda = eta2theta(eta[,1], .llambda, earg= .elambda)
- mu = eta2theta(eta[,2], .lmu, earg= .emu)
- sigma = eta2theta(eta[,3], .lsigma, earg= .esigma)
+ function(mu,y, w, residuals= FALSE, eta, extra = NULL) {
+ lambda = eta2theta(eta[,1], .llambda, earg = .elambda)
+ mu = eta2theta(eta[,2], .lmu, earg = .emu)
+ sigma = eta2theta(eta[,3], .lsigma, earg = .esigma)
zedd = ((y/mu)^lambda - 1) / (lambda * sigma)
- if (residuals) stop("loglikelihood residuals not implemented") else {
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented") else {
use.this = (lambda * log(y / mu) - log(sigma) - log(y) +
dnorm(zedd, log = TRUE))
use.this[abs(lambda) < 0.001] = (-log(y / mu) - log(sigma) +
dnorm(zedd, log = TRUE))[abs(lambda) < 0.001]
sum(w * use.this)
}
- }, list( .llambda=llambda, .lmu=lmu, .lsigma=lsigma,
- .elambda=elambda, .emu=emu, .esigma=esigma ))),
- vfamily=c("lms.bcn", "lmscreg"),
- deriv=eval(substitute(expression({
- lambda = eta2theta(eta[,1], .llambda, earg= .elambda)
- mymu = eta2theta(eta[,2], .lmu, earg= .emu)
- sigma = eta2theta(eta[,3], .lsigma, earg= .esigma)
+ }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma ))),
+ vfamily = c("lms.bcn", "lmscreg"),
+ deriv = eval(substitute(expression({
+ lambda = eta2theta(eta[,1], .llambda, earg = .elambda)
+ mymu = eta2theta(eta[,2], .lmu, earg = .emu)
+ sigma = eta2theta(eta[,3], .lsigma, earg = .esigma)
zedd = ((y/mymu)^lambda - 1) / (lambda * sigma)
z2m1 = zedd * zedd - 1
dl.dlambda = zedd*(zedd - log(y/mymu) / sigma) / lambda -
z2m1 * log(y/mymu)
dl.dmu = zedd / (mymu * sigma) + z2m1 * lambda / mymu
dl.dsigma = z2m1 / sigma
- dlambda.deta = dtheta.deta(lambda, .llambda, earg= .elambda)
- dmu.deta = dtheta.deta(mymu, .lmu, earg= .emu)
- dsigma.deta = dtheta.deta(sigma, .lsigma, earg= .esigma)
+ dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda)
+ dmu.deta = dtheta.deta(mymu, .lmu, earg = .emu)
+ dsigma.deta = dtheta.deta(sigma, .lsigma, earg = .esigma)
w * cbind(dl.dlambda * dlambda.deta,
dl.dmu * dmu.deta,
dl.dsigma * dsigma.deta)
- }), list( .llambda=llambda, .lmu=lmu, .lsigma=lsigma,
- .elambda=elambda, .emu=emu, .esigma=esigma ))),
- weight=eval(substitute(expression({
+ }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma ))),
+ weight = eval(substitute(expression({
wz = matrix(as.numeric(NA), n, 6)
wz[,iam(1,1,M)] = (7 * sigma^2 / 4) * dlambda.deta^2
wz[,iam(2,2,M)] = (1 + 2*(lambda*sigma)^2)/(mymu*sigma)^2 * dmu.deta^2
@@ -165,18 +169,18 @@ lms.yjn.control <- function(trace=TRUE, ...)
wz[,iam(1,3,M)] = (lambda * sigma) * dlambda.deta * dsigma.deta
wz[,iam(2,3,M)] = (2*lambda/(mymu * sigma)) * dmu.deta * dsigma.deta
wz * w
- }), list( .llambda=llambda, .lmu=lmu, .lsigma=lsigma,
- .elambda=elambda, .emu=emu, .esigma=esigma ))))
+ }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma ))))
}
- lms.bcg = function(percentiles=c(25,50,75),
- zero=c(1,3),
- llambda="identity",
- lmu="identity",
- lsigma="loge",
- elambda=list(), emu=list(), esigma=list(),
+ lms.bcg = function(percentiles = c(25,50,75),
+ zero = c(1,3),
+ llambda = "identity",
+ lmu = "identity",
+ lsigma = "loge",
+ elambda = list(), emu=list(), esigma = list(),
dfmu.init=4,
dfsigma.init=2,
ilambda=1,
@@ -197,12 +201,12 @@ lms.yjn.control <- function(trace=TRUE, ...)
stop("bad input for argument 'isigma'")
new("vglmff",
- blurb=c("LMS Quantile Regression ",
+ blurb = c("LMS Quantile Regression ",
"(Box-Cox transformation to a Gamma distribution)\n",
"Links: ",
- namesof("lambda", link=llambda, earg= elambda), ", ",
- namesof("mu", link=lmu, earg= emu), ", ",
- namesof("sigma", link=lsigma, earg= esigma)),
+ namesof("lambda", link=llambda, earg = elambda), ", ",
+ namesof("mu", link=lmu, earg = emu), ", ",
+ namesof("sigma", link=lsigma, earg = esigma)),
constraints=eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list(.zero=zero))),
@@ -213,74 +217,76 @@ lms.yjn.control <- function(trace=TRUE, ...)
stop("negative responses not allowed")
predictors.names = c(
- namesof("lambda", .llambda, earg= .elambda, short=TRUE),
- namesof("mu", .lmu, earg= .emu, short=TRUE),
- namesof("sigma", .lsigma, earg= .esigma, short=TRUE))
+ namesof("lambda", .llambda, earg = .elambda, short=TRUE),
+ namesof("mu", .lmu, earg = .emu, short=TRUE),
+ namesof("sigma", .lsigma, earg = .esigma, short=TRUE))
if (!length(etastart)) {
- fit500=vsmooth.spline(x=x[,min(ncol(x),2)],y=y,w=w, df= .dfmu.init)
- fv.init = c(predict(fit500, x=x[,min(ncol(x),2)])$y)
+ Fit5 = vsmooth.spline(x = x[, min(ncol(x), 2)],
+ y = y, w = w, df = .dfmu.init)
+ fv.init = c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
lambda.init = if (is.Numeric( .ilambda)) .ilambda else 1.0
sigma.init = if (is.null(.isigma)) {
myratio=((y/fv.init)^lambda.init-1)/lambda.init #~(0,var=sigma^2)
if (is.numeric( .dfsigma.init) && is.finite( .dfsigma.init)) {
- fit600 = vsmooth.spline(x=x[,min(ncol(x),2)],
+ fit600 = vsmooth.spline(x = x[, min(ncol(x), 2)],
y=(myratio)^2,
- w=w, df= .dfsigma.init)
- sqrt(c(abs(predict(fit600, x=x[,min(ncol(x),2)])$y)))
+ w = w, df = .dfsigma.init)
+ sqrt(c(abs(predict(fit600, x = x[, min(ncol(x), 2)])$y)))
} else
sqrt(var(myratio))
} else .isigma
- etastart = cbind(theta2eta(lambda.init, .llambda, earg= .elambda),
- theta2eta(fv.init, .lmu, earg= .emu),
- theta2eta(sigma.init, .lsigma, earg= .esigma))
+ etastart = cbind(theta2eta(lambda.init, .llambda, earg = .elambda),
+ theta2eta(fv.init, .lmu, earg = .emu),
+ theta2eta(sigma.init, .lsigma, earg = .esigma))
}
- }), list( .llambda=llambda, .lmu=lmu, .lsigma=lsigma,
- .elambda=elambda, .emu=emu, .esigma=esigma,
- .dfmu.init=dfmu.init,
- .dfsigma.init=dfsigma.init,
- .ilambda=ilambda, .isigma=isigma ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- eta[,1] = eta2theta(eta[,1], .llambda, earg= .elambda)
- eta[,2] = eta2theta(eta[,2], .lmu, earg= .emu)
- eta[,3] = eta2theta(eta[,3], .lsigma, earg= .esigma)
+ }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma,
+ .dfmu.init = dfmu.init,
+ .dfsigma.init = dfsigma.init,
+ .ilambda = ilambda, .isigma = isigma ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ eta[,1] = eta2theta(eta[,1], .llambda, earg = .elambda)
+ eta[,2] = eta2theta(eta[,2], .lmu, earg = .emu)
+ eta[,3] = eta2theta(eta[,3], .lsigma, earg = .esigma)
qtplot.lms.bcg(percentiles= .percentiles, eta=eta)
- }, list( .llambda=llambda, .lmu=lmu, .lsigma=lsigma,
- .elambda=elambda, .emu=emu, .esigma=esigma,
+ }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma,
.percentiles=percentiles ))),
- last=eval(substitute(expression({
+ last = eval(substitute(expression({
misc$percentiles = .percentiles
- misc$link = c(lambda = .llambda, mu = .lmu, sigma = .lsigma)
+ misc$link = c(lambda = .llambda, mu = .lmu, sigma = .lsigma)
misc$earg = list(lambda = .elambda, mu = .emu, sigma = .esigma)
misc$true.mu = FALSE # $fitted is not a true mu
if (control$cdf) {
post$cdf = cdf.lms.bcg(y, eta0=matrix(c(lambda,mymu,sigma),
ncol=3, dimnames=list(dimnames(x)[[1]], NULL)))
}
- }), list( .llambda=llambda, .lmu=lmu, .lsigma=lsigma,
- .elambda=elambda, .emu=emu, .esigma=esigma,
+ }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma,
.percentiles=percentiles ))),
loglikelihood=eval(substitute(
- function(mu,y,w, residuals= FALSE, eta, extra=NULL) {
- lambda = eta2theta(eta[,1], .llambda, earg= .elambda)
- mu = eta2theta(eta[,2], .lmu, earg= .emu)
- sigma = eta2theta(eta[,3], .lsigma, earg= .esigma)
+ function(mu,y, w, residuals= FALSE, eta, extra = NULL) {
+ lambda = eta2theta(eta[,1], .llambda, earg = .elambda)
+ mu = eta2theta(eta[,2], .lmu, earg = .emu)
+ sigma = eta2theta(eta[,3], .lsigma, earg = .esigma)
Gee = (y / mu)^lambda
theta = 1 / (sigma * lambda)^2
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
sum(w * (log(abs(lambda)) + theta * (log(theta) +
log(Gee)-Gee) - lgamma(theta) - log(y)))
- }, list( .llambda=llambda, .lmu=lmu, .lsigma=lsigma,
- .elambda=elambda, .emu=emu, .esigma=esigma ))),
- vfamily=c("lms.bcg", "lmscreg"),
- deriv=eval(substitute(expression({
- lambda = eta2theta(eta[,1], .llambda, earg= .elambda)
- mymu = eta2theta(eta[,2], .lmu, earg= .emu)
- sigma = eta2theta(eta[,3], .lsigma, earg= .esigma)
+ }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma ))),
+ vfamily = c("lms.bcg", "lmscreg"),
+ deriv = eval(substitute(expression({
+ lambda = eta2theta(eta[,1], .llambda, earg = .elambda)
+ mymu = eta2theta(eta[,2], .lmu, earg = .emu)
+ sigma = eta2theta(eta[,3], .lsigma, earg = .esigma)
Gee = (y / mymu)^lambda
theta = 1 / (sigma * lambda)^2
@@ -290,16 +296,16 @@ lms.yjn.control <- function(trace=TRUE, ...)
0.5 * (Gee + 1) * log(Gee))) / lambda
dl.dmu = lambda * theta * (Gee-1) / mymu
dl.dsigma = 2*theta*(dd + Gee - log(theta * Gee)-1) / sigma
- dlambda.deta = dtheta.deta(lambda, link=.llambda, earg= .elambda)
- dmu.deta = dtheta.deta(mymu, link=.lmu, earg= .emu)
- dsigma.deta = dtheta.deta(sigma, link=.lsigma, earg= .esigma)
+ dlambda.deta = dtheta.deta(lambda, link = .llambda, earg = .elambda)
+ dmu.deta = dtheta.deta(mymu, link = .lmu, earg = .emu)
+ dsigma.deta = dtheta.deta(sigma, link = .lsigma, earg = .esigma)
cbind(dl.dlambda * dlambda.deta,
dl.dmu * dmu.deta,
dl.dsigma * dsigma.deta) * w
- }), list( .llambda=llambda, .lmu=lmu, .lsigma=lsigma,
- .elambda=elambda, .emu=emu, .esigma=esigma ))),
- weight=eval(substitute(expression({
+ }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma ))),
+ weight = eval(substitute(expression({
tritheta = trigamma(theta)
wz = matrix(0, n, 6)
@@ -323,8 +329,8 @@ lms.yjn.control <- function(trace=TRUE, ...)
wz[,iam(1,3,M)] = 2 * theta^1.5 * (2 * theta * tritheta - 2 -
1 / theta) * dlambda.deta * dsigma.deta
wz * w
- }), list( .llambda=llambda, .lmu=lmu, .lsigma=lsigma,
- .elambda=elambda, .emu=emu, .esigma=esigma ))))
+ }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma ))))
}
@@ -378,7 +384,7 @@ dyj.dy.yeojohnson = function(y, lambda) {
if (any(index <- y < 0 & abs(lambda-2) <= epsilon))
ans[index] = -log1p(-y[index])
} else {
- psi <- Recall(y=y, lambda=lambda, derivative=derivative-1,
+ psi <- Recall(y = y, lambda=lambda, derivative=derivative-1,
epsilon=epsilon, inverse=inverse)
if (any(index <- y >= 0 & abs(lambda) > epsilon))
ans[index] = ( (y[index]+1)^(lambda[index]) *
@@ -554,12 +560,12 @@ lms.yjn2.control <- function(save.weight=TRUE, ...)
list(save.weight=save.weight)
}
- lms.yjn2 = function(percentiles=c(25,50,75),
- zero=c(1,3),
- llambda="identity",
- lmu="identity",
- lsigma="loge",
- elambda=list(), emu=list(), esigma=list(),
+ lms.yjn2 = function(percentiles = c(25,50,75),
+ zero = c(1,3),
+ llambda = "identity",
+ lmu = "identity",
+ lsigma = "loge",
+ elambda = list(), emu=list(), esigma = list(),
dfmu.init=4,
dfsigma.init=2,
ilambda=1.0,
@@ -583,14 +589,14 @@ lms.yjn2.control <- function(save.weight=TRUE, ...)
stop("bad input for argument 'isigma'")
new("vglmff",
- blurb=c("LMS Quantile Regression (Yeo-Johnson transformation",
+ blurb = c("LMS Quantile Regression (Yeo-Johnson transformation",
" to normality)\n",
"Links: ",
- namesof("lambda", link=llambda, earg= elambda),
+ namesof("lambda", link=llambda, earg = elambda),
", ",
- namesof("mu", link=lmu, earg= emu),
+ namesof("mu", link=lmu, earg = emu),
", ",
- namesof("sigma", link=lsigma, earg= esigma)),
+ namesof("sigma", link=lsigma, earg = esigma)),
constraints=eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list(.zero=zero))),
@@ -598,9 +604,9 @@ lms.yjn2.control <- function(save.weight=TRUE, ...)
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("lambda", .llambda, earg= .elambda, short= TRUE),
- namesof("mu", .lmu, earg= .emu, short= TRUE),
- namesof("sigma", .lsigma, earg= .esigma, short= TRUE))
+ c(namesof("lambda", .llambda, earg = .elambda, short= TRUE),
+ namesof("mu", .lmu, earg = .emu, short= TRUE),
+ namesof("sigma", .lsigma, earg = .esigma, short= TRUE))
y.save = y
yoff = if (is.Numeric( .yoffset)) .yoffset else -median(y)
@@ -612,21 +618,21 @@ lms.yjn2.control <- function(save.weight=TRUE, ...)
y.tx = yeo.johnson(y, lambda.init)
fv.init =
- if (smoothok <- (length(unique(sort(x[,min(ncol(x),2)]))) > 7)) {
- fit700=vsmooth.spline(x=x[,min(ncol(x),2)],
- y=y.tx, w=w, df= .dfmu.init)
- c(predict(fit700, x=x[,min(ncol(x),2)])$y)
+ if (smoothok <- (length(unique(sort(x[, min(ncol(x), 2)]))) > 7)) {
+ fit700 = vsmooth.spline(x = x[, min(ncol(x), 2)],
+ y=y.tx, w = w, df = .dfmu.init)
+ c(predict(fit700, x = x[, min(ncol(x), 2)])$y)
} else {
rep(weighted.mean(y, w), len=n)
}
sigma.init = if (!is.Numeric(.isigma)) {
if (is.Numeric( .dfsigma.init) && smoothok) {
- fit710 = vsmooth.spline(x=x[,min(ncol(x),2)],
- y=(y.tx - fv.init)^2,
- w=w, df= .dfsigma.init)
+ fit710 = vsmooth.spline(x = x[, min(ncol(x), 2)],
+ y = (y.tx - fv.init)^2,
+ w = w, df = .dfsigma.init)
sqrt(c(abs(predict(fit710,
- x=x[,min(ncol(x),2)])$y)))
+ x = x[, min(ncol(x), 2)])$y)))
} else {
sqrt( sum( w * (y.tx - fv.init)^2 ) / sum(w) )
}
@@ -634,31 +640,31 @@ lms.yjn2.control <- function(save.weight=TRUE, ...)
.isigma
etastart = matrix(0, n, 3)
- etastart[,1] = theta2eta(lambda.init, .llambda, earg=.elambda)
- etastart[,2] = theta2eta(fv.init, .lmu, earg=.emu)
- etastart[,3] = theta2eta(sigma.init, .lsigma, earg=.esigma)
+ etastart[,1] = theta2eta(lambda.init, .llambda, earg = .elambda)
+ etastart[,2] = theta2eta(fv.init, .lmu, earg = .emu)
+ etastart[,3] = theta2eta(sigma.init, .lsigma, earg = .esigma)
}
- }), list(.llambda=llambda, .lmu=lmu, .lsigma=lsigma,
- .elambda=elambda, .emu=emu, .esigma=esigma,
- .dfmu.init=dfmu.init,
- .dfsigma.init=dfsigma.init,
- .ilambda=ilambda,
+ }), list(.llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma,
+ .dfmu.init = dfmu.init,
+ .dfsigma.init = dfsigma.init,
+ .ilambda = ilambda,
.yoffset=yoffset,
- .isigma=isigma))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- eta[,1] = eta2theta(eta[,1], .llambda, earg= .elambda)
- eta[,3] = eta2theta(eta[,3], .lsigma, earg= .esigma)
+ .isigma = isigma))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ eta[,1] = eta2theta(eta[,1], .llambda, earg = .elambda)
+ eta[,3] = eta2theta(eta[,3], .lsigma, earg = .esigma)
qtplot.lms.yjn(percentiles= .percentiles, eta=eta, yoffset= extra$yoff)
}, list(.percentiles=percentiles,
- .esigma=esigma, .elambda=elambda,
- .llambda=llambda,
- .lsigma=lsigma))),
- last=eval(substitute(expression({
+ .esigma = esigma, .elambda = elambda,
+ .llambda = llambda,
+ .lsigma = lsigma))),
+ last = eval(substitute(expression({
misc$expected = TRUE
misc$nsimEIM = .nsimEIM
misc$percentiles = .percentiles
- misc$link = c(lambda= .llambda, mu= .lmu, sigma= .lsigma)
+ misc$link = c(lambda = .llambda, mu = .lmu, sigma = .lsigma)
misc$earg = list(lambda = .elambda, mu = .emu, sigma = .esigma)
misc$true.mu = FALSE # $fitted is not a true mu
misc[["yoffset"]] = extra$yoffset
@@ -671,29 +677,30 @@ lms.yjn2.control <- function(save.weight=TRUE, ...)
ncol=3, dimnames=list(dimnames(x)[[1]], NULL)))
}
}), list(.percentiles=percentiles,
- .elambda=elambda, .emu=emu, .esigma=esigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma,
.nsimEIM=nsimEIM,
- .llambda=llambda, .lmu=lmu, .lsigma=lsigma ))),
+ .llambda = llambda, .lmu = lmu, .lsigma = lsigma ))),
loglikelihood=eval(substitute(
- function(mu,y,w, residuals= FALSE, eta, extra=NULL) {
- lambda = eta2theta(eta[,1], .llambda, earg= .elambda)
- mu = eta2theta(eta[,2], .lmu, earg= .emu)
- sigma = eta2theta(eta[,3], .lsigma, earg= .esigma)
+ function(mu,y, w, residuals= FALSE, eta, extra = NULL) {
+ lambda = eta2theta(eta[,1], .llambda, earg = .elambda)
+ mu = eta2theta(eta[,2], .lmu, earg = .emu)
+ sigma = eta2theta(eta[,3], .lsigma, earg = .esigma)
psi = yeo.johnson(y, lambda)
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
sum(w * (-log(sigma) - 0.5 * ((psi-mu)/sigma)^2 +
(lambda-1) * sign(y) * log1p(abs(y))))
- }, list( .elambda=elambda, .emu=emu, .esigma=esigma,
- .llambda=llambda, .lmu=lmu,
- .lsigma=lsigma ))),
- vfamily=c("lms.yjn2", "lmscreg"),
- deriv=eval(substitute(expression({
- lambda = eta2theta(eta[,1], .llambda, earg= .elambda)
- mymu = eta2theta(eta[,2], .lmu, earg= .emu)
- sigma = eta2theta(eta[,3], .lsigma, earg= .esigma)
- dlambda.deta = dtheta.deta(lambda, link=.llambda, earg= .elambda)
- dmu.deta = dtheta.deta(mymu, link=.lmu, earg= .emu)
- dsigma.deta = dtheta.deta(sigma, link=.lsigma, earg= .esigma)
+ }, list( .elambda = elambda, .emu = emu, .esigma = esigma,
+ .llambda = llambda, .lmu = lmu,
+ .lsigma = lsigma ))),
+ vfamily = c("lms.yjn2", "lmscreg"),
+ deriv = eval(substitute(expression({
+ lambda = eta2theta(eta[,1], .llambda, earg = .elambda)
+ mymu = eta2theta(eta[,2], .lmu, earg = .emu)
+ sigma = eta2theta(eta[,3], .lsigma, earg = .esigma)
+ dlambda.deta = dtheta.deta(lambda, link = .llambda, earg = .elambda)
+ dmu.deta = dtheta.deta(mymu, link = .lmu, earg = .emu)
+ dsigma.deta = dtheta.deta(sigma, link = .lsigma, earg = .esigma)
psi = yeo.johnson(y, lambda)
d1 = yeo.johnson(y, lambda, deriv=1)
@@ -703,17 +710,17 @@ lms.yjn2.control <- function(save.weight=TRUE, ...)
dl.dsigma = (AA^2 -1) / sigma
dthetas.detas = cbind(dlambda.deta, dmu.deta, dsigma.deta)
w * cbind(dl.dlambda, dl.dmu, dl.dsigma) * dthetas.detas
- }), list( .elambda=elambda, .emu=emu, .esigma=esigma,
- .llambda=llambda, .lmu=lmu,
- .lsigma=lsigma ))),
- weight=eval(substitute(expression({
+ }), list( .elambda = elambda, .emu = emu, .esigma = esigma,
+ .llambda = llambda, .lmu = lmu,
+ .lsigma = lsigma ))),
+ weight = eval(substitute(expression({
run.varcov = 0
- ind1 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
+ ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
for(ii in 1:( .nsimEIM )) {
psi = rnorm(n, mymu, sigma)
- ysim = yeo.johnson(y=psi, lam=lambda, inv=TRUE)
+ ysim = yeo.johnson(y=psi, lam=lambda, inv = TRUE)
d1 = yeo.johnson(ysim, lambda, deriv=1)
AA = (psi - mymu) / sigma
dl.dlambda = -AA * d1 /sigma + sign(ysim) * log1p(abs(ysim))
@@ -727,29 +734,29 @@ lms.yjn2.control <- function(save.weight=TRUE, ...)
if (intercept.only)
run.varcov = matrix(colMeans(run.varcov),
- nr=n, nc=ncol(run.varcov), byrow=TRUE)
+ nr=n, nc=ncol(run.varcov), byrow = TRUE)
wz = run.varcov * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
dimnames(wz) = list(rownames(wz), NULL) # Remove the colnames
wz * w
- }), list(.lsigma=lsigma,
- .esigma=esigma, .elambda=elambda,
+ }), list(.lsigma = lsigma,
+ .esigma = esigma, .elambda = elambda,
.nsimEIM=nsimEIM,
- .llambda=llambda))))
+ .llambda = llambda))))
}
- lms.yjn <- function(percentiles=c(25,50,75),
- zero=c(1,3),
- llambda="identity",
- lsigma="loge",
- elambda=list(), esigma=list(),
+ lms.yjn <- function(percentiles = c(25,50,75),
+ zero = c(1,3),
+ llambda = "identity",
+ lsigma = "loge",
+ elambda = list(), esigma = list(),
dfmu.init=4,
dfsigma.init=2,
ilambda=1.0,
isigma=NULL,
- rule=c(10,5),
+ rule = c(10,5),
yoffset=NULL,
diagW=FALSE, iters.diagW=6)
{
@@ -768,12 +775,12 @@ lms.yjn2.control <- function(save.weight=TRUE, ...)
stop("only rule=5 or 10 is supported")
new("vglmff",
- blurb=c("LMS Quantile Regression ",
+ blurb = c("LMS Quantile Regression ",
"(Yeo-Johnson transformation to normality)\n",
"Links: ",
- namesof("lambda", link=llambda, earg= elambda),
+ namesof("lambda", link=llambda, earg = elambda),
", mu, ",
- namesof("sigma", link=lsigma, earg= esigma)),
+ namesof("sigma", link=lsigma, earg = esigma)),
constraints=eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list(.zero=zero))),
@@ -781,9 +788,9 @@ lms.yjn2.control <- function(save.weight=TRUE, ...)
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("lambda", .llambda, earg= .elambda, short= TRUE),
+ c(namesof("lambda", .llambda, earg = .elambda, short= TRUE),
"mu",
- namesof("sigma", .lsigma, earg= .esigma, short= TRUE))
+ namesof("sigma", .lsigma, earg = .esigma, short= TRUE))
y.save = y
yoff = if (is.Numeric( .yoffset)) .yoffset else -median(y)
@@ -795,52 +802,52 @@ lms.yjn2.control <- function(save.weight=TRUE, ...)
lambda.init = if (is.Numeric( .ilambda)) .ilambda else 1.0
y.tx = yeo.johnson(y, lambda.init)
- if (smoothok <- (length(unique(sort(x[,min(ncol(x),2)]))) > 7)) {
- fit700=vsmooth.spline(x=x[,min(ncol(x),2)],
- y=y.tx, w=w, df= .dfmu.init)
- fv.init = c(predict(fit700, x=x[,min(ncol(x),2)])$y)
+ if (smoothok <- (length(unique(sort(x[, min(ncol(x), 2)]))) > 7)) {
+ fit700 = vsmooth.spline(x = x[, min(ncol(x), 2)],
+ y = y.tx, w = w, df = .dfmu.init)
+ fv.init = c(predict(fit700, x = x[, min(ncol(x), 2)])$y)
} else {
fv.init = rep(weighted.mean(y, w), len=n)
}
sigma.init = if (!is.Numeric(.isigma)) {
- if (is.Numeric( .dfsigma.init) && smoothok) {
- fit710 = vsmooth.spline(x=x[,min(ncol(x),2)],
- y=(y.tx - fv.init)^2,
- w=w, df= .dfsigma.init)
- sqrt(c(abs(predict(fit710,
- x=x[,min(ncol(x),2)])$y)))
- } else {
+ if (is.Numeric( .dfsigma.init) && smoothok) {
+ fit710 = vsmooth.spline(x = x[, min(ncol(x), 2)],
+ y = (y.tx - fv.init)^2,
+ w = w, df = .dfsigma.init)
+ sqrt(c(abs(predict(fit710,
+ x = x[, min(ncol(x), 2)])$y)))
+ } else {
sqrt( sum( w * (y.tx - fv.init)^2 ) / sum(w) )
- }
- } else
- .isigma
+ }
+ } else
+ .isigma
- etastart = cbind(theta2eta(lambda.init,.llambda, earg=.elambda),
+ etastart = cbind(theta2eta(lambda.init,.llambda, earg = .elambda),
fv.init,
- theta2eta(sigma.init, .lsigma, earg=.esigma))
+ theta2eta(sigma.init, .lsigma, earg = .esigma))
}
- }), list(.lsigma=lsigma,
- .llambda=llambda,
- .esigma=esigma, .elambda=elambda,
- .dfmu.init=dfmu.init,
- .dfsigma.init=dfsigma.init,
- .ilambda=ilambda,
+ }), list(.lsigma = lsigma,
+ .llambda = llambda,
+ .esigma = esigma, .elambda = elambda,
+ .dfmu.init = dfmu.init,
+ .dfsigma.init = dfsigma.init,
+ .ilambda = ilambda,
.yoffset=yoffset,
- .isigma=isigma))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- eta[,1] = eta2theta(eta[,1], .llambda, earg= .elambda)
- eta[,3] = eta2theta(eta[,3], .lsigma, earg= .esigma)
+ .isigma = isigma))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ eta[,1] = eta2theta(eta[,1], .llambda, earg = .elambda)
+ eta[,3] = eta2theta(eta[,3], .lsigma, earg = .esigma)
qtplot.lms.yjn(percentiles= .percentiles, eta=eta, yoffset= extra$yoff)
}, list(.percentiles=percentiles,
- .esigma=esigma, .elambda=elambda,
- .llambda=llambda,
- .lsigma=lsigma))),
- last=eval(substitute(expression({
+ .esigma = esigma, .elambda = elambda,
+ .llambda = llambda,
+ .lsigma = lsigma))),
+ last = eval(substitute(expression({
misc$percentiles = .percentiles
- misc$link = c(lambda= .llambda, mu= "identity", sigma= .lsigma)
- misc$earg = list(lambda = .elambda, mu = list(), sigma = .esigma)
+ misc$link = c(lambda = .llambda, mu = "identity", sigma = .lsigma)
+ misc$earg = list(lambda = .elambda, mu = list(), sigma = .esigma)
misc$true.mu = FALSE # $fitted is not a true mu
misc[["yoffset"]] = extra$yoff
@@ -852,25 +859,26 @@ lms.yjn2.control <- function(save.weight=TRUE, ...)
ncol=3, dimnames=list(dimnames(x)[[1]], NULL)))
}
}), list(.percentiles=percentiles,
- .esigma=esigma, .elambda=elambda,
- .llambda=llambda,
- .lsigma=lsigma))),
+ .esigma = esigma, .elambda = elambda,
+ .llambda = llambda,
+ .lsigma = lsigma))),
loglikelihood=eval(substitute(
- function(mu,y,w, residuals= FALSE, eta, extra=NULL) {
- lambda = eta2theta(eta[,1], .llambda, earg= .elambda)
+ function(mu,y, w, residuals= FALSE, eta, extra = NULL) {
+ lambda = eta2theta(eta[,1], .llambda, earg = .elambda)
mu = eta[,2]
- sigma = eta2theta(eta[,3], .lsigma, earg= .esigma)
+ sigma = eta2theta(eta[,3], .lsigma, earg = .esigma)
psi = yeo.johnson(y, lambda)
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
sum(w * (-log(sigma) - 0.5 * ((psi-mu)/sigma)^2 +
(lambda-1) * sign(y) * log1p(abs(y))))
- }, list( .esigma=esigma, .elambda=elambda,
- .lsigma=lsigma, .llambda=llambda))),
- vfamily=c("lms.yjn", "lmscreg"),
- deriv=eval(substitute(expression({
- lambda = eta2theta(eta[,1], .llambda, earg= .elambda)
+ }, list( .esigma = esigma, .elambda = elambda,
+ .lsigma = lsigma, .llambda = llambda))),
+ vfamily = c("lms.yjn", "lmscreg"),
+ deriv = eval(substitute(expression({
+ lambda = eta2theta(eta[,1], .llambda, earg = .elambda)
mymu = eta[,2]
- sigma = eta2theta(eta[,3], .lsigma, earg= .esigma)
+ sigma = eta2theta(eta[,3], .lsigma, earg = .esigma)
psi = yeo.johnson(y, lambda)
d1 = yeo.johnson(y, lambda, deriv=1)
@@ -879,15 +887,15 @@ lms.yjn2.control <- function(save.weight=TRUE, ...)
dl.dlambda = -AA * d1 /sigma + sign(y) * log1p(abs(y))
dl.dmu = AA / sigma
dl.dsigma = (AA^2 -1) / sigma
- dlambda.deta = dtheta.deta(lambda, link=.llambda, earg= .elambda)
- dsigma.deta = dtheta.deta(sigma, link=.lsigma, earg= .esigma)
+ dlambda.deta = dtheta.deta(lambda, link = .llambda, earg = .elambda)
+ dsigma.deta = dtheta.deta(sigma, link = .lsigma, earg = .esigma)
cbind(dl.dlambda * dlambda.deta,
dl.dmu,
dl.dsigma * dsigma.deta) * w
- }), list( .esigma=esigma, .elambda=elambda,
- .lsigma=lsigma, .llambda=llambda ))),
- weight=eval(substitute(expression({
+ }), list( .esigma = esigma, .elambda = elambda,
+ .lsigma = lsigma, .llambda = llambda ))),
+ weight = eval(substitute(expression({
wz = matrix(0, n, 6)
@@ -896,7 +904,7 @@ lms.yjn2.control <- function(save.weight=TRUE, ...)
if (.rule == 10) {
- glag.abs=c(0.13779347054,0.729454549503,1.80834290174,3.40143369785,
+ glag.abs = c(0.13779347054,0.729454549503,1.80834290174,3.40143369785,
5.55249614006,8.33015274676,11.8437858379,16.2792578314,
21.996585812, 29.9206970123)
glag.wts = c(0.308441115765, 0.401119929155, 0.218068287612,
@@ -906,7 +914,7 @@ lms.yjn2.control <- function(save.weight=TRUE, ...)
} else {
glag.abs = c(0.2635603197180449, 1.4134030591060496, 3.5964257710396850,
7.0858100058570503, 12.6408008442729685)
- glag.wts=c(5.217556105826727e-01,3.986668110832433e-01,7.594244968176882e-02,
+ glag.wts = c(5.217556105826727e-01,3.986668110832433e-01,7.594244968176882e-02,
3.611758679927785e-03, 2.336997238583738e-05)
}
@@ -915,14 +923,14 @@ lms.yjn2.control <- function(save.weight=TRUE, ...)
0.81686197962535023, 1.23454146277833154, 1.70679833036403172,
2.22994030591819214, 2.80910399394755972, 3.46387269067033854,
4.25536209637269280)
- sgh.wts=c(9.855210713854302e-02,2.086780884700499e-01,2.520517066468666e-01,
+ sgh.wts = c(9.855210713854302e-02,2.086780884700499e-01,2.520517066468666e-01,
1.986843323208932e-01,9.719839905023238e-02,2.702440190640464e-02,
3.804646170194185e-03, 2.288859354675587e-04, 4.345336765471935e-06,
1.247734096219375e-08)
} else {
sgh.abs = c(0.1002421519682381, 0.4828139660462573, 1.0609498215257607,
1.7797294185202606, 2.6697603560875995)
- sgh.wts=c(0.2484061520284881475,0.3923310666523834311,0.2114181930760276606,
+ sgh.wts = c(0.2484061520284881475,0.3923310666523834311,0.2114181930760276606,
0.0332466603513424663, 0.0008248533445158026)
}
@@ -936,7 +944,7 @@ lms.yjn2.control <- function(save.weight=TRUE, ...)
} else {
gleg.abs = c(-0.9061798459386643,-0.5384693101056820, 0,
0.5384693101056828, 0.9061798459386635)
- gleg.wts=c(0.2369268850561853,0.4786286704993680,0.5688888888888889,
+ gleg.wts = c(0.2369268850561853,0.4786286704993680,0.5688888888888889,
0.4786286704993661, 0.2369268850561916)
}
@@ -963,7 +971,7 @@ lms.yjn2.control <- function(save.weight=TRUE, ...)
gleg.weight.yjn.13(abscissae, lambda, mymu, sigma, temp9)
}
} else {
- temp9 = dotFortran(name="yjngintf", as.double(LL), as.double(UU),
+ temp9 = dotFortran(name = "yjngintf", as.double(LL), as.double(UU),
as.double(gleg.abs), as.double(gleg.wts), as.integer(n),
as.integer(length(gleg.abs)), as.double(lambda),
as.double(mymu), as.double(sigma), answer=double(3*n),
@@ -1017,24 +1025,24 @@ lms.yjn2.control <- function(save.weight=TRUE, ...)
wz = wz * w
wz
- }), list(.lsigma=lsigma,
- .esigma=esigma, .elambda=elambda,
+ }), list(.lsigma = lsigma,
+ .esigma = esigma, .elambda = elambda,
.rule=rule,
.diagW=diagW,
.iters.diagW=iters.diagW,
- .llambda=llambda))))
+ .llambda = llambda))))
}
-lmscreg.control <- function(cdf= TRUE, at.arg=NULL, x0=NULL, ...)
+lmscreg.control <- function(cdf = TRUE, at.arg=NULL, x0=NULL, ...)
{
if (!is.logical(cdf)) {
warning("'cdf' is not logical; using TRUE instead")
cdf = TRUE
}
- list(cdf=cdf, at.arg=at.arg, x0=x0)
+ list(cdf =cdf, at.arg=at.arg, x0=x0)
}
@@ -1047,7 +1055,7 @@ Wr1 <- function(r, w) ifelse(r <= 0, 1, w)
Wr2 <- function(r, w) (r <= 0) * 1 + (r > 0) * w
-amlnormal.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+amlnormal.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
M <- length(extra$w.aml)
@@ -1089,14 +1097,14 @@ amlnormal.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
stop("bad input for argument 'iexpectile'")
new("vglmff",
- blurb=c("Asymmetric least squares quantile regression\n\n",
+ blurb = c("Asymmetric least squares quantile regression\n\n",
"Links: ",
- namesof("expectile", link=lexpectile, earg= eexpectile)),
+ namesof("expectile", link=lexpectile, earg = eexpectile)),
constraints=eval(substitute(expression({
constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints)
}), list( .parallel=parallel ))),
- deviance= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- amlnormal.deviance(mu=mu, y=y, w=w, residuals=residuals,
+ deviance= function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ amlnormal.deviance(mu=mu, y = y, w = w, residuals=residuals,
eta=eta, extra=extra)
},
initialize=eval(substitute(expression({
@@ -1105,38 +1113,38 @@ amlnormal.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
stop("response must be a vector or a one-column matrix")
extra$M = M = length(extra$w.aml) # Recycle if necessary
extra$n = n
- extra$y.names = y.names = paste("w.aml=", round(extra$w.aml, dig=.digw),
- sep="")
+ extra$y.names = y.names =
+ paste("w.aml = ", round(extra$w.aml, dig = .digw), sep = "")
predictors.names = c(namesof(
- paste("expectile(",y.names,")", sep=""), .lexpectile,
- earg=.eexpectile, tag=FALSE))
+ paste("expectile(",y.names,")", sep = ""), .lexpectile,
+ earg = .eexpectile, tag = FALSE))
if (!length(etastart)) {
mean.init = if ( .method.init == 1)
rep(median(y), length=n) else
if ( .method.init == 2)
rep(weighted.mean(y, w), length=n) else {
- junk = if (is.R()) lm.wfit(x=x, y=y, w=w) else
- lm.wfit(x=x, y=y, w=w, method="qr")
+ junk = if (is.R()) lm.wfit(x=x, y = y, w=w) else
+ lm.wfit(x=x, y = y, w = w, method = "qr")
junk$fitted
}
if (length( .iexpectile))
- mean.init = matrix( .iexpectile, n, M, byrow=TRUE)
+ mean.init = matrix( .iexpectile, n, M, byrow = TRUE)
etastart = matrix(theta2eta(mean.init, .lexpectile,
- earg= .eexpectile), n, M)
+ earg = .eexpectile), n, M)
}
}), list( .lexpectile=lexpectile, .eexpectile=eexpectile,
.iexpectile=iexpectile,
- .method.init=method.init, .digw = digw, .w.aml=w.aml ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+ .method.init = method.init, .digw = digw, .w.aml = w.aml ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
ans = eta = as.matrix(eta)
for(ii in 1:ncol(eta))
- ans[,ii] = eta2theta(eta[,ii], .lexpectile, earg= .eexpectile)
+ ans[,ii] = eta2theta(eta[,ii], .lexpectile, earg = .eexpectile)
dimnames(ans) = list(dimnames(eta)[[1]], extra$y.names)
ans
}, list( .lexpectile=lexpectile, .eexpectile=eexpectile ))),
- last=eval(substitute(expression({
- misc$link = rep(.lexpectile, length=M)
+ last = eval(substitute(expression({
+ misc$link = rep(.lexpectile, length = M)
names(misc$link) = extra$y.names
misc$earg = vector("list", M)
names(misc$earg) = names(misc$link)
@@ -1151,22 +1159,22 @@ amlnormal.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
extra$individual = TRUE
if (!(M > 1 && ncol(cbind(w)) == M)) {
- extra$deviance = amlnormal.deviance(mu=mu, y=y, w=w,
+ extra$deviance = amlnormal.deviance(mu=mu, y = y, w = w,
residuals=FALSE, eta=eta, extra=extra)
names(extra$deviance) = extra$y.names
}
}), list( .lexpectile=lexpectile, .eexpectile=eexpectile,
.parallel=parallel ))),
- vfamily=c("amlnormal"),
- deriv=eval(substitute(expression({
- mymu = eta2theta(eta, .lexpectile, earg= .eexpectile)
- dexpectile.deta = dtheta.deta(mymu, .lexpectile, earg= .eexpectile)
+ vfamily = c("amlnormal"),
+ deriv = eval(substitute(expression({
+ mymu = eta2theta(eta, .lexpectile, earg = .eexpectile)
+ dexpectile.deta = dtheta.deta(mymu, .lexpectile, earg = .eexpectile)
myresid = matrix(y,extra$n,extra$M) - cbind(mu)
wor1 = Wr2(myresid, w= matrix(extra$w.aml, extra$n, extra$M,
- byrow=TRUE))
+ byrow = TRUE))
w * myresid * wor1 * dexpectile.deta
}), list( .lexpectile=lexpectile, .eexpectile=eexpectile ))),
- weight=eval(substitute(expression({
+ weight = eval(substitute(expression({
wz = w * wor1 * dexpectile.deta^2
wz
}), list( .lexpectile=lexpectile, .eexpectile=eexpectile ))))
@@ -1181,7 +1189,7 @@ amlnormal.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
-amlpoisson.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+amlpoisson.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
M <- length(extra$w.aml)
@@ -1215,14 +1223,14 @@ amlpoisson.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
if (!is.list(earg)) earg = list()
new("vglmff",
- blurb=c("Poisson expectile regression by",
+ blurb = c("Poisson expectile regression by",
" asymmetric maximum likelihood estimation\n\n",
- "Link: ", namesof("expectile", link, earg= earg)),
+ "Link: ", namesof("expectile", link, earg = earg)),
constraints=eval(substitute(expression({
constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints)
}), list( .parallel=parallel ))),
- deviance= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- amlpoisson.deviance(mu=mu, y=y, w=w, residuals=residuals,
+ deviance= function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ amlpoisson.deviance(mu=mu, y = y, w = w, residuals=residuals,
eta=eta, extra=extra)
},
initialize=eval(substitute(expression({
@@ -1231,34 +1239,34 @@ amlpoisson.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
stop("response must be a vector or a one-column matrix")
extra$M = M = length(extra$w.aml) # Recycle if necessary
extra$n = n
- extra$y.names = y.names = paste("w.aml=", round(extra$w.aml, dig=.digw),
- sep="")
+ extra$y.names = y.names =
+ paste("w.aml = ", round(extra$w.aml, dig = .digw), sep = "")
extra$individual = FALSE
- predictors.names = c(namesof(paste("expectile(",y.names,")", sep=""),
- .link, earg=.earg, tag=FALSE))
+ predictors.names = c(namesof(paste("expectile(",y.names,")", sep = ""),
+ .link, earg = .earg, tag = FALSE))
if (!length(etastart)) {
mean.init = if ( .method.init == 2)
rep(median(y), length=n) else
if ( .method.init == 1)
rep(weighted.mean(y, w), length=n) else {
- junk = if (is.R()) lm.wfit(x=x, y=y, w=w) else
- lm.wfit(x=x, y=y, w=w, method="qr")
+ junk = if (is.R()) lm.wfit(x=x, y = y, w=w) else
+ lm.wfit(x=x, y = y, w = w, method = "qr")
abs(junk$fitted)
}
- etastart = matrix(theta2eta(mean.init, .link, earg= .earg), n, M)
+ etastart = matrix(theta2eta(mean.init, .link, earg = .earg), n, M)
}
- }), list( .link=link, .earg=earg, .method.init=method.init,
- .digw = digw, .w.aml=w.aml ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+ }), list( .link = link, .earg = earg, .method.init = method.init,
+ .digw = digw, .w.aml = w.aml ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
mu.ans = eta = as.matrix(eta)
for(ii in 1:ncol(eta))
- mu.ans[,ii] = eta2theta(eta[,ii], .link, earg= .earg)
+ mu.ans[,ii] = eta2theta(eta[,ii], .link, earg = .earg)
dimnames(mu.ans) = list(dimnames(eta)[[1]], extra$y.names)
mu.ans
- }, list( .link=link, .earg=earg ))),
- last=eval(substitute(expression({
- misc$link = rep(.link, length=M)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = rep(.link, length = M)
names(misc$link) = extra$y.names
misc$earg = vector("list", M)
names(misc$earg) = names(misc$link)
@@ -1270,35 +1278,35 @@ amlpoisson.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
names(extra$percentile) = names(misc$link)
extra$individual = TRUE
- extra$deviance = amlpoisson.deviance(mu=mu, y=y, w=w,
+ extra$deviance = amlpoisson.deviance(mu=mu, y = y, w = w,
residuals=FALSE, eta=eta, extra=extra)
names(extra$deviance) = extra$y.names
- }), list( .link=link, .earg=earg, .parallel=parallel ))),
- link=eval(substitute(function(mu, extra=NULL) {
- theta2eta(mu, link= .link, earg= .earg)
- }, list( .link=link, .earg=earg ))),
- vfamily=c("amlpoisson"),
- deriv=eval(substitute(expression({
- mymu = eta2theta(eta, .link, earg= .earg)
- dexpectile.deta = dtheta.deta(mymu, .link, earg=.earg)
+ }), list( .link = link, .earg = earg, .parallel=parallel ))),
+ link=eval(substitute(function(mu, extra = NULL) {
+ theta2eta(mu, link= .link, earg = .earg)
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("amlpoisson"),
+ deriv = eval(substitute(expression({
+ mymu = eta2theta(eta, .link, earg = .earg)
+ dexpectile.deta = dtheta.deta(mymu, .link, earg = .earg)
myresid = matrix(y,extra$n,extra$M) - cbind(mu)
wor1 = Wr2(myresid, w= matrix(extra$w.aml, extra$n, extra$M,
- byrow=TRUE))
+ byrow = TRUE))
w * myresid * wor1 * (dexpectile.deta / mymu)
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
use.mu = mymu
use.mu[use.mu < .Machine$double.eps^(3/4)] = .Machine$double.eps^(3/4)
wz = w * wor1 * use.mu * (dexpectile.deta / mymu)^2
wz
- }), list( .link=link, .earg=earg ))))
+ }), list( .link = link, .earg = earg ))))
}
-amlbinomial.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+amlbinomial.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
M <- length(extra$w.aml)
@@ -1337,8 +1345,9 @@ amlbinomial.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
amlbinomial <- function(w.aml = 1, parallel= FALSE, digw = 4,
- link = "logit", earg = list())
+ link = "logit", earg = list())
{
+
if (!is.Numeric(w.aml, posit=TRUE))
stop("'w.aml' must be a vector of positive values")
if (mode(link)!= "character" && mode(link)!= "name")
@@ -1346,17 +1355,19 @@ amlbinomial.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
if (!is.list(earg)) earg = list()
new("vglmff",
- blurb=c("Logistic expectile regression by ",
+ blurb = c("Logistic expectile regression by ",
"asymmetric maximum likelihood estimation\n\n",
- "Link: ", namesof("expectile", link, earg= earg)),
+ "Link: ", namesof("expectile", link, earg = earg)),
constraints=eval(substitute(expression({
constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints)
}), list( .parallel=parallel ))),
- deviance= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- amlbinomial.deviance(mu=mu, y=y, w=w, residuals=residuals,
+ deviance= function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ amlbinomial.deviance(mu=mu, y = y, w = w, residuals=residuals,
eta=eta, extra=extra)
},
initialize=eval(substitute(expression({
+
+
{
NCOL = function (x)
if (is.array(x) && length(dim(x)) > 1 ||
@@ -1367,7 +1378,8 @@ amlbinomial.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
nn = rep(1, n)
if (!all(y >= 0 & y <= 1))
stop("response values must be in [0, 1]")
- mustart = (0.5 + w * y) / (1 + w)
+ if (!length(mustart) && !length(etastart))
+ mustart = (0.5 + w * y) / (1 + w)
no.successes = w * y
if (any(abs(no.successes - round(no.successes)) > 0.001))
stop("Number of successes must be integer-valued")
@@ -1377,35 +1389,41 @@ amlbinomial.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
nn = y[,1] + y[,2]
y = ifelse(nn > 0, y[,1]/nn, 0)
w = w * nn
- mustart = (0.5 + nn * y) / (1 + nn)
+ if (!length(mustart) && !length(etastart))
+ mustart = (0.5 + nn * y) / (1 + nn)
} else
stop("Response not of the right form")
}
- mustart = matrix(mustart, n, length( .w.aml ))
-
extra$w.aml = .w.aml
if (ncol(y <- cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
extra$M = M = length(extra$w.aml) # Recycle if necessary
extra$n = n
- extra$y.names = y.names = paste("w.aml=", round(extra$w.aml, dig=.digw),
- sep="")
+ extra$y.names = y.names =
+ paste("w.aml = ", round(extra$w.aml, dig = .digw), sep = "")
extra$individual = FALSE
- predictors.names = c(namesof(paste("expectile(",y.names,")", sep=""),
- .link, earg=.earg, tag=FALSE))
+ predictors.names =
+ c(namesof(paste("expectile(", y.names, ")", sep = ""),
+ .link, earg = .earg, tag = FALSE))
+
+ if (!length(etastart)) {
+ etastart = matrix(theta2eta(mustart, .link, earg = .earg), n, M)
+ mustart = NULL
+ }
- }), list( .link=link, .earg=earg,
- .digw = digw, .w.aml=w.aml ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+
+ }), list( .link = link, .earg = earg,
+ .digw = digw, .w.aml = w.aml ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
mu.ans = eta = as.matrix(eta)
for(ii in 1:ncol(eta))
- mu.ans[,ii] = eta2theta(eta[,ii], .link, earg= .earg)
+ mu.ans[,ii] = eta2theta(eta[,ii], .link, earg = .earg)
dimnames(mu.ans) = list(dimnames(eta)[[1]], extra$y.names)
mu.ans
- }, list( .link=link, .earg=earg ))),
- last=eval(substitute(expression({
- misc$link = rep(.link, length=M)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = rep(.link, length = M)
names(misc$link) = extra$y.names
misc$earg = vector("list", M)
names(misc$earg) = names(misc$link)
@@ -1417,28 +1435,28 @@ amlbinomial.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
names(extra$percentile) = names(misc$link)
extra$individual = TRUE
- extra$deviance = amlbinomial.deviance(mu=mu, y=y, w=w,
+ extra$deviance = amlbinomial.deviance(mu=mu, y = y, w = w,
residuals=FALSE, eta=eta, extra=extra)
names(extra$deviance) = extra$y.names
- }), list( .link=link, .earg=earg, .parallel=parallel ))),
- link=eval(substitute(function(mu, extra=NULL) {
- theta2eta(mu, link= .link, earg= .earg)
- }, list( .link=link, .earg=earg ))),
- vfamily=c("amlbinomial"),
- deriv=eval(substitute(expression({
- mymu = eta2theta(eta, .link, earg= .earg)
+ }), list( .link = link, .earg = earg, .parallel=parallel ))),
+ link=eval(substitute(function(mu, extra = NULL) {
+ theta2eta(mu, link= .link, earg = .earg)
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("amlbinomial"),
+ deriv = eval(substitute(expression({
+ mymu = eta2theta(eta, .link, earg = .earg)
use.mu = mymu
use.mu[use.mu < .Machine$double.eps^(3/4)] = .Machine$double.eps^(3/4)
- dexpectile.deta = dtheta.deta(use.mu, .link, earg=.earg)
+ dexpectile.deta = dtheta.deta(use.mu, .link, earg = .earg)
myresid = matrix(y,extra$n,extra$M) - cbind(mu)
wor1 = Wr2(myresid, w= matrix(extra$w.aml, extra$n, extra$M,
- byrow=TRUE))
+ byrow = TRUE))
w * myresid * wor1 * (dexpectile.deta / (use.mu * (1-use.mu)))
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
- wz = w * wor1 * (dexpectile.deta^2 / (use.mu * (1-use.mu)))
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ wz = w * wor1 * (dexpectile.deta^2 / (use.mu * (1 - use.mu)))
wz
- }), list( .link=link, .earg=earg ))))
+ }), list( .link = link, .earg = earg ))))
}
@@ -1450,7 +1468,7 @@ amlbinomial.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
-amlexponential.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+amlexponential.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
M <- length(extra$w.aml)
@@ -1485,20 +1503,20 @@ amlexponential.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL)
if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
method.init > 3) stop("argument 'method.init' must be 1, 2 or 3")
- y.names = paste("w.aml=", round(w.aml, dig=digw), sep="")
+ y.names = paste("w.aml = ", round(w.aml, dig=digw), sep = "")
predictors.names = c(namesof(
- paste("expectile(", y.names,")", sep=""), link, earg=earg))
- predictors.names = paste(predictors.names, collapse=", ")
+ paste("expectile(", y.names,")", sep = ""), link, earg = earg))
+ predictors.names = paste(predictors.names, collapse = ", ")
new("vglmff",
- blurb=c("Exponential expectile regression by",
+ blurb = c("Exponential expectile regression by",
" asymmetric maximum likelihood estimation\n\n",
"Link: ", predictors.names),
constraints=eval(substitute(expression({
constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints)
}), list( .parallel=parallel ))),
- deviance= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- amlexponential.deviance(mu=mu, y=y, w=w, residuals=residuals,
+ deviance= function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ amlexponential.deviance(mu=mu, y = y, w = w, residuals=residuals,
eta=eta, extra=extra)
},
initialize=eval(substitute(expression({
@@ -1509,11 +1527,11 @@ amlexponential.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL)
stop("all responses must be positive")
extra$M = M = length(extra$w.aml) # Recycle if necessary
extra$n = n
- extra$y.names = y.names = paste("w.aml=", round(extra$w.aml, dig=.digw),
- sep="")
+ extra$y.names = y.names =
+ paste("w.aml = ", round(extra$w.aml, dig = .digw), sep = "")
extra$individual = FALSE
predictors.names = c(namesof(
- paste("expectile(",y.names,")", sep=""), .link, earg=.earg, tag=FALSE))
+ paste("expectile(",y.names,")", sep = ""), .link, earg = .earg, tag = FALSE))
if (!length(etastart)) {
mean.init = if ( .method.init == 1)
@@ -1522,19 +1540,20 @@ amlexponential.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL)
rep(weighted.mean(y, w), length=n) else {
1 / (y + 1)
}
- etastart = matrix(theta2eta(mean.init, .link, earg= .earg), n, M)
+ etastart = matrix(theta2eta(mean.init, .link, earg = .earg),
+ n, M)
}
- }), list( .link=link, .earg=earg, .method.init=method.init,
- .digw = digw, .w.aml=w.aml ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+ }), list( .link = link, .earg = earg, .method.init = method.init,
+ .digw = digw, .w.aml = w.aml ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
mu.ans = eta = as.matrix(eta)
for(ii in 1:ncol(eta))
- mu.ans[,ii] = eta2theta(eta[,ii], .link, earg= .earg)
+ mu.ans[,ii] = eta2theta(eta[,ii], .link, earg = .earg)
dimnames(mu.ans) = list(dimnames(eta)[[1]], extra$y.names)
mu.ans
- }, list( .link=link, .earg=earg ))),
- last=eval(substitute(expression({
- misc$link = rep(.link, length=M)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = rep(.link, length = M)
names(misc$link) = extra$y.names
misc$earg = vector("list", M)
names(misc$earg) = names(misc$link)
@@ -1546,29 +1565,29 @@ amlexponential.deviance = function(mu, y, w, residuals = FALSE, eta, extra=NULL)
names(extra$percentile) = names(misc$link)
extra$individual = TRUE
- extra$deviance = amlexponential.deviance(mu=mu, y=y, w=w,
+ extra$deviance = amlexponential.deviance(mu=mu, y = y, w = w,
residuals=FALSE, eta=eta, extra=extra)
names(extra$deviance) = extra$y.names
- }), list( .link=link, .earg=earg, .parallel=parallel ))),
- link=eval(substitute(function(mu, extra=NULL) {
- theta2eta(mu, link= .link, earg= .earg)
- }, list( .link=link, .earg=earg ))),
- vfamily=c("amlexponential"),
- deriv=eval(substitute(expression({
- mymu = eta2theta(eta, .link, earg= .earg)
+ }), list( .link = link, .earg = earg, .parallel=parallel ))),
+ link=eval(substitute(function(mu, extra = NULL) {
+ theta2eta(mu, link= .link, earg = .earg)
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("amlexponential"),
+ deriv = eval(substitute(expression({
+ mymu = eta2theta(eta, .link, earg = .earg)
bigy = matrix(y,extra$n,extra$M)
dl.dmu = (bigy - mymu) / mymu^2
- dmu.deta = dtheta.deta(mymu, .link, earg=.earg)
+ dmu.deta = dtheta.deta(mymu, .link, earg = .earg)
myresid = bigy - cbind(mymu)
wor1 = Wr2(myresid, w= matrix(extra$w.aml, extra$n, extra$M,
- byrow=TRUE))
+ byrow = TRUE))
w * wor1 * dl.dmu * dmu.deta
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
ned2l.dmu2 = 1 / mymu^2
wz = w * wor1 * ned2l.dmu2 * dmu.deta^2
wz
- }), list( .link=link, .earg=earg ))))
+ }), list( .link = link, .earg = earg ))))
}
@@ -1600,7 +1619,8 @@ dalap = function(x, location=0, scale=1, tau=0.5,
}
-ralap = function(n, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau))) {
+ralap = function(n, location=0, scale=1, tau=0.5,
+ kappa=sqrt(tau/(1-tau))) {
use.n = if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
stop("bad input for argument 'n'") else n
@@ -1705,12 +1725,12 @@ pqregal = function(q, tau=0.5, location=0, scale=1) {
}
if (FALSE)
-qregal = function(tau=c(0.25, 0.5, 0.75),
- llocation="identity",
+qregal = function(tau = c(0.25, 0.5, 0.75),
+ llocation = "identity",
elocation=list(),
- lscale="loge", escale=list(),
+ lscale = "loge", escale=list(),
ilocation=NULL,
- parallel=FALSE, method.init=1, digt=4) {
+ parallel=FALSE, method.init=1, digt = 4) {
if (mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
@@ -1723,10 +1743,10 @@ qregal = function(tau=c(0.25, 0.5, 0.75),
if (!is.list(escale)) escale = list()
new("vglmff",
- blurb=c("Quantile REGression via an Asymmetric Laplace distribution\n\n",
+ blurb = c("Quantile REGression via an Asymmetric Laplace distribution\n\n",
"Links: ",
- namesof("scale", lscale, earg=escale), ", ",
- namesof("location", llocation, earg=elocation)),
+ namesof("scale", lscale, earg =escale), ", ",
+ namesof("location", llocation, earg =elocation)),
constraints=eval(substitute(expression({
constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints)
}), list( .parallel=parallel ))),
@@ -1736,46 +1756,46 @@ qregal = function(tau=c(0.25, 0.5, 0.75),
stop("response must be a vector or a one-column matrix")
extra$M = M = 1 + length(extra$tau)
extra$n = n
- extra$y.names = y.names = paste("tau=", round(extra$tau, dig=.digt),
- sep="")
+ extra$y.names = y.names =
+ paste("tau = ", round(extra$tau, dig = .digt), sep = "")
extra$individual = FALSE
predictors.names = c(
- namesof("scale", .lscale, earg=.escale, tag=FALSE),
- namesof(paste("quantile(",y.names,")", sep=""),
- link = .llocation, earg=.elocation, tag=FALSE))
+ namesof("scale", .lscale, earg = .escale, tag = FALSE),
+ namesof(paste("quantile(",y.names,")", sep = ""),
+ link = .llocat, earg = .elocat, tag = FALSE))
if (!length(etastart)) {
if ( .method.init == 1) {
- location.init = median(y)
+ locat.init = median(y)
} else {
- location.init = y
+ locat.init = y
}
- location.init = if (length(.ilocation)) {
- matrix( .ilocation, n, M-1, byrow=TRUE)
+ locat.init = if (length(.ilocat)) {
+ matrix( .ilocat, n, M-1, byrow = TRUE)
} else {
- rep(location.init, len=n)
+ rep(locat.init, len=n)
}
scale.init = rep(1.0, len=n)
etastart = cbind(
- theta2eta(scale.init, .lscale, earg = .escale),
+ theta2eta(scale.init, .lscale, earg = .escale),
matrix(
- theta2eta(location.init, .llocation, earg= .elocation), n, M-1))
+ theta2eta(locat.init, .llocat, earg = .elocat), n, M-1))
}
- }), list( .method.init=method.init, .tau=tau, .digt=digt,
- .elocation=elocation, .escale=escale,
- .llocation=llocation, .lscale=lscale,
- .ilocation=ilocation ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+ }), list( .method.init = method.init, .tau = tau, .digt = digt,
+ .elocat = elocation, .escale = escale,
+ .llocat = llocation, .lscale = lscale,
+ .ilocat = ilocation ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
eta = as.matrix(eta)
xi.ans = matrix(0, nrow(eta), ncol(eta)-1)
for(ii in 1:(ncol(eta)-1))
- xi.ans[,ii] = eta2theta(eta[,ii+1], .llocation, earg= .elocation)
+ xi.ans[,ii] = eta2theta(eta[,ii+1], .llocat, earg = .elocat)
dimnames(xi.ans) = list(dimnames(eta)[[1]], extra$y.names)
xi.ans
- }, list( .elocation=elocation, .llocation=llocation, .tau=tau,
- .escale=escale, .lscale=lscale ))),
- last=eval(substitute(expression({
- misc$link = rep( .llocation, length=M)
+ }, list( .elocat = elocation, .llocat = llocation, .tau = tau,
+ .escale = escale, .lscale = lscale ))),
+ last = eval(substitute(expression({
+ misc$link = rep( .llocat, length = M)
names(misc$link) = extra$y.names
misc$earg = vector("list", M)
names(misc$earg) = names(misc$link)
@@ -1789,36 +1809,39 @@ qregal = function(tau=c(0.25, 0.5, 0.75),
misc$expected = TRUE
misc$RegCondOK = FALSE # Save this for later
misc$tau = .tau
- }), list( .elocation=elocation, .llocation=llocation, .tau=tau,
- .escale=escale, .lscale=lscale ))),
+ }), list( .elocat = elocation, .llocat = llocation, .tau = tau,
+ .escale = escale, .lscale = lscale ))),
loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- locmat = eta2theta(eta[,-1,drop=FALSE], .llocation, earg= .elocation)
- scalemat = matrix(eta2theta(eta[,1,drop=FALSE], .lscale,
- earg= .escale), nrow=extra$n, ncol=extra$M - 1)
- taumat = matrix(extra$tau, nrow=extra$n, ncol=extra$M - 1, byrow=TRUE)
+ function(mu, y, w, residuals = FALSE,eta, extra = NULL) {
+ locmat = eta2theta(eta[, -1, drop = FALSE],
+ .llocat, earg = .elocat)
+ scalemat = matrix(eta2theta(eta[,1,drop = FALSE], .lscale,
+ earg = .escale), nrow=extra$n, ncol=extra$M - 1)
+ taumat = matrix(extra$tau, nrow=extra$n, ncol=extra$M - 1, byrow = TRUE)
ymat = matrix(y, nrow=extra$n, ncol=extra$M - 1)
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
sum(w * (-log(scalemat) + log(taumat) + log1p(-taumat) -
rho1check((ymat-locmat)/scalemat, tau=taumat)))
- }, list( .elocation=elocation, .llocation=llocation,
- .escale=escale, .lscale=lscale, .tau=tau ))),
- vfamily=c("qregal"),
- deriv=eval(substitute(expression({
+ }, list( .elocat = elocation, .llocat = llocation,
+ .escale = escale, .lscale = lscale, .tau = tau ))),
+ vfamily = c("qregal"),
+ deriv = eval(substitute(expression({
ymat = matrix(y, nrow=extra$n, ncol=extra$M - 1)
- taumat = matrix(extra$tau, nrow=extra$n, ncol=extra$M - 1, byrow=TRUE)
- scalemat = matrix(eta2theta(eta[,1,drop=FALSE], .lscale,
- earg= .escale), nrow=extra$n, ncol=extra$M - 1)
- locmat = eta2theta(eta[,-1,drop=FALSE], .llocation, earg= .elocation)
+ taumat = matrix(extra$tau, nrow=extra$n, ncol=extra$M - 1,
+ byrow = TRUE)
+ scalemat = matrix(eta2theta(eta[,1,drop = FALSE], .lscale,
+ earg = .escale), nrow=extra$n, ncol=extra$M - 1)
+ locmat = eta2theta(eta[,-1,drop = FALSE], .llocat, earg = .elocat)
dl.dlocation = taumat /scalemat
index1 = (ymat < locmat)
dl.dlocation[index1] = ((taumat - 1)/scalemat)[index1]
- dlocation.deta = dtheta.deta(locmat, .llocation, earg= .elocation)
- dscale.deta = dtheta.deta(scalemat, .lscale, earg= .escale)
+ dlocation.deta = dtheta.deta(locmat, .llocat, earg = .elocat)
+ dscale.deta = dtheta.deta(scalemat, .lscale, earg = .escale)
w * cbind(dl.dlocation * dlocation.deta)
- }), list( .tau=tau, .elocation=elocation, .llocation=llocation,
- .escale=escale, .lscale=lscale ))),
- weight=eval(substitute(expression({
+ }), list( .tau = tau, .elocat = elocation, .llocat = llocation,
+ .escale = escale, .lscale = lscale ))),
+ weight = eval(substitute(expression({
wz = matrix(0, nrow=n, M) # Diagonal
ed2l.dlocation2 = taumat * (1 - taumat) / scalemat^2
ed2l.dscale2 = 2 * (3*taumat^2 - 3*taumat+1) / (scalemat^2 *
@@ -1826,8 +1849,8 @@ qregal = function(tau=c(0.25, 0.5, 0.75),
wz[,iam(1,1,M)] = ed2l.dscale2 * dscale.deta^2
wz[,-1] = ed2l.dlocation2 * dlocation.deta^2
w * wz
- }), list( .tau=tau, .elocation=elocation, .llocation=llocation,
- .escale=escale, .lscale=lscale ))))
+ }), list( .tau = tau, .elocat = elocation, .llocat = llocation,
+ .escale = escale, .lscale = lscale ))))
}
@@ -1926,14 +1949,14 @@ ploglap = function(q, location.ald=0, scale.ald=1,
rlogitlap = function(n, location.ald=0, scale.ald=1, tau=0.5,
- kappa=sqrt(tau/(1-tau)), earg=list()) {
+ kappa=sqrt(tau/(1-tau)), earg =list()) {
logit(ralap(n=n, location=location.ald, scale=scale.ald,
- tau=tau, kappa=kappa), inverse=TRUE, earg=earg)
+ tau=tau, kappa=kappa), inverse=TRUE, earg = earg)
}
dlogitlap = function(x, location.ald=0, scale.ald=1, tau=0.5,
- kappa=sqrt(tau/(1-tau)), log=FALSE, earg=list()) {
+ kappa=sqrt(tau/(1-tau)), log=FALSE, earg =list()) {
if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
rm(log)
@@ -1944,10 +1967,10 @@ dlogitlap = function(x, location.ald=0, scale.ald=1, tau=0.5,
Alpha = sqrt(2) * kappa / scale.ald
Beta = sqrt(2) / (scale.ald * kappa)
- Delta = logit(location.ald, inverse=TRUE, earg=earg)
+ Delta = logit(location.ald, inverse=TRUE, earg = earg)
exponent = ifelse(x >= Delta, -Alpha, Beta) *
- (logit(x, earg=earg) - location.ald)
+ (logit(x, earg = earg) - location.ald)
logdensity = log(Alpha) + log(Beta) - log(Alpha + Beta) -
log(x) - log1p(-x) + exponent
indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
@@ -1959,10 +1982,10 @@ dlogitlap = function(x, location.ald=0, scale.ald=1, tau=0.5,
qlogitlap = function(p, location.ald=0, scale.ald=1,
- tau=0.5, kappa=sqrt(tau/(1-tau)), earg=list()) {
+ tau=0.5, kappa=sqrt(tau/(1-tau)), earg =list()) {
qqq = qalap(p=p, location=location.ald, scale=scale.ald,
tau=tau, kappa=kappa)
- ans = logit(qqq, inverse=TRUE, earg=earg)
+ ans = logit(qqq, inverse=TRUE, earg = earg)
ans[(p < 0) | (p > 1)] = NaN
ans[p == 0] = 0
ans[p == 1] = 1
@@ -1972,7 +1995,7 @@ qlogitlap = function(p, location.ald=0, scale.ald=1,
plogitlap = function(q, location.ald=0, scale.ald=1,
- tau=0.5, kappa=sqrt(tau/(1-tau)), earg=list()) {
+ tau=0.5, kappa=sqrt(tau/(1-tau)), earg =list()) {
NN = max(length(q), length(location.ald), length(scale.ald),
length(kappa))
location.ald = rep(location.ald, len=NN); scale.ald= rep(scale.ald, len=NN)
@@ -1980,7 +2003,7 @@ plogitlap = function(q, location.ald=0, scale.ald=1,
tau = rep(tau, len=NN);
indexTF = (q > 0) & (q < 1)
- qqq = logit(q[indexTF], earg=earg)
+ qqq = logit(q[indexTF], earg = earg)
ans = q
ans[indexTF] = palap(q=qqq, location=location.ald[indexTF],
scale=scale.ald[indexTF],
@@ -1993,15 +2016,15 @@ plogitlap = function(q, location.ald=0, scale.ald=1,
rprobitlap = function(n, location.ald=0, scale.ald=1, tau=0.5,
- kappa=sqrt(tau/(1-tau)), earg=list()) {
+ kappa=sqrt(tau/(1-tau)), earg =list()) {
probit(ralap(n=n, location=location.ald, scale=scale.ald,
- tau=tau, kappa=kappa), inverse=TRUE, earg=earg)
+ tau=tau, kappa=kappa), inverse=TRUE, earg = earg)
}
dprobitlap = function(x, location.ald=0, scale.ald=1, tau=0.5,
kappa=sqrt(tau/(1-tau)), log=FALSE,
- earg=list(), meth2=TRUE) {
+ earg =list(), meth2=TRUE) {
if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
rm(log)
@@ -2015,7 +2038,7 @@ dprobitlap = function(x, location.ald=0, scale.ald=1, tau=0.5,
indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
if (meth2) {
dx.dy = x
- use.x = probit(x[index1], earg=earg)
+ use.x = probit(x[index1], earg = earg)
logdensity[index1] = dalap(x=use.x, location=location.ald[index1],
scale=scale.ald[index1], tau=tau[index1],
kappa=kappa[index1], log=TRUE)
@@ -2037,7 +2060,7 @@ dprobitlap = function(x, location.ald=0, scale.ald=1, tau=0.5,
logdensity[x > 1 & indexTF] = -Inf
if (meth2) {
- dx.dy[index1] = probit(x[index1], earg=earg, inverse=FALSE, deriv=1)
+ dx.dy[index1] = probit(x[index1], earg = earg, inverse=FALSE, deriv=1)
dx.dy[!index1] = 0
dx.dy[!indexTF] = NaN
if (log.arg) logdensity - log(abs(dx.dy)) else exp(logdensity) / abs(dx.dy)
@@ -2048,10 +2071,10 @@ dprobitlap = function(x, location.ald=0, scale.ald=1, tau=0.5,
qprobitlap = function(p, location.ald=0, scale.ald=1,
- tau=0.5, kappa=sqrt(tau/(1-tau)), earg=list()) {
+ tau=0.5, kappa=sqrt(tau/(1-tau)), earg =list()) {
qqq = qalap(p=p, location=location.ald, scale=scale.ald,
tau=tau, kappa=kappa)
- ans = probit(qqq, inverse=TRUE, earg=earg)
+ ans = probit(qqq, inverse=TRUE, earg = earg)
ans[(p < 0) | (p > 1)] = NaN
ans[p == 0] = 0
ans[p == 1] = 1
@@ -2061,7 +2084,7 @@ qprobitlap = function(p, location.ald=0, scale.ald=1,
pprobitlap = function(q, location.ald=0, scale.ald=1,
- tau=0.5, kappa=sqrt(tau/(1-tau)), earg=list()) {
+ tau=0.5, kappa=sqrt(tau/(1-tau)), earg =list()) {
NN = max(length(q), length(location.ald), length(scale.ald),
length(kappa))
location.ald = rep(location.ald, len=NN); scale.ald= rep(scale.ald, len=NN)
@@ -2069,7 +2092,7 @@ pprobitlap = function(q, location.ald=0, scale.ald=1,
tau = rep(tau, len=NN);
indexTF = (q > 0) & (q < 1)
- qqq = probit(q[indexTF], earg=earg)
+ qqq = probit(q[indexTF], earg = earg)
ans = q
ans[indexTF] = palap(q=qqq, location=location.ald[indexTF],
scale=scale.ald[indexTF],
@@ -2081,15 +2104,15 @@ pprobitlap = function(q, location.ald=0, scale.ald=1,
rclogloglap = function(n, location.ald=0, scale.ald=1, tau=0.5,
- kappa=sqrt(tau/(1-tau)), earg=list()) {
+ kappa=sqrt(tau/(1-tau)), earg =list()) {
cloglog(ralap(n=n, location=location.ald, scale=scale.ald,
- tau=tau, kappa=kappa), inverse=TRUE, earg=earg)
+ tau=tau, kappa=kappa), inverse=TRUE, earg = earg)
}
dclogloglap = function(x, location.ald=0, scale.ald=1, tau=0.5,
kappa=sqrt(tau/(1-tau)), log=FALSE,
- earg=list(), meth2=TRUE) {
+ earg =list(), meth2=TRUE) {
if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
rm(log)
@@ -2103,7 +2126,7 @@ dclogloglap = function(x, location.ald=0, scale.ald=1, tau=0.5,
indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
if (meth2) {
dx.dy = x
- use.w = cloglog(x[index1], earg=earg)
+ use.w = cloglog(x[index1], earg = earg)
logdensity[index1] = dalap(x=use.w, location=location.ald[index1],
scale=scale.ald[index1], tau=tau[index1],
kappa=kappa[index1], log=TRUE)
@@ -2123,7 +2146,7 @@ dclogloglap = function(x, location.ald=0, scale.ald=1, tau=0.5,
logdensity[x > 1 & indexTF] = -Inf
if (meth2) {
- dx.dy[index1] = cloglog(x[index1], earg=earg, inverse=FALSE, deriv=1)
+ dx.dy[index1] = cloglog(x[index1], earg = earg, inverse=FALSE, deriv=1)
dx.dy[!index1] = 0
dx.dy[!indexTF] = NaN
if (log.arg) logdensity - log(abs(dx.dy)) else
@@ -2135,10 +2158,10 @@ dclogloglap = function(x, location.ald=0, scale.ald=1, tau=0.5,
qclogloglap = function(p, location.ald=0, scale.ald=1,
- tau=0.5, kappa=sqrt(tau/(1-tau)), earg=list()) {
+ tau=0.5, kappa=sqrt(tau/(1-tau)), earg =list()) {
qqq = qalap(p=p, location=location.ald, scale=scale.ald,
tau=tau, kappa=kappa)
- ans = cloglog(qqq, inverse=TRUE, earg=earg)
+ ans = cloglog(qqq, inverse=TRUE, earg = earg)
ans[(p < 0) | (p > 1)] = NaN
ans[p == 0] = 0
ans[p == 1] = 1
@@ -2148,7 +2171,7 @@ qclogloglap = function(p, location.ald=0, scale.ald=1,
pclogloglap = function(q, location.ald=0, scale.ald=1,
- tau=0.5, kappa=sqrt(tau/(1-tau)), earg=list()) {
+ tau=0.5, kappa=sqrt(tau/(1-tau)), earg =list()) {
NN = max(length(q), length(location.ald), length(scale.ald),
length(kappa))
location.ald = rep(location.ald, len=NN); scale.ald= rep(scale.ald, len=NN)
@@ -2156,7 +2179,7 @@ pclogloglap = function(q, location.ald=0, scale.ald=1,
tau = rep(tau, len=NN);
indexTF = (q > 0) & (q < 1)
- qqq = cloglog(q[indexTF], earg=earg)
+ qqq = cloglog(q[indexTF], earg = earg)
ans = q
ans[indexTF] = palap(q=qqq, location=location.ald[indexTF],
scale=scale.ald[indexTF],
@@ -2175,6 +2198,2203 @@ pclogloglap = function(q, location.ald=0, scale.ald=1,
+# Iteration control for the alaplace2() family function: caps the
+# number of IRLS iterations (quantile-regression fits can be slow to
+# converge, so allow more than the vglm.control() default).
+alaplace2.control <- function(maxit = 100, ...)
+{
+    ctl <- list(maxit = maxit)
+    ctl
+}
+
+
+# alaplace2(): VGAM family function for the two-parameter asymmetric
+# Laplace distribution (ALD).  The asymmetry parameter kappa (or
+# equivalently tau) is held fixed; location and scale are modelled via
+# the 'llocation' and 'lscale' links.  The fitted locations are the
+# tau-quantiles of the response, so this family performs quantile
+# regression.  There are M = 2 * Mdiv2 linear predictors, interleaved
+# as (location, scale) pairs, one pair per tau value (or per response
+# column).
+ alaplace2 <- function(tau = NULL,
+                       llocation = "identity", lscale = "loge",
+                       elocation = list(), escale = list(),
+                       ilocation = NULL, iscale = NULL,
+                       kappa = sqrt(tau / (1-tau)),
+                       shrinkage.init = 0.95,
+                       parallelLocation = FALSE, digt = 4,
+                       sameScale = TRUE,
+                       dfmu.init = 3,
+                       intparloc = FALSE,
+                       method.init = 1,
+                       zero = -2) {
+
+  llocat <- llocation
+  elocat <- elocation
+  ilocat <- ilocation
+
+  # ---- argument checking ----
+  if (!is.Numeric(kappa, posit = TRUE))
+    stop("bad input for argument 'kappa'")
+  if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+      method.init > 4)
+    stop("argument 'method.init' must be 1, 2 or ... 4")
+  if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
+    stop("bad input for argument 'iscale'")
+  if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
+      shrinkage.init > 1)
+    stop("bad input for argument 'shrinkage.init'")
+  if (length(zero) &&
+      !(is.Numeric(zero, integer = TRUE) ||
+        is.character(zero )))
+    stop("bad input for argument 'zero'")
+
+  # kappa and tau are redundant parameterizations; they must agree.
+  if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
+    stop("arguments 'kappa' and 'tau' do not match")
+  if (mode(llocat) != "character" && mode(llocat) != "name")
+    llocat = as.character(substitute(llocat))
+  if (mode(lscale) != "character" && mode(lscale) != "name")
+    lscale = as.character(substitute(lscale))
+
+  if (!is.list(elocat)) elocat = list()
+  if (!is.list(escale)) escale = list()
+
+  if (!is.logical(intparloc) || length(intparloc) != 1)
+    stop("argument 'intparloc' must be a single logical")
+  if (!is.logical(sameScale) || length(sameScale) != 1)
+    stop("argument 'sameScale' must be a single logical")
+  if (!is.logical(parallelLocation) || length(parallelLocation) != 1)
+    stop("argument 'parallelLocation' must be a single logical")
+  fittedMean = FALSE  # hard-wired: @fitted gives quantiles, not the mean
+  if (!is.logical(fittedMean) || length(fittedMean) != 1)
+    stop("bad input for argument 'fittedMean'")
+
+  new("vglmff",
+  blurb = c("Two-parameter asymmetric Laplace distribution\n\n",
+            "Links: ",
+            namesof("location", llocat, earg = elocat), ", ",
+            namesof("scale", lscale, earg = escale), "\n\n",
+            "Mean: ",
+            "location + scale * (1/kappa - kappa) / sqrt(2)", "\n",
+            "Quantiles: location", "\n",
+            "Variance: scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
+  constraints = eval(substitute(expression({
+    # Constraint matrices implement 'intparloc', 'sameScale',
+    # 'parallelLocation' and 'zero'; an explicit user-supplied
+    # 'constraints' argument overrides them all (with a warning).
+    orig.constraints = constraints
+
+    .PARALLEL = .parallelLocation
+
+    onemat = matrix(1, Mdiv2, 1)
+    locatHmat1 = kronecker(if ( .intparloc ) onemat else
+                           diag(Mdiv2), rbind(1, 0))
+    scaleHmat1 = kronecker(if ( .sameScale ) onemat else
+                           diag(Mdiv2), rbind(0, 1))
+
+    locatHmatk = kronecker(if ( .PARALLEL ) onemat else
+                           diag(Mdiv2), rbind(1, 0))
+    scaleHmatk = scaleHmat1
+
+    constraints = cm.vgam(cbind(locatHmatk, scaleHmatk),
+                          x, .PARALLEL, constraints,
+                          intercept = FALSE)
+
+    if (names(constraints)[1] == "(Intercept)") {
+      constraints[["(Intercept)"]] = cbind(locatHmat1, scaleHmat1)
+    }
+
+    dotzero <- .zero
+    Musual <- 2
+    eval(negzero.expression)  # defines z_Index from 'dotzero'
+    constraints = cm.zero.vgam(constraints, x, z_Index, M)
+
+    if (length(orig.constraints)) {
+      if (!identical(orig.constraints, constraints)) {
+        warning("the inputted 'constraints' argument does not match with ",
+                "the 'zero', 'parallel', 'sameScale' arguments. ",
+                "Using the inputted 'constraints'.")
+        constraints = orig.constraints
+      }
+    }
+
+  }), list( .sameScale = sameScale,
+            .parallelLocation = parallelLocation,
+            .intparloc = intparloc,
+            .zero = zero ))),
+  infos = eval(substitute(function(...) {
+    list(Musual = 2,
+         zero = .zero)
+  }, list( .zero = zero ))),
+  initialize = eval(substitute(expression({
+    extra$Musual <- Musual <- 2
+    y <- cbind(y)
+    extra$ncoly <- ncoly <- ncol(y)
+    if ((ncoly > 1) && (length( .kappa ) > 1))
+      stop("response must be a vector if 'kappa' or 'tau' ",
+           "has a length greater than one")
+
+    extra$kappa = .kappa
+    extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
+
+    # One (location, scale) pair per tau value or response column.
+    extra$Mdiv2 = Mdiv2 = max(ncoly, length( .kappa ))
+    extra$M = M = Musual * Mdiv2
+    extra$n = n
+
+    extra$tau.names = tau.names =
+      paste("(tau = ", round(extra$tau, dig = .digt), ")", sep = "")
+    extra$Y.names = Y.names = if (ncoly > 1) dimnames(y)[[2]] else "y"
+    if (is.null(Y.names) || any(Y.names == ""))
+      extra$Y.names = Y.names = paste("y", 1:ncoly, sep = "")
+    extra$y.names = y.names =
+      if (ncoly > 1) paste(Y.names, tau.names, sep = "") else tau.names
+
+    extra$individual = FALSE
+
+    mynames1 <- paste("location", if (Mdiv2 > 1) 1:Mdiv2 else "", sep = "")
+    mynames2 <- paste("scale",    if (Mdiv2 > 1) 1:Mdiv2 else "", sep = "")
+    predictors.names <-
+        c(namesof(mynames1, .llocat, earg = .elocat, tag = FALSE),
+          namesof(mynames2, .lscale, earg = .escale, tag = FALSE))
+    predictors.names <-
+      predictors.names[interleave.VGAM(M, M = Musual)]
+
+    locat.init <- scale.init <- matrix(0, n, Mdiv2)
+    if (!length(etastart)) {
+      # Initial values, one column at a time:
+      #   1 = weighted mean,  2 = median,  3 = spline smooth,
+      #   4 = shrink the response towards its weighted mean.
+      for(jay in 1:Mdiv2) {
+        y.use <- if (ncoly > 1) y[, jay] else y
+        if ( .method.init == 1) {
+          locat.init[, jay] = weighted.mean(y.use, w)
+          scale.init[, jay] = sqrt(var(y.use) / 2)
+        } else if ( .method.init == 2) {
+          locat.init[, jay] = median(y.use)
+          # Bug fix: was 'abs(y - median(y.use))', which used the whole
+          # (possibly multi-column) response instead of the current
+          # column; methods 3 and 4 correctly use y.use.
+          scale.init[, jay] =
+            sqrt(sum(w * abs(y.use - median(y.use))) / (sum(w) * 2))
+        } else if ( .method.init == 3) {
+          Fit5 = vsmooth.spline(x = x[, min(ncol(x), 2)],
+                                y = y.use, w = w, df = .dfmu.init)
+          locat.init[, jay] = predict(Fit5, x = x[, min(ncol(x), 2)])$y
+          scale.init[, jay] =
+            sqrt(sum(w * abs(y.use - median(y.use))) / (sum(w) * 2))
+        } else {
+          use.this = weighted.mean(y.use, w)
+          locat.init[, jay] = (1 - .sinit) * y.use + .sinit * use.this
+          scale.init[, jay] =
+            sqrt(sum(w * abs(y.use - median(y.use ))) / (sum(w) * 2))
+        }
+      }
+
+      # User-supplied initial values override the computed ones.
+      if (length( .ilocat )) {
+        locat.init = matrix( .ilocat , n, Mdiv2, byrow = TRUE)
+      }
+      if (length( .iscale )) {
+        scale.init = matrix( .iscale , n, Mdiv2, byrow = TRUE)
+      }
+
+      etastart =
+        cbind(theta2eta(locat.init, .llocat, earg = .elocat),
+              theta2eta(scale.init, .lscale, earg = .escale))
+      etastart = etastart[, interleave.VGAM(M, M = Musual), drop = FALSE]
+    }
+  }), list( .method.init = method.init,
+            .dfmu.init = dfmu.init,
+            .sinit = shrinkage.init, .digt = digt,
+            .elocat = elocat, .escale = escale,
+            .llocat = llocat, .lscale = lscale, .kappa = kappa,
+            .ilocat = ilocat, .iscale = iscale ))),
+  inverse = eval(substitute(function(eta, extra = NULL) {
+    # Fitted values are the tau-quantiles (= locations); the ALD mean
+    # would need .fittedMean = TRUE, which is hard-wired FALSE above.
+    Mdiv2 = extra$Mdiv2
+    locat = eta2theta(eta[, 2 * (1:Mdiv2) - 1, drop = FALSE],
+                      .llocat, earg = .elocat)
+    dimnames(locat) = list(dimnames(eta)[[1]], extra$y.names)
+    myans <- if ( .fittedMean ) {
+      kappamat = matrix(extra$kappa, extra$n, extra$Mdiv2,
+                        byrow = TRUE)
+      Scale = eta2theta(eta[, 2 * (1:Mdiv2) , drop = FALSE],
+                        .lscale, earg = .escale)
+      locat + Scale * (1/kappamat - kappamat)
+    } else {
+      locat
+    }
+    dimnames(myans) <- list(dimnames(myans)[[1]], extra$y.names)
+    myans
+  }, list( .elocat = elocat, .llocat = llocat,
+           .escale = escale, .lscale = lscale,
+           .fittedMean = fittedMean,
+           .kappa = kappa ))),
+  last = eval(substitute(expression({
+    Musual <- extra$Musual
+
+    tmp34 = c(rep( .llocat , length = Mdiv2),
+              rep( .lscale , length = Mdiv2))
+    names(tmp34) = c(mynames1, mynames2)
+    tmp34 = tmp34[interleave.VGAM(M, M = Musual)]
+    misc$link = tmp34 # Already named
+
+    misc$earg = vector("list", M)
+    misc$Musual <- Musual
+    names(misc$earg) = names(misc$link)
+    for(ii in 1:Mdiv2) {
+      misc$earg[[Musual * ii - 1]] = .elocat
+      misc$earg[[Musual * ii    ]] = .escale
+    }
+
+    misc$expected = TRUE
+    extra$kappa = misc$kappa = .kappa
+    extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
+    misc$true.mu = .fittedMean # @fitted is not a true mu?
+    misc$intparloc = .intparloc
+
+    # Empirical coverage: % of responses at or below each fitted quantile.
+    extra$percentile = numeric(Mdiv2) # length(misc$kappa)
+    locat = as.matrix(locat)
+    for(ii in 1:Mdiv2) {
+      y.use <- if (ncoly > 1) y[, ii] else y
+      extra$percentile[ii] = 100 * weighted.mean(y.use <= locat[, ii], w)
+    }
+    names(extra$percentile) = y.names
+  }), list( .elocat = elocat, .llocat = llocat,
+            .escale = escale, .lscale = lscale,
+            .fittedMean = fittedMean,
+            .intparloc = intparloc,
+            .kappa = kappa ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    Mdiv2 = extra$Mdiv2
+    ymat = matrix(y, extra$n, extra$Mdiv2)
+    kappamat = matrix(extra$kappa, extra$n, extra$Mdiv2, byrow = TRUE)
+
+    locat = eta2theta(eta[, 2 * (1:Mdiv2) - 1, drop = FALSE],
+                      .llocat, earg = .elocat)
+    Scale = eta2theta(eta[, 2 * (1:Mdiv2)    , drop = FALSE],
+                      .lscale, earg = .escale)
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      sum(w * dalap(x = c(ymat), location = c(locat),
+                    scale = c(Scale), kappa = c(kappamat),
+                    log = TRUE))
+    }
+  }, list( .elocat = elocat, .llocat = llocat,
+           .escale = escale, .lscale = lscale,
+           .kappa = kappa ))),
+  vfamily = c("alaplace2"),
+  deriv = eval(substitute(expression({
+    Mdiv2 = extra$Mdiv2
+    ymat = matrix(y, n, Mdiv2)
+
+    locat = eta2theta(eta[, 2 * (1:(Mdiv2)) - 1, drop = FALSE],
+                      .llocat, earg = .elocat)
+    Scale = eta2theta(eta[, 2 * (1:(Mdiv2))    , drop = FALSE],
+                      .lscale, earg = .escale)
+
+    # ALD score vectors w.r.t. location and scale.
+    kappamat = matrix(extra$kappa, n, Mdiv2, byrow = TRUE)
+    zedd = abs(ymat - locat) / Scale
+    dl.dlocat = sqrt(2) * ifelse(ymat >= locat, kappamat, 1/kappamat) *
+                sign(ymat - locat) / Scale
+    dl.dscale = sqrt(2) * ifelse(ymat >= locat, kappamat, 1/kappamat) *
+                zedd / Scale - 1 / Scale
+    dlocat.deta = dtheta.deta(locat, .llocat, earg = .elocat)
+    dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
+
+    ans <- w * cbind(dl.dlocat * dlocat.deta,
+                     dl.dscale * dscale.deta)
+    ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+    ans
+  }), list( .escale = escale, .lscale = lscale,
+            .elocat = elocat, .llocat = llocat,
+            .kappa = kappa ))),
+  weight = eval(substitute(expression({
+    # Expected information is diagonal in (location, scale).
+    wz <- matrix(as.numeric(NA), n, M)
+    d2l.dlocat2 = 2 / Scale^2
+    d2l.dscale2 = 1 / Scale^2
+
+    wz[, 2*(1:Mdiv2) - 1] <- d2l.dlocat2 * dlocat.deta^2
+    wz[, 2*(1:Mdiv2)    ] <- d2l.dscale2 * dscale.deta^2
+
+    w * wz
+  }), list( .escale = escale, .lscale = lscale,
+            .elocat = elocat, .llocat = llocat ))))
+}
+
+
+
+
+
+
+
+
+
+
+# Iteration control for the alaplace1() family function: caps the
+# number of IRLS iterations.
+alaplace1.control <- function(maxit = 100, ...)
+{
+    ctl <- list(maxit = maxit)
+    ctl
+}
+
+
+
+# alaplace1(): VGAM family function for the one-parameter asymmetric
+# Laplace distribution: only the location is estimated; the scale is
+# fixed at 'Scale.arg' and the asymmetry kappa (or tau) is fixed too.
+# Fitted locations are the tau-quantiles, so this family performs
+# quantile regression with M = number of tau values (vector response)
+# or number of response columns.
+ alaplace1 = function(tau = NULL,
+                      llocation = "identity",
+                      elocation = list(),
+                      ilocation = NULL,
+                      kappa = sqrt(tau/(1-tau)),
+                      Scale.arg = 1,
+                      shrinkage.init = 0.95, parallelLocation = FALSE, digt = 4,
+                      dfmu.init = 3,
+                      intparloc = FALSE,
+                      method.init = 1) {
+
+  # ---- argument checking ----
+  if (!is.Numeric(kappa, posit = TRUE))
+    stop("bad input for argument 'kappa'")
+  # kappa and tau are redundant parameterizations; they must agree.
+  if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
+    stop("arguments 'kappa' and 'tau' do not match")
+  if (mode(llocation) != "character" && mode(llocation) != "name")
+    llocation = as.character(substitute(llocation))
+  if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+      method.init > 4)
+    stop("argument 'method.init' must be 1, 2 or ... 4")
+
+  if (!is.list(elocation)) elocation = list()
+
+  if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
+      shrinkage.init > 1)
+    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(Scale.arg, posit = TRUE))
+    stop("bad input for argument 'Scale.arg'")
+
+  if (!is.logical(parallelLocation) || length(parallelLocation) != 1)
+    stop("bad input for argument 'parallelLocation'")
+
+  fittedMean = FALSE  # hard-wired: @fitted gives quantiles, not the mean
+  if (!is.logical(fittedMean) || length(fittedMean) != 1)
+    stop("bad input for argument 'fittedMean'")
+
+  new("vglmff",
+  blurb = c("One-parameter asymmetric Laplace distribution\n\n",
+            "Links: ",
+            namesof("location", llocation, earg = elocation),
+            "\n", "\n",
+            "Mean: location + scale * (1/kappa - kappa) / sqrt(2)", "\n",
+            "Quantiles: location", "\n",
+            "Variance: scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
+  constraints = eval(substitute(expression({
+    # 'intparloc'/'parallelLocation' choose between one shared column
+    # and a full diag(M); explicit user constraints win (with warning).
+    orig.constraints = constraints
+
+    onemat = matrix(1, M, 1)
+    locatHmat1 = if ( .intparloc ) onemat else diag(M)
+    locatHmatk = if ( .parallelLocation ) onemat else diag(M)
+
+    constraints = cm.vgam(locatHmatk, x, .parallelLocation, constraints,
+                          intercept = FALSE)
+
+    if (names(constraints)[1] == "(Intercept)") {
+      constraints[["(Intercept)"]] = locatHmat1
+    }
+
+    if (length(orig.constraints)) {
+      if (!identical(orig.constraints, constraints)) {
+        warning("the inputted 'constraints' argument does not match with ",
+                "the 'parallel', 'sameScale' arguments. ",
+                "Using the inputted 'constraints'.")
+        constraints = orig.constraints
+      }
+    }
+
+  }), list( .parallelLocation = parallelLocation,
+            .intparloc = intparloc ))),
+  infos = eval(substitute(function(...) {
+    list(Musual = 1,
+         tau = .tau,
+         kappa = .kappa)
+  }, list( .kappa = kappa,
+           .tau = tau ))),
+  initialize = eval(substitute(expression({
+    extra$Musual <- Musual <- 1
+    y <- cbind(y)
+    extra$ncoly <- ncoly <- ncol(y)
+    if ((ncoly > 1) && (length( .kappa ) > 1 || length( .Scale.arg ) > 1))
+      stop("response must be a vector if 'kappa' or 'Scale.arg' ",
+           "has a length greater than one")
+
+    extra$kappa = .kappa
+    extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
+
+    # One linear predictor per tau value / scale / response column.
+    extra$M = M = max(length( .Scale.arg ),
+                      ncoly,
+                      length( .kappa )) # Recycle
+    extra$Scale = rep( .Scale.arg, length = M)
+    extra$kappa = rep( .kappa, length = M)
+    extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
+    extra$n = n
+
+    extra$tau.names = tau.names =
+      paste("(tau = ", round(extra$tau, dig = .digt), ")", sep = "")
+    extra$Y.names = Y.names = if (ncoly > 1) dimnames(y)[[2]] else "y"
+    if (is.null(Y.names) || any(Y.names == ""))
+      extra$Y.names = Y.names = paste("y", 1:ncoly, sep = "")
+    extra$y.names = y.names =
+      if (ncoly > 1) paste(Y.names, tau.names, sep = "") else tau.names
+
+    extra$individual = FALSE
+
+    mynames1 <- paste("location", if (M > 1) 1:M else "", sep = "")
+    predictors.names <-
+      c(namesof(mynames1, .llocat, earg = .elocat, tag = FALSE))
+
+    locat.init <- matrix(0, n, M)
+    if (!length(etastart)) {
+      # Initial locations, one column at a time:
+      #   1 = weighted mean,  2 = median,  3 = spline smooth,
+      #   4 = shrink the response towards its weighted mean.
+      for(jay in 1:M) {
+        y.use <- if (ncoly > 1) y[, jay] else y
+        if ( .method.init == 1) {
+          locat.init[, jay] = weighted.mean(y.use, w)
+        } else if ( .method.init == 2) {
+          locat.init[, jay] = median(y.use)
+        } else if ( .method.init == 3) {
+          Fit5 = vsmooth.spline(x = x[, min(ncol(x), 2)],
+                                y = y.use, w = w, df = .dfmu.init)
+          locat.init[, jay] = c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
+        } else {
+          use.this = weighted.mean(y.use, w)
+          locat.init[, jay] = (1- .sinit) * y.use + .sinit * use.this
+        }
+      }
+
+      # These steps do not depend on 'jay'; do them once after the loop
+      # (previously they were recomputed on every iteration, with the
+      # same final result but wasted work).
+      if (length( .ilocat )) {
+        locat.init = matrix( .ilocat , n, M, byrow = TRUE)
+      }
+      if ( .llocat == "loge") locat.init = abs(locat.init)
+      etastart = cbind(theta2eta(locat.init, .llocat, earg = .elocat))
+    }
+  }), list( .method.init = method.init,
+            .dfmu.init = dfmu.init,
+            .sinit = shrinkage.init, .digt = digt,
+            .elocat = elocation, .Scale.arg = Scale.arg,
+            .llocat = llocation, .kappa = kappa,
+            .ilocat = ilocation ))),
+  inverse = eval(substitute(function(eta, extra = NULL) {
+    # Fitted values are the tau-quantiles (= locations);
+    # .fittedMean is hard-wired FALSE above.
+    if ( .fittedMean ) {
+      kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+      location = eta2theta(eta, .llocat, earg = .elocat)
+      Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+      location + Scale * (1/kappamat - kappamat)
+    } else {
+      location = eta2theta(eta, .llocat, earg = .elocat)
+      if (length(location) > extra$n)
+        dimnames(location) = list(dimnames(eta)[[1]], extra$y.names)
+      location
+    }
+  }, list( .elocat = elocation, .llocat = llocation,
+           .fittedMean = fittedMean, .Scale.arg = Scale.arg,
+           .kappa = kappa ))),
+  last = eval(substitute(expression({
+    Musual <- extra$Musual
+
+    tmp34 = c(rep( .llocat , length = M))
+    names(tmp34) = mynames1
+    misc$link = tmp34 # Already named
+
+    misc$earg = vector("list", M)
+    misc$Musual <- Musual
+    names(misc$earg) = names(misc$link)
+    for(ii in 1:M) {
+      misc$earg[[ii]] = .elocat
+    }
+
+    misc$expected = TRUE
+    extra$kappa = misc$kappa = .kappa
+    extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
+    misc$true.mu = .fittedMean # @fitted is not a true mu?
+
+    # Empirical coverage: % of responses at or below each fitted quantile.
+    extra$percentile = numeric(M)
+    locat = as.matrix(location)
+    for(ii in 1:M) {
+      y.use <- if (ncoly > 1) y[, ii] else y
+      extra$percentile[ii] = 100 * weighted.mean(y.use <= locat[, ii], w)
+    }
+    names(extra$percentile) = y.names
+
+    extra$Scale.arg = .Scale.arg
+  }), list( .elocat = elocation,
+            .llocat = llocation,
+            .Scale.arg = Scale.arg, .fittedMean = fittedMean,
+            .kappa = kappa ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    ymat = matrix(y, extra$n, extra$M)
+    kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+    location = eta2theta(eta, .llocat, earg = .elocat)
+    Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      sum(w * dalap(x = c(ymat), location = c(location),
+                    scale = c(Scale), kappa = c(kappamat), log = TRUE))
+    }
+  }, list( .elocat = elocation,
+           .llocat = llocation,
+           .Scale.arg = Scale.arg, .kappa = kappa ))),
+  vfamily = c("alaplace1"),
+  deriv = eval(substitute(expression({
+    # ALD score w.r.t. location (scale is fixed, so only one score).
+    ymat = matrix(y, n, M)
+    Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+    location = eta2theta(eta, .llocat, earg = .elocat)
+    kappamat = matrix(extra$kappa, n, M, byrow = TRUE)
+    dl.dlocation = ifelse(ymat >= location, kappamat, 1/kappamat) *
+                   sqrt(2) * sign(ymat - location) / Scale
+    dlocation.deta = dtheta.deta(location, .llocat, earg = .elocat)
+    w * cbind(dl.dlocation * dlocation.deta)
+  }), list( .Scale.arg = Scale.arg, .elocat = elocation,
+            .llocat = llocation, .kappa = kappa ))),
+  weight = eval(substitute(expression({
+    # Expected information for the location of an ALD.
+    d2l.dlocation2 = 2 / Scale^2
+    wz = cbind(d2l.dlocation2 * dlocation.deta^2)
+
+    w * wz
+  }), list( .Scale.arg = Scale.arg,
+            .elocat = elocation, .llocat = llocation ))))
+}
+
+
+
+
+
+
+
+
+
+# Iteration control for the alaplace3() family function: caps the
+# number of IRLS iterations.
+alaplace3.control <- function(maxit = 100, ...)
+{
+    ctl <- list(maxit = maxit)
+    ctl
+}
+
+
+
+
+# alaplace3(): VGAM family function for the three-parameter asymmetric
+# Laplace distribution.  Location, scale AND the asymmetry parameter
+# kappa are all estimated (contrast alaplace1()/alaplace2(), where
+# kappa is fixed).  By default ('zero = 2:3') scale and kappa are
+# modelled as intercept-only.
+ alaplace3 = function(
+           llocation = "identity", lscale = "loge", lkappa = "loge",
+           elocation = list(), escale = list(), ekappa = list(),
+           ilocation = NULL, iscale = NULL, ikappa = 1.0,
+           method.init = 1, zero = 2:3) {
+  # Normalize possibly-unquoted link arguments to character strings.
+  if (mode(llocation) != "character" && mode(llocation) != "name")
+    llocation = as.character(substitute(llocation))
+  if (mode(lscale) != "character" && mode(lscale) != "name")
+    lscale = as.character(substitute(lscale))
+  if (mode(lkappa) != "character" && mode(lkappa) != "name")
+    lkappa = as.character(substitute(lkappa))
+
+  if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+      method.init > 2)
+    stop("argument 'method.init' must be 1 or 2")
+  if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+    stop("bad input for argument 'zero'")
+  if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
+    stop("bad input for argument 'iscale'")
+
+  if (!is.list(elocation)) elocation = list()
+  if (!is.list(escale)) escale = list()
+  if (!is.list(ekappa)) ekappa = list()
+
+  new("vglmff",
+  blurb = c("Three-parameter asymmetric Laplace distribution\n\n",
+          "Links: ",
+          namesof("location", llocation, earg = elocation), ", ",
+          namesof("scale", lscale, earg = escale), ", ",
+          namesof("kappa", lkappa, earg = ekappa),
+          "\n", "\n",
+          "Mean: location + scale * (1/kappa - kappa) / sqrt(2)",
+          "\n",
+          "Variance: Scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
+  constraints = eval(substitute(expression({
+    # 'zero' makes the chosen linear predictors intercept-only.
+    constraints = cm.zero.vgam(constraints, x, .zero, M)
+  }), list( .zero = zero ))),
+  initialize = eval(substitute(expression({
+    if (ncol(cbind(y)) != 1)
+      stop("response must be a vector or a one-column matrix")
+    predictors.names =
+      c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
+        namesof("scale", .lscale, earg = .escale, tag = FALSE),
+        namesof("kappa", .lkappa, earg = .ekappa, tag = FALSE))
+    if (!length(etastart)) {
+      # Initial values: method 1 uses median/SD-based moments;
+      # method 2 starts the location at the response itself.
+      kappa.init = if (length( .ikappa)) rep( .ikappa, len = n) else
+                  rep( 1.0, len = n)
+      if ( .method.init == 1) {
+        locat.init = median(y)
+        scale.init = sqrt(var(y) / 2)
+      } else {
+        locat.init = y
+        scale.init = sqrt(sum(w*abs(y-median(y ))) / (sum(w) *2))
+      }
+      locat.init = if (length( .ilocat)) rep( .ilocat, len = n) else
+                     rep(locat.init, len = n)
+      scale.init = if (length( .iscale)) rep( .iscale, len = n) else
+                     rep(scale.init, len = n)
+      etastart =
+        cbind(theta2eta(locat.init, .llocat, earg = .elocat),
+              theta2eta(scale.init, .lscale, earg = .escale),
+              theta2eta(kappa.init, .lkappa, earg = .ekappa))
+    }
+  }), list( .method.init = method.init,
+            .elocat = elocation, .escale = escale, .ekappa = ekappa,
+            .llocat = llocation, .lscale = lscale, .lkappa = lkappa,
+            .ilocat = ilocation, .iscale = iscale, .ikappa = ikappa ))),
+  inverse = eval(substitute(function(eta, extra = NULL) {
+    # Fitted values: the ALD mean (kappa is estimated here, unlike
+    # alaplace1()/alaplace2() which return quantiles).
+    location = eta2theta(eta[,1], .llocat, earg = .elocat)
+    Scale = eta2theta(eta[,2], .lscale, earg = .escale)
+    kappa = eta2theta(eta[,3], .lkappa, earg = .ekappa)
+    location + Scale * (1/kappa - kappa) / sqrt(2)
+  }, list( .elocat = elocation, .llocat = llocation,
+           .escale = escale, .lscale = lscale,
+           .ekappa = ekappa, .lkappa = lkappa ))),
+  last = eval(substitute(expression({
+    misc$link = c(location = .llocat,
+                  scale = .lscale,
+                  kappa = .lkappa)
+    misc$earg = list(location = .elocat,
+                     scale = .escale,
+                     kappa = .ekappa)
+    misc$expected = TRUE
+  }), list( .elocat = elocation, .llocat = llocation,
+            .escale = escale, .lscale = lscale,
+            .ekappa = ekappa, .lkappa = lkappa ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    location = eta2theta(eta[,1], .llocat, earg = .elocat)
+    Scale = eta2theta(eta[,2], .lscale, earg = .escale)
+    kappamat = eta2theta(eta[,3], .lkappa, earg = .ekappa)
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      sum(w * dalap(x=y, location=location,
+                    scale=Scale, kappa=kappamat, log = TRUE))
+    }
+  }, list( .elocat = elocation, .llocat = llocation,
+           .escale = escale, .lscale = lscale,
+           .ekappa = ekappa, .lkappa = lkappa ))),
+  vfamily = c("alaplace3"),
+  deriv = eval(substitute(expression({
+    # ALD score vectors w.r.t. location, scale and kappa.
+    location = eta2theta(eta[,1], .llocat, earg = .elocat)
+    Scale = eta2theta(eta[,2], .lscale, earg = .escale)
+    kappa = eta2theta(eta[,3], .lkappa, earg = .ekappa)
+    zedd = abs(y-location) / Scale
+    dl.dlocation = sqrt(2) * ifelse(y >= location, kappa, 1/kappa) *
+                   sign(y-location) / Scale
+    dl.dscale =  sqrt(2) * ifelse(y >= location, kappa, 1/kappa) *
+                 zedd / Scale - 1 / Scale
+    dl.dkappa =  1 / kappa - 2 * kappa / (1+kappa^2) -
+                 (sqrt(2) / Scale) *
+                 ifelse(y > location, 1, -1/kappa^2) * abs(y-location)
+    dlocation.deta = dtheta.deta(location, .llocat, earg = .elocat)
+    dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
+    dkappa.deta = dtheta.deta(kappa, .lkappa, earg = .ekappa)
+    w * cbind(dl.dlocation * dlocation.deta,
+              dl.dscale * dscale.deta,
+              dl.dkappa * dkappa.deta)
+  }), list( .escale = escale, .lscale = lscale,
+            .elocat = elocation, .llocat = llocation,
+            .ekappa = ekappa, .lkappa = lkappa ))),
+  weight = eval(substitute(expression({
+    # Expected information; nonzero (location,kappa) and (scale,kappa)
+    # cross terms, stored in VGAM's matrix-band format via iam().
+    d2l.dlocation2 = 2 / Scale^2
+    d2l.dscale2 = 1 / Scale^2
+    d2l.dkappa2 = 1 / kappa^2 + 4 / (1+kappa^2)^2
+    d2l.dkappadloc = -sqrt(8) / ((1+kappa^2) * Scale)
+    d2l.dkappadscale = -(1-kappa^2) / ((1+kappa^2) * kappa * Scale)
+    wz = matrix(0, nrow=n, dimm(M))
+    wz[,iam(1,1,M)] = d2l.dlocation2 * dlocation.deta^2
+    wz[,iam(2,2,M)] = d2l.dscale2 * dscale.deta^2
+    wz[,iam(3,3,M)] = d2l.dkappa2 * dkappa.deta^2
+    wz[,iam(1,3,M)] = d2l.dkappadloc * dkappa.deta * dlocation.deta
+    wz[,iam(2,3,M)] = d2l.dkappadscale * dkappa.deta * dscale.deta
+    w * wz
+  }), list( .escale = escale, .lscale = lscale,
+            .elocat = elocation, .llocat = llocation ))))
+}
+
+
+
+
+
+
+
+# dlaplace(): density of the (symmetric) Laplace distribution,
+#   f(x) = exp(-|x - location| / scale) / (2 * scale).
+# Arguments recycle in the usual way; 'log = TRUE' returns the
+# log-density.
+dlaplace = function(x, location=0, scale=1, log = FALSE) {
+  if (!is.logical(log.arg <- log))
+    stop("bad input for argument 'log'")
+  rm(log)  # so log() below refers to base::log
+  # Consistency with plaplace()/qlaplace()/rlaplace(): reject
+  # non-positive (or non-finite) scales instead of silently returning
+  # garbage such as "densities" exceeding 1/(2*scale) or NaN.
+  if (!is.numeric(scale) || !all(is.finite(scale)) || any(scale <= 0))
+    stop("argument 'scale' must be positive")
+
+  logdensity = (-abs(x-location)/scale) - log(2*scale)
+  if (log.arg) logdensity else exp(logdensity)
+}
+
+# plaplace(): CDF of the (symmetric) Laplace distribution.
+# Below the location the CDF is 0.5*exp(z); at or above it, 1-0.5*exp(-z),
+# where z = (q - location)/scale.
+plaplace = function(q, location=0, scale=1) {
+  if (!is.Numeric(scale, posit = TRUE))
+    stop("argument 'scale' must be positive")
+  n.max = max(length(q), length(location), length(scale))
+  q = rep(q, len = n.max)
+  location = rep(location, len = n.max)
+  scale = rep(scale, len = n.max)
+  zedd = (q - location) / scale
+  ifelse(zedd < 0, 0.5 * exp(zedd), 1 - 0.5 * exp(-zedd))
+}
+
+# qlaplace(): quantile function of the (symmetric) Laplace
+# distribution, by inverting the closed-form CDF; symmetric about the
+# location, so only the tail probability min(p, 1-p) is needed.
+qlaplace = function(p, location=0, scale=1) {
+  if (!is.Numeric(scale, posit = TRUE))
+    stop("argument 'scale' must be positive")
+  n.max = max(length(p), length(location), length(scale))
+  p = rep(p, len = n.max)
+  location = rep(location, len = n.max)
+  scale = rep(scale, len = n.max)
+  tail.prob = ifelse(p < 0.5, p, 1 - p)
+  location - sign(p - 0.5) * scale * log(2 * tail.prob)
+}
+
+# rlaplace(): random variates from the (symmetric) Laplace
+# distribution via the inverse-CDF method applied to runif(n).
+rlaplace = function(n, location=0, scale=1) {
+  if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
+    stop("bad input for argument 'n'")
+  if (!is.Numeric(scale, posit = TRUE)) stop("'scale' must be positive")
+  location = rep(location, len = n)
+  scale = rep(scale, len = n)
+  unif = runif(n)
+  tail.prob = ifelse(unif < 0.5, unif, 1 - unif)
+  location - sign(unif - 0.5) * scale * log(2 * tail.prob)
+}
+
+
+# laplace(): VGAM family function for the classical (symmetric)
+# two-parameter Laplace distribution.  Estimates location and scale by
+# full maximum likelihood; by default ('zero = 2') the scale is
+# modelled as intercept-only.
+ laplace = function(llocation = "identity", lscale = "loge",
+                    elocation = list(), escale = list(),
+                    ilocation = NULL, iscale = NULL,
+                    method.init = 1, zero = 2) {
+  # Normalize possibly-unquoted link arguments to character strings.
+  if (mode(llocation) != "character" && mode(llocation) != "name")
+    llocation = as.character(substitute(llocation))
+  if (mode(lscale) != "character" && mode(lscale) != "name")
+    lscale = as.character(substitute(lscale))
+  if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+      method.init > 3) stop("argument 'method.init' must be 1 or 2 or 3")
+  if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+    stop("bad input for argument 'zero'")
+  if (!is.list(elocation)) elocation = list()
+  if (!is.list(escale)) escale = list()
+  if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
+    stop("bad input for argument 'iscale'")
+
+  new("vglmff",
+  blurb = c("Two-parameter Laplace distribution\n\n",
+          "Links: ",
+          namesof("location", llocation, earg = elocation), ", ",
+          namesof("scale", lscale, earg = escale),
+          "\n", "\n",
+          "Mean: location", "\n",
+          "Variance: 2*scale^2"),
+  constraints = eval(substitute(expression({
+    # 'zero' makes the chosen linear predictors intercept-only.
+    constraints = cm.zero.vgam(constraints, x, .zero, M)
+  }), list( .zero = zero ))),
+  initialize = eval(substitute(expression({
+    if (ncol(cbind(y)) != 1)
+      stop("response must be a vector or a one-column matrix")
+    predictors.names =
+      c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
+        namesof("scale", .lscale, earg = .escale, tag = FALSE))
+    if (!length(etastart)) {
+      # Initial values: 1 = median + moment scale, 2 = weighted mean +
+      # moment scale, 3 = median + mean-absolute-deviation scale (the
+      # latter is the MLE form of the scale given the location).
+      if ( .method.init == 1) {
+        locat.init = median(y)
+        scale.init = sqrt(var(y) / 2)
+      } else if ( .method.init == 2) {
+        locat.init = weighted.mean(y, w)
+        scale.init = sqrt(var(y) / 2)
+      } else {
+        locat.init = median(y)
+        scale.init = sqrt(sum(w*abs(y-median(y ))) / (sum(w) *2))
+      }
+      locat.init = if (length( .ilocat)) rep( .ilocat, len = n) else
+                     rep(locat.init, len = n)
+      scale.init = if (length( .iscale)) rep( .iscale, len = n) else
+                     rep(scale.init, len = n)
+      etastart =
+        cbind(theta2eta(locat.init, .llocat, earg = .elocat),
+              theta2eta(scale.init, .lscale, earg = .escale))
+    }
+  }), list( .method.init = method.init,
+            .elocat = elocation, .escale = escale,
+            .llocat = llocation, .lscale = lscale,
+            .ilocat = ilocation, .iscale = iscale ))),
+  inverse = eval(substitute(function(eta, extra = NULL) {
+    # Fitted values: the location, which is also the mean (and median).
+    eta2theta(eta[,1], .llocat, earg = .elocat)
+  }, list( .elocat = elocation, .llocat = llocation ))),
+  last = eval(substitute(expression({
+    misc$link = c(location = .llocat, scale = .lscale)
+    misc$earg = list(location = .elocat, scale = .escale)
+    misc$expected = TRUE
+    misc$RegCondOK = FALSE # Save this for later
+  }), list( .escale = escale, .lscale = lscale,
+            .elocat = elocation, .llocat = llocation ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    location = eta2theta(eta[,1], .llocat, earg = .elocat)
+    Scale = eta2theta(eta[,2], .lscale, earg = .escale)
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      sum(w * dlaplace(x=y, location=location, scale=Scale, log = TRUE))
+    }
+  }, list( .escale = escale, .lscale = lscale,
+           .elocat = elocation, .llocat = llocation ))),
+  vfamily = c("laplace"),
+  deriv = eval(substitute(expression({
+    # Score vectors of the Laplace log-likelihood.
+    location = eta2theta(eta[,1], .llocat, earg = .elocat)
+    Scale = eta2theta(eta[,2], .lscale, earg = .escale)
+    zedd = abs(y-location) / Scale
+    dl.dlocation = sign(y-location) / Scale
+    dl.dscale =  zedd / Scale - 1/Scale
+    dlocation.deta = dtheta.deta(location, .llocat, earg = .elocat)
+    dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
+    w * cbind(dl.dlocation * dlocation.deta, dl.dscale * dscale.deta)
+  }), list( .escale = escale, .lscale = lscale,
+            .elocat = elocation, .llocat = llocation ))),
+  weight = eval(substitute(expression({
+    # Expected information is diagonal in (location, scale).
+    d2l.dlocation2 = d2l.dscale2 = 1 / Scale^2
+    wz = matrix(0, nrow=n, ncol=M) # diagonal
+    wz[,iam(1,1,M)] = d2l.dlocation2 * dlocation.deta^2
+    wz[,iam(2,2,M)] = d2l.dscale2 * dscale.deta^2
+    w * wz
+  }), list( .escale = escale, .lscale = lscale,
+            .elocat = elocation, .llocat = llocation ))))
+}
+
+
+
+# Control for the fff() (F-distribution) family function: the working
+# weights are obtained by simulation (nsimEIM), so keep them on exit.
+fff.control <- function(save.weight = TRUE, ...)
+{
+    ctl <- list(save.weight = save.weight)
+    ctl
+}
+
+ fff = function(link = "loge", earg = list(),
+ idf1 = NULL, idf2 = NULL, nsimEIM = 100, # ncp=0,
+ method.init = 1, zero = NULL) {
+ if (mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+ method.init > 2) stop("argument 'method.init' must be 1 or 2")
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ stop("bad input for argument 'zero'")
+ if (!is.list(earg)) earg = list()
+ if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 10)
+ stop("argument 'nsimEIM' should be an integer greater than 10")
+ ncp = 0
+ if (any(ncp != 0)) warning("not sure about ncp != 0 wrt dl/dtheta")
+
+ new("vglmff",
+ blurb = c("F-distribution\n\n",
+ "Links: ",
+ namesof("df1", link, earg = earg), ", ",
+ namesof("df2", link, earg = earg),
+ "\n", "\n",
+ "Mean: df2/(df2-2) provided df2>2 and ncp=0", "\n",
+ "Variance: ",
+ "2*df2^2*(df1+df2-2)/(df1*(df2-2)^2*(df2-4)) ",
+ "provided df2>4 and ncp=0"),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ predictors.names = c(namesof("df1", .link, earg = .earg, tag = FALSE),
+ namesof("df2", .link, earg = .earg, tag = FALSE))
+ if (!length(etastart)) {
+ if ( .method.init == 1) {
+ df2.init = b = 2*mean(y) / (mean(y)-1)
+ df1.init = 2*b^2*(b-2)/(var(y)*(b-2)^2 * (b-4) - 2*b^2)
+ if (df2.init < 4) df2.init = 5
+ if (df1.init < 2) df1.init = 3
+ } else {
+ df2.init = b = 2*median(y) / (median(y)-1)
+ summy = summary(y)
+ var.est = summy[5] - summy[2]
+ df1.init = 2*b^2*(b-2)/(var.est*(b-2)^2 * (b-4) - 2*b^2)
+ }
+ df1.init = if (length( .idf1)) rep( .idf1, len = n) else
+ rep(df1.init, len = n)
+ df2.init = if (length( .idf2)) rep( .idf2, len = n) else rep(1, len = n)
+ etastart = cbind(theta2eta(df1.init, .link, earg = .earg),
+ theta2eta(df2.init, .link, earg = .earg))
+ }
+ }), list( .method.init = method.init, .idf1=idf1, .earg = earg,
+ .idf2=idf2, .link = link ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ df2 = eta2theta(eta[,2], .link, earg = .earg)
+ ans = df2 * NA
+ ans[df2>2] = df2[df2>2] / (df2[df2>2]-2)
+ ans
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = c(df1 = .link, df2 = .link)
+ misc$earg = list(df1 = .earg, df2 = .earg)
+ misc$nsimEIM = .nsimEIM
+ misc$ncp = .ncp
+ }), list( .link = link, .earg = earg,
+ .ncp=ncp,
+ .nsimEIM = nsimEIM ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ df1 = eta2theta(eta[,1], .link, earg = .earg)
+ df2 = eta2theta(eta[,2], .link, earg = .earg)
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ sum(w * df(x=y, df1=df1, df2=df2, ncp= .ncp, log = TRUE))
+ }
+ }, list( .link = link, .earg = earg, .ncp=ncp ))),
+ vfamily = c("fff"),
+ deriv = eval(substitute(expression({
+ df1 = eta2theta(eta[,1], .link, earg = .earg)
+ df2 = eta2theta(eta[,2], .link, earg = .earg)
+ dl.ddf1 = 0.5*digamma(0.5*(df1+df2)) + 0.5 + 0.5*log(df1/df2) +
+ 0.5*log(y) - 0.5*digamma(0.5*df1) -
+ 0.5*(df1+df2)*(y/df2) / (1 + df1*y/df2) -
+ 0.5*log1p(df1*y/df2)
+ dl.ddf2 = 0.5*digamma(0.5*(df1+df2)) - 0.5*df1/df2 -
+ 0.5*digamma(0.5*df2) -
+ 0.5*(df1+df2) * (-df1*y/df2^2) / (1 + df1*y/df2) -
+ 0.5*log1p(df1*y/df2)
+ ddf1.deta = dtheta.deta(df1, .link, earg = .earg)
+ ddf2.deta = dtheta.deta(df2, .link, earg = .earg)
+ dthetas.detas = cbind(ddf1.deta, ddf2.deta)
+ w * dthetas.detas * cbind(dl.ddf1, dl.ddf2)
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ run.varcov = 0
+ ind1 = iam(NA, NA, M=M, both = TRUE, diag = TRUE)
+ for(ii in 1:( .nsimEIM )) {
+ ysim = rf(n=n, df1=df1, df2=df2)
+ dl.ddf1 = 0.5*digamma(0.5*(df1+df2)) + 0.5 + 0.5*log(df1/df2) +
+ 0.5*log(ysim) - 0.5*digamma(0.5*df1) -
+ 0.5*(df1+df2)*(ysim/df2) / (1 + df1*ysim/df2) -
+ 0.5*log1p(df1*ysim/df2)
+ dl.ddf2 = 0.5*digamma(0.5*(df1+df2)) - 0.5*df1/df2 -
+ 0.5*digamma(0.5*df2) -
+ 0.5*(df1+df2) * (-df1*ysim/df2^2)/(1 + df1*ysim/df2) -
+ 0.5*log1p(df1*ysim/df2)
+ rm(ysim)
+ temp3 = cbind(dl.ddf1, dl.ddf2)
+ run.varcov = ((ii-1) * run.varcov +
+ temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
+ }
+ wz = if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
+
+ wz = w * wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
+ wz
+ }), list( .link = link, .earg = earg, .nsimEIM = nsimEIM,
+ .ncp = ncp ))))
+}
+
+
+
+
+# Family function for a hypergeometric-type model for a proportion.
+# Exactly one of 'N' (total population size) or 'D' (number of
+# "successes" in the population) is supplied; the other is treated as
+# unknown.  The probability 'prob' is modelled on the 'lprob' link scale.
+ hyperg = function(N = NULL, D = NULL,
+ lprob = "logit", earg = list(),
+ iprob = NULL) {
+ if (mode(lprob) != "character" && mode(lprob) != "name")
+ lprob = as.character(substitute(lprob))
+ inputN = is.Numeric(N, positive = TRUE)
+ inputD = is.Numeric(D, positive = TRUE)
+ if (inputD && inputN)
+ stop("only one of 'N' and 'D' is to be inputted")
+ if (!inputD && !inputN)
+ stop("one of 'N' and 'D' needs to be inputted")
+ if (!is.list(earg)) earg = list()
+
+ new("vglmff",
+ blurb = c("Hypergeometric distribution\n\n",
+ "Link: ",
+ namesof("prob", lprob, earg = earg), "\n",
+ "Mean: D/N\n"),
+ initialize = eval(substitute(expression({
+ # The response may be a 0/1 vector (or factor), a proportion with
+ # prior weights w, or a two-column matrix of (successes, failures).
+ NCOL = function (x)
+ if (is.array(x) && length(dim(x)) > 1 ||
+ is.data.frame(x)) ncol(x) else as.integer(1)
+ if (NCOL(y) == 1) {
+ if (is.factor(y)) y = y != levels(y)[1]
+ nn = rep(1, len = n)
+ if (!all(y >= 0 & y <= 1))
+ stop("response values must be in [0, 1]")
+ mustart = (0.5 + w * y) / (1 + w)
+ no.successes = w * y
+ if (any(abs(no.successes - round(no.successes)) > 0.001))
+ stop("Number of successes must be integer-valued")
+ } else if (NCOL(y) == 2) {
+ if (any(abs(y - round(y)) > 0.001))
+ stop("Count data must be integer-valued")
+ nn = y[,1] + y[,2]
+ y = ifelse(nn > 0, y[,1]/nn, 0)
+ w = w * nn
+ mustart = (0.5 + nn * y) / (1 + nn)
+ mustart[mustart >= 1] = 0.95
+ } else
+ stop("Response not of the right form")
+
+ predictors.names = namesof("prob", .lprob, earg = .earg, tag = FALSE)
+ extra$Nvector = .N
+ extra$Dvector = .D
+ extra$Nunknown = length(extra$Nvector) == 0
+ if (!length(etastart)) {
+ init.prob = if (length( .iprob)) rep( .iprob, len = n) else
+ mustart
+ # NOTE(review): init.prob is on the probability scale but is
+ # assigned directly to etastart (the linear-predictor scale);
+ # theta2eta(init.prob, .lprob, ...) looks intended -- confirm.
+ etastart = matrix(init.prob, n, ncol(cbind(y )))
+
+ }
+ }), list( .lprob = lprob, .earg = earg, .N = N, .D = D,
+ .iprob = iprob ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta, .lprob, earg = .earg)
+ }, list( .lprob = lprob, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link <- c("prob" = .lprob)
+ misc$earg <- list("prob" = .earg)
+ misc$Dvector <- .D
+ misc$Nvector <- .N
+ }), list( .N = N, .D = D, .lprob = lprob, .earg = earg ))),
+ link = eval(substitute(function(mu, extra = NULL) {
+ theta2eta(mu, .lprob, earg = .earg)
+ }, list( .lprob = lprob, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ N = extra$Nvector
+ Dvec = extra$Dvector
+ prob = mu
+ yvec = w * y
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ # Two parameterizations: N unknown (work via Dvec) or N known.
+ if (extra$Nunknown) {
+ tmp12 = Dvec * (1-prob) / prob
+
+
+ sum(lgamma(1+tmp12) + lgamma(1+Dvec/prob-w) -
+ lgamma(1+tmp12-w+yvec) - lgamma(1+Dvec/prob))
+
+ } else {
+
+
+ sum(lgamma(1+N*prob) + lgamma(1+N*(1-prob)) -
+ lgamma(1+N*prob-yvec) - lgamma(1+N*(1-prob) -w + yvec))
+ }
+ }
+ }, list( .lprob = lprob, .earg = earg ))),
+ vfamily = c("hyperg"),
+ deriv = eval(substitute(expression({
+ prob = mu # equivalently, eta2theta(eta, .lprob, earg = .earg)
+ dprob.deta = dtheta.deta(prob, .lprob, earg = .earg)
+ Dvec = extra$Dvector
+ Nvec = extra$Nvector
+ yvec = w * y
+ # Score dl/dprob via digamma differences, per parameterization.
+ if (extra$Nunknown) {
+ tmp72 = -Dvec / prob^2
+ tmp12 = Dvec * (1-prob) / prob
+ dl.dprob = tmp72 * (digamma(1 + tmp12) + digamma(1 + Dvec/prob -w) -
+ digamma(1 + tmp12-w+yvec) - digamma(1 + Dvec/prob))
+ } else {
+ dl.dprob = Nvec * (digamma(1+Nvec*prob) - digamma(1+Nvec*(1-prob)) -
+ digamma(1+Nvec*prob-yvec) + digamma(1+Nvec*(1-prob)-w+yvec))
+ }
+ w * dl.dprob * dprob.deta
+ }), list( .lprob = lprob, .earg = earg ))),
+ weight = eval(substitute(expression({
+ # Second derivative of the log-likelihood; the working weights are
+ # floored at machine epsilon below to keep them positive.
+ if (extra$Nunknown) {
+ tmp722 = tmp72^2
+ tmp13 = 2*Dvec / prob^3
+ d2l.dprob2 = tmp722 * (trigamma(1 + tmp12) +
+ trigamma(1 + Dvec/prob - w) -
+ trigamma(1 + tmp12 - w + yvec) -
+ trigamma(1 + Dvec/prob)) +
+ tmp13 * (digamma(1 + tmp12) +
+ digamma(1 + Dvec/prob - w) -
+ digamma(1 + tmp12 - w + yvec) -
+ digamma(1 + Dvec/prob))
+ } else {
+ d2l.dprob2 = Nvec^2 * (trigamma(1+Nvec*prob) +
+ trigamma(1+Nvec*(1-prob)) -
+ trigamma(1+Nvec*prob-yvec) -
+ trigamma(1+Nvec*(1-prob)-w+yvec))
+ }
+ d2prob.deta2 = d2theta.deta2(prob, .lprob, earg = .earg)
+
+ wz = -(dprob.deta^2) * d2l.dprob2
+ wz = w * wz
+ wz[wz < .Machine$double.eps] = .Machine$double.eps
+ wz
+ }), list( .lprob = lprob, .earg = earg ))))
+}
+
+
+
+dbenini = function(x, shape, y0, log = FALSE) {
+  # Density of the 1-parameter Benini distribution with scale y0.
+  # Support is x > y0; the density is zero elsewhere.
+  if (!is.logical(log.arg <- log))
+    stop("bad input for argument 'log'")
+  rm(log)  # so base::log() is usable below
+
+  nn = max(length(x), length(shape), length(y0))
+  x = rep(x, len = nn)
+  shape = rep(shape, len = nn)
+  y0 = rep(y0, len = nn)
+
+  # Start at log(0) = -Inf and fill in only the in-support points.
+  logdens = rep(-Inf, len = nn)
+  insupp = (x > y0)
+  lograt = log(x[insupp] / y0[insupp])
+  logdens[insupp] = log(2 * shape[insupp]) - shape[insupp] * lograt^2 +
+                    log(lograt) - log(x[insupp])
+  if (log.arg) logdens else exp(logdens)
+}
+
+pbenini = function(q, shape, y0) {
+  # CDF of the Benini distribution:
+  # F(q) = 1 - exp(-shape * log(q/y0)^2) for q > y0, else 0.
+  if (!is.Numeric(q)) stop("bad input for argument 'q'")
+  if (!is.Numeric(shape, posit = TRUE)) stop("bad input for argument 'shape'")
+  if (!is.Numeric(y0, posit = TRUE)) stop("bad input for argument 'y0'")
+  nn = max(length(q), length(shape), length(y0))
+  q = rep(q, len = nn)
+  shape = rep(shape, len = nn)
+  y0 = rep(y0, len = nn)
+  ans = y0 * 0
+  above = (q > y0)
+  # -expm1(z) == 1 - exp(z), computed accurately for small |z|.
+  ans[above] = -expm1(-shape[above] * (log(q[above] / y0[above]))^2)
+  ans
+}
+
+qbenini = function(p, shape, y0) {
+  # Quantile function of the Benini distribution: the analytic inverse
+  # of pbenini().  Requires 0 < p < 1.
+  if (!is.Numeric(p, posit = TRUE) || any(p >= 1))
+    stop("bad input for argument 'p'")
+  if (!is.Numeric(shape, posit = TRUE)) stop("bad input for argument 'shape'")
+  if (!is.Numeric(y0, posit = TRUE)) stop("bad input for argument 'y0'")
+  # Solve p = 1 - exp(-shape * log(q/y0)^2) for q; log1p(-p) = log(1-p).
+  y0 * exp(sqrt(-log1p(-p) / shape))
+}
+
+rbenini = function(n, shape, y0) {
+  # Random generation by inversion: if U ~ Unif(0,1), then
+  # y0 * exp(sqrt(-log(U)/shape)) follows the Benini distribution.
+  if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
+    stop("bad input for argument 'n'")
+  if (!is.Numeric(shape, posit = TRUE)) stop("bad input for argument 'shape'")
+  if (!is.Numeric(y0, posit = TRUE)) stop("bad input for argument 'y0'")
+  y0 * exp(sqrt(-log(runif(n)) / shape))
+}
+
+# Family function for the 1-parameter Benini distribution with known
+# scale parameter y0; estimates 'shape' on the 'lshape' link scale.
+ benini = function(y0=stop("argument 'y0' must be specified"),
+ lshape = "loge", earg = list(),
+ ishape = NULL, method.init = 1) {
+ if (mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+ method.init > 2) stop("argument 'method.init' must be 1 or 2")
+ if (!is.Numeric(y0, allow = 1, posit = TRUE))
+ stop("bad input for argument 'y0'")
+ if (!is.list(earg)) earg = list()
+
+ new("vglmff",
+ blurb = c("1-parameter Benini distribution\n\n",
+ "Link: ",
+ namesof("shape", lshape, earg = earg),
+ "\n", "\n"),
+ initialize = eval(substitute(expression({
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ predictors.names =
+ c(namesof("shape", .lshape, earg = .earg, tag = FALSE))
+ extra$y0 = .y0
+ if (min(y) <= extra$y0) stop("argument 'y0' is too large")
+ if (!length(etastart)) {
+ # Match sample quartiles against the CDF to get a starting shape:
+ # from F(q) = 1 - exp(-shape*log(q/y0)^2), shape = -log(1-p)/log(q/y0)^2.
+ probs = (1:3) / 4
+ qofy= quantile(rep(y, times=w), probs=probs) # fails if w != integer
+ if ( .method.init == 1) {
+ # NOTE(review): log(qofy) is not divided by y0 although the CDF
+ # uses log(q/y0); harmless as a mere initial value -- confirm.
+ shape.init = mean(-log1p(-probs) / (log(qofy))^2)
+ } else {
+ shape.init = median(-log1p(-probs) / (log(qofy))^2)
+ }
+ shape.init = if (length( .ishape)) rep( .ishape, len = n) else
+ rep(shape.init, len = n)
+ etastart = cbind(theta2eta(shape.init, .lshape, earg = .earg))
+ }
+ }), list( .method.init = method.init, .ishape=ishape, .lshape = lshape, .earg = earg,
+ .y0=y0 ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ # Fitted mean of the Benini distribution, in closed form via
+ # incomplete-gamma (pgamma) terms.
+ shape = eta2theta(eta, .lshape, earg = .earg)
+ temp = 1/(4*shape)
+ extra$y0 * exp(temp) *
+ ((sqrt(pi) * (1 - pgamma(temp, 0.5 ))) / (2*sqrt(shape)) +
+ 1 - pgamma(temp, 1))
+ }, list( .lshape = lshape, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = c(shape = .lshape)
+ misc$earg = list(shape = .earg )
+ }), list( .lshape = lshape, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ shape = eta2theta(eta, .lshape, earg = .earg)
+ y0 = extra$y0
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ sum(w * dbenini(x=y, shape=shape, y0=y0, log = TRUE))
+ }
+ }, list( .lshape = lshape, .earg = earg ))),
+ vfamily = c("benini"),
+ deriv = eval(substitute(expression({
+ shape = eta2theta(eta, .lshape, earg = .earg)
+ y0 = extra$y0
+ # Score: dl/dshape = 1/shape - log(y/y0)^2.
+ dl.dshape = 1/shape - (log(y/y0))^2
+ dshape.deta = dtheta.deta(shape, .lshape, earg = .earg)
+ w * dl.dshape * dshape.deta
+ }), list( .lshape = lshape, .earg = earg ))),
+ weight = eval(substitute(expression({
+ # Expected information: E[-d2l/dshape2] = 1/shape^2.
+ d2l.dshape2 = 1 / shape^2
+ wz = d2l.dshape2 * dshape.deta^2
+ w * wz
+ }), list( .lshape = lshape, .earg = earg ))))
+}
+
+
+
+
+# Density of the Poisson-lognormal distribution, evaluated by numerical
+# integration over the mixing (log-lambda) variable; observations at or
+# above 'bigx' use an asymptotic approximation instead, which avoids
+# overflow of gamma(x+1) and is faster.  Extra arguments in '...' are
+# passed on to integrate().
+dpolono = function(x, meanlog=0, sdlog = 1, bigx=Inf, ...) {
+ if (!is.Numeric(x)) stop("bad input for argument 'x'")
+ if (!is.Numeric(meanlog)) stop("bad input for argument 'meanlog'")
+ if (!is.Numeric(sdlog, posit = TRUE)) stop("bad input for argument 'sdlog'")
+ if (length(bigx) != 1)
+ stop("bad input for argument 'bigx'")
+ if (bigx < 10)
+ warning("argument 'bigx' is probably too small")
+ N = max(length(x), length(meanlog), length(sdlog))
+ x = rep(x, len=N); meanlog = rep(meanlog, len=N); sdlog = rep(sdlog, len=N)
+ ans = x * 0
+ # Unnormalized integrand on the t = log(lambda) scale.
+ integrand = function(t, x, meanlog, sdlog)
+ exp(t*x - exp(t) - 0.5*((t-meanlog)/sdlog)^2)
+ for(ii in 1:N) {
+ # The density is nonzero only on the nonnegative integers.
+ if (x[ii] == round(x[ii]) && x[ii] >= 0) {
+ if (x[ii] >= bigx) {
+ # Large-x approximation (avoids the numerical integral).
+ zedd = (log(x[ii])-meanlog[ii]) / sdlog[ii]
+ temp = 1 + (zedd^2 + log(x[ii]) - meanlog[ii] -
+ 1) / (2*x[ii]*(sdlog[ii])^2)
+ ans[ii] = temp * exp(-0.5*zedd^2)/(sqrt(2*pi)*sdlog[ii] * x[ii])
+ } else {
+ temp = integrate(f=integrand, lower=-Inf, upper=Inf, x = x[ii],
+ meanlog=meanlog[ii], sdlog = sdlog[ii], ...)
+ if (temp$message == "OK") {
+ ans[ii] = temp$value / (sqrt(2*pi) * sdlog[ii] *
+ exp(lgamma(x[ii]+1)))
+ } else {
+ # Leave NA and warn rather than abort the whole vector.
+ warning("could not integrate (numerically) observation ",ii)
+ ans[ii] = NA
+ }
+ }
+ }
+ }
+ ans
+}
+
+
+rpolono = function(n, meanlog=0, sdlog = 1) {
+  # Poisson-lognormal variates: a Poisson draw whose mean is itself
+  # a lognormal random variable.
+  rate = rlnorm(n = n, meanlog = meanlog, sdlog = sdlog)
+  rpois(n = n, lambda = rate)
+}
+
+
+
+
+
+
+
+
+
+
+
+dtriangle = function(x, theta, lower=0, upper=1, log = FALSE) {
+  # Density of the triangle distribution on (lower, upper) with mode
+  # theta.  Invalid parameter combinations yield NaN.
+  if (!is.logical(log.arg <- log))
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  nn = max(length(x), length(theta), length(lower), length(upper))
+  x = rep(x, len = nn)
+  theta = rep(theta, len = nn)
+  lower = rep(lower, len = nn)
+  upper = rep(upper, len = nn)
+
+  # Normalizing factors of the rising and falling linear pieces.
+  area.lhs = ((upper - lower) * (theta - lower))
+  area.rhs = ((upper - lower) * (upper - theta))
+  logdens = rep(-Inf, len = nn)
+  on.lhs = (lower < x) & (x <= theta)
+  on.rhs = (theta <= x) & (x < upper)
+  logdens[on.lhs] = log(2 * (x[on.lhs] - lower[on.lhs]) / area.lhs[on.lhs])
+  logdens[on.rhs] = log(2 * (upper[on.rhs] - x[on.rhs]) / area.rhs[on.rhs])
+  logdens[lower >= upper] = NaN
+  logdens[lower > theta] = NaN
+  logdens[upper < theta] = NaN
+  if (log.arg) logdens else exp(logdens)
+}
+
+
+rtriangle = function(n, theta, lower=0, upper=1) {
+  # Triangle-distribution variates by inversion on each linear piece;
+  # the piece is chosen with probability proportional to its area.
+  if (!is.Numeric(n, integ = TRUE,allow = 1)) stop("bad input for argument 'n'")
+  if (!is.Numeric(theta)) stop("bad input for argument 'theta'")
+  if (!is.Numeric(lower)) stop("bad input for argument 'lower'")
+  if (!is.Numeric(upper)) stop("bad input for argument 'upper'")
+  if (!all(lower < theta & theta < upper))
+    stop("lower < theta < upper values are required")
+  nn = n
+  lower = rep(lower, len = nn)
+  upper = rep(upper, len = nn)
+  theta = rep(theta, len = nn)
+  # Draw both candidate roots first so the RNG stream is used in a
+  # fixed order, then select per observation.
+  root.lhs = sqrt(runif(n))
+  root.rhs = sqrt(runif(n))
+  ifelse(runif(n) < (theta - lower) / (upper - lower),
+         lower + (theta - lower) * root.lhs,
+         upper - (upper - theta) * root.rhs)
+}
+
+
+qtriangle = function(p, theta, lower=0, upper=1) {
+  # Quantile function of the triangle distribution: invert each
+  # quadratic CDF piece analytically.
+  if (!is.Numeric(p, posit = TRUE)) stop("bad input for argument 'p'")
+  if (!is.Numeric(theta)) stop("bad input for argument 'theta'")
+  if (!is.Numeric(lower)) stop("bad input for argument 'lower'")
+  if (!is.Numeric(upper)) stop("bad input for argument 'upper'")
+  if (!all(lower < theta & theta < upper))
+    stop("lower < theta < upper values are required")
+
+  nn = max(length(p), length(theta), length(lower), length(upper))
+  p = rep(p, len = nn)
+  theta = rep(theta, len = nn)
+  lower = rep(lower, len = nn)
+  upper = rep(upper, len = nn)
+
+  bad = (p < 0) | (p > 1)
+  if (any(bad))
+    stop("bad input for 'p'")
+
+  ans = as.numeric(NA) * p
+
+  # Left piece: p <= F(theta) = (theta-lower)/(upper-lower).
+  lhs = (p <= (theta - lower)/(upper - lower))
+  sq = p * (upper - lower) * (theta - lower)
+  ans[lhs] = lower[lhs] + sqrt(sq[lhs])
+
+  # Right piece: two algebraic roots exist; keep the one in [0, 1].
+  rhs = (p >= (theta - lower)/(upper - lower))
+  if (any(rhs)) {
+    prel = (p - (theta-lower)/(upper-lower)) / (1 -
+           (theta-lower)/(upper-lower))
+    roots = cbind(1 - sqrt(1 - prel), 1 + sqrt(1 - prel))
+    roots = roots[rhs, , drop = FALSE]
+    pick = ifelse(roots[, 1] >= 0 & roots[, 1] <= 1, roots[, 1], roots[, 2])
+    ans[rhs] = theta[rhs] + pick * (upper - theta)[rhs]
+  }
+  ans
+}
+
+
+ptriangle = function(q, theta, lower=0, upper=1) {
+  # CDF of the triangle distribution: quadratic rise on (lower, theta],
+  # quadratic approach to 1 on [theta, upper).
+  if (!is.Numeric(q)) stop("bad input for argument 'q'")
+  if (!is.Numeric(theta)) stop("bad input for argument 'theta'")
+  if (!is.Numeric(lower)) stop("bad input for argument 'lower'")
+  if (!is.Numeric(upper)) stop("bad input for argument 'upper'")
+  if (!all(lower < theta & theta < upper))
+    stop("lower < theta < upper values are required")
+
+  nn = max(length(q), length(theta), length(lower), length(upper))
+  q = rep(q, len = nn)
+  theta = rep(theta, len = nn)
+  lower = rep(lower, len = nn)
+  upper = rep(upper, len = nn)
+  ans = q * 0
+
+  frac = (q - lower)^2 / ((upper - lower) * (theta - lower))
+  lhs = (lower <= q & q <= theta)
+  ans[lhs] = frac[lhs]
+
+  rhs = (theta <= q & q <= upper)
+  frac = (q - theta) / (upper - theta)
+  ans[rhs] = ((theta - lower)/(upper - lower))[rhs] +
+             (frac * (2 - frac) * (upper - theta) / (upper - lower))[rhs]
+  ans[q >= upper] = 1
+  ans
+}
+
+
+
+# Family function for the triangle distribution on (lower, upper);
+# estimates the mode 'theta' by maximum likelihood.  The default
+# 'elogit' link maps (lower, upper) onto the whole real line.
+ triangle = function(lower=0, upper=1,
+ link = "elogit", earg = if (link == "elogit")
+ list(min = lower, max = upper) else list(), itheta = NULL)
+{
+ if (!is.Numeric(lower)) stop("bad input for argument 'lower'")
+ if (!is.Numeric(upper)) stop("bad input for argument 'upper'")
+ if (!all(lower < upper))
+ stop("lower < upper values are required")
+ if (length(itheta) && !is.Numeric(itheta))
+ stop("bad input for 'itheta'")
+
+ if (mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+ if (!is.list(earg)) earg = list()
+
+ new("vglmff",
+ blurb = c(
+ "Triangle distribution\n\n",
+ "Link: ",
+ namesof("theta", link, earg = earg)),
+ initialize = eval(substitute(expression({
+ y = as.numeric(y)
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ extra$lower = rep( .lower, len = n)
+ extra$upper = rep( .upper, len = n)
+
+ # Responses must lie strictly inside (lower, upper).
+ if (any(y <= extra$lower | y >= extra$upper))
+ stop("some y values in [lower,upper] detected")
+ predictors.names = namesof("theta", .link, earg = .earg, tag = FALSE)
+ if (!length(etastart)) {
+ # Initialize the mode at 'itheta' if given, else the weighted mean.
+ Theta.init = if (length( .itheta)) .itheta else {
+ weighted.mean(y, w)
+ }
+ Theta.init = rep(Theta.init, length = n)
+ etastart = theta2eta(Theta.init, .link, earg = .earg )
+ }
+ }), list( .link = link, .earg = earg, .itheta=itheta,
+ .upper=upper, .lower=lower ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ # Fitted mean, written as the two integral pieces of E[Y]; this
+ # simplifies algebraically to (lower + upper + theta) / 3.
+ Theta = eta2theta(eta, .link, earg = .earg )
+ lower = extra$lower
+ upper = extra$upper
+ mu = ((Theta^3 / 3 - lower * Theta^2 / 2 +
+ lower^3 / 6) / (Theta - lower) +
+ ((Theta^3 / 3 - upper * Theta^2 / 2 +
+ upper^3 / 6) / (upper - Theta))) * 2 / (upper-lower)
+ mu
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = c(theta = .link)
+ misc$earg = list(theta = .earg)
+ misc$expected = TRUE
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ Theta = eta2theta(eta, .link, earg = .earg )
+ lower = extra$lower
+ upper = extra$upper
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ sum(w * dtriangle(x=y, theta=Theta, lower=lower,
+ upper=upper, log = TRUE))
+ }
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("triangle"),
+ deriv = eval(substitute(expression({
+ Theta = eta2theta(eta, .link, earg = .earg )
+ dTheta.deta = dtheta.deta(Theta, .link, earg = .earg )
+ pos = y > Theta
+ neg = y < Theta
+ lower = extra$lower
+ upper = extra$upper
+ # dl/dtheta is piecewise constant in y on either side of the mode;
+ # observations with y == Theta contribute zero.
+ dl.dTheta = 0 * y
+ dl.dTheta[neg] = -1 / (Theta[neg]-lower[neg])
+ dl.dTheta[pos] = 1 / (upper[pos]-Theta[pos])
+ dl.dTheta * dTheta.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ # Expected information for theta.
+ d2l.dTheta2 = 1 / ((Theta-lower)*(upper-Theta))
+ wz = dTheta.deta^2 * d2l.dTheta2
+ w * wz
+ }), list( .link = link, .earg = earg ))))
+}
+
+
+
+
+
+
+
+adjust0.loglaplace1 = function(ymat, y, w, rep0) {
+  # Replace nonpositive entries of ymat by a small positive value so
+  # that a log-type link can be applied: half the smallest positive
+  # response, capped at rep0.
+  smallest.pos = min(y[y > 0])
+  ymat[ymat <= 0] = min(smallest.pos / 2, rep0)
+  ymat
+}
+
+
+# Control parameters for loglaplace1() fitting: just the iteration cap.
+loglaplace1.control <- function(maxit = 300, ...) {
+  list(maxit = maxit)
+}
+
+# Family function for one-parameter (log-)Laplace quantile regression:
+# the tau-quantiles of a positive response are modelled by fitting an
+# asymmetric Laplace distribution to the link-transformed response,
+# one linear predictor per requested quantile.
+ loglaplace1 = function(tau = NULL,
+ llocation = "loge",
+ elocation = list(),
+ ilocation = NULL,
+ kappa = sqrt(tau/(1-tau)),
+ Scale.arg = 1,
+ shrinkage.init = 0.95, parallelLocation = FALSE, digt = 4,
+ dfmu.init = 3,
+ rep0 = 0.5, # 0.0001,
+ minquantile = 0, maxquantile = Inf,
+ method.init = 1, zero = NULL) {
+
+ if (length(minquantile) != 1)
+ stop("bad input for argument 'minquantile'")
+ if (length(maxquantile) != 1)
+ stop("bad input for argument 'maxquantile'")
+ if (!is.Numeric(rep0, posit = TRUE, allow = 1) || rep0 > 1)
+ stop("bad input for argument 'rep0'")
+ if (!is.Numeric(kappa, posit = TRUE))
+ stop("bad input for argument 'kappa'")
+ if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
+ stop("arguments 'kappa' and 'tau' do not match")
+ if (mode(llocation) != "character" && mode(llocation) != "name")
+ llocation = as.character(substitute(llocation))
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+ method.init > 4) stop("argument 'method.init' must be 1, 2 or ... 4")
+ if (!is.list(elocation)) elocation = list()
+ if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
+ shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
+ if (length(zero) &&
+ !(is.Numeric(zero, integer = TRUE, posit = TRUE) || is.character(zero )))
+ stop("bad input for argument 'zero'")
+ if (!is.Numeric(Scale.arg, posit = TRUE))
+ stop("bad input for argument 'Scale.arg'")
+ if (!is.logical(parallelLocation) || length(parallelLocation) != 1)
+ stop("bad input for argument 'parallelLocation'")
+ fittedMean = FALSE
+ if (!is.logical(fittedMean) || length(fittedMean) != 1)
+ stop("bad input for argument 'fittedMean'")
+
+ # Build the "Quantiles:" blurb string: the link expression with
+ # ", inverse = TRUE)" substituted for its final character.
+ mystring0 = namesof("location", llocation, earg = elocation)
+ mychars = substring(mystring0, fi=1:nchar(mystring0), la=1:nchar(mystring0))
+ mychars[nchar(mystring0)] = ", inverse = TRUE)"
+ mystring1 = paste(mychars, collapse = "")
+
+ new("vglmff",
+ blurb = c("One-parameter ",
+ if (llocation == "loge") "log-Laplace" else
+ c(llocation, "-Laplace"),
+ " distribution\n\n",
+ "Links: ", mystring0, "\n", "\n",
+ "Quantiles: ", mystring1),
+ constraints = eval(substitute(expression({
+ constraints = cm.vgam(matrix(1, M, 1), x, .parallelLocation,
+ constraints, intercept = FALSE)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .parallelLocation = parallelLocation,
+ .Scale.arg = Scale.arg, .zero = zero ))),
+ initialize = eval(substitute(expression({
+ # One linear predictor per (kappa, Scale) pair; shorter of the two
+ # is recycled.
+ extra$M = M = max(length( .Scale.arg ), length( .kappa )) # Recycle
+ extra$Scale = rep( .Scale.arg, length = M)
+ extra$kappa = rep( .kappa, length = M)
+ extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
+ if (ncol(y <- cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ extra$n = n
+ extra$y.names = y.names =
+ paste("tau = ", round(extra$tau, dig = .digt), sep = "")
+ extra$individual = FALSE
+ predictors.names = namesof(paste("quantile(", y.names, ")", sep = ""),
+ .llocat, earg = .elocat, tag = FALSE)
+
+
+ # Disabled sanity checks on the proportion of zero responses.
+ if (FALSE) {
+ if (min(y) < 0) stop("negative response values detected")
+ if ((prop.0. <- weighted.mean(1*(y == 0), w)) >= min(extra$tau))
+ stop("sample proportion of 0s == ", round(prop.0., dig=4),
+ " > minimum 'tau' value. Choose larger values for 'tau'.")
+ if ( .rep0 == 0.5 &&
+ (ave.tau <- (weighted.mean(1*(y <= 0), w) +
+ weighted.mean(1*(y <= 1), w))/2) >= min(extra$tau))
+ warning("the minimum 'tau' value should be greater than ",
+ round(ave.tau, dig=4))
+ }
+
+ if (!length(etastart)) {
+ # Five initialization strategies for the location (quantile).
+ if ( .method.init == 1) {
+ locat.init = quantile(rep(y, w), probs= extra$tau) + 1/16
+ } else if ( .method.init == 2) {
+ locat.init = weighted.mean(y, w)
+ } else if ( .method.init == 3) {
+ locat.init = median(y)
+ } else if ( .method.init == 4) {
+ Fit5 = vsmooth.spline(x = x[, min(ncol(x), 2)], y = y, w = w,
+ df = .dfmu.init)
+ locat.init = c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
+ } else {
+ use.this = weighted.mean(y, w)
+ locat.init = (1- .sinit)*y + .sinit * use.this
+ }
+ locat.init = if (length( .ilocat)) rep( .ilocat, len = M) else
+ rep(locat.init, len = M)
+ locat.init = matrix(locat.init, n, M, byrow = TRUE)
+ if ( .llocat == "loge")
+ locat.init = abs(locat.init)
+ etastart =
+ cbind(theta2eta(locat.init, .llocat, earg = .elocat))
+ }
+ }), list( .method.init = method.init,
+ .dfmu.init = dfmu.init, .rep0 = rep0,
+ .sinit = shrinkage.init, .digt = digt,
+ .elocat = elocation, .Scale.arg = Scale.arg,
+ .llocat = llocation, .kappa = kappa,
+ .ilocat = ilocation ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ location.y = eta2theta(eta, .llocat, earg = .elocat)
+ if ( .fittedMean ) {
+ # Unreachable: fittedMean is hard-coded FALSE in the function body.
+ stop("Yet to do: handle 'fittedMean = TRUE'")
+ kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+ Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ location.y + Scale * (1/kappamat - kappamat)
+ } else {
+ if (length(location.y) > extra$n)
+ dimnames(location.y) = list(dimnames(eta)[[1]], extra$y.names)
+ location.y
+ }
+ # Clamp the fitted quantiles into [minquantile, maxquantile] before
+ # returning (this runs after the if/else above).
+ location.y[location.y < .minquantile] = .minquantile
+ location.y[location.y > .maxquantile] = .maxquantile
+ location.y
+ }, list( .elocat = elocation, .llocat = llocation,
+ .minquantile = minquantile, .maxquantile = maxquantile,
+ .fittedMean = fittedMean, .Scale.arg = Scale.arg,
+ .kappa = kappa ))),
+ last = eval(substitute(expression({
+ misc$link = c(location = .llocat)
+ misc$earg = list(location = .elocat)
+ misc$expected = TRUE
+ extra$kappa = misc$kappa = .kappa
+ extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
+ extra$Scale.arg = .Scale.arg
+ misc$true.mu = .fittedMean # @fitted is not a true mu?
+ misc$rep0 = .rep0
+ misc$minquantile = .minquantile
+ misc$maxquantile = .maxquantile
+ # Achieved sample percentiles, for comparison against 'tau'.
+ extra$percentile = numeric(length(misc$kappa))
+ location.y = as.matrix(location.y)
+ for(ii in 1:length(misc$kappa))
+ extra$percentile[ii] = 100 * weighted.mean(y <= location.y[,ii], w)
+ }), list( .elocat = elocation, .llocat = llocation,
+ .Scale.arg = Scale.arg, .fittedMean = fittedMean,
+ .minquantile = minquantile, .maxquantile = maxquantile,
+ .rep0 = rep0, .kappa = kappa ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+ Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ ymat = matrix(y, extra$n, extra$M)
+
+
+ # Asymmetric Laplace log-likelihood on the link-transformed scale;
+ # nonpositive responses are first nudged to a small positive value.
+ if ( .llocat == "loge")
+ ymat = adjust0.loglaplace1(ymat=ymat, y = y, w = w, rep0= .rep0)
+ w.mat = theta2eta(ymat, .llocat, earg = .elocat) # e.g., logoff()
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ALDans = sum(w * dalap(x = c(w.mat), location = c(eta),
+ scale = c(Scale.w), kappa = c(kappamat),
+ log = TRUE))
+ ALDans
+ }
+ }, list( .elocat = elocation, .llocat = llocation,
+ .rep0 = rep0,
+ .Scale.arg = Scale.arg, .kappa = kappa ))),
+ vfamily = c("loglaplace1"),
+ deriv = eval(substitute(expression({
+ ymat = matrix(y, n, M)
+ Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ location.w = eta
+ location.y = eta2theta(location.w, .llocat, earg = .elocat)
+ kappamat = matrix(extra$kappa, n, M, byrow = TRUE)
+
+ ymat = adjust0.loglaplace1(ymat=ymat, y = y, w = w, rep0= .rep0)
+ w.mat = theta2eta(ymat, .llocat, earg = .elocat) # e.g., logit()
+ # Score of the asymmetric Laplace w.r.t. its location parameter,
+ # evaluated on the working (link-transformed) scale.
+ zedd = abs(w.mat-location.w) / Scale.w
+ dl.dlocation = ifelse(w.mat >= location.w, kappamat, 1/kappamat) *
+ sqrt(2) * sign(w.mat-location.w) / Scale.w
+ dlocation.deta = dtheta.deta(location.w, "identity", earg = .elocat)
+ w * cbind(dl.dlocation * dlocation.deta)
+ }), list( .Scale.arg = Scale.arg, .elocat = elocation,
+ .rep0 = rep0,
+ .llocat = llocation, .kappa = kappa ))),
+ weight = eval(substitute(expression({
+ # Expected information of the asymmetric Laplace location.
+ d2l.dlocation2 = 2 / Scale.w^2
+ wz = cbind(d2l.dlocation2 * dlocation.deta^2)
+ w * wz
+ }), list( .Scale.arg = Scale.arg,
+ .elocat = elocation, .llocat = llocation ))))
+}
+
+
+
+
+
+# Control parameters for loglaplace2() fitting: keep the working
+# weights on the fitted object (needed for simulated Fisher scoring).
+loglaplace2.control <- function(save.weight = TRUE, ...) {
+  list(save.weight = save.weight)
+}
+
+ loglaplace2 = function(tau = NULL,
+ llocation = "loge", lscale = "loge",
+ elocation = list(), escale = list(),
+ ilocation = NULL, iscale = NULL,
+ kappa = sqrt(tau/(1-tau)),
+ shrinkage.init = 0.95, parallelLocation = FALSE, digt = 4,
+ sameScale = TRUE,
+ dfmu.init = 3,
+ rep0 = 0.5, nsimEIM = NULL,
+ method.init = 1, zero = "(1 + M/2):M") {
+ warning("it is best to use loglaplace1()")
+
+ if (length(nsimEIM) &&
+ (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 10))
+ stop("argument 'nsimEIM' should be an integer greater than 10")
+ if (!is.Numeric(rep0, posit = TRUE, allow = 1) || rep0 > 1)
+ stop("bad input for argument 'rep0'")
+ if (!is.Numeric(kappa, posit = TRUE))
+ stop("bad input for argument 'kappa'")
+ if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
+ stop("arguments 'kappa' and 'tau' do not match")
+ if (mode(llocation) != "character" && mode(llocation) != "name")
+ llocation = as.character(substitute(llocation))
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+ method.init > 4) stop("argument 'method.init' must be 1, 2 or ... 4")
+ if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
+ stop("bad input for argument 'iscale'")
+ if (!is.list(elocation)) elocation = list()
+ if (!is.list(escale)) escale = list()
+ if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
+ shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
+ if (length(zero) &&
+ !(is.Numeric(zero, integer = TRUE, posit = TRUE) || is.character(zero )))
+ stop("bad input for argument 'zero'")
+ if (!is.logical(sameScale) || length(sameScale) != 1)
+ stop("bad input for argument 'sameScale'")
+ if (!is.logical(parallelLocation) || length(parallelLocation) != 1)
+ stop("bad input for argument 'parallelLocation'")
+ fittedMean = FALSE
+ if (!is.logical(fittedMean) || length(fittedMean) != 1)
+ stop("bad input for argument 'fittedMean'")
+
+ if (llocation != "loge")
+ stop("argument 'llocation' must be \"loge\"")
+
+ new("vglmff",
+ blurb = c("Two-parameter log-Laplace distribution\n\n",
+ "Links: ",
+ namesof("location", llocation, earg = elocation), ", ",
+ namesof("scale", lscale, earg = escale),
+ "\n", "\n",
+ "Mean: zz location + scale * (1/kappa - kappa) / sqrt(2)", "\n",
+ "Quantiles: location", "\n",
+ "Variance: zz scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
+ constraints = eval(substitute(expression({
+ .ZERO = .zero
+ if (is.character( .ZERO)) .ZERO = eval(parse(text = .ZERO))
+ .PARALLEL = .parallelLocation
+ parelHmat = if (is.logical( .PARALLEL ) && .PARALLEL )
+ matrix(1, M/2, 1) else diag(M/2)
+ scaleHmat = if (is.logical( .sameScale ) && .sameScale )
+ matrix(1, M/2, 1) else diag(M/2)
+ mycmatrix = cbind(rbind( parelHmat, 0*parelHmat),
+ rbind(0*scaleHmat, scaleHmat))
+ constraints=cm.vgam(mycmatrix, x, .PARALLEL, constraints, int = FALSE)
+ constraints = cm.zero.vgam(constraints, x, .ZERO, M)
+
+ if ( .PARALLEL && names(constraints)[1] == "(Intercept)") {
+ parelHmat = diag(M/2)
+ mycmatrix = cbind(rbind( parelHmat, 0*parelHmat),
+ rbind(0*scaleHmat, scaleHmat))
+ constraints[["(Intercept)"]] = mycmatrix
+ }
+ if (is.logical( .sameScale) && .sameScale &&
+ names(constraints)[1] == "(Intercept)") {
+ temp3 = constraints[["(Intercept)"]]
+ temp3 = cbind(temp3[,1:(M/2)], rbind(0*scaleHmat, scaleHmat))
+ constraints[["(Intercept)"]] = temp3
+ }
+ }), list( .sameScale=sameScale, .parallelLocation = parallelLocation,
+ .zero = zero ))),
+ initialize = eval(substitute(expression({
+ extra$kappa = .kappa
+ extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
+ if (ncol(y <- cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ extra$M = M = 2 * length(extra$kappa)
+ extra$n = n
+ extra$y.names = y.names =
+ paste("tau = ", round(extra$tau, dig = .digt), sep = "")
+ extra$individual = FALSE
+ predictors.names =
+ c(namesof(paste("quantile(", y.names, ")", sep = ""),
+ .llocat, earg = .elocat, tag = FALSE),
+ namesof(if (M == 2) "scale" else paste("scale", 1:(M/2), sep = ""),
+ .lscale, earg = .escale, tag = FALSE))
+ if (weighted.mean(1 * (y < 0.001), w) >= min(extra$tau))
+ stop("sample proportion of 0s > minimum 'tau' value. ",
+ "Choose larger values for 'tau'.")
+
+ if (!length(etastart)) {
+ if ( .method.init == 1) {
+ locat.init.y = weighted.mean(y, w)
+ scale.init = sqrt(var(y) / 2)
+ } else if ( .method.init == 2) {
+ locat.init.y = median(y)
+ scale.init = sqrt(sum(w*abs(y-median(y))) / (sum(w) *2))
+ } else if ( .method.init == 3) {
+ Fit5 = vsmooth.spline(x = x[, min(ncol(x), 2)], y = y, w = w,
+ df = .dfmu.init)
+ locat.init.y = c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
+ scale.init = sqrt(sum(w*abs(y-median(y))) / (sum(w) *2))
+ } else {
+ use.this = weighted.mean(y, w)
+ locat.init.y = (1- .sinit)*y + .sinit * use.this
+ scale.init = sqrt(sum(w*abs(y-median(y ))) / (sum(w) *2))
+ }
+ locat.init.y = if (length( .ilocat)) rep( .ilocat, len = n) else
+ rep(locat.init.y, len = n)
+ locat.init.y = matrix(locat.init.y, n, M/2)
+ scale.init = if (length( .iscale)) rep( .iscale, len = n) else
+ rep(scale.init, len = n)
+ scale.init = matrix(scale.init, n, M/2)
+ etastart =
+ cbind(theta2eta(locat.init.y, .llocat, earg = .elocat),
+ theta2eta(scale.init, .lscale, earg = .escale))
+ }
+ }), list( .method.init = method.init,
+ .dfmu.init = dfmu.init,
+ .sinit = shrinkage.init, .digt = digt,
+ .elocat = elocation, .escale = escale,
+ .llocat = llocation, .lscale = lscale, .kappa = kappa,
+ .ilocat = ilocation, .iscale = iscale ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ location.y = eta2theta(eta[,1:(extra$M/2), drop = FALSE],
+ .llocat, earg = .elocat)
+ if ( .fittedMean ) {
+ kappamat = matrix(extra$kappa, extra$n, extra$M/2, byrow = TRUE)
+ Scale.y = eta2theta(eta[,(1+extra$M/2):extra$M], .lscale, earg = .escale)
+ location.y + Scale.y * (1/kappamat - kappamat)
+ } else {
+ dimnames(location.y) = list(dimnames(eta)[[1]], extra$y.names)
+ location.y
+ }
+ }, list( .elocat = elocation, .llocat = llocation,
+ .fittedMean = fittedMean, .escale = escale, .lscale = lscale,
+ .kappa = kappa ))),
+ last = eval(substitute(expression({
+ misc$link = c(location = .llocat, scale = .lscale)
+ misc$earg = list(location = .elocat, scale = .escale)
+ misc$expected = TRUE
+ extra$kappa = misc$kappa = .kappa
+ extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
+ misc$true.mu = .fittedMean # @fitted is not a true mu?
+ misc$nsimEIM = .nsimEIM
+ misc$rep0 = .rep0
+ extra$percentile = numeric(length(misc$kappa))
+ location = as.matrix(location.y)
+ for(ii in 1:length(misc$kappa))
+ extra$percentile[ii] = 100 * weighted.mean(y <= location.y[,ii], w)
+ }), list( .elocat = elocation, .llocat = llocation,
+ .escale = escale, .lscale = lscale,
+ .fittedMean = fittedMean,
+ .nsimEIM = nsimEIM, .rep0 = rep0,
+ .kappa = kappa ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ kappamat = matrix(extra$kappa, extra$n, extra$M/2, byrow = TRUE)
+ Scale.w = eta2theta(eta[,(1+extra$M/2):extra$M], .lscale, earg = .escale)
+ ymat = matrix(y, extra$n, extra$M/2)
+ ymat[ymat <= 0] = min(min(y[y > 0]), .rep0) # Adjust for 0s
+ ell.mat = matrix(c(dloglaplace(x = c(ymat),
+ location.ald = c(eta[,1:(extra$M/2)]),
+ scale.ald = c(Scale.w),
+ kappa = c(kappamat), log = TRUE)),
+ extra$n, extra$M/2)
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ sum(w * ell.mat)
+ }
+ }, list( .elocat = elocation, .llocat = llocation,
+ .escale = escale, .lscale = lscale,
+ .rep0 = rep0, .kappa = kappa ))),
+ vfamily = c("loglaplace2"),
+ deriv = eval(substitute(expression({
+ ymat = matrix(y, n, M/2)
+ Scale.w = eta2theta(eta[,(1+extra$M/2):extra$M], .lscale, earg = .escale)
+ location.w = eta[,1:(extra$M/2), drop = FALSE]
+ location.y = eta2theta(location.w, .llocat, earg = .elocat)
+ kappamat = matrix(extra$kappa, n, M/2, byrow = TRUE)
+ w.mat = ymat
+ w.mat[w.mat <= 0] = min(min(w.mat[w.mat > 0]), .rep0) # Adjust for 0s
+ w.mat= theta2eta(w.mat, .llocat, earg = .elocat) # w.mat=log(w.mat)
+ zedd = abs(w.mat-location.w) / Scale.w
+ dl.dlocation = sqrt(2) *
+ ifelse(w.mat >= location.w, kappamat, 1/kappamat) *
+ sign(w.mat-location.w) / Scale.w
+ dl.dscale = sqrt(2) *
+ ifelse(w.mat >= location.w, kappamat, 1/kappamat) *
+ zedd / Scale.w - 1 / Scale.w
+ dlocation.deta = dtheta.deta(location.w, .llocat, earg = .elocat)
+ dscale.deta = dtheta.deta(Scale.w, .lscale, earg = .escale)
+ w * cbind(dl.dlocation * dlocation.deta,
+ dl.dscale * dscale.deta)
+ }), list( .escale = escale, .lscale = lscale,
+ .elocat = elocation, .llocat = llocation,
+ .rep0 = rep0, .kappa = kappa ))),
+ weight = eval(substitute(expression({
+ run.varcov = 0
+ ind1 = iam(NA, NA, M=M, both = TRUE, diag = TRUE)
+ dthetas.detas = cbind(dlocation.deta, dscale.deta)
+ if (length( .nsimEIM )) {
+ for(ii in 1:( .nsimEIM )) {
+ wsim = matrix(rloglap(n*M/2, loc = c(location.w),
+ sca = c(Scale.w),
+ kappa = c(kappamat)), n, M/2)
+ zedd = abs(wsim-location.w) / Scale.w
+ dl.dlocation = sqrt(2) *
+ ifelse(wsim >= location.w, kappamat, 1/kappamat) *
+ sign(wsim-location.w) / Scale.w
+ dl.dscale = sqrt(2) *
+ ifelse(wsim >= location.w, kappamat, 1/kappamat) *
+ zedd / Scale.w - 1 / Scale.w
+
+ rm(wsim)
+ temp3 = cbind(dl.dlocation, dl.dscale) # n x M matrix
+ run.varcov = ((ii-1) * run.varcov +
+ temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
+ }
+ wz = if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
+
+ wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
+ wz = w * matrix(wz, n, dimm(M))
+ wz
+ } else {
+ d2l.dlocation2 = 2 / (Scale.w * location.w)^2
+ d2l.dscale2 = 1 / Scale.w^2
+ wz = cbind(d2l.dlocation2 * dlocation.deta^2,
+ d2l.dscale2 * dscale.deta^2)
+ w * wz
+ }
+ }), list( .elocat = elocation, .escale = escale,
+ .llocat = llocation, .lscale = lscale,
+ .nsimEIM = nsimEIM) )))
+}
+
+
+
+
+# Control parameters for logitlaplace1() fits: raise the default IRLS
+# iteration limit to 300.  Any extra arguments in '...' are ignored.
+logitlaplace1.control <- function(maxit = 300, ...) {
+ list(maxit = maxit)
+}
+
+
+# Nudge boundary responses into the open interval (0, 1) so that the
+# logit (or similar) transform applied by logitlaplace1() stays finite.
+# Values <= 0 are raised to a small positive value; values >= 1 are
+# lowered to just below 1.  'rep01' is scaled by the prior weights 'w'
+# of the offending observations.  Returns 'ymat' with the same shape.
+adjust01.logitlaplace1 = function(ymat, y, w, rep01) {
+ # Range of the strictly interior response values.
+ rangey01 = range(y[(y > 0) & (y < 1)])
+ ymat[ymat <= 0] = min(rangey01[1] / 2, rep01 / w[y <= 0])
+ ymat[ymat >= 1] = max((1 + rangey01[2]) / 2, 1 - rep01 / w[y >= 1])
+ ymat
+}
+
+# logitlaplace1(): VGAM family function for one-parameter Laplace
+# quantile regression of a proportion-type response in (0, 1), modelled
+# on the 'llocation' (default logit) scale.  One linear predictor per
+# element of 'kappa' (equivalently 'tau'); the scale parameter is held
+# fixed at 'Scale.arg'.  Boundary responses (exact 0s and 1s) are
+# adjusted into (0, 1) by adjust01.logitlaplace1() using 'rep01'.
+ logitlaplace1 = function(tau = NULL,
+ llocation = "logit",
+ elocation = list(),
+ ilocation = NULL,
+ kappa = sqrt(tau/(1-tau)),
+ Scale.arg = 1,
+ shrinkage.init = 0.95, parallelLocation = FALSE, digt = 4,
+ dfmu.init = 3,
+ rep01 = 0.5,
+ method.init = 1, zero = NULL) {
+
+ # Argument validation.
+ if (!is.Numeric(rep01, posit = TRUE, allow = 1) || rep01 > 0.5)
+ stop("bad input for argument 'rep01'")
+ if (!is.Numeric(kappa, posit = TRUE))
+ stop("bad input for argument 'kappa'")
+ if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
+ stop("arguments 'kappa' and 'tau' do not match")
+ if (mode(llocation) != "character" && mode(llocation) != "name")
+ llocation = as.character(substitute(llocation))
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+ method.init > 4) stop("argument 'method.init' must be 1, 2 or ... 4")
+ if (!is.list(elocation)) elocation = list()
+ if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
+ shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
+ if (length(zero) &&
+ !(is.Numeric(zero, integer = TRUE, posit = TRUE) ||
+ is.character(zero )))
+ stop("bad input for argument 'zero'")
+ if (!is.Numeric(Scale.arg, posit = TRUE))
+ stop("bad input for argument 'Scale.arg'")
+ if (!is.logical(parallelLocation) || length(parallelLocation) != 1)
+ stop("bad input for argument 'parallelLocation'")
+ # fittedMean is hard-wired off for this family (quantiles are fitted).
+ fittedMean = FALSE
+ if (!is.logical(fittedMean) || length(fittedMean) != 1)
+ stop("bad input for argument 'fittedMean'")
+
+ # Build a display string for the inverse link, used in the blurb.
+ mystring0 = namesof("location", llocation, earg = elocation)
+ mychars = substring(mystring0, fi = 1:nchar(mystring0),
+ la = 1:nchar(mystring0))
+ mychars[nchar(mystring0)] = ", inverse = TRUE)"
+ mystring1 = paste(mychars, collapse = "")
+
+ new("vglmff",
+ blurb = c("One-parameter ", llocation, "-Laplace distribution\n\n",
+ "Links: ", mystring0, "\n", "\n",
+ "Quantiles: ", mystring1),
+ constraints = eval(substitute(expression({
+ constraints = cm.vgam(matrix(1, M, 1), x, .parallelLocation,
+ constraints, intercept = FALSE)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .parallelLocation = parallelLocation,
+ .Scale.arg = Scale.arg, .zero = zero ))),
+ initialize = eval(substitute(expression({
+ extra$M = M = max(length( .Scale.arg ), length( .kappa )) # Recycle
+ extra$Scale = rep( .Scale.arg, length = M)
+ extra$kappa = rep( .kappa, length = M)
+ extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
+ if (ncol(y <- cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ extra$n = n
+ extra$y.names = y.names =
+ paste("tau = ", round(extra$tau, dig = .digt), sep = "")
+ extra$individual = FALSE
+ predictors.names =
+ namesof(paste("quantile(", y.names, ")", sep = ""),
+ .llocat, earg = .elocat, tag = FALSE)
+
+ # Sanity checks: response must be a proportion with not too many
+ # boundary values relative to the requested quantiles.
+ if (all(y == 0 | y == 1)) stop("response cannot be all 0s or 1s")
+ if (min(y) < 0) stop("negative response values detected")
+ if (max(y) > 1) stop("response values greater than 1 detected")
+ if ((prop.0. <- weighted.mean(1*(y == 0), w)) >= min(extra$tau))
+ stop("sample proportion of 0s == ", round(prop.0., dig=4),
+ " > minimum 'tau' value. Choose larger values for 'tau'.")
+ if ((prop.1. <- weighted.mean(1*(y == 1), w)) >= max(extra$tau))
+ stop("sample proportion of 1s == ", round(prop.1., dig=4),
+ " < maximum 'tau' value. Choose smaller values for 'tau'.")
+ if (!length(etastart)) {
+ # Initial location: sample quantiles, median, or shrunk response.
+ if ( .method.init == 1) {
+ locat.init = quantile(rep(y, w), probs= extra$tau)
+ } else if ( .method.init == 2) {
+ locat.init = weighted.mean(y, w)
+ locat.init = median(rep(y, w))
+ } else if ( .method.init == 3) {
+ use.this = weighted.mean(y, w)
+ locat.init = (1- .sinit)*y + use.this * .sinit
+ } else {
+ stop("this option not implemented")
+ }
+
+
+ locat.init = if (length( .ilocat)) rep( .ilocat, len = M) else
+ rep(locat.init, len = M)
+ locat.init = matrix(locat.init, n, M, byrow = TRUE)
+ locat.init = abs(locat.init)
+ etastart =
+ cbind(theta2eta(locat.init, .llocat, earg = .elocat))
+ }
+ }), list( .method.init = method.init,
+ .dfmu.init = dfmu.init,
+ .sinit = shrinkage.init, .digt = digt,
+ .elocat = elocation, .Scale.arg = Scale.arg,
+ .llocat = llocation, .kappa = kappa,
+ .ilocat = ilocation ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ # Fitted values are the estimated quantiles on the response scale.
+ location.y = eta2theta(eta, .llocat, earg = .elocat)
+ if ( .fittedMean ) {
+ stop("Yet to do: handle 'fittedMean = TRUE'")
+ kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+ Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ location.y + Scale * (1/kappamat - kappamat)
+ } else {
+ if (length(location.y) > extra$n)
+ dimnames(location.y) = list(dimnames(eta)[[1]], extra$y.names)
+ location.y
+ }
+ }, list( .elocat = elocation, .llocat = llocation,
+ .fittedMean = fittedMean, .Scale.arg = Scale.arg,
+ .kappa = kappa ))),
+ last = eval(substitute(expression({
+ misc$link = c(location = .llocat)
+ misc$earg = list(location = .elocat)
+ misc$expected = TRUE
+ extra$kappa = misc$kappa = .kappa
+ extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
+ extra$Scale.arg = .Scale.arg
+ misc$true.mu = .fittedMean # @fitted is not a true mu?
+ misc$rep01 = .rep01
+
+ # Achieved sample percentile below each fitted quantile curve.
+ extra$percentile = numeric(length(misc$kappa))
+ location.y = eta2theta(eta, .llocat, earg = .elocat)
+ location.y = as.matrix(location.y)
+ for(ii in 1:length(misc$kappa))
+ extra$percentile[ii] = 100 * weighted.mean(y <= location.y[,ii], w)
+
+ }), list( .elocat = elocation, .llocat = llocation,
+ .Scale.arg = Scale.arg, .fittedMean = fittedMean,
+ .rep01 = rep01,
+ .kappa = kappa ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ # ALD log-likelihood computed on the transformed (link) scale.
+ kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+ Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ ymat = matrix(y, extra$n, extra$M)
+ ymat = adjust01.logitlaplace1(ymat=ymat, y = y, w = w, rep01= .rep01)
+ w.mat = theta2eta(ymat, .llocat, earg = .elocat) # e.g., logit()
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ALDans =
+ sum(w * dalap(x = c(w.mat), location = c(eta),
+ scale = c(Scale.w), kappa = c(kappamat),
+ log = TRUE))
+ ALDans
+ }
+ }, list( .elocat = elocation, .llocat = llocation,
+ .rep01 = rep01,
+ .Scale.arg = Scale.arg, .kappa = kappa ))),
+ vfamily = c("logitlaplace1"),
+ deriv = eval(substitute(expression({
+ ymat = matrix(y, n, M)
+ Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ location.w = eta
+ kappamat = matrix(extra$kappa, n, M, byrow = TRUE)
+ ymat = adjust01.logitlaplace1(ymat=ymat, y = y, w = w, rep01= .rep01)
+ w.mat = theta2eta(ymat, .llocat, earg = .elocat) # e.g., logit()
+ zedd = abs(w.mat-location.w) / Scale.w
+ dl.dlocation = ifelse(w.mat >= location.w, kappamat, 1/kappamat) *
+ sqrt(2) * sign(w.mat-location.w) / Scale.w
+ # "identity" because the model is linear on the eta scale here;
+ # NOTE(review): looks deliberate (location.w IS eta) -- confirm.
+ dlocation.deta = dtheta.deta(location.w, "identity", earg = .elocat)
+ w * cbind(dl.dlocation * dlocation.deta)
+ }), list( .Scale.arg = Scale.arg, .elocat = elocation,
+ .rep01 = rep01,
+ .llocat = llocation, .kappa = kappa ))),
+ weight = eval(substitute(expression({
+ # Expected information for an ALD location parameter: 2 / scale^2.
+ d2l.dlocation2 = 2 / Scale.w^2
+ wz = cbind(d2l.dlocation2 * dlocation.deta^2)
+ w * wz
+ }), list( .Scale.arg = Scale.arg,
+ .elocat = elocation, .llocat = llocation ))))
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/R/family.rcam.R b/R/family.rcam.R
new file mode 100644
index 0000000..5e2386d
--- /dev/null
+++ b/R/family.rcam.R
@@ -0,0 +1,1316 @@
+# These functions are
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ # rcam(): fit a row-column association model to a matrix response 'y'
+ # (>= 3 rows and columns) by building an internal data frame of row and
+ # column indicator terms plus constraint matrices, then calling vglm()
+ # (Rank == 0) or rrvglm() (Rank > 0).  'y' may also be a previously
+ # fitted rrvglm object, which is then reused/summarized.
+ # Fixes vs. the posted version: the mail-archive mangling of the S4
+ # "@" operator (" at ") is restored, and a dead duplicate assignment of
+ # 'controlfun' is removed.
+ rcam <- function(y, Rank = 0,
+ family = poissonff,
+ Musual = NULL,
+ Index.corner = if (!Rank) NULL else 1 + Musual * (1:Rank),
+ rprefix = "Row.",
+ cprefix = "Col.",
+ szero = if (!Rank) NULL else
+ {if (Musual == 1) 1 else
+ setdiff(1:(Musual*ncol(y)),
+ c( # 1:Musual,
+ 1 + (1:ncol(y)) * Musual,
+ Index.corner))},
+ summary.arg = FALSE, h.step = 0.0001,
+ rbaseline = 1, cbaseline = 1, ...) {
+
+ if (!is.character(rprefix))
+ stop("argument 'rprefix' must be character")
+ if (!is.character(cprefix))
+ stop("argument 'cprefix' must be character")
+
+ # Resolve 'family' to a "vglmff" object (may be a name or a function).
+ if (is.character(family))
+ family <- get(family)
+ if (is.function(family))
+ family <- ((family)())
+ if (!inherits(family, "vglmff")) {
+ stop("'family = ", family, "' is not a VGAM family function")
+ }
+ efamily = family
+
+ # Number of linear predictors per response; taken from the family's
+ # @infos slot when not supplied explicitly.
+ if (!is.Numeric(Musual)) {
+ iefamily <- efamily@infos
+ if (is.function(iefamily))
+ Musual <- (iefamily())$Musual
+ }
+ if (!is.Numeric(Musual)) {
+ warning("cannot determine the value of 'Musual'.",
+ "Assuming the value one.")
+ Musual <- 1
+ }
+
+ # Accept either a response matrix or a fitted rrvglm object.
+ object.save <- y
+ y <- if (is(y, "rrvglm")) {
+ object.save@y
+ } else {
+ as(as.matrix(y), "matrix")
+ }
+ if (length(dim(y)) != 2 || nrow(y) < 3 || ncol(y) < 3)
+ stop("argument 'y' must be a matrix with >= 3 rows & columns, or ",
+ "a rrvglm() object")
+
+ # eifun(i, n): i-th column of the n x n identity (indicator vector).
+ eifun <- function(i, n) diag(n)[, i, drop = FALSE]
+
+ .rcam.df <- data.frame("Row.2" = eifun(2, nrow(y)))
+ colnames( .rcam.df )<- paste(rprefix, "2", sep = "") # Overwrite "Row.2"
+
+ # Term names for the latent-variable columns (Rank > 0); fall back to
+ # "X2.k" when dimnames are missing or begin with a digit.
+ yn1 <- if (length(dimnames(y)[[1]])) dimnames(y)[[1]] else
+ paste("X2.", 1:nrow(y), sep = "")
+ warn.save = options()$warn
+ options(warn = -3) # Suppress the warnings (hopefully, temporarily)
+ if (any(!is.na(as.numeric(substring(yn1, 1, 1)))))
+ yn1 <- paste("X2.", 1:nrow(y), sep = "")
+ options(warn = warn.save)
+
+ nrprefix <- as.name(rprefix)
+ ncprefix <- as.name(cprefix)
+
+ # Model matrices for the row and column factors.
+ assign(rprefix, factor(1:nrow(y)))
+ modmat.row <- substitute(
+ model.matrix( ~ .rprefix ), list( .rprefix = nrprefix ))
+ assign(cprefix, factor(1:ncol(y)))
+ modmat.col <- substitute(
+ model.matrix( ~ .cprefix ), list( .cprefix = ncprefix ))
+ modmat.row <- eval( modmat.row )
+ modmat.col <- eval( modmat.col )
+
+ # Constraint matrices: a row effect is constant across the columns
+ # (vector of ones); a column effect picks out a single column.
+ Hlist <- list("(Intercept)" = matrix(1, ncol(y), 1))
+ for(ii in 2:nrow(y)) {
+ Hlist[[ paste(rprefix, ii, sep = "")]] <- matrix(1, ncol(y), 1)
+ .rcam.df[[paste(rprefix, ii, sep = "")]] <- modmat.row[, ii]
+ }
+ for(ii in 2:ncol(y)) {
+ Hlist[[ paste(cprefix, ii, sep = "")]] <- modmat.col[, ii, drop = FALSE]
+ .rcam.df[[paste(cprefix, ii, sep = "")]] <- rep(1, nrow(y))
+ }
+
+ # Reduced-rank interaction terms: one indicator per non-baseline row.
+ if (Rank > 0) {
+ for(ii in 2:nrow(y)) {
+ Hlist[[yn1[ii]]] <- diag(ncol(y))
+ .rcam.df[[yn1[ii]]] <- eifun(ii, nrow(y))
+ }
+ }
+
+ dimnames(.rcam.df) <- list(if (length(dimnames(y)[[1]]))
+ dimnames(y)[[1]] else
+ as.character(1:nrow(y)),
+ dimnames(.rcam.df)[[2]])
+
+ # Assemble the model formula "~ Row.2 + ... + Col.2 + ..." as text.
+ str1 <- paste("~ ", rprefix, "2", sep = "")
+ if (nrow(y) > 2)
+ for(ii in 3:nrow(y)) {
+ str1 <- paste(str1, paste(rprefix, ii, sep = ""), sep = " + ")
+ }
+ for(ii in 2:ncol(y)) {
+ str1 <- paste(str1, paste(cprefix, ii, sep = ""), sep = " + ")
+ }
+
+ str2 <- paste("y ", str1)
+ if (Rank > 0) {
+ for(ii in 2:nrow(y))
+ str2 <- paste(str2, yn1[ii], sep = " + ")
+ }
+
+ # Control function depends on the rank of the interaction term.
+ controlfun <- if (Rank == 0) vglm.control else rrvglm.control
+
+ mycontrol <- controlfun(Rank = Rank,
+ Index.corner = Index.corner,
+ szero = szero, ...)
+
+ if (mycontrol$trace) {
+ }
+
+ if ((mindim <- min(nrow(y), ncol(y))) <= Rank) {
+ stop("argument 'Rank' is too high. Must be a value from 0 ",
+ "to ", mindim - 1, " inclusive")
+ }
+
+ if (Rank > 0)
+ mycontrol$Norrr <- as.formula(str1) # Overwrite this
+
+ assign(".rcam.df", .rcam.df, envir = VGAM:::VGAMenv)
+
+ warn.save <- options()$warn
+ options(warn = -3) # Suppress the warnings (hopefully, temporarily)
+
+ if (mycontrol$trace) {
+ }
+
+ # When each cell carries Musual > 1 linear predictors, expand the
+ # constraint matrices with a Kronecker product so the row/column
+ # effects act on the first predictor only.
+ if (Musual > 1) {
+ orig.Hlist <- Hlist
+ for (ii in 1:length(Hlist))
+ Hlist[[ii]] <- kronecker(Hlist[[ii]], rbind(1, 0))
+ Hlist[["(Intercept)"]] <-
+ cbind(Hlist[["(Intercept)"]],
+ kronecker(matrix(1, nrow(orig.Hlist[[1]]), 1), rbind(0, 1)))
+
+ if (mycontrol$trace) {
+ }
+ }
+
+ # Fit the model (or reuse the supplied fitted object).
+ answer <- if (Rank > 0) {
+ if (is(object.save, "rrvglm")) object.save else
+ rrvglm(as.formula(str2),
+ family = family,
+ constraints = Hlist,
+ control = mycontrol, data = .rcam.df)
+ } else {
+ if (is(object.save, "vglm")) object.save else
+ vglm(as.formula(str2),
+ family = family,
+ constraints = Hlist,
+ control = mycontrol, data = .rcam.df)
+ }
+
+ options(warn = warn.save)
+
+ answer <- if (summary.arg) {
+ if (Rank > 0) {
+ summary.rrvglm(as(answer, "rrvglm"), h.step = h.step)
+ } else {
+ summary(answer)
+ }
+ } else {
+ as(answer, ifelse(Rank > 0, "rrvglm", "vglm"))
+ }
+
+ # Record the baselines used, for later plotting/summarizing.
+ answer@misc$rbaseline <- rbaseline
+ answer@misc$cbaseline <- cbaseline
+
+ answer
+}
+
+
+
+
+
+
+
+
+# Summarize a fitted rcam object: delegate to rcam() with
+# summary.arg = TRUE, forwarding any extra arguments.
+summaryrcam = function(object, ...)
+ rcam(object, summary.arg = TRUE, ...)
+
+
+
+
+
+
+
+
+
+ # S4 class "rcam" for row-column association models.  It adds no real
+ # slots of its own (the 'not.needed' slot is a placeholder) and
+ # inherits all behaviour from "rrvglm".
+ setClass("rcam", representation(not.needed = "numeric"),
+ contains = "rrvglm")
+
+
+# summary() on an "rcam" object delegates to summaryrcam().
+setMethod("summary", "rcam",
+ function(object, ...)
+ summaryrcam(object, ...))
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ # Rcam(): permute the rows and columns of 'mat' so that the chosen
+ # baseline row/column comes first; the remaining rows/columns wrap
+ # around in their original order.  'rbaseline'/'cbaseline' may each be
+ # a dimname (character) or an index (numeric).  Dimnames are permuted
+ # along with the data.  Fixes vs. the posted version: "argement" typos
+ # in the error messages, the cbaseline message wrongly saying "row",
+ # and the length-1 index checks now run BEFORE the indices are used.
+ Rcam <- function (mat, rbaseline = 1, cbaseline = 1) {
+
+ mat <- as.matrix(mat)
+ RRR <- dim(mat)[1]
+ CCC <- dim(mat)[2]
+
+ if (is.null(rownames(mat)))
+ rnames <- paste("X", 1:RRR, sep="") else
+ rnames <- rownames(mat)
+
+ if (is.null(colnames(mat)))
+ cnames <- paste("Y", 1:CCC, sep="") else
+ cnames <- colnames(mat)
+
+ r.index <- if (is.character(rbaseline))
+ which(rownames(mat) == rbaseline) else
+ if (is.numeric(rbaseline)) rbaseline else
+ stop("argument 'rbaseline' must be numeric ",
+ "or character of the level of row")
+
+ c.index <- if (is.character(cbaseline))
+ which(colnames(mat) == cbaseline) else
+ if (is.numeric(cbaseline)) cbaseline else
+ stop("argument 'cbaseline' must be numeric ",
+ "or character of the level of column")
+
+ # Validate the matches before the indices are used below.
+ if (length(r.index) != 1)
+ stop("Could not match with argument 'rbaseline'")
+
+ if (length(c.index) != 1)
+ stop("Could not match with argument 'cbaseline'")
+
+ # Rotate rows so row r.index is first, then columns likewise.
+ yswap <- rbind(mat[r.index:RRR, ],
+ if (r.index > 1) mat[1:(r.index - 1),] else NULL)
+
+ yswap <- cbind(yswap[, c.index:CCC],
+ if (c.index > 1) yswap[, 1:(c.index - 1)] else NULL)
+
+ new.rnames <- rnames[ c(r.index:RRR,
+ if (r.index > 1) 1:(r.index - 1) else NULL)]
+
+ new.cnames <- cnames[ c(c.index:CCC,
+ if (c.index > 1) 1:(c.index - 1) else NULL)]
+
+ colnames(yswap) <- new.cnames
+ rownames(yswap) <- new.rnames
+
+ yswap
+}
+
+
+
+
+
+ # plotrcam0(): plot the row effects and the column effects of a fitted
+ # rank-0 rcam object as two separate plots (callers should set
+ # par(mfrow = ...) beforehand).  The baseline effect is 0 and is
+ # prepended to each series; 'rfirst'/'cfirst' rotate which effect is
+ # drawn first.  Returns 'object' invisibly.
+ # Fix vs. the posted version: the mail-archive " at " mangling of the
+ # S4 "@" operator is restored.
+ plotrcam0 <- function (object, rfirst = 1, cfirst = 1,
+ rtype = "h", ctype = "h",
+ rlas = 1, rcex.lab = 1,
+ rcex.axis = 1, rlabels = FALSE,
+ rtick = FALSE, clas = 1, ccex.lab = 1,
+ ccex.axis = 1, clabels = FALSE, ctick = FALSE,
+ rmain = "Row effects", rsub = "",
+ rxlabel = "", rylabel = "Row effects",
+ cmain = "Column effects", csub = "", cxlabel= "",
+ cylabel = "Column effects",
+ rcol = par()$col, ccol = par()$col,
+ ...) {
+
+ # Number of intercept parameters preceding the row effects.
+ if (object@family@infos()$Musual == 1) nparff <- 1 else nparff <- 2
+
+ # Coefficients are ordered: intercept(s), row effects, column effects;
+ # the baseline (0) is prepended to each series.
+ orig.roweff <- c(0, coefficients(object)[(nparff+1): (nparff+nrow(object@y)-1)])
+
+ orig.coleff <- c(0, coefficients(object)[(nparff+nrow(object@y)):
+ (length(coefficients(object)))])
+ rlast <- length(orig.roweff)
+ clast <- length(orig.coleff)
+
+ orig.raxisl <- rownames(object@y)
+ orig.caxisl <- colnames(object@y)
+
+ # Rotate so that the rfirst-th / cfirst-th effect is drawn first.
+ roweff <- orig.roweff[c(rfirst:rlast,
+ if (rfirst > 1) 1:(rfirst-1) else NULL)]
+
+ coleff <- orig.coleff[c(cfirst:clast,
+ if (cfirst > 1) 1:(cfirst-1) else NULL)]
+
+ raxisl <- orig.raxisl[ c(rfirst:rlast,
+ if (rfirst > 1) 1:(rfirst-1) else NULL)]
+
+ caxisl <- orig.caxisl[ c(cfirst:clast,
+ if (cfirst > 1) 1:(cfirst-1) else NULL)]
+
+ plot(roweff, type = rtype,
+ axes = FALSE, col = rcol,
+ main = rmain,
+ sub = rsub,
+ xlab = rxlabel, ylab = rylabel, ...)
+
+ axis(1, at = 1:length(raxisl), cex.lab = rcex.lab,
+ cex.axis = rcex.axis, label = raxisl)
+ axis(2, cex.lab = rcex.lab, las = rlas)
+ # NOTE(review): axis() documents a single 'side' value; axis(3:4, ...)
+ # relies on unspecified behaviour -- confirm intent.
+ axis(3:4, labels = rlabels, tick = rtick)
+
+ plot(coleff, type = ctype, col = ccol, # lwd=2, xpd=F,
+ axes = FALSE, main = cmain,
+ sub = csub, xlab = cxlabel, ylab = cylabel, ...)
+
+ axis(1, at = 1:length(caxisl), cex.lab = ccex.lab,
+ cex.axis = ccex.axis, label = caxisl)
+ axis(2, cex.lab= ccex.lab, las = clas)
+
+ invisible(object)
+}
+
+
+
+
+
+
+
+
+
+
+
+# moffset(): rotate a matrix so that element (roffset, coffset) moves
+# towards position (1, 1); rows that wrap around have their columns
+# shifted by one.  Used to rearrange a response matrix before an rcam
+# fit.  'roffset'/'coffset' may be numeric indices or dimname strings.
+# Fix vs. the posted version: garbled words in the final error message.
+moffset <- function (mat, roffset=1, coffset=1){
+
+ y <- mat
+
+ rowoffset <- function(y, roffset=1) {
+ y <- as.matrix(y)
+
+ roffset <- if (is.character(roffset))
+ which(rownames(y) == roffset) else
+ if (is.numeric(roffset)) roffset else
+ stop("argument rstart/cstart must be numeric ",
+ "or character of the level of row/column")
+
+ if (roffset == 1) ye <- y else {
+ # NB: '1:roffset-1' parses as (1:roffset)-1; the leading 0 index
+ # is silently dropped, so this selects rows 1..(roffset-1).
+ ye <- y[1:roffset-1,,drop = FALSE]
+ ye <- rbind(y[(roffset):nrow(y),,drop = FALSE],
+ cbind(ye[,2:ncol(y),drop = FALSE], ye[,1,drop = FALSE]))
+ ye
+ }
+ }
+
+ # NOTE(review): '||' lets the call proceed when only ONE of the two
+ # offsets is in range -- possibly '&&' was intended; kept as-is to
+ # preserve behaviour.
+ if (((coffset >= 1) && (coffset <= ncol(y))) ||
+ ((roffset >=1) && (roffset <= nrow(y)))) {
+ y <- rowoffset(y, roffset)
+ y <- t(rowoffset(t(y), coffset))
+ y
+ } else
+ stop ("Error in argument 'rstart' or 'cstart': ",
+ "it must be a numeric or character index of a row or column of ",
+ "the 'mat' matrix input")
+ }
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# confint_rrnb(): Wald 95% confidence interval for the "delta2" power
+# parameter of a rank-1 reduced-rank negative binomial (RR-NB) fit,
+# where delta2 = 2 - a21 and a21 is the loading of log(k) on the latent
+# variable.  Also returns the point estimates delta1, delta2 and the
+# standard error of a21.  Fix vs. the posted version: the mail-archive
+# " at " mangling of the S4 "@" operator is restored.
+confint_rrnb <- function(rrnb2) {
+
+ if (class(rrnb2) != "rrvglm")
+ stop("argument 'rrnb2' does not appear to be a rrvglm() object")
+
+ if (!any(rrnb2@family@vfamily == "negbinomial"))
+ stop("argument 'rrnb2' does not appear to be a negbinomial() fit")
+
+ if (rrnb2@control$Rank != 1)
+ stop("argument 'rrnb2' is not Rank-1")
+
+ if (rrnb2@misc$M != 2)
+ stop("argument 'rrnb2' does not have M = 2")
+
+ if (!all(rrnb2@misc$link == "loge"))
+ stop("argument 'rrnb2' does not have log links for both parameters")
+
+ a21.hat <- (Coef(rrnb2)@A)["log(k)", 1]
+ beta11.hat <- Coef(rrnb2)@B1["(Intercept)", "log(mu)"]
+ beta21.hat <- Coef(rrnb2)@B1["(Intercept)", "log(k)"]
+ delta1.hat <- exp(a21.hat * beta11.hat - beta21.hat)
+ delta2.hat <- 2 - a21.hat
+
+ se.a21.hat <- sqrt(vcovrrvglm(rrnb2)["I(lv.mat)", "I(lv.mat)"])
+
+ ci.a21 <- a21.hat + c(-1, 1) * 1.96 * se.a21.hat
+ (ci.delta2 <- 2 - rev(ci.a21)) # The 95 percent confidence interval
+
+ list(a21.hat = a21.hat,
+ beta11.hat = beta11.hat,
+ beta21.hat = beta21.hat,
+ ci.delta2 = ci.delta2,
+ delta1 = delta1.hat,
+ delta2 = delta2.hat,
+ se.a21.hat = se.a21.hat)
+}
+
+
+
+# confint_nb1(): Wald 95% confidence interval for the dispersion
+# parameter phi0 = 1 + 1/delta0 of an NB-1 fit (a vglm negbinomial fit
+# with parallel = TRUE except for the intercept).  Fix vs. the posted
+# version: the mail-archive " at " mangling of "@" is restored.
+confint_nb1 <- function(nb1) {
+
+ if (class(nb1) != "vglm")
+ stop("argument 'nb1' does not appear to be a vglm() object")
+
+ if (!any(nb1@family@vfamily == "negbinomial"))
+ stop("argument 'nb1' does not appear to be a negbinomial() fit")
+
+ if (!all(unlist(constraints(nb1)[-1]) == 1))
+ stop("argument 'nb1' does not appear to have parallel = TRUE")
+
+ if (!all(unlist(constraints(nb1)[1]) == c(diag(nb1@misc$M))))
+ stop("argument 'nb1' does not have parallel = FALSE for the intercept")
+
+ if (nb1@misc$M != 2)
+ stop("argument 'nb1' does not have M = 2")
+
+ if (!all(nb1@misc$link == "loge"))
+ stop("argument 'nb1' does not have log links for both parameters")
+
+ cnb1 <- coefficients(as(nb1, "vglm"), matrix = TRUE)
+ mydiff <- (cnb1["(Intercept)", "log(k)"] - cnb1["(Intercept)", "log(mu)"])
+ delta0.hat <- exp(mydiff)
+ (phi0.hat <- 1 + 1 / delta0.hat) # MLE of phi0
+
+ # Delta-method SE for the intercept difference.
+ myvcov <- vcovvlm(as(nb1, "vglm")) # Not great; improve this!
+ myvec <- cbind(c(-1, 1, rep(0, len = nrow(myvcov) - 2)))
+ (se.mydiff <- sqrt(t(myvec) %*% myvcov %*% myvec))
+ ci.mydiff <- mydiff + c(-1.96, 1.96) * se.mydiff
+ ci.delta0 <- ci.exp.mydiff <- exp(ci.mydiff)
+ (ci.phi0 <- 1 + 1 / rev(ci.delta0)) # The 95 percent conf int. for phi0
+
+ list(ci.phi0 = ci.phi0,
+ delta0 = delta0.hat,
+ phi0 = phi0.hat)
+}
+
+
+
+
+
+
+
+
+
+
+
+ # ref: Kus, section 4.1, pg 4500
+ # updated on 22/12/2010
+# Density of the exponential-Poisson distribution (Kus, 2007).
+# Support is x > 0; NaN is returned where lambda <= 0 or betave <= 0.
+dexppois <- function(x, lambda, betave = 1, log = FALSE) {
+ if (!is.logical(use.log <- log))
+ stop("bad input for argument 'log'")
+ rm(log) # so that log() below refers to base::log
+
+ nn <- max(length(x), length(lambda), length(betave))
+ x <- rep(x, len = nn)
+ lambda <- rep(lambda, len = nn)
+ betave <- rep(betave, len = nn)
+
+ ll <- rep(-Inf, len = nn) # log-density is -Inf off the support
+ pos <- (x > 0)
+ ll[pos] <- log(lambda[pos]) + log(betave[pos]) -
+ log1p(-exp(-lambda[pos])) - lambda[pos] -
+ betave[pos] * x[pos] +
+ lambda[pos] * exp(-betave[pos] * x[pos])
+
+ ll[lambda <= 0] <- NaN
+ ll[betave <= 0] <- NaN
+ if (use.log) ll else exp(ll)
+}
+
+ # ref: calculated from F(x) from Kus, pg 4499
+ # Not working 13/12/10
+ # updated and working on 22/12/2010
+# Random deviates from the exponential-Poisson distribution, by
+# inverting the CDF at uniform draws (Kus, 2007, pg 4499).
+rexppois <- function(n, lambda, betave = 1) {
+ u <- runif(n)
+ out <- -log(log(exp(lambda) - u * expm1(lambda)) / lambda) / betave
+ out[(lambda <= 0) | (betave <= 0)] <- NaN
+ out
+}
+
+
+ # ref: calculated from F(x) from Kus, pg 4499
+ # Not working 13/12/10
+ # updated and working on 22/12/2010
+# Quantile function of the exponential-Poisson distribution, from
+# inverting the CDF in Kus (2007, pg 4499).  NaN for p outside [0, 1]
+# or non-positive parameters.
+qexppois<- function(p, lambda, betave = 1) {
+ out <- -log(log(exp(lambda) - p * expm1(lambda)) / lambda) / betave
+ out[(lambda <= 0) | (betave <= 0)] <- NaN
+ out[(p < 0) | (p > 1)] <- NaN
+ out
+}
+
+
+
+ # ref: Kus, eqn 2, pg 4499
+ # Updated on 22/12/2010
+# CDF of the exponential-Poisson distribution (Kus, 2007, eqn 2).
+# Fix vs. the posted version: the support is x > 0, so F(q) must be 0
+# for q <= 0; previously negative q returned negative "probabilities".
+# (Mirrors the q <= 0 handling in pgenray().)
+pexppois<- function(q, lambda, betave = 1) {
+ ans <- (exp(lambda * exp(-betave * q)) - exp(lambda)) / -expm1(lambda)
+ ans[q <= 0] <- 0
+ ans[(lambda <= 0) | (betave <= 0)] <- NaN
+ ans
+}
+
+
+
+
+ # exppoisson(): VGAM family function for the exponential-Poisson
+ # distribution of Kus (2007), with positive parameters lambda and
+ # betave, each modelled with (by default) a log link.  The expected
+ # information uses genhypergeo() from package 'hypergeo'.
+ # Fix vs. the posted version: 'if (!is.list(lambda))' referenced the
+ # undefined object 'lambda'; the argument is 'elambda'.
+ exppoisson = function (llambda = "loge", lbetave = "loge",
+ elambda = list(), ebetave = list(),
+ ilambda = 1.1, ibetave = 1.5,
+ zero = NULL) {
+
+ if (mode(llambda) != "character" && mode(llambda) != "name")
+ llambda = as.character(substitute(llambda))
+ if (mode(lbetave) != "character" && mode(lbetave) != "name")
+ lbetave = as.character(substitute(lbetave))
+
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ stop("bad input for argument 'zero'")
+
+ if (!is.Numeric(ilambda, posit = TRUE))
+ stop("bad input for argument 'ilambda'")
+ if (length(ibetave) && !is.Numeric(ibetave, posit = TRUE))
+ stop("bad input for argument 'ibetave'")
+
+ ilambda[ilambda == 1] = 1.1 # lambda == 1 is an awkward starting value
+
+ if (!is.list(ebetave))
+ ebetave = list()
+ if (!is.list(elambda)) # was: is.list(lambda) -- undefined object
+ elambda = list()
+
+ new("vglmff",
+ blurb = c("Exponential Poisson Distribution \n \n",
+ "Links: ",
+ namesof("lambda", llambda, earg = elambda), ", ",
+ namesof("betave", lbetave, earg = ebetave), "\n",
+ "Mean: ",
+ "(lambda/(expm1(lambda) * betave)) *",
+ "genhypergeo(c(1,1),c(2,2),lambda)"),
+
+ # genhypergeo() from package: hypergeo
+ # ref = mean from Kus pg 4499
+
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero))),
+
+ initialize = eval(substitute(expression({
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+
+ predictors.names = c(
+ namesof("lambda", .llambda, earg = .elambda, short = TRUE),
+ namesof("betave", .lbetave, earg = .ebetave, short = TRUE))
+ if (!length(etastart)) {
+
+ lambda.init = if (!is.Numeric( .ilambda , posit = TRUE))
+ stop("argument 'ilambda' must be positive") else
+ rep( .ilambda , len = n)
+ betave.init = if (length( .ibetave ))
+ rep( .ibetave , len = n) else
+ stop("zz need to fix this code")
+ ## (lambda.init/(expm1(lambda.init) * (y + 1/8))) *
+ ## genhypergeo(c(1,1),c(2,2),lambda.init)
+
+ betave.init = rep(weighted.mean(betave.init, w = w), len = n)
+ etastart = cbind(theta2eta(lambda.init, .llambda , earg = .elambda ),
+ theta2eta(betave.init, .lbetave , earg = .ebetave ))
+ }
+ }), list( .llambda = llambda, .lbetave = lbetave,
+ .ilambda = ilambda, .ibetave = ibetave,
+ .elambda = elambda, .ebetave = ebetave))),
+
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ # The mean involves genhypergeo(); a placeholder is returned.
+ lambda = eta2theta(eta[, 1], .llambda , earg = .elambda )
+ betave = eta2theta(eta[, 2], .lbetave , earg = .ebetave )
+ warning("returning dud means")
+ runif(nrow(eta))
+ }, list( .llambda = llambda, .lbetave = lbetave,
+ .elambda = elambda, .ebetave = ebetave))),
+
+ last = eval(substitute(expression({
+ misc$link = c(lambda = .llambda , betave = .lbetave )
+ misc$earg = list(lambda = .elambda , betave = .ebetave )
+ misc$expected = TRUE
+
+ }), list( .llambda = llambda, .lbetave = lbetave,
+ .elambda = elambda, .ebetave = ebetave))),
+
+ loglikelihood = eval(substitute(function(mu, y, w,
+ residuals = FALSE, eta, extra = NULL) {
+ lambda = eta2theta(eta[, 1], .llambda , earg = .elambda )
+ betave = eta2theta(eta[, 2], .lbetave , earg = .ebetave )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * dexppois(x = y, lambda = lambda, betave = betave, log = TRUE))
+ }
+ }, list( .lbetave = lbetave , .llambda = llambda ,
+ .elambda = elambda , .ebetave = ebetave ))),
+
+ vfamily = c("exppoisson"),
+
+ deriv = eval(substitute(expression({
+ lambda = eta2theta(eta[, 1], .llambda , earg = .elambda )
+ betave = eta2theta(eta[, 2], .lbetave , earg = .ebetave )
+
+ # Score functions of the log-likelihood.
+ dl.dbetave = 1/betave - y - y * lambda * exp(-betave * y)
+ dl.dlambda = 1/lambda - 1/expm1(lambda) - 1 + exp(-betave * y)
+
+ dbetave.deta = dtheta.deta(betave, .lbetave , earg = .ebetave )
+ dlambda.deta = dtheta.deta(lambda, .llambda , earg = .elambda )
+
+ w * cbind(dl.dlambda * dlambda.deta,
+ dl.dbetave * dbetave.deta)
+ }), list( .llambda = llambda , .lbetave = lbetave,
+ .elambda = elambda, .ebetave = ebetave ))),
+
+ weight = eval(substitute(expression({
+ # Expected information matrix (Kus, section 4.1, pg 4500).
+ temp1 = -expm1(-lambda)
+
+ ed2l.dlambda2 = (1 + exp(2 * lambda) - lambda^2 * exp(lambda) - 2 *
+ exp(lambda)) / (lambda * temp1)^2
+
+ ed2l.dbetave2 = 1 / betave^2 - (lambda^2 * exp(-lambda) / (4 * betave^2 *
+ temp1)) * genhypergeo(c(2,2,2),c(3,3,3),lambda)
+
+ ed2l.dbetavelambda = (lambda * exp(-lambda) / (4 * betave * temp1)) *
+ genhypergeo(c(2,2),c(3,3),lambda)
+
+ wz <- matrix(0, n, dimm(M))
+ wz[, iam(1, 1, M)] = ed2l.dlambda2 * dlambda.deta^2
+ wz[, iam(2, 2, M)] = ed2l.dbetave2 * dbetave.deta^2
+ wz[, iam(1, 2, M)] = dbetave.deta * dlambda.deta * ed2l.dbetavelambda
+ w * wz
+ }), list( .zero = zero ))))
+}
+
+
+
+
+
+# Density of the generalized Rayleigh (Burr type X) distribution.
+# Support is x > 0; NaN where shape <= 0 or scale <= 0.
+dgenray <- function(x, shape, scale = 1, log = FALSE) {
+ if (!is.logical(use.log <- log))
+ stop("bad input for argument 'log'")
+ rm(log) # so that log() below refers to base::log
+
+ nn <- max(length(x), length(shape), length(scale))
+ x <- rep(x, len = nn)
+ shape <- rep(shape, len = nn)
+ scale <- rep(scale, len = nn)
+
+ ll <- rep(-Inf, len = nn) # zero density off the support
+ pos <- (x > 0)
+ if (any(pos)) {
+ z <- x[pos] / scale[pos]
+ ll[pos] <- log(2) + log(shape[pos]) + log(x[pos]) -
+ 2 * log(scale[pos]) - z^2 +
+ (shape[pos] - 1) * log1p(-exp(-z^2))
+ }
+ ll[(shape <= 0) | (scale <= 0)] <- NaN
+ if (use.log) {
+ ll
+ } else {
+ exp(ll)
+ }
+}
+
+
+pgenray <- function(q, shape, scale = 1) {
+  # CDF of the generalized Rayleigh distribution:
+  #   F(q) = (1 - exp(-(q/scale)^2))^shape  for q > 0, and 0 otherwise.
+  zedd2 <- (q / scale)^2
+  cdf <- (-expm1(-zedd2))^shape
+  cdf[q <= 0] <- 0
+  # Invalid parameters give NaN.
+  cdf[(shape <= 0) | (scale <= 0)] <- NaN
+  cdf
+}
+
+
+
+rgenray <- function(n, shape, scale = 1) {
+  # Random variates by the inversion method: feed Uniform(0,1)
+  # draws through the quantile function.
+  rvs <- qgenray(runif(n), shape = shape, scale = scale)
+  rvs[(shape <= 0) | (scale <= 0)] <- NaN
+  rvs
+}
+
+
+qgenray <- function(p, shape, scale = 1) {
+  # Quantile function of the generalized Rayleigh distribution.
+  # Inverts F(q) = (1 - exp(-(q/scale)^2))^shape, giving
+  #   q = scale * sqrt(-log1p(-p^(1/shape))).
+  ans <- scale * sqrt(-log1p(-(p^(1/shape))))
+  ans[p < 0] <- NaN
+  ans[p > 1] <- NaN
+  ans[p == 0] <- 0
+  ans[p == 1] <- Inf
+  # Apply the invalid-parameter mask LAST so that bad parameters give
+  # NaN even at p == 0 or p == 1 (previously qgenray(0, shape = -1)
+  # returned 0); this matches the qnorm()/qgamma() convention.
+  ans[(shape <= 0) | (scale <= 0)] <- NaN
+  ans
+}
+
+
+
+
+
+
+
+
+genrayleigh.control <- function(save.weight = TRUE, ...) {
+  # Control function: keep the working weights on the fitted object,
+  # since genrayleigh() computes its EIM by simulation.
+  list(save.weight = save.weight)
+}
+
+
+
+
+
+ # VGAM family function for the two-parameter generalized Rayleigh
+ # distribution.  Linear predictors: eta1 = link(shape),
+ # eta2 = link(scale).  Fitted values are the medians.  The expected
+ # information matrix is approximated by simulation ('nsimEIM' draws
+ # per IRLS iteration), which is why genrayleigh.control() saves the
+ # working weights.
+ genrayleigh = function (lshape = "loge", lscale = "loge",
+ eshape = list(), escale = list(),
+ ishape = NULL, iscale = NULL,
+ tol12 = 1.0e-05,
+ nsimEIM = 300, zero = 1) {
+
+ # Accept unquoted link functions, e.g. lshape = loge.
+ if (mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+
+ # Optional user-supplied initial values must be positive.
+ if (length(ishape) && !is.Numeric(ishape, posit = TRUE))
+ stop("bad input for argument 'ishape'")
+ if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
+ stop("bad input for argument 'iscale'")
+
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ stop("bad input for argument 'zero'")
+ if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 50)
+ stop("'nsimEIM' should be an integer greater than 50")
+
+ if (!is.list(escale))
+ escale = list()
+ if (!is.list(eshape))
+ eshape = list()
+
+
+ new("vglmff",
+ blurb = c("Generalized Rayleigh distribution\n",
+ "Links: ",
+ namesof("shape", lshape, earg = eshape), ", ",
+ namesof("scale", lscale, earg = escale), "\n"),
+ # 'zero' makes the chosen linear predictor(s) intercept-only.
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+
+ predictors.names = c(
+ namesof("shape", .lshape , earg = .eshape , short = TRUE),
+ namesof("scale", .lscale , earg = .escale , short = TRUE))
+
+ if (!length(etastart)) {
+ # Profile log-likelihood in 'scale': for fixed scale the MLE of
+ # shape has the closed form used below.
+ genrayleigh.Loglikfun = function(scale, y, x, w, extraargs) {
+ temp1 <- y / scale
+ shape = -1 / weighted.mean(log1p(-exp(-temp1^2)), w = w)
+
+ ans <-
+ sum(w * (log(2) + log(shape) + log(y) - 2 * log(scale) -
+ temp1^2 + (shape - 1) * log1p(-exp(-temp1^2))))
+ ans
+ }
+ # Grid search for the scale initial value unless one was supplied.
+ scale.grid = seq(0.2 * sd(y), 5 * sd(y), len = 29)
+ scale.init = if (length( .iscale )) .iscale else
+ getMaxMin(scale.grid, objfun = genrayleigh.Loglikfun,
+ y = y, x = x, w = w)
+ scale.init = rep(scale.init, length = length(y))
+
+ shape.init = if (length( .ishape )) .ishape else
+ -1 / weighted.mean(log1p(-exp(-(y/scale.init)^2)), w = w)
+ shape.init = rep(shape.init, length = length(y))
+
+ etastart = cbind(theta2eta(shape.init, .lshape, earg = .eshape),
+ theta2eta(scale.init, .lscale, earg = .escale))
+ }
+ }), list( .lscale = lscale, .lshape = lshape,
+ .iscale = iscale, .ishape = ishape,
+ .escale = escale, .eshape = eshape))),
+
+ # Fitted values: the median of the fitted distribution.
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ qgenray(p = 0.5, shape = shape, scale = Scale)
+ }, list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale ))),
+
+ last = eval(substitute(expression({
+ misc$link = c(shape = .lshape , scale = .lscale )
+ misc$earg = list(shape = .eshape , scale = .escale )
+ misc$expected = TRUE
+ misc$nsimEIM = .nsimEIM
+ }), list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale,
+ .nsimEIM = nsimEIM ))),
+
+ loglikelihood = eval(substitute(function(mu, y, w,
+ residuals = FALSE, eta, extra = NULL) {
+
+ shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+
+ if (residuals) stop("loglikelihood residuals",
+ "not implemented yet") else {
+ sum(w * dgenray(x = y, shape = shape, scale = Scale, log = TRUE))
+ }
+ }, list( .lshape = lshape , .lscale = lscale ,
+ .eshape = eshape , .escale = escale ))),
+
+ vfamily = c("genrayleigh"),
+
+ deriv = eval(substitute(expression({
+ shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
+ dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
+ dthetas.detas = cbind(dshape.deta, dscale.deta)
+
+ # Score vector components of the log-density.
+ temp1 <- y / Scale
+ temp2 <- exp(-temp1^2)
+ temp3 <- temp1^2 / Scale
+ AAA <- 2 * temp1^2 / Scale # 2 * y^2 / Scale^3
+ BBB <- -expm1(-temp1^2) # denominator
+ dl.dshape = 1/shape + log1p(-temp2)
+ dl.dscale = -2 / Scale + AAA * (1 - (shape - 1) * temp2 / BBB)
+
+ # Guard against non-finite scores (y extremely close to 0).
+ dl.dshape[!is.finite(dl.dshape)] = max(dl.dshape[is.finite(dl.dshape)])
+
+ answer <- w * cbind(dl.dshape, dl.dscale) * dthetas.detas
+ answer
+ }), list( .lshape = lshape , .lscale = lscale,
+ .eshape = eshape, .escale = escale ))),
+
+ # Simulated Fisher scoring: average outer products of simulated
+ # score vectors to approximate the expected information matrix.
+ weight = eval(substitute(expression({
+
+
+ run.varcov = 0
+ ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ for(ii in 1:( .nsimEIM )) {
+ ysim = rgenray(n = n, shape = shape, scale = Scale)
+
+ temp1 <- ysim / Scale
+ temp2 <- exp(-temp1^2) # May be 1 if ysim is very close to 0.
+ temp3 <- temp1^2 / Scale
+ AAA <- 2 * temp1^2 / Scale # 2 * y^2 / Scale^3
+ BBB <- -expm1(-temp1^2) # denominator
+ dl.dshape = 1/shape + log1p(-temp2)
+ dl.dscale = -2 / Scale + AAA * (1 - (shape - 1) * temp2 / BBB)
+
+ dl.dshape[!is.finite(dl.dshape)] = max(dl.dshape[is.finite(dl.dshape)])
+
+ temp3 = cbind(dl.dshape, dl.dscale)
+ run.varcov = run.varcov + temp3[, ind1$row.index] *
+ temp3[, ind1$col.index]
+ }
+ run.varcov = run.varcov / .nsimEIM
+
+ # For intercept-only models, average over observations as well.
+ wz = if (intercept.only)
+ matrix(colMeans(run.varcov, na.rm = FALSE),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
+ wz = wz * dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
+ w * wz
+ }), list( .lshape = lshape , .lscale = lscale,
+ .eshape = eshape, .escale = escale,
+ .tol12 = tol12, .nsimEIM = nsimEIM ))))
+}
+
+
+
+
+
+
+
+
+
+ # Ref: Mudholkar, p. 293
+ # Updated and working: 06/01/11
+desnorm <- function(x, location = 0, Scale = 1, epsilon = 0, log = FALSE) {
+  # Density of the epsilon-skew-normal distribution:
+  #   f(x) = (1 / (sqrt(2*pi) * Scale)) * exp(-z^2 / (2*(1+eps)^2)), z < 0,
+  #   f(x) = (1 / (sqrt(2*pi) * Scale)) * exp(-z^2 / (2*(1-eps)^2)), z >= 0,
+  # where z = (x - location) / Scale.  This is the derivative of the
+  # CDF implemented in pesnorm().
+  if (!is.logical(log.arg <- log))
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  N <- max(length(x), length(location), length(Scale), length(epsilon))
+  x <- rep(x, len = N)
+  location <- rep(location, len = N)
+  Scale <- rep(Scale, len = N)
+  epsilon <- rep(epsilon, len = N)
+  zedd <- (x - location)/Scale
+
+  logdensity <- rep(log(0), len = N)
+  xneg <- (zedd < 0)
+  xpos <- (zedd >= 0)
+  # BUG FIX: the log(2*pi)/2 term previously had the wrong sign and the
+  # 1/Scale Jacobian was omitted, so the density did not integrate to 1.
+  logdensity[xneg] <- -1/2 * log(2 * pi) - zedd[xneg]^2/(2 * (1 + epsilon[xneg])^2)
+  logdensity[xpos] <- -1/2 * log(2 * pi) - zedd[xpos]^2/(2 * (1 - epsilon[xpos])^2)
+  logdensity[(epsilon < -1) | (epsilon > 1)] <- NaN
+
+  if (log.arg)
+    logdensity - log(Scale)
+  else exp(logdensity) / Scale
+
+}
+
+
+ # Ref: Mudholkar, p. 293
+ # Updated and working: 06/01/11
+pesnorm <- function(q, location = 0, Scale = 1, epsilon = 0) {
+  # CDF of the epsilon-skew-normal distribution.  The left half uses
+  # scale factor (1 + epsilon) and the right half (1 - epsilon); the
+  # two pieces join continuously at zedd = 0 with value (1+epsilon)/2.
+  nn <- max(length(q), length(location), length(Scale), length(epsilon))
+  q        <- rep(q,        len = nn)
+  location <- rep(location, len = nn)
+  Scale    <- rep(Scale,    len = nn)
+  epsilon  <- rep(epsilon,  len = nn)
+
+  zedd <- (q - location) / Scale
+  below <- (zedd < 0)
+  above <- (zedd >= 0)
+  ans <- rep(0, len = length(q))
+
+  ans[below] <- (1 + epsilon[below]) *
+                pnorm(q = zedd[below] / (1 + epsilon[below]),
+                      mean = 0, sd = 1)
+  ans[above] <- epsilon[above] + (1 - epsilon[above]) *
+                pnorm(q = zedd[above] / (1 - epsilon[above]),
+                      mean = 0, sd = 1)
+
+  ans
+}
+
+
+
+
+
+
+
+
+
+
+
+dexpgeom <- function(x, scale = 1, shape, log = FALSE) {
+  # Density of the exponential geometric distribution:
+  #   f(x) = scale*(1-shape)*exp(-scale*x) / (1 - shape*exp(-scale*x))^2
+  # for x > 0, scale > 0 and 0 < shape < 1.
+  if (!is.logical(log.arg <- log))
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  nn <- max(length(x), length(scale), length(shape))
+  x     <- rep(x,     len = nn)
+  scale <- rep(scale, len = nn)
+  shape <- rep(shape, len = nn)
+
+  # log(0) = -Inf handles the x <= 0 region.
+  ans <- rep(log(0), len = nn)
+  is.pos <- (x > 0)
+  if (any(is.pos)) {
+    expo <- (-x[is.pos]) * scale[is.pos]
+    ans[is.pos] <- log(scale[is.pos]) + log1p(-shape[is.pos]) +
+                   expo - 2 * log1p(-shape[is.pos] * exp(expo))
+  }
+
+  # Parameter restrictions: scale > 0, 0 < shape < 1.
+  ans[(scale <= 0) | (shape <= 0) | (shape >= 1)] <- NaN
+  if (log.arg) ans else exp(ans)
+}
+
+
+
+pexpgeom <- function(q, scale = 1, shape) {
+  # CDF of the exponential geometric distribution:
+  #   F(q) = (1 - exp(-scale*q)) / (1 - shape*exp(-scale*q)) for q > 0.
+  # expm1() keeps accuracy for small q.
+  expo <- (-q) * scale
+  cdf <- (-expm1(expo)) / (1 - shape * exp(expo))
+  cdf[q <= 0] <- 0
+  # Parameter restrictions: scale > 0, 0 < shape < 1.
+  cdf[(scale <= 0) | (shape <= 0) | (shape >= 1)] <- NaN
+  cdf
+}
+
+
+rexpgeom <- function(n, scale = 1, shape) {
+  # Random variates by inversion of the CDF.
+  rvs <- qexpgeom(runif(n), shape = shape, scale = scale)
+  rvs[(scale <= 0) | (shape <= 0) | (shape >= 1)] <- NaN
+  rvs
+}
+
+
+
+qexpgeom <- function(p, scale = 1, shape) {
+  # Quantile function of the exponential geometric distribution.
+  # Inverts F(q) = (1 - exp(-scale*q)) / (1 - shape*exp(-scale*q)):
+  #   q = -(1/scale) * log((1 - p) / (1 - p*shape)).
+  ans <- (-1/scale) * log((p - 1) / (p * shape - 1))
+  ans[p < 0] <- NaN
+  ans[p > 1] <- NaN
+  ans[p == 0] <- 0
+  ans[p == 1] <- Inf
+  # Apply the invalid-parameter mask LAST so that bad parameters give
+  # NaN even at p == 0 or p == 1 (previously they returned 0 or Inf);
+  # this matches the qnorm()/qgamma() convention.
+  ans[(scale <= 0) | (shape <= 0) | (shape >= 1)] <- NaN
+  ans
+}
+
+
+
+
+
+expgeometric.control <- function(save.weight = TRUE, ...) {
+  # Control function: keep the working weights on the fitted object,
+  # since expgeometric() computes its EIM by simulation.
+  list(save.weight = save.weight)
+}
+
+
+
+ # VGAM family function for the two-parameter exponential geometric
+ # distribution.  Linear predictors: eta1 = link(Scale),
+ # eta2 = link(shape), with 0 < shape < 1 (hence the default logit
+ # link).  The expected information matrix is approximated by
+ # simulation with 'nsimEIM' draws per IRLS iteration.
+ expgeometric = function (lscale = "loge", lshape = "logit",
+ escale = list(), eshape = list(),
+ iscale = NULL, ishape = NULL,
+ zero = 1, nsimEIM = 400) {
+
+
+ # Accept unquoted link functions.
+ if (mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+
+ # Initial shape must lie in (0, 1); initial scale must be positive.
+ if (length(ishape))
+ if (!is.Numeric(ishape, posit = TRUE) || any(ishape >= 1))
+ stop("bad input for argument 'ishape'")
+
+ if (length(iscale))
+ if (!is.Numeric(iscale, posit = TRUE))
+ stop("bad input for argument 'iscale'")
+
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ stop("bad input for argument 'zero'")
+
+ if (!is.list(escale))
+ escale = list()
+ if (!is.list(eshape))
+ eshape = list()
+
+ if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE))
+ stop("bad input for argument 'nsimEIM'")
+ if (nsimEIM <= 50)
+ stop("'nsimEIM' should be an integer greater than 50")
+
+
+ new("vglmff",
+ blurb = c("Exponential geometric distribution\n\n",
+ "Links: ",
+ namesof("Scale", lscale, earg = escale), ", ",
+ namesof("shape", lshape, earg = eshape), "\n",
+ "Mean: ", "(shape - 1) * log(1 - ",
+ "shape) / (Scale * shape)"),
+
+ # 'zero' makes the chosen linear predictor(s) intercept-only.
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+
+
+
+ initialize = eval(substitute(expression({
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+
+ predictors.names = c(
+ namesof("Scale", .lscale , earg = .escale , short = TRUE),
+ namesof("shape", .lshape , earg = .eshape , short = TRUE))
+
+ if (!length(etastart)) {
+
+ # Crude moment-style starting value for the scale parameter.
+ scale.init = if (is.Numeric( .iscale , posit = TRUE)) {
+ rep( .iscale , len = n)
+ } else {
+ 1 / sd(y) # The papers scale parameter beta
+ }
+
+ # Start for shape from the sample median, clamped to (0.05, 0.95)
+ # to keep it strictly inside the parameter space.
+ shape.init = if (is.Numeric( .ishape , posit = TRUE)) {
+ rep( .ishape , len = n)
+ } else {
+ rep(2 - exp(scale.init * median(y)), len = n)
+ }
+ shape.init[shape.init >= 0.95] = 0.95
+ shape.init[shape.init <= 0.05] = 0.05
+
+ etastart = cbind(theta2eta(scale.init, .lscale , earg = .escale ),
+ theta2eta(shape.init, .lshape , earg = .eshape ))
+
+ }
+ }), list( .lscale = lscale, .lshape = lshape,
+ .iscale = iscale, .ishape = ishape,
+ .escale = escale, .eshape = eshape))),
+
+ # Fitted values: the distribution's mean.
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
+
+ (shape - 1) * log1p(-shape) / (Scale * shape)
+
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
+
+ last = eval(substitute(expression({
+ misc$link = c(Scale = .lscale , shape = .lshape )
+ misc$earg = list(Scale = .escale , shape = .eshape )
+ misc$expected = TRUE
+ misc$nsimEIM = .nsimEIM
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape,
+ .nsimEIM = nsimEIM ))),
+
+ loglikelihood = eval(substitute(function(mu, y, w,
+ residuals = FALSE, eta, extra = NULL) {
+
+ Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
+
+ if (residuals) stop("loglikelihood residuals",
+ "not implemented yet") else {
+ sum(w * dexpgeom(x = y, shape = shape, scale = Scale, log = TRUE))
+ }
+ }, list( .lscale = lscale , .lshape = lshape ,
+ .escale = escale , .eshape = eshape ))),
+
+ vfamily = c("expgeometric"),
+
+ deriv = eval(substitute(expression({
+ Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
+
+ # Score vector components of the log-density.
+ temp2 <- exp(-Scale * y)
+ temp3 <- shape * temp2
+ dl.dscale = 1 / Scale - y - 2 * y * temp3 / (1 - temp3)
+ dl.dshape = -1 / (1 - shape) + 2 * temp2 / (1 - temp3)
+
+ dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
+ dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
+ dthetas.detas = cbind(dscale.deta, dshape.deta)
+
+ answer <- w * cbind(dl.dscale, dl.dshape) * dthetas.detas
+ answer
+ }), list( .lscale = lscale , .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
+
+ # Simulated Fisher scoring: average outer products of simulated
+ # score vectors over .nsimEIM draws to approximate the EIM.
+ weight = eval(substitute(expression({
+
+
+
+
+
+
+
+
+
+ run.varcov = 0
+ ind1 = iam(NA, NA, M=M, both = TRUE, diag = TRUE)
+
+ if (length( .nsimEIM )) {
+ for(ii in 1:( .nsimEIM )) {
+ ysim = rexpgeom(n, scale=Scale, shape=shape)
+
+ temp2 <- exp(-Scale * ysim)
+ temp3 <- shape * temp2
+ dl.dscale = 1 / Scale - ysim - 2 * ysim *
+ temp3 / (1 - temp3)
+ dl.dshape = -1 / (1 - shape) + 2 * temp2 / (1 - temp3)
+
+ temp6 = cbind(dl.dscale, dl.dshape)
+ run.varcov = run.varcov +
+ temp6[,ind1$row.index] * temp6[,ind1$col.index]
+ }
+
+ run.varcov = run.varcov / .nsimEIM
+
+ # For intercept-only models, average over observations as well.
+ wz = if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
+
+ wz = wz * dthetas.detas[, ind1$row] *
+ dthetas.detas[, ind1$col]
+ }
+
+ w * wz
+ }), list( .nsimEIM = nsimEIM ))))
+}
+
+
+
+
+
+
+
+
+
diff --git a/R/family.rcqo.R b/R/family.rcqo.R
index c2a7beb..6e32437 100644
--- a/R/family.rcqo.R
+++ b/R/family.rcqo.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/family.robust.R b/R/family.robust.R
new file mode 100644
index 0000000..2fa0e11
--- /dev/null
+++ b/R/family.robust.R
@@ -0,0 +1,259 @@
+# These functions are
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+
+
+
+
+
+
+edhuber <- function(x, k = 0.862, mu = 0, sigma = 1, log = FALSE) {
+  # Density of Huber's least favourable distribution together with the
+  # implied contamination proportion 'eps'.  The density is Gaussian
+  # for |z| <= k and has exponential tails beyond.
+  if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
+  rm(log)
+
+  zedd <- (x - mu) / sigma
+  phik <- dnorm(k)
+  # Normalizing mass: P(-k < Z < k) + 2*phi(k)/k.
+  total.mass <- pnorm(k) - pnorm(-k) + 2 * phik / k
+  eps  <- 1 - 1 / total.mass
+  ceps <- 1 / total.mass
+
+  lower <- (zedd < (-k))
+  upper <- (zedd > (+k))
+  if (log.arg) {
+    val <- log(ceps) + dnorm(zedd, log = TRUE)
+    val[lower] <- (log(ceps) + log(phik) + ( k * (zedd + k)))[lower]
+    val[upper] <- (log(ceps) + log(phik) + (-k * (zedd - k)))[upper]
+    val <- val - log(sigma)
+  } else {
+    val <- ceps * dnorm(zedd)
+    val[lower] <- (ceps * phik * exp( k * (zedd + k)))[lower]
+    val[upper] <- (ceps * phik * exp(-k * (zedd - k)))[upper]
+    val <- val / sigma
+  }
+  list(val = val, eps = eps)
+}
+
+
+dhuber <- function(x, k = 0.862, mu = 0, sigma = 1, log = FALSE) {
+  # Density of Huber's least favourable distribution; a thin wrapper
+  # that discards the 'eps' component returned by edhuber().
+  edhuber(x, k, mu, sigma, log = log)$val
+}
+
+
+
+
+
+rhuber <- function(n, k = 0.862, mu = 0, sigma = 1) {
+ # Random variates from Huber's least favourable distribution,
+ # generated standardized and then shifted/scaled by mu and sigma.
+ # Uses acceptance-rejection with a double-exponential proposal
+ # (y = s*x/k, x ~ Exp(1), s a random sign).
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integ=TRUE, allow = 1, posit = TRUE))
+ stop("bad input for argument 'n'") else n
+
+ myl <- rep(0.0, len = use.n)
+
+ lowlim <- 1
+ upplim <- 0
+ chunksize <- 2 * use.n
+ # Keep drawing chunks of proposals until enough are accepted.
+ while (lowlim <= use.n) {
+ x <- rexp(chunksize)
+ s <- sample(c(-1, 1), size = chunksize, replace = TRUE)
+ y <- s*x/k
+ u <- runif(chunksize)
+
+ # Accept always in the tails (|y| >= k, where target and proposal
+ # are proportional); otherwise accept with the density-ratio
+ # probability exp(k*|y| - (k^2 + y^2)/2) <= 1.
+ yok <- (abs(y) >= k | u <= exp(k * abs(y) - (k * k + y * y) / 2))
+ sumyok <- sum(yok)
+ if (sumyok > 0) {
+ upplim <- upplim + sumyok
+
+ # Grow the output buffer if this chunk overshoots.
+ if (upplim > use.n)
+ myl <- rep(myl, len = upplim)
+
+ myl[lowlim:upplim] <- y[yok]
+ lowlim <- lowlim + sumyok
+ }
+ }
+ myl <- rep(myl, len = use.n) # Prune to right length
+
+ rep(mu + sigma * myl, len = use.n)
+}
+
+
+
+
+
+
+
+
+
+
+
+qhuber <- function (p, k = 0.862, mu = 0, sigma = 1)
+{
+ # Quantile function of Huber's least favourable distribution.
+ # By symmetry it works with x = min(p, 1 - p) and reflects the
+ # resulting standardized quantile at the end.
+ if(min(sigma) <= 0) stop("'sigma' must be positive")
+ if(min(k) <= 0) stop("'k' must be positive")
+
+ # Normalization-related constant of the standardized density.
+ cnorm <- sqrt(2 * pi) * ((2 * pnorm(k) - 1) + 2 * dnorm(k) / k)
+ x <- pmin(p, 1 - p)
+ # Closed-form inversion in the exponential tail (x below the tail
+ # mass); otherwise invert the Gaussian centre via qnorm().
+ q <- ifelse(x <= sqrt(2 * pi) * dnorm(k) / ( k * cnorm),
+ log(k * cnorm * x) / k - k / 2,
+ qnorm(abs(1 - pnorm(k) + x * cnorm / sqrt(2 * pi) -
+ dnorm(k) / k)))
+ # Reflect: lower half of the distribution for p < 0.5.
+ ifelse(p < 0.5, mu + q * sigma,
+ mu - q * sigma)
+}
+
+
+
+
+phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
+  # CDF of Huber's least favourable distribution.  By symmetry, the
+  # lower-tail probability is evaluated at -|z| and reflected for z > 0.
+  if (any(sigma <= 0)) stop("sigma must be positive")
+
+  A1 <- 2 * dnorm(k) / k - 2 * pnorm(-k)
+  eps <- A1 / (1 + A1)  # contamination proportion
+  zedd <- (q - mu) / sigma
+  neg.z <- -abs(zedd)
+  # Exponential tail for -|z| <= -k, Gaussian body otherwise.
+  tail.part <- exp(k^2 / 2) / k * exp(k * neg.z) / sqrt(2 * pi)
+  body.part <- dnorm(k) / k + pnorm(neg.z) - pnorm(-k)
+  lower.tail.prob <- ifelse(neg.z <= -k, tail.part, body.part) * (1 - eps)
+  ifelse(zedd <= 0, lower.tail.prob, 1 - lower.tail.prob)
+}
+
+
+
+
+
+ # VGAM family function for Huber's least favourable distribution:
+ # Gaussian centre with exponential tails beyond +/- k.  Linear
+ # predictors: eta1 = link(location), eta2 = link(scale).  Fisher
+ # scoring uses closed-form expected information (misc$expected).
+ huber <- function(llocation = "identity", lscale = "loge",
+ elocation = list(), escale = list(),
+ k = 0.862,
+ method.init = 1,
+ zero = 2) {
+ # Contamination proportion implied by the tuning constant k.
+ A1 <- (2 * dnorm(k) / k - 2 * pnorm(-k))
+ eps <- A1 / (1 + A1)
+
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+ method.init > 4)
+ stop("'method.init' must be 1 or 2 or 3 or 4")
+
+ if (!is.Numeric(k, allow = 1, posit = TRUE))
+ stop("bad input for argument 'k'")
+
+ # Accept unquoted link functions.
+ if (mode(llocation) != "character" && mode(llocation) != "name")
+ llocation = as.character(substitute(llocation))
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ stop("bad input for argument 'zero'")
+ if (!is.list(elocation)) elocation = list()
+ if (!is.list(escale)) escale = list()
+
+ new("vglmff",
+ blurb = c("Huber least favorable distribution\n\n",
+ "Links: ",
+ namesof("location", llocation, earg = elocation), ", ",
+ namesof("scale", lscale, earg = escale), "\n\n",
+ "Mean: location"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ predictors.names <-
+ c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale, tag = FALSE))
+ if (ncol(y <- cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ if (!length(etastart)) {
+ # Initial scale from the residual SD of a weighted LS fit;
+ # initial location by one of four methods (LS fit, median,
+ # weighted mean, or the raw response).
+ junk = lm.wfit(x = x, y = y, w = w)
+ scale.y.est <- sqrt( sum(w * junk$resid^2) / junk$df.residual )
+ location.init <- if ( .llocat == "loge") pmax(1/1024, y) else {
+ if ( .method.init == 3) {
+ rep(weighted.mean(y, w), len = n)
+ } else if ( .method.init == 2) {
+ rep(median(rep(y, w)), len = n)
+ } else if ( .method.init == 1) {
+ junk$fitted
+ } else {
+ y
+ }
+ }
+ etastart <- cbind(
+ theta2eta(location.init, .llocat, earg = .elocat),
+ theta2eta(scale.y.est, .lscale, earg = .escale))
+ }
+ }), list( .llocat = llocation, .lscale = lscale,
+ .elocat = elocation, .escale = escale,
+ .method.init=method.init ))),
+ # Fitted values: the location parameter (the distribution's mean).
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[,1], .llocat, earg = .elocat)
+ }, list( .llocat = llocation,
+ .elocat = elocation, .escale = escale ))),
+ last = eval(substitute(expression({
+ misc$link <- c("location" = .llocat, "scale" = .lscale)
+ misc$earg <- list("location" = .elocat, "scale" = .escale)
+ misc$expected <- TRUE
+ misc$k.huber <- .k
+ misc$method.init <- .method.init
+ }), list( .llocat = llocation, .lscale = lscale,
+ .elocat = elocation, .escale = escale,
+ .k = k, .method.init = method.init ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ location <- eta2theta(eta[,1], .llocat, earg = .elocat)
+ myscale <- eta2theta(eta[,2], .lscale, earg = .escale)
+ kay <- .k
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * dhuber(y, k = kay, mu = location, sigma = myscale,
+ log = TRUE))
+ }
+ }, list( .llocat = llocation, .lscale = lscale,
+ .elocat = elocation, .escale = escale,
+ .k = k ))),
+ vfamily = c("huber"),
+ deriv = eval(substitute(expression({
+ mylocat <- eta2theta(eta[,1], .llocat, earg = .elocat)
+ myscale <- eta2theta(eta[,2], .lscale, earg = .escale)
+ myk <- .k
+
+ zedd <- (y - mylocat) / myscale
+ cond2 <- (abs(zedd) <= myk) # Gaussian centre
+ cond3 <- (zedd > myk) # upper exponential tail
+
+ # Score wrt location: -k/sigma (lower tail), z/sigma (centre),
+ # +k/sigma (upper tail).
+ dl.dlocat <- -myk + 0 * zedd # cond1
+ dl.dlocat[cond2] <- zedd[cond2]
+ dl.dlocat[cond3] <- myk # myk is a scalar
+ dl.dlocat <- dl.dlocat / myscale
+
+ # Score wrt scale: (-1 - k*z)/sigma, (-1 + z^2)/sigma, or
+ # (-1 + k*z)/sigma on the three pieces.
+ dl.dscale <- (-myk * zedd)
+ dl.dscale[cond2] <- (zedd^2)[cond2]
+ dl.dscale[cond3] <- ( myk * zedd)[cond3]
+ dl.dscale <- (-1 + dl.dscale) / myscale
+
+ dlocat.deta <- dtheta.deta(mylocat, .llocat, earg = .elocat)
+ dscale.deta <- dtheta.deta(myscale, .lscale, earg = .escale)
+ ans <-
+ w * cbind(dl.dlocat * dlocat.deta,
+ dl.dscale * dscale.deta)
+ ans
+ }), list( .llocat = llocation, .lscale = lscale,
+ .elocat = elocation, .escale = escale,
+ .eps = eps, .k = k ))),
+ weight = eval(substitute(expression({
+ wz <- matrix(as.numeric(NA), n, 2) # diag matrix; y is one-col too
+
+ # Closed-form expected information; only the two diagonal terms
+ # are stored (the off-diagonal term is treated as zero).
+ temp4 <- erf(myk / sqrt(2))
+ ed2l.dlocat2 <- temp4 * (1 - .eps) / myscale^2
+
+ ed2l.dscale2 <- (dnorm(myk) * (1 - myk^2) + temp4) *
+ 2 * (1 - .eps) / (myk * myscale^2)
+
+ wz[, iam(1,1,M)] <- ed2l.dlocat2 * dlocat.deta^2
+ wz[, iam(2,2,M)] <- ed2l.dscale2 * dscale.deta^2
+ # BUG FIX: a stray no-op expression 'ans' (left over from the
+ # deriv slot) was removed here; the weight slot returns w * wz.
+ w * wz
+ }), list( .eps = eps ))))
+}
+
+
+
diff --git a/R/family.rrr.R b/R/family.rrr.R
index 60b0feb..e24bca2 100644
--- a/R/family.rrr.R
+++ b/R/family.rrr.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -17,9 +18,9 @@ replace.constraints = function(Blist, cm, index) {
Alphavec=c(2, 4, 6, 9, 12, 16, 20, 25, 30, 40, 50,
60, 80, 100, 125, 2^(8:12)),
Criterion = c("rss", "coefficients"),
- Linesearch = FALSE, Maxit=7,
- Suppress.warning=TRUE,
- Tolerance=1e-7, ...)
+ Linesearch = FALSE, Maxit = 7,
+ Suppress.warning = TRUE,
+ Tolerance = 1e-7, ...)
{
if (mode(Criterion) != "character" && mode(Criterion) != "name")
@@ -40,7 +41,7 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
Rank = ncol(numat)
moff = NULL
ans = if (Quadratic) {
- index = iam(NA, NA, M=Rank, diagonal=TRUE, both=TRUE)
+ index = iam(NA, NA, M=Rank, diagonal = TRUE, both = TRUE)
temp1 = cbind(numat[,index$row] * numat[,index$col])
if (ITolerances) {
moff = 0
@@ -50,7 +51,7 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
cbind(numat, if (ITolerances) NULL else temp1)
} else
as.matrix(numat)
- list(matrix = if (Aoffset>0) ans else ans[,-(1:Rank),drop=FALSE],
+ list(matrix = if (Aoffset>0) ans else ans[,-(1:Rank),drop = FALSE],
offset = moff)
}
@@ -58,22 +59,22 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
- valt <- function(x, z, U, Rank=1,
- Blist=NULL,
- Cinit=NULL,
+ valt <- function(x, z, U, Rank = 1,
+ Blist = NULL,
+ Cinit = NULL,
Alphavec=c(2, 4, 6, 9, 12, 16, 20, 25, 30, 40, 50,
60, 80, 100, 125, 2^(8:12)),
Criterion=c("rss", "coefficients"),
- Crow1positive = rep(TRUE, len=Rank),
+ Crow1positive = rep(TRUE, len = Rank),
colx1.index,
- Linesearch=FALSE,
+ Linesearch = FALSE,
Maxit=20,
- Structural.zero=NULL,
+ szero = NULL,
SD.Cinit=0.02,
- Suppress.warning=FALSE,
- Tolerance=1e-6,
- trace=FALSE,
- xij=NULL)
+ Suppress.warning = FALSE,
+ Tolerance = 1e-6,
+ trace = FALSE,
+ xij = NULL)
{
@@ -114,15 +115,15 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
stop("input unconformable")
clist2 = replace.constraints(vector("list", Rank+p1),
- if (length(Structural.zero))
- diag(M)[,-Structural.zero,drop=FALSE] else diag(M), 1:Rank)
+ if (length(szero))
+ diag(M)[, -szero, drop = FALSE] else diag(M), 1:Rank)
if (p1) {
for(kk in 1:p1)
clist2[[Rank+kk]] <- Blist[[colx1.index[kk]]]
}
if (is.null(Cinit))
- Cinit <- matrix(rnorm(p2*Rank, sd=SD.Cinit), p2, Rank)
+ Cinit <- matrix(rnorm(p2*Rank, sd = SD.Cinit), p2, Rank)
fit <- list(rss = 0) # Only for initial old.crit below
@@ -133,7 +134,7 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
for(iter in 1:Maxit) {
iter.save <- iter
- lv.mat <- x[, colx2.index, drop=FALSE] %*% C
+ lv.mat <- x[, colx2.index, drop = FALSE] %*% C
new.lv.model.matrix = cbind(lv.mat,
if (p1) x[, colx1.index] else NULL)
fit = vlm.wfit(xmat = new.lv.model.matrix, z, Blist = clist2,
@@ -145,10 +146,10 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
fit = vlm.wfit(xmat = x, z, Blist = clist1, U = U,
matrix.out = TRUE, is.vlmX = FALSE,
rss = TRUE, qr = FALSE, xij = xij)
- C = fit$mat.coef[colx2.index, , drop=FALSE] %*% A %*%
+ C = fit$mat.coef[colx2.index, , drop = FALSE] %*% A %*%
solve(t(A) %*% A)
- numat = x[, colx2.index, drop=FALSE] %*% C
+ numat = x[, colx2.index, drop = FALSE] %*% C
evnu = eigen(var(numat))
temp7 = if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
evnu$vector %*% evnu$value^(-0.5)
@@ -164,10 +165,10 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
rss = max(abs(fit$rss - old.crit) / (Tolerance+fit$rss)))
if (trace) {
- cat(" Alternating iteration", iter,
- " ratio =", format(ratio), "\n")
+ cat(" Alternating iteration", iter,
+ ", Convergence criterion = ", format(ratio), "\n")
if (!is.null(fit$rss))
- cat(" rss =", fit$rss, "\n")
+ cat(" ResSS = ", fit$rss, "\n")
flush.console()
}
@@ -187,7 +188,7 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
for(itter in 1:length(Alphavec)) {
CC <- xold + Alphavec[itter] * direction1
- try.lv.mat <- x[, colx2.index, drop=FALSE] %*% CC
+ try.lv.mat <- x[, colx2.index, drop = FALSE] %*% CC
try.new.lv.model.matrix = cbind(try.lv.mat,
if (p1) x[,colx1.index] else NULL)
@@ -200,12 +201,12 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
fit <- try
ftemp <- try$rss
C <- CC
- A = t(fit$mat.coef[1:Rank, , drop=FALSE])
- lv.mat <- x[, colx2.index, drop=FALSE] %*% C
+ A = t(fit$mat.coef[1:Rank, , drop = FALSE])
+ lv.mat <- x[, colx2.index, drop = FALSE] %*% C
recover = iter # Give it some altg iters to recover
} else {
if (trace && use.alpha > 0) {
- cat(" Finished line search using Alpha =",
+ cat(" Finished line search using Alpha = ",
use.alpha, "\n")
flush.console()
}
@@ -216,7 +217,7 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
}
xold <- C # Do not take care of drift
- old.crit <- switch(Criterion, coefficients=C, rss=fit$rss)
+ old.crit <- switch(Criterion, coefficients = C, rss = fit$rss)
} # End of iter loop
list(A = A, C = C, fitted = fit$fitted, new.coeffs = fit$coef,
@@ -225,8 +226,8 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
-lm2qrrvlm.model.matrix = function(x, Blist, C, control, assign=TRUE,
- no.thrills=FALSE)
+lm2qrrvlm.model.matrix = function(x, Blist, C, control, assign = TRUE,
+ no.thrills = FALSE)
{
Rank = control$Rank
@@ -238,7 +239,7 @@ lm2qrrvlm.model.matrix = function(x, Blist, C, control, assign=TRUE,
M = nrow(Blist[[1]])
p1 = length(colx1.index)
- combine2 = c(control$Structural.zero,
+ combine2 = c(control$szero,
if (Corner) control$Index.corner else NULL)
Qoffset = if (Quadratic) ifelse(ITolerances, 0, sum(1:Rank)) else 0
@@ -249,31 +250,31 @@ lm2qrrvlm.model.matrix = function(x, Blist, C, control, assign=TRUE,
} else {
Aoffset = Rank
replace.constraints(vector("list", Aoffset+Qoffset+p1),
- if (length(combine2)) diag(M)[,-combine2,drop=FALSE] else diag(M),
+ if (length(combine2)) diag(M)[,-combine2,drop = FALSE] else diag(M),
1:Rank) # If Corner then does not contain \bI_{Rank}
}
if (Quadratic && !ITolerances)
clist2 = replace.constraints(clist2,
if (control$EqualTolerances)
matrix(1, M, 1) - eij(Dzero, M) else {
- if (length(Dzero)) diag(M)[,-Dzero,drop=FALSE] else diag(M)},
+ if (length(Dzero)) diag(M)[,-Dzero,drop = FALSE] else diag(M)},
Aoffset + (1:Qoffset))
if (p1)
for(kk in 1:p1)
clist2[[Aoffset+Qoffset+kk]] <- Blist[[colx1.index[kk]]]
if (!no.thrills) {
- i63 = iam(NA, NA, M=Rank, both=TRUE)
+ i63 = iam(NA, NA, M=Rank, both = TRUE)
names(clist2) = c(
- if (NoA) NULL else paste("(lv", 1:Rank, ")", sep=""),
- if (Quadratic && Rank==1 && !ITolerances)
+ if (NoA) NULL else paste("(lv", 1:Rank, ")", sep = ""),
+ if (Quadratic && Rank == 1 && !ITolerances)
"(lv^2)" else
if (Quadratic && Rank>1 && !ITolerances)
- paste("(lv", i63$row, ifelse(i63$row==i63$col, "^2",
- paste("*lv", i63$col, sep="")), ")", sep="") else NULL,
+ paste("(lv", i63$row, ifelse(i63$row == i63$col, "^2",
+ paste("*lv", i63$col, sep = "")), ")", sep = "") else NULL,
if (p1) names(colx1.index) else NULL)
}
- lv.mat = x[,control$colx2.index,drop=FALSE] %*% C
+ lv.mat = x[,control$colx2.index,drop = FALSE] %*% C
tmp900 = qrrvglm.xprod(lv.mat, Aoffset, Quadratic, ITolerances)
@@ -306,9 +307,9 @@ valt.2iter <- function(x, z, U, Blist, A, control) {
clist1 = replace.constraints(Blist, A, control$colx2.index)
- fit <- vlm.wfit(xmat=x, z, Blist=clist1, U=U, matrix.out=TRUE,
- is.vlmX=FALSE, rss=TRUE, qr=FALSE, xij=control$xij)
- C = fit$mat.coef[control$colx2.index,,drop=FALSE] %*% A %*% solve(t(A) %*% A)
+ fit <- vlm.wfit(xmat=x, z, Blist=clist1, U=U, matrix.out = TRUE,
+ is.vlmX = FALSE, rss = TRUE, qr = FALSE, xij=control$xij)
+ C = fit$mat.coef[control$colx2.index,,drop = FALSE] %*% A %*% solve(t(A) %*% A)
list(A=A, C=C, fitted=fit$fitted, new.coeffs = fit$coef,
Blist=clist1, rss=fit$rss)
@@ -316,7 +317,7 @@ valt.2iter <- function(x, z, U, Blist, A, control) {
-valt.1iter = function(x, z, U, Blist, C, control, lp.names=NULL, nice31=FALSE,
+valt.1iter = function(x, z, U, Blist, C, control, lp.names = NULL, nice31 = FALSE,
MSratio = 1) {
Rank = control$Rank
@@ -329,7 +330,7 @@ valt.1iter = function(x, z, U, Blist, C, control, lp.names=NULL, nice31=FALSE,
ITolerances = control$ITolerances
Qoffset = if (Quadratic) ifelse(ITolerances, 0, sum(1:Rank)) else 0
- tmp833 = lm2qrrvlm.model.matrix(x=x, Blist=Blist, C=C, control=control)
+ tmp833 = lm2qrrvlm.model.matrix(x=x, Blist = Blist, C=C, control=control)
new.lv.model.matrix = tmp833$new.lv.model.matrix
clist2 = clist2 = tmp833$constraints # Does not contain \bI_{Rank}
lv.mat = tmp833$lv.mat
@@ -341,14 +342,14 @@ valt.1iter = function(x, z, U, Blist, C, control, lp.names=NULL, nice31=FALSE,
clist2 = NULL # for vlm.wfit
- i5 = rep(0, len=MSratio)
+ i5 = rep(0, len = MSratio)
for(ii in 1:NOS) {
i5 = i5 + 1:MSratio
- tmp100 = vlm.wfit(xmat=new.lv.model.matrix, zedd[,i5,drop=FALSE],
- Blist=clist2, U=U[i5,,drop=FALSE],
- matrix.out=TRUE, is.vlmX=FALSE, rss=TRUE,
- qr=FALSE, Eta.range = control$Eta.range,
+ tmp100 = vlm.wfit(xmat=new.lv.model.matrix, zedd[,i5,drop = FALSE],
+ Blist=clist2, U=U[i5,,drop = FALSE],
+ matrix.out = TRUE, is.vlmX = FALSE, rss = TRUE,
+ qr = FALSE, Eta.range = control$Eta.range,
xij=control$xij, lp.names=lp.names[i5])
fit$rss = fit$rss + tmp100$rss
fit$mat.coef = cbind(fit$mat.coef, tmp100$mat.coef)
@@ -356,16 +357,16 @@ valt.1iter = function(x, z, U, Blist, C, control, lp.names=NULL, nice31=FALSE,
}
} else {
fit = vlm.wfit(xmat=new.lv.model.matrix, zedd, Blist=clist2, U=U,
- matrix.out=TRUE, is.vlmX=FALSE, rss=TRUE, qr=FALSE,
+ matrix.out = TRUE, is.vlmX = FALSE, rss = TRUE, qr = FALSE,
Eta.range = control$Eta.range,
xij=control$xij, lp.names=lp.names)
}
A = if (tmp833$NoA) matrix(0, M, Rank) else
- t(fit$mat.coef[1:Rank,,drop=FALSE])
+ t(fit$mat.coef[1:Rank,,drop = FALSE])
if (Corner)
A[Index.corner,] = diag(Rank)
- B1 = if (p1) fit$mat.coef[-(1:(tmp833$Aoffset+Qoffset)),,drop=FALSE] else NULL
+ B1 = if (p1) fit$mat.coef[-(1:(tmp833$Aoffset+Qoffset)),,drop = FALSE] else NULL
fv = as.matrix(fit$fitted.values)
if (Corner)
fv[,Index.corner] = fv[,Index.corner] + lv.mat
@@ -376,7 +377,7 @@ valt.1iter = function(x, z, U, Blist, C, control, lp.names=NULL, nice31=FALSE,
tmp800
} else
t(fit$mat.coef[(tmp833$Aoffset+1):
- (tmp833$Aoffset+Qoffset),,drop=FALSE])
+ (tmp833$Aoffset+Qoffset),,drop = FALSE])
} else
NULL
@@ -398,21 +399,21 @@ rrr.init.expression <- expression({
if (function.name %in% c("cqo", "cao")) {
- modelno = switch(family at vfamily[1], "poissonff"=2,
- "quasipoissonff"=2, "quasipoisson"=2,
- "binomialff"=1, "quasibinomialff"=1,
- "quasibinomial"=1, "negbinomial"=3,
- "gamma2"=5, "gaussianff"=8,
+ modelno = switch(family at vfamily[1], "poissonff" = 2,
+ "quasipoissonff" = 2, "quasipoisson" = 2,
+ "binomialff" = 1, "quasibinomialff" = 1,
+ "quasibinomial" = 1, "negbinomial" = 3,
+ "gamma2" = 5, "gaussianff" = 8,
0) # stop("cannot fit this model using fast algorithm")
- if (modelno == 1) modelno = get("modelno", envir = VGAMenv)
+ if (modelno == 1) modelno = get("modelno", envir = VGAM:::VGAMenv)
rrcontrol$modelno = control$modelno = modelno
- if (modelno==3 || modelno==5) {
+ if (modelno == 3 || modelno == 5) {
M = 2 * ifelse(is.matrix(y), ncol(y), 1)
- control$Structural.zero =
- rrcontrol$Structural.zero = seq(from=2, to=M, by=2) # Handles A
- control$Dzero =
+ control$szero =
+ rrcontrol$szero = seq(from=2, to=M, by=2) # Handles A
+ control$Dzero =
rrcontrol$Dzero = seq(from=2, to=M, by=2) # Handles D
@@ -429,20 +430,20 @@ rrr.init.expression <- expression({
rrr.alternating.expression <- expression({
alt <- valt(x, z, U, Rank=Rank,
- Blist=Blist,
- Cinit=rrcontrol$Cinit,
- Criterion=rrcontrol$Criterion,
- colx1.index=rrcontrol$colx1.index,
- Linesearch=rrcontrol$Linesearch,
- Maxit=rrcontrol$Maxit,
- Structural.zero=rrcontrol$Structural.zero,
- SD.Cinit=rrcontrol$SD.Cinit,
- Suppress.warning=rrcontrol$Suppress.warning,
- Tolerance=rrcontrol$Tolerance,
- trace=trace,
- xij=control$xij) # This is subject to drift in A and C
-
- ans2 = rrr.normalize(rrcontrol=rrcontrol, A=alt$A, C=alt$C, x=x)
+ Blist = Blist,
+ Cinit = rrcontrol$Cinit,
+ Criterion = rrcontrol$Criterion,
+ colx1.index = rrcontrol$colx1.index,
+ Linesearch = rrcontrol$Linesearch,
+ Maxit = rrcontrol$Maxit,
+ szero = rrcontrol$szero,
+ SD.Cinit = rrcontrol$SD.Cinit,
+ Suppress.warning = rrcontrol$Suppress.warning,
+ Tolerance = rrcontrol$Tolerance,
+ trace = trace,
+ xij = control$xij) # This is subject to drift in A and C
+
+ ans2 = rrr.normalize(rrcontrol = rrcontrol, A=alt$A, C=alt$C, x=x)
Amat = ans2$A # Fed into Blist below (in rrr.end.expression)
tmp.fitted = alt$fitted # Also fed; was alt2$fitted
@@ -458,7 +459,7 @@ rrr.alternating.expression <- expression({
if (length(Dmat)) {
ind0 = iam(NA, NA, both= TRUE, M=Rank)
for(kay in 1:M) {
- elts = Dmat[kay,,drop=FALSE] # Manual recycling
+ elts = Dmat[kay,,drop = FALSE] # Manual recycling
if (length(elts) < Rank)
elts = matrix(elts, 1, Rank)
Dk = m2adefault(elts, M=Rank)[,,1]
@@ -471,7 +472,7 @@ rrr.alternating.expression <- expression({
-rrr.normalize = function(rrcontrol, A, C, x, Dmat=NULL) {
+rrr.normalize = function(rrcontrol, A, C, x, Dmat = NULL) {
@@ -482,7 +483,7 @@ rrr.normalize = function(rrcontrol, A, C, x, Dmat=NULL) {
C.old = C
if (rrcontrol$Corner) {
- tmp87 = A[Index.corner,,drop=FALSE]
+ tmp87 = A[Index.corner,,drop = FALSE]
Mmat <- solve(tmp87) # The normalizing matrix
C <- C %*% t(tmp87)
A <- A %*% Mmat
@@ -494,17 +495,17 @@ rrr.normalize = function(rrcontrol, A, C, x, Dmat=NULL) {
temp = svd(C %*% t(A))
if (!is.matrix(temp$v))
temp$v = as.matrix(temp$v)
- C = temp$u[,1:Rank,drop=FALSE] %*%
+ C = temp$u[,1:Rank,drop = FALSE] %*%
diag(temp$d[1:Rank]^(1-rrcontrol$Alpha), nrow=Rank)
A = diag(temp$d[1:Rank]^(rrcontrol$Alpha), nrow=Rank) %*%
- t(temp$v[,1:Rank,drop=FALSE])
+ t(temp$v[,1:Rank,drop = FALSE])
A = t(A)
Mmat = t(C.old) %*% C.old %*% solve(t(C) %*% C.old)
eval(adjust.Dmat.expression)
}
if (rrcontrol$Uncorrelated.lv) {
- lv.mat <- x[,colx2.index,drop=FALSE] %*% C
+ lv.mat <- x[,colx2.index,drop = FALSE] %*% C
var.lv.mat <- var(lv.mat)
UU = chol(var.lv.mat)
Ut <- solve(UU)
@@ -534,8 +535,8 @@ rrr.normalize = function(rrcontrol, A, C, x, Dmat=NULL) {
rrr.end.expression = expression({
- if (exists(".VGAM.etamat", envir = VGAMenv))
- rm(".VGAM.etamat", envir = VGAMenv)
+ if (exists(".VGAM.etamat", envir = VGAM:::VGAMenv))
+ rm(".VGAM.etamat", envir = VGAM:::VGAMenv)
if (control$Quadratic) {
@@ -548,8 +549,8 @@ rrr.end.expression = expression({
}
X_vlm_save = if (control$Quadratic) {
- tmp300 = lm2qrrvlm.model.matrix(x=x, Blist=Blist.save,
- C=Cmat, control=control)
+ tmp300 = lm2qrrvlm.model.matrix(x=x, Blist = Blist.save,
+ C = Cmat, control=control)
lv.mat = tmp300$lv.mat # Needed at the top of new.s.call
lm2vlm.model.matrix(tmp300$new.lv.model.matrix,B.list,xij=control$xij)
@@ -571,7 +572,7 @@ rrr.end.expression = expression({
deriv.mu <- eval(family at deriv)
wz <- eval(family at weight)
if (control$checkwz)
- wz = checkwz(wz, M=M, trace=trace, wzeps=control$wzepsilon)
+ wz = checkwz(wz, M=M, trace = trace, wzeps=control$wzepsilon)
U <- vchol(wz, M=M, n=n, silent=!trace)
tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=n)
z <- eta + vbacksub(U, tvfor, M=M, n=n) - offset # Contains \bI \bnu
@@ -605,18 +606,18 @@ rrr.derivative.expression <- expression({
all(trivial.constraints(constraints) == 1)
theta0 <- c(Cmat)
- assign(".VGAM.dot.counter", 0, envir = VGAMenv)
+ assign(".VGAM.dot.counter", 0, envir = VGAM:::VGAMenv)
if (control$OptimizeWrtC) {
if (control$Quadratic && control$FastAlgorithm) {
if (iter == 2) {
- if (exists(".VGAM.etamat", envir = VGAMenv))
- rm(".VGAM.etamat", envir = VGAMenv)
+ if (exists(".VGAM.etamat", envir = VGAM:::VGAMenv))
+ rm(".VGAM.etamat", envir = VGAM:::VGAMenv)
}
if (iter > 2 && !quasi.newton$convergence) {
- if (zthere <- exists(".VGAM.z", envir = VGAMenv)) {
- ..VGAM.z = get(".VGAM.z", envir = VGAMenv)
- ..VGAM.U = get(".VGAM.U", envir = VGAMenv)
- ..VGAM.beta = get(".VGAM.beta", envir = VGAMenv)
+ if (zthere <- exists(".VGAM.z", envir = VGAM:::VGAMenv)) {
+ ..VGAM.z = get(".VGAM.z", envir = VGAM:::VGAMenv)
+ ..VGAM.U = get(".VGAM.U", envir = VGAM:::VGAMenv)
+ ..VGAM.beta = get(".VGAM.beta", envir = VGAM:::VGAMenv)
}
if (zthere) {
z = matrix(..VGAM.z, n, M) # minus any offset
@@ -626,23 +627,23 @@ rrr.derivative.expression <- expression({
}
if (iter == 2 || quasi.newton$convergence) {
- NOS = ifelse(modelno==3 || modelno==5, M/2, M)
+ NOS = ifelse(modelno == 3 || modelno == 5, M/2, M)
- canfitok = (exists("CQO.FastAlgorithm", envir=VGAMenv) &&
- get("CQO.FastAlgorithm", envir = VGAMenv))
+ canfitok = (exists("CQO.FastAlgorithm", envir=VGAM:::VGAMenv) &&
+ get("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
if (!canfitok)
stop("cannot fit this model using fast algorithm")
p2star = if (nice31)
ifelse(control$IToleran, Rank, Rank+0.5*Rank*(Rank+1)) else
(NOS*Rank + Rank*(Rank+1)/2 * ifelse(control$EqualTol,1,NOS))
- p1star = if (nice31) p1 * ifelse(modelno==3 || modelno==5,2,1) else
+ p1star = if (nice31) p1 * ifelse(modelno == 3 || modelno == 5,2,1) else
(ncol(X_vlm_save) - p2star)
X_vlm_1save = if (p1star > 0) X_vlm_save[,-(1:p2star)] else NULL
quasi.newton = optim(par=Cmat, fn=callcqof,
gr = if (control$GradientFunction) calldcqo else NULL,
method=which.optimizer,
- control=list(fnscale=1,trace=as.integer(control$trace),
- parscale=rep(control$Parscale, len=length(Cmat)),
+ control=list(fnscale = 1,trace=as.integer(control$trace),
+ parscale=rep(control$Parscale, len = length(Cmat)),
maxit=250),
etamat=eta, xmat=x, ymat=y, wvec=w,
X_vlm_1save = if (nice31) NULL else X_vlm_1save,
@@ -650,18 +651,18 @@ rrr.derivative.expression <- expression({
n=n, M=M, p1star=p1star, p2star=p2star, nice31=nice31)
- if (zthere <- exists(".VGAM.z", envir = VGAMenv)) {
- ..VGAM.z = get(".VGAM.z", envir = VGAMenv)
- ..VGAM.U = get(".VGAM.U", envir = VGAMenv)
- ..VGAM.beta = get(".VGAM.beta", envir = VGAMenv)
+ if (zthere <- exists(".VGAM.z", envir = VGAM:::VGAMenv)) {
+ ..VGAM.z = get(".VGAM.z", envir = VGAM:::VGAMenv)
+ ..VGAM.U = get(".VGAM.U", envir = VGAM:::VGAMenv)
+ ..VGAM.beta = get(".VGAM.beta", envir = VGAM:::VGAMenv)
}
if (zthere) {
z = matrix(..VGAM.z, n, M) # minus any offset
U = matrix(..VGAM.U, M, n)
}
} else {
- if (exists(".VGAM.offset", envir = VGAMenv))
- rm(".VGAM.offset", envir = VGAMenv)
+ if (exists(".VGAM.offset", envir = VGAM:::VGAMenv))
+ rm(".VGAM.offset", envir = VGAM:::VGAMenv)
}
} else {
use.reltol = if (length(rrcontrol$Reltol) >= iter)
@@ -676,26 +677,26 @@ rrr.derivative.expression <- expression({
reltol=use.reltol),
U=U, z= if (control$ITolerances) z+offset else z,
M=M, xmat=x, # varbix2=varbix2,
- Blist=Blist, rrcontrol=rrcontrol)
+ Blist = Blist, rrcontrol = rrcontrol)
}
- Cmat = matrix(quasi.newton$par, p2, Rank, byrow=FALSE)
+ Cmat = matrix(quasi.newton$par, p2, Rank, byrow = FALSE)
if (Rank > 1 && rrcontrol$ITolerances) {
- numat = x[,rrcontrol$colx2.index,drop=FALSE] %*% Cmat
+ numat = x[,rrcontrol$colx2.index,drop = FALSE] %*% Cmat
evnu = eigen(var(numat))
Cmat = Cmat %*% evnu$vector
- numat = x[,rrcontrol$colx2.index,drop=FALSE] %*% Cmat
+ numat = x[,rrcontrol$colx2.index,drop = FALSE] %*% Cmat
offset = if (Rank > 1) -0.5*rowSums(numat^2) else -0.5*numat^2
}
}
- alt = valt.1iter(x=x, z=z, U=U, Blist=Blist, C=Cmat, nice31=nice31,
- control=rrcontrol, lp.names=predictors.names)
+ alt = valt.1iter(x=x, z=z, U=U, Blist = Blist, C = Cmat, nice31=nice31,
+ control = rrcontrol, lp.names=predictors.names)
if (length(alt$offset))
@@ -711,14 +712,14 @@ rrr.derivative.expression <- expression({
if (trace && control$OptimizeWrtC) {
cat("\n")
cat(which.optimizer, "using optim():\n")
- cat("Objective =", quasi.newton$value, "\n")
+ cat("Objective = ", quasi.newton$value, "\n")
cat("Parameters (= c(C)) = ", if (length(quasi.newton$par) < 5)
"" else "\n")
- cat(alt$Cmat, fill=TRUE)
+ cat(alt$Cmat, fill = TRUE)
cat("\n")
- cat("Number of function evaluations =", quasi.newton$count[1], "\n")
+ cat("Number of function evaluations = ", quasi.newton$count[1], "\n")
if (length(quasi.newton$message))
- cat("Message =", quasi.newton$message, "\n")
+ cat("Message = ", quasi.newton$message, "\n")
cat("\n")
flush.console()
}
@@ -735,53 +736,54 @@ rrr.derivative.expression <- expression({
rrr.derivC.rss = function(theta, U, z, M, xmat, Blist, rrcontrol,
- omit.these=NULL) {
+ omit.these = NULL) {
if (rrcontrol$trace) {
cat(".")
flush.console()
}
- alreadyThere = exists(".VGAM.dot.counter", envir = VGAMenv)
+ alreadyThere = exists(".VGAM.dot.counter", envir = VGAM:::VGAMenv)
if (alreadyThere) {
- VGAM.dot.counter = get(".VGAM.dot.counter", envir = VGAMenv)
+ VGAM.dot.counter = get(".VGAM.dot.counter", envir = VGAM:::VGAMenv)
VGAM.dot.counter = VGAM.dot.counter + 1
- assign(".VGAM.dot.counter", VGAM.dot.counter, envir = VGAMenv)
+ assign(".VGAM.dot.counter", VGAM.dot.counter, envir = VGAM:::VGAMenv)
if (VGAM.dot.counter > max(50, options()$width - 5)) {
if (rrcontrol$trace) {
cat("\n")
flush.console()
}
- assign(".VGAM.dot.counter", 0, envir = VGAMenv)
+ assign(".VGAM.dot.counter", 0, envir = VGAM:::VGAMenv)
}
}
Cmat = matrix(theta, length(rrcontrol$colx2.index), rrcontrol$Rank)
- tmp700 = lm2qrrvlm.model.matrix(x=xmat, Blist=Blist,
+ tmp700 = lm2qrrvlm.model.matrix(x = xmat, Blist = Blist,
no.thrills = !rrcontrol$Corner,
- C=Cmat, control=rrcontrol, assign=FALSE)
+ C = Cmat, control = rrcontrol, assign = FALSE)
Blist = tmp700$constraints # Does not contain \bI_{Rank} \bnu
if (rrcontrol$Corner) {
z = as.matrix(z) # should actually call this zedd
- z[,rrcontrol$Index.corner] = z[,rrcontrol$Index.corner] - tmp700$lv.mat
+ z[,rrcontrol$Index.corner] = z[,rrcontrol$Index.corner] -
+ tmp700$lv.mat
}
if (length(tmp700$offset)) z = z - tmp700$offset
vlm.wfit(xmat=tmp700$new.lv.model.matrix, zmat=z,
- Blist=Blist, ncolx=ncol(xmat), U=U, only.rss=TRUE,
- matrix.out=FALSE, is.vlmX=FALSE, rss= TRUE, qr=FALSE,
+ Blist = Blist, ncolx=ncol(xmat), U=U, only.rss = TRUE,
+ matrix.out = FALSE, is.vlmX = FALSE, rss= TRUE, qr = FALSE,
Eta.range = rrcontrol$Eta.range,
xij=rrcontrol$xij)$rss
}
-rrvglm.optim.control = function(Fnscale=1,
- Maxit=100,
+rrvglm.optim.control = function(Fnscale = 1,
+ Maxit = 100,
Switch.optimizer=3,
Abstol= -Inf,
Reltol=sqrt(.Machine$double.eps),
@@ -830,7 +832,7 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
stop("'varlvI' must be TRUE or FALSE")
if (length(reference) > 1) stop("'reference' must be of length 0 or 1")
if (length(reference) && is.Numeric(reference))
- if (!is.Numeric(reference, allow=1, integ=TRUE))
+ if (!is.Numeric(reference, allow = 1, integ = TRUE))
stop("bad input for argument 'reference'")
if (!is.logical(ConstrainedQO <- object at control$ConstrainedQO))
stop("cannot determine whether the model is constrained or not")
@@ -845,13 +847,13 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
p1 = length(ocontrol$colx1.index)
p2 = length(ocontrol$colx2.index)
Index.corner = ocontrol$Index.corner
- Structural.zero = ocontrol$Structural.zero
+ szero = ocontrol$szero
EqualTolerances = ocontrol$EqualTolerances
Dzero = ocontrol$Dzero
Corner = if (ConstrainedQO) ocontrol$Corner else FALSE
estITol = if (ConstrainedQO) object at control$ITolerances else FALSE
modelno = object at control$modelno # 1,2,3,4,5,6,7 or 0
- combine2 = c(Structural.zero, if (Corner) Index.corner else NULL)
+ combine2 = c(szero, if (Corner) Index.corner else NULL)
NoA = length(combine2) == M # A is fully known.
Qoffset = if (Quadratic) ifelse(estITol, 0, sum(1:Rank)) else 0
@@ -859,15 +861,15 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
ynames = object at misc$ynames
if (!length(ynames)) ynames = object at misc$predictors.names
if (!length(ynames)) ynames = object at misc$ynames
- if (!length(ynames)) ynames = paste("Y", 1:NOS, sep="")
+ if (!length(ynames)) ynames = paste("Y", 1:NOS, sep = "")
lp.names = object at misc$predictors.names
if (!length(lp.names)) lp.names = NULL
- dzero.vector = rep(FALSE, length=M)
+ dzero.vector = rep(FALSE, length = M)
if (length(Dzero))
dzero.vector[Dzero] = TRUE
names(dzero.vector) = ynames
- lv.names = if (Rank==1) "lv" else paste("lv", 1:Rank, sep="")
+ lv.names = if (Rank == 1) "lv" else paste("lv", 1:Rank, sep = "")
td.expression = expression({
Tolerance = Darray = m2adefault(Dmat, M=Rank)
@@ -888,7 +890,7 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
Cmat = object at extra$Cmat # p2 x Rank
Dmat = object at extra$Dmat #
B1 = object at extra$B1 #
- bellshaped = rep(FALSE, length=M)
+ bellshaped = rep(FALSE, length = M)
if (is.character(reference)) {
reference = (1:NOS)[reference == ynames]
@@ -902,7 +904,7 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
if (ptr1 > 0) {
this.spp = candidates[ptr1]
}
- elts = Dmat[this.spp,,drop=FALSE]
+ elts = Dmat[this.spp,,drop = FALSE]
if (length(elts) < Rank)
elts = matrix(elts, 1, Rank)
Dk = m2adefault(elts, M=Rank)[,,1] # Hopefully negative-def
@@ -939,7 +941,7 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
if (ConstrainedQO)
if (Rank > 1) {
if (!length(xmat <- object at x)) stop("cannot obtain the model matrix")
- numat = xmat[,ocontrol$colx2.index,drop=FALSE] %*% Cmat
+ numat = xmat[,ocontrol$colx2.index,drop = FALSE] %*% Cmat
evnu = eigen(var(numat))
Mmat = solve(t(evnu$vector))
Cmat = Cmat %*% evnu$vector # == Cmat %*% solve(t(Mmat))
@@ -955,7 +957,7 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
if (ConstrainedQO)
if (varlvI) {
if (!length(xmat <- object at x)) stop("cannot obtain the model matrix")
- numat = xmat[,ocontrol$colx2.index,drop=FALSE] %*% Cmat
+ numat = xmat[,ocontrol$colx2.index,drop = FALSE] %*% Cmat
sdnumat = sd(numat)
Mmat = if (Rank > 1) diag(sdnumat) else matrix(sdnumat, 1, 1)
Cmat = Cmat %*% solve(t(Mmat))
@@ -969,22 +971,22 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
cx1i = ocontrol$colx1.index
- maximum = if (length(cx1i)==1 && names(cx1i)=="(Intercept)") {
+ maximum = if (length(cx1i) == 1 && names(cx1i) == "(Intercept)") {
eta.temp = B1
for(ii in 1:M)
eta.temp[ii] = eta.temp[ii] +
- Amat[ii,,drop=FALSE] %*% optimum[,ii,drop=FALSE] +
- t(optimum[,ii,drop=FALSE]) %*%
- Darray[,,ii,drop= TRUE] %*% optimum[,ii,drop=FALSE]
+ Amat[ii,,drop = FALSE] %*% optimum[,ii,drop = FALSE] +
+ t(optimum[,ii,drop = FALSE]) %*%
+ Darray[,,ii,drop= TRUE] %*% optimum[,ii,drop = FALSE]
mymax = object at family@inverse(rbind(eta.temp), extra=object at extra)
c(mymax) # Convert from matrix to vector
} else {
- 5 * rep(as.numeric(NA), len=M) # Make "numeric"
+ 5 * rep(as.numeric(NA), len = M) # Make "numeric"
}
names(maximum) = ynames
lv.mat = if (ConstrainedQO) {
- object at x[,ocontrol$colx2.index,drop=FALSE] %*% Cmat
+ object at x[,ocontrol$colx2.index,drop = FALSE] %*% Cmat
} else {
object at lv
}
@@ -1028,10 +1030,10 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
}
if (MSratio > 1) {
- keepIndex = seq(from=1, to=M, by=MSratio)
+ keepIndex = seq(from = 1, to=M, by=MSratio)
ans at Dzero = ans at Dzero[keepIndex]
- ans at Optimum = ans at Optimum[,keepIndex,drop=FALSE]
- ans at Tolerance = ans at Tolerance[,,keepIndex,drop=FALSE]
+ ans at Optimum = ans at Optimum[,keepIndex,drop = FALSE]
+ ans at Tolerance = ans at Tolerance[,,keepIndex,drop = FALSE]
ans at bellshaped = ans at bellshaped[keepIndex]
names(ans at Dzero) = ynames
} else {
@@ -1044,7 +1046,7 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
}
-setClass(Class="Coef.rrvglm", representation(
+setClass(Class = "Coef.rrvglm", representation(
"A" = "matrix",
"B1" = "matrix", # This may be unassigned if p1=0.
"C" = "matrix",
@@ -1053,7 +1055,7 @@ setClass(Class="Coef.rrvglm", representation(
"colx2.index" = "numeric",
"Atilde" = "matrix"))
-setClass(Class="Coef.uqo", representation(
+setClass(Class = "Coef.uqo", representation(
"A" = "matrix",
"B1" = "matrix",
"Constrained" = "logical",
@@ -1070,7 +1072,7 @@ setClass(Class="Coef.uqo", representation(
"Dzero" = "logical",
"Tolerance" = "array"))
-setClass(Class="Coef.qrrvglm", representation(
+setClass(Class = "Coef.qrrvglm", representation(
"C" = "matrix"),
contains = "Coef.uqo")
@@ -1083,14 +1085,14 @@ printCoef.qrrvglm = function(x, ...) {
mymat = matrix(as.numeric(NA), NOS, Rank)
if (Rank == 1) { # || object at Diagonal
for(ii in 1:NOS) {
- fred = if (Rank>1) diag(object at Tolerance[,,ii,drop=F]) else
+ fred = if (Rank>1) diag(object at Tolerance[,,ii,drop = F]) else
object at Tolerance[,,ii]
if (all(fred > 0))
mymat[ii,] = sqrt(fred)
}
dimnames(mymat) = list(dimnames(object at Tolerance)[[3]],
- if (Rank==1) "lv" else
- paste("Tolerance", dimnames(mymat)[[2]], sep=""))
+ if (Rank == 1) "lv" else
+ paste("Tolerance", dimnames(mymat)[[2]], sep = ""))
} else {
for(ii in 1:NOS) {
fred = eigen(object at Tolerance[,,ii])
@@ -1098,21 +1100,21 @@ printCoef.qrrvglm = function(x, ...) {
mymat[ii,] = sqrt(fred$value)
}
dimnames(mymat) = list(dimnames(object at Tolerance)[[3]],
- paste("tol", 1:Rank, sep=""))
+ paste("tol", 1:Rank, sep = ""))
}
dimnames(object at A) = list(dimnames(object at A)[[1]],
- if (Rank > 1) paste("A", dimnames(object at A)[[2]], sep=".") else "A")
+ if (Rank > 1) paste("A", dimnames(object at A)[[2]], sep = ".") else "A")
Maximum = if (length(object at Maximum)) cbind(Maximum=object at Maximum) else NULL
- if (length(Maximum) && length(mymat) && Rank==1)
+ if (length(Maximum) && length(mymat) && Rank == 1)
Maximum[is.na(mymat),] = NA
optmat = cbind(t(object at Optimum))
dimnames(optmat) = list(dimnames(optmat)[[1]],
- if (Rank > 1) paste("Optimum", dimnames(optmat)[[2]], sep=".")
+ if (Rank > 1) paste("Optimum", dimnames(optmat)[[2]], sep = ".")
else "Optimum")
- if (length(optmat) && length(mymat) && Rank==1)
+ if (length(optmat) && length(mymat) && Rank == 1)
optmat[is.na(mymat),] = NA
if ( object at Constrained ) {
@@ -1145,25 +1147,25 @@ setMethod("summary", "qrrvglm", function(object, ...)
predict.qrrvglm <- function(object,
- newdata=NULL,
+ newdata = NULL,
type=c("link", "response", "lv", "terms"),
- se.fit=FALSE,
+ se.fit = FALSE,
deriv=0,
- dispersion=NULL,
+ dispersion = NULL,
extra=object at extra,
varlvI = FALSE, reference = NULL, ...)
{
if (se.fit)
- stop("cannot handle se.fit==TRUE yet")
+ stop("cannot handle se.fit == TRUE yet")
if (deriv != 0)
stop("derivative is not equal to 0")
if (mode(type) != "character" && mode(type) != "name")
type <- as.character(substitute(type))
type <- match.arg(type, c("link", "response", "lv", "terms"))[1]
- if (type=="lv")
+ if (type == "lv")
stop("cannot handle type='lv' yet")
- if (type=="terms")
+ if (type == "terms")
stop("cannot handle type='terms' yet")
M = object at misc$M
@@ -1172,7 +1174,7 @@ predict.qrrvglm <- function(object,
na.act = object at na.action
object at na.action = list()
- if (!length(newdata) && type=="response" && length(object at fitted.values)) {
+ if (!length(newdata) && type == "response" && length(object at fitted.values)) {
if (length(na.act)) {
return(napredict(na.act[[1]], object at fitted.values))
} else {
@@ -1181,7 +1183,7 @@ predict.qrrvglm <- function(object,
}
if (!length(newdata)) {
- X <- model.matrixvlm(object, type="lm", ...)
+ X <- model.matrixvlm(object, type = "lm", ...)
offset <- object at offset
tt <- object at terms$terms # terms(object)
if (!length(object at x))
@@ -1198,7 +1200,7 @@ predict.qrrvglm <- function(object,
if (nrow(X) != nrow(newdata)) {
as.save = attr(X, "assign")
- X = X[rep(1, nrow(newdata)),,drop=FALSE]
+ X = X[rep(1, nrow(newdata)),,drop = FALSE]
dimnames(X) = list(dimnames(newdata)[[1]], "(Intercept)")
attr(X, "assign") = as.save # Restored
}
@@ -1227,13 +1229,13 @@ predict.qrrvglm <- function(object,
is.matrix(object at predictors)) ncol(object at predictors) else
object at misc$M
MSratio = M / NOS # First value is g(mean) = quadratic form in lv
- if (MSratio != 1) stop("can only handle MSratio==1 for now")
+ if (MSratio != 1) stop("can only handle MSratio == 1 for now")
if (length(newdata)) {
Coefs = Coef(object, varlvI = varlvI, reference = reference)
- X1mat = X[,ocontrol$colx1.index,drop=FALSE]
- X2mat = X[,ocontrol$colx2.index,drop=FALSE]
+ X1mat = X[,ocontrol$colx1.index,drop = FALSE]
+ X2mat = X[,ocontrol$colx2.index,drop = FALSE]
lvmat = as.matrix(X2mat %*% Coefs at C) # n x Rank
etamat = as.matrix(X1mat %*% Coefs at B1 + lvmat %*% t(Coefs at A))
@@ -1242,7 +1244,7 @@ predict.qrrvglm <- function(object,
thisSpecies = whichSpecies[sppno]
Dmat = matrix(Coefs at D[,,thisSpecies], Rank, Rank)
etamat[,thisSpecies] = etamat[,thisSpecies] +
- mux34(lvmat, Dmat, symm=TRUE)
+ mux34(lvmat, Dmat, symm = TRUE)
}
} else {
etamat = object at predictors
@@ -1279,7 +1281,7 @@ setMethod("predict", "qrrvglm", function(object, ...)
coefqrrvglm = function(object, matrix.out = FALSE,
label = TRUE) {
if (matrix.out)
- stop("currently cannot handle matrix.out=TRUE")
+ stop("currently cannot handle matrix.out = TRUE")
coefvlm(object, matrix.out = matrix.out, label = label)
}
@@ -1307,7 +1309,7 @@ printrrvglm <- function(x, ...)
vecOfBetas <- x at coefficients
if (any(nas <- is.na(vecOfBetas))) {
if (is.null(names(vecOfBetas)))
- names(vecOfBetas) = paste("b", 1:length(vecOfBetas), sep="")
+ names(vecOfBetas) = paste("b", 1:length(vecOfBetas), sep = "")
cat("\nCoefficients: (", sum(nas),
" not defined because of singularities)\n", sep = "")
} else
@@ -1337,7 +1339,7 @@ printrrvglm <- function(x, ...)
ncrit <- names(x at criterion)
for(iii in ncrit)
if (iii != "loglikelihood" && iii != "deviance")
- cat(paste(iii, ":", sep=""), format(x at criterion[[iii]]), "\n")
+ cat(paste(iii, ":", sep = ""), format(x at criterion[[iii]]), "\n")
}
invisible(x)
@@ -1363,18 +1365,18 @@ rrvglm.control.Gaussian <- function(half.stepsizing= FALSE,
-summary.rrvglm <- function(object, correlation=FALSE,
- dispersion=NULL, digits=NULL,
+summary.rrvglm <- function(object, correlation = FALSE,
+ dispersion = NULL, digits = NULL,
numerical= TRUE,
h.step = 0.0001,
- kill.all=FALSE, omit13=FALSE, fixA=FALSE, ...)
+ kill.all = FALSE, omit13 = FALSE, fixA = FALSE, ...)
{
- if (!is.Numeric(h.step, allow=1) || abs(h.step)>1)
+ if (!is.Numeric(h.step, allow = 1) || abs(h.step)>1)
stop("bad input for 'h.step'")
if (!object at control$Corner)
@@ -1390,7 +1392,7 @@ summary.rrvglm <- function(object, correlation=FALSE,
dispersion=dispersion)
answer <-
- new(Class="summary.rrvglm",
+ new(Class = "summary.rrvglm",
object,
call=stuff at call,
coef3=stuff at coef3,
@@ -1407,7 +1409,7 @@ summary.rrvglm <- function(object, correlation=FALSE,
tmp5 <- get.rrvglm.se1(object, omit13=omit13,
- numerical=numerical, h.step=h.step,
+ numerical=numerical, h.step = h.step,
kill.all=kill.all, fixA=fixA, ...)
if (any(diag(tmp5$cov.unscaled) <= 0) ||
any(eigen(tmp5$cov.unscaled)$value <= 0)) {
@@ -1426,10 +1428,10 @@ summary.rrvglm <- function(object, correlation=FALSE,
}
tmp8 = object at misc$M - object at control$Rank -
- length(object at control$Structural.zero)
+ length(object at control$szero)
answer at df[1] <- answer at df[1] + tmp8 * object at control$Rank
answer at df[2] <- answer at df[2] - tmp8 * object at control$Rank
- if (dispersion==0) {
+ if (dispersion == 0) {
dispersion <- tmp5$rss / answer at df[2] # Estimate
}
@@ -1459,7 +1461,7 @@ get.rrvglm.se1 = function(fit, omit13 = FALSE, kill.all = FALSE,
if (length(fit at control$Nested) && fit at control$Nested)
stop("sorry, cannot handle nested models yet")
- Structural.zero = fit at control$Structural.zero
+ szero = fit at control$szero
if (!length(fit at x))
@@ -1476,14 +1478,14 @@ get.rrvglm.se1 = function(fit, omit13 = FALSE, kill.all = FALSE,
Rank <- fit at control$Rank # fit at misc$Nested.Rank
Amat <- fit at constraints[[colx2.index[1]]]
- B1mat =if (p1) coefvlm(fit,mat=TRUE)[colx1.index,,drop=FALSE] else NULL
- C.try <- coefvlm(fit, mat= TRUE)[colx2.index,,drop=FALSE]
+ B1mat =if (p1) coefvlm(fit,mat = TRUE)[colx1.index,,drop = FALSE] else NULL
+ C.try <- coefvlm(fit, mat= TRUE)[colx2.index,,drop = FALSE]
Cmat <- C.try %*% Amat %*% solve(t(Amat) %*% Amat)
- x1mat <- if (p1) fit at x[, colx1.index, drop=FALSE] else NULL
- x2mat <- fit at x[, colx2.index, drop=FALSE]
+ x1mat <- if (p1) fit at x[, colx1.index, drop = FALSE] else NULL
+ x2mat <- fit at x[, colx2.index, drop = FALSE]
- wz <- weights(fit, type="work") # old: wweights(fit) #fit at weights
+ wz <- weights(fit, type = "work") # old: wweights(fit) #fit at weights
if (!length(wz))
stop("cannot get fit at weights")
@@ -1491,24 +1493,24 @@ get.rrvglm.se1 = function(fit, omit13 = FALSE, kill.all = FALSE,
n <- fit at misc$n
Index.corner <- fit at control$Index.corner # used to be (1:Rank);
zmat <- fit at predictors + fit at residuals
- theta <- c(Amat[-c(Index.corner,Structural.zero),])
+ theta <- c(Amat[-c(Index.corner,szero),])
if (fit at control$checkwz)
- wz = checkwz(wz, M=M, trace=trace, wzeps=fit at control$wzepsilon)
+ wz = checkwz(wz, M=M, trace = trace, wzeps=fit at control$wzepsilon)
U <- vchol(wz, M=M, n=n, silent= TRUE)
if (numerical) {
delct.da <- num.deriv.rrr(fit, M=M, r=Rank,
x1mat=x1mat, x2mat=x2mat, p2=p2,
Index.corner, Aimat=Amat, B1mat=B1mat, Cimat=Cmat,
- h.step=h.step, colx2.index=colx2.index,
- xij=fit at control$xij,
- Structural.zero=Structural.zero)
+ h.step = h.step, colx2.index=colx2.index,
+ xij = fit at control$xij,
+ szero = szero)
} else {
delct.da <- dctda.fast.only(theta=theta, wz=wz, U=U, zmat,
M=M, r=Rank, x1mat=x1mat, x2mat=x2mat, p2=p2,
Index.corner, Aimat=Amat, B1mat=B1mat, Cimat=Cmat,
xij=fit at control$xij,
- Structural.zero=Structural.zero)
+ szero = szero)
}
@@ -1531,18 +1533,18 @@ get.rrvglm.se1 = function(fit, omit13 = FALSE, kill.all = FALSE,
if (any(ii == names(colx2.index))) {
log.vec33 = c(log.vec33, choose.from[[ii]])
}
- cov33 = cov2233[ log.vec33, log.vec33, drop=FALSE] # r*p2 by r*p2
- cov23 = cov2233[-log.vec33, log.vec33, drop=FALSE]
- cov22 = cov2233[-log.vec33,-log.vec33, drop=FALSE]
+ cov33 = cov2233[ log.vec33, log.vec33, drop = FALSE] # r*p2 by r*p2
+ cov23 = cov2233[-log.vec33, log.vec33, drop = FALSE]
+ cov22 = cov2233[-log.vec33,-log.vec33, drop = FALSE]
lv.mat <- x2mat %*% Cmat
- offs = matrix(0, n, M) # The "0" handles Structural.zero's
+ offs = matrix(0, n, M) # The "0" handles szero's
offs[,Index.corner] = lv.mat
- if (M == (Rank+length(Structural.zero)))
+ if (M == (Rank + length(szero)))
stop("cannot handle full-rank models yet")
- cm = matrix(0, M, M-Rank-length(Structural.zero))
- cm[-c(Index.corner,Structural.zero),] = diag(M-Rank-length(Structural.zero))
+ cm = matrix(0, M, M - Rank - length(szero))
+ cm[-c(Index.corner, szero),] = diag(M - Rank - length(szero))
Blist = vector("list", length(colx1.index)+1)
names(Blist) = c(names(colx1.index), "I(lv.mat)")
@@ -1559,14 +1561,14 @@ get.rrvglm.se1 = function(fit, omit13 = FALSE, kill.all = FALSE,
bb = c(bb, names(ooo)[ii])
}
- has.intercept = any(bb=="(Intercept)")
- bb[bb=="(Intercept)"] = "1"
+ has.intercept = any(bb == "(Intercept)")
+ bb[bb == "(Intercept)"] = "1"
if (p1>1)
- bb = paste(bb, collapse="+")
+ bb = paste(bb, collapse = "+")
if (has.intercept) {
- bb = paste("zmat - offs ~ ", bb, " + I(lv.mat)", collapse=" ")
+ bb = paste("zmat - offs ~ ", bb, " + I(lv.mat)", collapse = " ")
} else {
- bb = paste("zmat - offs ~ -1 + ", bb, " + I(lv.mat)", collapse=" ")
+ bb = paste("zmat - offs ~ -1 + ", bb, " + I(lv.mat)", collapse = " ")
}
bb = as.formula(bb)
} else {
@@ -1577,21 +1579,21 @@ get.rrvglm.se1 = function(fit, omit13 = FALSE, kill.all = FALSE,
if (fit at misc$dataname == "list") {
dspec = FALSE
} else {
- mytext1 = "exists(x=fit at misc$dataname, envir = VGAMenv)"
+ mytext1 = "exists(x=fit at misc$dataname, envir = VGAM:::VGAMenv)"
myexp1 = parse(text=mytext1)
is.there = eval(myexp1)
- bbdata= if (is.there) get(fit at misc$dataname, envir=VGAMenv) else
+ bbdata= if (is.there) get(fit at misc$dataname, envir=VGAM:::VGAMenv) else
get(fit at misc$dataname)
dspec = TRUE
}
fit1122 <- if (dspec) vlm(bb,
- constraint=Blist, crit="d", weight=wz, data=bbdata,
- save.weight=TRUE, smart=FALSE, trace=trace.arg, x=TRUE) else
+ constraint=Blist, crit = "d", weight=wz, data=bbdata,
+ save.weight = TRUE, smart = FALSE, trace = trace.arg, x = TRUE) else
vlm(bb,
- constraint=Blist, crit="d", weight=wz,
- save.weight=TRUE, smart=FALSE, trace=trace.arg, x=TRUE)
+ constraint=Blist, crit = "d", weight=wz,
+ save.weight = TRUE, smart = FALSE, trace = trace.arg, x = TRUE)
@@ -1601,10 +1603,10 @@ get.rrvglm.se1 = function(fit, omit13 = FALSE, kill.all = FALSE,
dimnames(cov1122) = list(d8, d8)
lcs = length(coefvlm(sfit1122))
- log.vec11 = (lcs-(M-Rank-length(Structural.zero))*Rank+1):lcs
- cov11 = cov1122[log.vec11, log.vec11, drop=FALSE]
- cov12 = cov1122[ log.vec11, -log.vec11, drop=FALSE]
- cov22 = cov1122[-log.vec11, -log.vec11, drop=FALSE]
+ log.vec11 = (lcs-(M-Rank-length(szero))*Rank+1):lcs
+ cov11 = cov1122[log.vec11, log.vec11, drop = FALSE]
+ cov12 = cov1122[ log.vec11, -log.vec11, drop = FALSE]
+ cov22 = cov1122[-log.vec11, -log.vec11, drop = FALSE]
cov13 = delct.da %*% cov33
@@ -1640,7 +1642,7 @@ get.rrvglm.se1 = function(fit, omit13 = FALSE, kill.all = FALSE,
-get.rrvglm.se2 <- function(cov.unscaled, dispersion=1, coefficients) {
+get.rrvglm.se2 <- function(cov.unscaled, dispersion = 1, coefficients) {
d8 <- dimnames(cov.unscaled)[[1]]
ans <- matrix(coefficients, length(coefficients), 3)
@@ -1655,7 +1657,7 @@ get.rrvglm.se2 <- function(cov.unscaled, dispersion=1, coefficients) {
num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
p2, Index.corner, Aimat, B1mat, Cimat,
h.step=0.0001, colx2.index,
- xij=NULL, Structural.zero=NULL)
+ xij = NULL, szero = NULL)
{
@@ -1663,11 +1665,11 @@ num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
if (nrow(Cimat) != p2 || ncol(Cimat) != r)
stop("'Cimat' wrong shape")
- dct.da <- matrix(as.numeric(NA), (M-r-length(Structural.zero))*r, r*p2)
+ dct.da <- matrix(as.numeric(NA), (M-r-length(szero))*r, r*p2)
- if ((length(Index.corner) + length(Structural.zero)) == M)
+ if ((length(Index.corner) + length(szero)) == M)
stop("cannot handle full rank models yet")
- cbindex = (1:M)[-c(Index.corner, Structural.zero)]
+ cbindex = (1:M)[-c(Index.corner, szero)]
ptr = 1
for(sss in 1:r)
@@ -1687,15 +1689,15 @@ num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
newmu <- fit at family@inverse(neweta, fit at extra)
- fit at fitted.values = newmu
+ fit at fitted.values = as.matrix(newmu) # 20100909
- fred = weights(fit, type="w", deriv= TRUE, ignore.slot= TRUE)
+ fred = weights(fit, type = "w", deriv= TRUE, ignore.slot= TRUE)
if (!length(fred))
stop("cannot get @weights and $deriv from object")
wz = fred$weights
deriv.mu <- fred$deriv
- U <- vchol(wz, M=M, n=nn, silent= TRUE)
+ U <- vchol(wz, M=M, n=nn, silent = TRUE)
tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=nn)
newzmat <- neweta + vbacksub(U, tvfor, M=M, n=nn) - offset
if (is.numeric(x1mat))
@@ -1718,13 +1720,13 @@ num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
dctda.fast.only = function(theta, wz, U, zmat, M, r, x1mat, x2mat,
p2, Index.corner, Aimat, B1mat, Cimat,
- xij=NULL,
- Structural.zero=NULL)
+ xij = NULL,
+ szero = NULL)
{
- if (length(Structural.zero))
- stop("cannot handle Structural.zero in dctda.fast.only()")
+ if (length(szero))
+ stop("cannot handle 'szero' in dctda.fast.only()")
nn = nrow(x2mat)
if (nrow(Cimat) != p2 || ncol(Cimat) != r)
@@ -1746,8 +1748,8 @@ dctda.fast.only = function(theta, wz, U, zmat, M, r, x1mat, x2mat,
stop("cannot handle full rank models yet")
cbindex = (1:M)[-Index.corner] # complement of Index.corner
resid2 = if (length(x1mat))
- mux22(t(wz), zmat - x1mat %*% B1mat, M=M, upp=FALSE, as.mat=TRUE) else
- mux22(t(wz), zmat , M=M, upp=FALSE, as.mat=TRUE)
+ mux22(t(wz), zmat - x1mat %*% B1mat, M=M, upp = FALSE, as.mat = TRUE) else
+ mux22(t(wz), zmat , M=M, upp = FALSE, as.mat = TRUE)
for(sss in 1:r)
for(ttt in cbindex) {
@@ -1759,7 +1761,7 @@ dctda.fast.only = function(theta, wz, U, zmat, M, r, x1mat, x2mat,
wxx = Wiak[,ttt] * x2mat
blocki = t(x2mat) %*% wxx
temp4a = blocki %*% Cimat[,kkk]
- if (kkk==1) {
+ if (kkk == 1) {
temp4b = blocki %*% Cimat[,sss]
}
temp2 = temp2 - kronecker(ei(sss,r), temp4a) -
@@ -1767,7 +1769,7 @@ dctda.fast.only = function(theta, wz, U, zmat, M, r, x1mat, x2mat,
}
dc.da[,,ttt,sss] = G %*% temp2
}
- ans1 = dc.da[,,cbindex,,drop=FALSE] # p2 x r x (M-r) x r
+ ans1 = dc.da[,,cbindex,,drop = FALSE] # p2 x r x (M-r) x r
ans1 = aperm(ans1, c(2,1,3,4)) # r x p2 x (M-r) x r
ans1 = matrix(c(ans1), r*p2, (M-r)*r)
@@ -1778,7 +1780,7 @@ dctda.fast.only = function(theta, wz, U, zmat, M, r, x1mat, x2mat,
dcda.fast = function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
- intercept= TRUE, xij=NULL)
+ intercept= TRUE, xij = NULL)
{
@@ -1800,18 +1802,18 @@ dcda.fast = function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
Blist[[ii]] = Aimat
}
- coeffs = vlm.wfit(xmat=xmat, z, Blist, U=U, matrix.out=TRUE,
+ coeffs = vlm.wfit(xmat=xmat, z, Blist, U=U, matrix.out = TRUE,
xij=xij)$mat.coef
c3 <- coeffs <- t(coeffs) # transpose to make M x (pp+1)
int.vec <- if (intercept) c3[,1] else 0 # \boldeta_0
- Cimat <- if (intercept) t(c3[Index.corner,-1,drop=FALSE]) else
- t(c3[Index.corner,,drop=FALSE])
+ Cimat <- if (intercept) t(c3[Index.corner,-1,drop = FALSE]) else
+ t(c3[Index.corner,,drop = FALSE])
if (nrow(Cimat)!=pp || ncol(Cimat)!=r)
stop("Cimat wrong shape")
- fred <- kronecker(matrix(1,1,r), if (intercept) xmat[,-1,drop=FALSE] else xmat)
+ fred <- kronecker(matrix(1,1,r), if (intercept) xmat[,-1,drop = FALSE] else xmat)
fred <- kronecker(fred, matrix(1,M,1))
barney <- kronecker(Aimat, matrix(1,1,pp))
barney <- kronecker(matrix(1,nn,1), barney)
@@ -1829,7 +1831,7 @@ dcda.fast = function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
for(s in 1:r)
for(tt in cbindex) {
- fred <- (if (intercept) t(xmat[,-1,drop=FALSE]) else
+ fred <- (if (intercept) t(xmat[,-1,drop = FALSE]) else
t(xmat)) * matrix(resid2[,tt],pp,nn,byrow= TRUE)
temp2 <- kronecker(ei(s,r), rowSums(fred))
@@ -1837,15 +1839,15 @@ dcda.fast = function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
for(k in 1:r) {
Wiak <- mux22(t(wz), matrix(Aimat[,k],nn,M,byrow= TRUE),
M=M, upper= FALSE, as.mat= TRUE) # mat= TRUE,
- wxx <- Wiak[,tt] * (if (intercept) xmat[,-1,drop=FALSE] else
+ wxx <- Wiak[,tt] * (if (intercept) xmat[,-1,drop = FALSE] else
xmat)
- blocki <- (if (intercept) t(xmat[,-1,drop=FALSE]) else
+ blocki <- (if (intercept) t(xmat[,-1,drop = FALSE]) else
t(xmat)) %*% wxx
temp4 <- temp4 + blocki %*% Cimat[,k]
}
dc.da[,,tt,s] <- G %*% (temp2 - 2 * kronecker(ei(s,r),temp4))
}
- ans1 <- dc.da[,,cbindex,,drop=FALSE] # pp x r x (M-r) x r
+ ans1 <- dc.da[,,cbindex,,drop = FALSE] # pp x r x (M-r) x r
ans1 <- aperm(ans1, c(2,1,3,4)) # r x pp x (M-r) x r
ans1 <- matrix(c(ans1), (M-r)*r, r*pp, byrow= TRUE)
@@ -1857,10 +1859,10 @@ dcda.fast = function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
t1 <- t(dc.da[,j,,s])
t1 <- matrix(t1, M, pp)
detastar.da[,j,s,] <- t1 %*% (if (intercept)
- t(xmat[,-1,drop=FALSE]) else t(xmat))
+ t(xmat[,-1,drop = FALSE]) else t(xmat))
}
- etastar <- (if (intercept) xmat[,-1,drop=FALSE] else xmat) %*% Cimat
+ etastar <- (if (intercept) xmat[,-1,drop = FALSE] else xmat) %*% Cimat
eta <- matrix(int.vec, nn, M, byrow= TRUE) + etastar %*% t(Aimat)
sumWinv <- solve((m2adefault(t(colSums(wz)), M=M))[,,1])
@@ -1876,7 +1878,7 @@ dcda.fast = function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
temp91 = rowSums(temp93, dims=2) # M x M
deta0.da[,,ss] <- -(temp90 + temp91) %*% sumWinv
}
- ans2 <- deta0.da[-(1:r),,,drop=FALSE] # (M-r) x M x r
+ ans2 <- deta0.da[-(1:r),,,drop = FALSE] # (M-r) x M x r
ans2 <- aperm(ans2, c(1,3,2)) # (M-r) x r x M
ans2 <- matrix(c(ans2), (M-r)*r, M)
@@ -1887,7 +1889,7 @@ dcda.fast = function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
rrr.deriv.rss = function(theta, wz, U, z, M, r, xmat,
pp, Index.corner, intercept= TRUE,
- xij=NULL)
+ xij = NULL)
{
Amat = matrix(as.numeric(NA), M, r)
@@ -1905,8 +1907,8 @@ rrr.deriv.rss = function(theta, wz, U, z, M, r, xmat,
Blist[[ii]] = Amat
}
- vlm.wfit(xmat=xmat, z, Blist, U=U, matrix.out=FALSE,
- rss=TRUE, xij=xij)$rss
+ vlm.wfit(xmat=xmat, z, Blist, U=U, matrix.out = FALSE,
+ rss = TRUE, xij=xij)$rss
}
@@ -1937,17 +1939,17 @@ rrr.deriv.gradient.fast = function(theta, wz, U, z, M, r, xmat,
}
coeffs = vlm.wfit(xmat, z, Blist, U=U, matrix.out= TRUE,
- xij=NULL)$mat.coef
+ xij = NULL)$mat.coef
c3 = coeffs = t(coeffs) # transpose to make M x (pp+1)
int.vec = if (intercept) c3[,1] else 0 # \boldeta_0
- Cimat = if (intercept) t(c3[Index.corner,-1,drop=FALSE]) else
- t(c3[Index.corner,,drop=FALSE])
+ Cimat = if (intercept) t(c3[Index.corner,-1,drop = FALSE]) else
+ t(c3[Index.corner,,drop = FALSE])
if (nrow(Cimat)!=pp || ncol(Cimat)!=r)
stop("Cimat wrong shape")
- fred = kronecker(matrix(1,1,r), if (intercept) xmat[,-1,drop=FALSE] else xmat)
+ fred = kronecker(matrix(1,1,r), if (intercept) xmat[,-1,drop = FALSE] else xmat)
fred = kronecker(fred, matrix(1,M,1))
barney = kronecker(Aimat, matrix(1,1,pp))
barney = kronecker(matrix(1,nn,1), barney)
@@ -1965,7 +1967,7 @@ rrr.deriv.gradient.fast = function(theta, wz, U, z, M, r, xmat,
for(s in 1:r)
for(tt in cbindex) {
- fred = (if (intercept) t(xmat[,-1,drop=FALSE]) else
+ fred = (if (intercept) t(xmat[,-1,drop = FALSE]) else
t(xmat)) * matrix(resid2[,tt],pp,nn,byrow= TRUE)
temp2 = kronecker(ei(s,r), rowSums(fred))
@@ -1973,8 +1975,8 @@ rrr.deriv.gradient.fast = function(theta, wz, U, z, M, r, xmat,
for(k in 1:r) {
Wiak = mux22(t(wz), matrix(Aimat[,k],nn,M,byrow= TRUE),
M=M, upper= FALSE, as.mat= TRUE) # mat= TRUE,
- wxx = Wiak[,tt] * (if (intercept) xmat[,-1,drop=FALSE] else xmat)
- blocki = (if (intercept) t(xmat[,-1,drop=FALSE]) else t(xmat)) %*% wxx
+ wxx = Wiak[,tt] * (if (intercept) xmat[,-1,drop = FALSE] else xmat)
+ blocki = (if (intercept) t(xmat[,-1,drop = FALSE]) else t(xmat)) %*% wxx
temp4 = temp4 + blocki %*% Cimat[,k]
}
dc.da[,,s,tt] = G %*% (temp2 - 2 * kronecker(ei(s,r),temp4))
@@ -1986,10 +1988,10 @@ rrr.deriv.gradient.fast = function(theta, wz, U, z, M, r, xmat,
t1 = t(dc.da[,j,s,])
t1 = matrix(t1, M, pp)
detastar.da[,j,s,] = t1 %*% (if (intercept)
- t(xmat[,-1,drop=FALSE]) else t(xmat))
+ t(xmat[,-1,drop = FALSE]) else t(xmat))
}
- etastar = (if (intercept) xmat[,-1,drop=FALSE] else xmat) %*% Cimat
+ etastar = (if (intercept) xmat[,-1,drop = FALSE] else xmat) %*% Cimat
eta = matrix(int.vec, nn, M, byrow= TRUE) + etastar %*% t(Aimat)
sumWinv = solve((m2adefault(t(colSums(wz)), M=M))[,,1])
@@ -2030,7 +2032,7 @@ rrr.deriv.gradient.fast = function(theta, wz, U, z, M, r, xmat,
-vellipse = function(R, ratio=1, orientation=0, center=c(0,0), N=300) {
+vellipse = function(R, ratio = 1, orientation=0, center=c(0,0), N=300) {
if (length(center) != 2) stop("center must be of length 2")
theta = 2*pi*(0:N)/N
x1 = R*cos(theta)
@@ -2049,9 +2051,9 @@ biplot.qrrvglm = function(x, ...) {
lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
add= FALSE, plot.it= TRUE, rug= TRUE, y = FALSE,
type=c("fitted.values", "predictors"),
- xlab=paste("Latent Variable", if (Rank==1) "" else " 1", sep=""),
- ylab= if (Rank == 1) switch(type, predictors="Predictors",
- fitted.values="Fitted values") else "Latent Variable 2",
+ xlab=paste("Latent Variable", if (Rank == 1) "" else " 1", sep = ""),
+ ylab= if (Rank == 1) switch(type, predictors = "Predictors",
+ fitted.values = "Fitted values") else "Latent Variable 2",
pcex=par()$cex, pcol=par()$col, pch=par()$pch,
llty=par()$lty, lcol=par()$col, llwd=par()$lwd,
label.arg= FALSE, adj.arg=-0.1,
@@ -2062,8 +2064,8 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
C = FALSE,
OriginC = c("origin","mean"),
Clty=par()$lty, Ccol=par()$col, Clwd=par()$lwd,
- Ccex=par()$cex, Cadj.arg=-0.1, stretchC=1,
- sites= FALSE, spch=NULL, scol=par()$col, scex=par()$cex,
+ Ccex=par()$cex, Cadj.arg=-0.1, stretchC = 1,
+ sites= FALSE, spch = NULL, scol=par()$col, scex=par()$cex,
sfont=par()$font,
check.ok = TRUE, ...)
{
@@ -2071,7 +2073,7 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
type <- as.character(substitute(type))
type <- match.arg(type, c("fitted.values", "predictors"))[1]
- if (is.numeric(OriginC)) OriginC = rep(OriginC, len=2) else {
+ if (is.numeric(OriginC)) OriginC = rep(OriginC, len = 2) else {
if (mode(OriginC) != "character" && mode(OriginC) != "name")
OriginC <- as.character(substitute(OriginC))
OriginC <- match.arg(OriginC, c("origin","mean"))[1]
@@ -2101,14 +2103,14 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
r.curves = slot(object, type) # n times M (\boldeta or \boldmu)
if (!add) {
- if (Rank==1) {
+ if (Rank == 1) {
matplot(nustar,
- if ( y && type=="fitted.values") object at y else r.curves,
- type="n", xlab=xlab, ylab=ylab, ...)
- } else { # Rank==2
+ if ( y && type == "fitted.values") object at y else r.curves,
+ type = "n", xlab=xlab, ylab=ylab, ...)
+ } else { # Rank == 2
matplot(c(Coef.list at Optimum[1,], nustar[,1]),
c(Coef.list at Optimum[2,], nustar[,2]),
- type="n", xlab=xlab, ylab=ylab, ...)
+ type = "n", xlab=xlab, ylab=ylab, ...)
}
}
@@ -2135,7 +2137,7 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
Ccex <- rep(Ccex, leng=nrow(Cmat))
}
- if (Rank==1) {
+ if (Rank == 1) {
for(i in 1:ncol(r.curves)) {
xx = nustar
yy = r.curves[,i]
@@ -2143,7 +2145,7 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
xx = xx[o]
yy = yy[o]
lines(xx, yy, col=lcol[i], lwd=llwd[i], lty=llty[i])
- if ( y && type=="fitted.values") {
+ if ( y && type == "fitted.values") {
ypts = object at y
if (ncol(as.matrix(ypts)) == ncol(r.curves))
points(xx, ypts[o,i], col=pcol[i], cex=pcex[i], pch=pch[i])
@@ -2163,18 +2165,18 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
if (chull.arg) {
hull = chull(nustar[,1], nustar[,2])
hull = c(hull, hull[1])
- lines(nustar[hull,1], nustar[hull,2], type="b", pch=cpch,
+ lines(nustar[hull,1], nustar[hull,2], type = "b", pch=cpch,
lty=clty, col=ccol, lwd=clwd)
}
if (length(ellipse)) {
ellipse.temp = if (ellipse > 0) ellipse else 0.95
if (ellipse < 0 && (!object at control$EqualTolerances || varlvI))
- stop("an equal-tolerances assumption and 'varlvI=FALSE' ",
+ stop("an equal-tolerances assumption and 'varlvI = FALSE' ",
"is needed for 'ellipse' < 0")
if ( check.ok ) {
colx1.index = object at control$colx1.index
- if (!(length(colx1.index)==1 &&
- names(colx1.index)=="(Intercept)"))
+ if (!(length(colx1.index) == 1 &&
+ names(colx1.index) == "(Intercept)"))
stop("can only plot ellipses for intercept models only")
}
for(i in 1:ncol(r.curves)) {
@@ -2188,7 +2190,7 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
extra=object at extra) - cutpoint
if (is.finite(cutpoint) && cutpoint > 0) {
Mmat = diag(rep(ifelse(object at control$Crow1positive, 1, -1),
- len=Rank))
+ len = Rank))
etoli = eigen(t(Mmat) %*% Coef.list at Tolerance[,,i] %*% Mmat)
A=ifelse(etoli$val[1]>0,sqrt(2*cutpoint*etoli$val[1]),Inf)
B=ifelse(etoli$val[2]>0,sqrt(2*cutpoint*etoli$val[2]),Inf)
@@ -2207,9 +2209,9 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
}
if ( C ) {
- if (is.character(OriginC) && OriginC=="mean")
+ if (is.character(OriginC) && OriginC == "mean")
OriginC = c(mean(nustar[,1]), mean(nustar[,2]))
- if (is.character(OriginC) && OriginC=="origin")
+ if (is.character(OriginC) && OriginC == "origin")
OriginC = c(0,0)
for(i in 1:nrow(Cmat))
arrows(x0=OriginC[1], y0=OriginC[2],
@@ -2227,7 +2229,7 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
if (sites) {
text(nustar[,1], nustar[,2], adj=0.5,
labels = if (is.null(spch)) dimnames(nustar)[[1]] else
- rep(spch, length=nrow(nustar)), col=scol, cex=scex, font=sfont)
+ rep(spch, length = nrow(nustar)), col=scol, cex=scex, font=sfont)
}
}
invisible(nustar)
@@ -2236,31 +2238,31 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
lvplot.rrvglm = function(object,
- A=TRUE,
- C=TRUE,
- scores=FALSE, plot.it= TRUE,
+ A = TRUE,
+ C = TRUE,
+ scores = FALSE, plot.it= TRUE,
groups=rep(1,n),
- gapC=sqrt(sum(par()$cxy^2)), scaleA=1,
- xlab="Latent Variable 1",
- ylab="Latent Variable 2",
+ gapC=sqrt(sum(par()$cxy^2)), scaleA = 1,
+ xlab = "Latent Variable 1",
+ ylab = "Latent Variable 2",
Alabels= if (length(object at misc$predictors.names))
- object at misc$predictors.names else paste("LP", 1:M, sep=""),
+ object at misc$predictors.names else paste("LP", 1:M, sep = ""),
Aadj=par()$adj,
Acex=par()$cex,
Acol=par()$col,
- Apch=NULL,
+ Apch = NULL,
Clabels=rownames(Cmat),
Cadj=par()$adj,
Ccex=par()$cex,
Ccol=par()$col,
Clty=par()$lty,
Clwd=par()$lwd,
- chull.arg=FALSE,
+ chull.arg = FALSE,
ccex=par()$cex,
ccol=par()$col,
clty=par()$lty,
clwd=par()$lwd,
- spch=NULL,
+ spch = NULL,
scex=par()$cex,
scol=par()$col,
slabels=rownames(x2mat),
@@ -2282,9 +2284,9 @@ lvplot.rrvglm = function(object,
Cmat = Cmat / scaleA
if (!length(object at x)) {
- object at x = model.matrixvlm(object, type="lm")
+ object at x = model.matrixvlm(object, type = "lm")
}
- x2mat = object at x[, colx2.index, drop=FALSE]
+ x2mat = object at x[, colx2.index, drop = FALSE]
nuhat = x2mat %*% Cmat
if (!plot.it) return(as.matrix(nuhat))
@@ -2293,16 +2295,16 @@ lvplot.rrvglm = function(object,
if (C) Cmat else NULL,
if (scores) nuhat else NULL)
- plot(allmat[,1], allmat[,2], type="n",
+ plot(allmat[,1], allmat[,2], type = "n",
xlab=xlab, ylab=ylab, ...) # xlim etc. supplied through ...
if (A) {
- Aadj = rep(Aadj, len=length(index.nosz))
- Acex = rep(Acex, len=length(index.nosz))
- Acol = rep(Acol, len=length(index.nosz))
+ Aadj = rep(Aadj, len = length(index.nosz))
+ Acex = rep(Acex, len = length(index.nosz))
+ Acol = rep(Acol, len = length(index.nosz))
if (length(Alabels) != M) stop("'Alabels' must be of length ", M)
if (length(Apch)) {
- Apch = rep(Apch, len=length(index.nosz))
+ Apch = rep(Apch, len = length(index.nosz))
for(i in index.nosz)
points(Amat[i,1],Amat[i,2],pch=Apch[i],cex=Acex[i],col=Acol[i])
} else {
@@ -2314,12 +2316,12 @@ lvplot.rrvglm = function(object,
if (C) {
p2 = nrow(Cmat)
- gapC = rep(gapC, len=p2)
- Cadj = rep(Cadj, len=p2)
- Ccex = rep(Ccex, len=p2)
- Ccol = rep(Ccol, len=p2)
- Clwd = rep(Clwd, len=p2)
- Clty = rep(Clty, len=p2)
+ gapC = rep(gapC, len = p2)
+ Cadj = rep(Cadj, len = p2)
+ Ccex = rep(Ccex, len = p2)
+ Ccol = rep(Ccol, len = p2)
+ Clwd = rep(Clwd, len = p2)
+ Clty = rep(Clty, len = p2)
if (length(Clabels) != p2)
stop("'length(Clabels)' must be equal to ", p2)
for(ii in 1:p2) {
@@ -2334,13 +2336,13 @@ lvplot.rrvglm = function(object,
if (scores) {
ugrp = unique(groups)
nlev = length(ugrp) # number of groups
- clty = rep(clty, len=nlev)
- clwd = rep(clwd, len=nlev)
- ccol = rep(ccol, len=nlev)
+ clty = rep(clty, len = nlev)
+ clwd = rep(clwd, len = nlev)
+ ccol = rep(ccol, len = nlev)
if (length(spch))
- spch = rep(spch, len=n)
- scol = rep(scol, len=n)
- scex = rep(scex, len=n)
+ spch = rep(spch, len = n)
+ scol = rep(scol, len = n)
+ scex = rep(scex, len = n)
for(ii in ugrp) {
gp = groups == ii
if (nlev > 1 && (length(unique(spch[gp])) != 1 ||
@@ -2349,7 +2351,7 @@ lvplot.rrvglm = function(object,
warning("spch/scol/scex is different for individuals ",
"from the same group")
- temp = nuhat[gp,,drop=FALSE]
+ temp = nuhat[gp,,drop = FALSE]
if (length(spch)) {
points(temp[,1], temp[,2], cex=scex[gp], pch=spch[gp],
col=scol[gp])
@@ -2360,8 +2362,8 @@ lvplot.rrvglm = function(object,
if (chull.arg) {
hull = chull(temp[,1],temp[,2])
hull = c(hull, hull[1])
- lines(temp[hull,1], temp[hull,2], type="b", lty=clty[ii],
- col=ccol[ii], lwd=clwd[ii], pch=" ")
+ lines(temp[hull,1], temp[hull,2], type = "b", lty=clty[ii],
+ col=ccol[ii], lwd=clwd[ii], pch = " ")
}
}
}
@@ -2382,21 +2384,21 @@ lvplot.rrvglm = function(object,
p1 = length(colx1.index) # May be 0
Amat <- object at constraints[[colx2.index[1]]]
- B1mat = if (p1) coefvlm(object, mat=TRUE)[colx1.index,,drop=FALSE] else NULL
+ B1mat = if (p1) coefvlm(object, mat = TRUE)[colx1.index,,drop = FALSE] else NULL
- C.try <- coefvlm(object, mat = TRUE)[colx2.index, , drop=FALSE]
+ C.try <- coefvlm(object, mat = TRUE)[colx2.index, , drop = FALSE]
Cmat <- C.try %*% Amat %*% solve(t(Amat) %*% Amat)
Rank = object at control$Rank
- lv.names = if (Rank>1) paste("lv", 1:Rank, sep="") else "lv"
+ lv.names = if (Rank>1) paste("lv", 1:Rank, sep = "") else "lv"
dimnames(Amat) = list(object at misc$predictors.names, lv.names)
dimnames(Cmat) = list(dimnames(Cmat)[[1]], lv.names)
- ans = new(Class="Coef.rrvglm",
+ ans = new(Class = "Coef.rrvglm",
A = Amat,
C = Cmat,
Rank = Rank,
@@ -2409,7 +2411,7 @@ lvplot.rrvglm = function(object,
if (object at control$Corner)
ans at Atilde = Amat[-c(object at control$Index.corner,
- object at control$Structural.zero),,drop=FALSE]
+ object at control$szero),,drop = FALSE]
ans
}
@@ -2504,7 +2506,7 @@ printsummary.qrrvglm = function(x, ...) {
names(x at dispersion) = x at misc$ynames
print(x at dispersion, ...)
} else
- cat(x at dispersion, fill=TRUE)
+ cat(x at dispersion, fill = TRUE)
cat("\n")
} else if (length(x at dispersion) == 1) {
cat("\nDispersion parameter: ", x at dispersion, "\n")
@@ -2540,13 +2542,14 @@ setMethod("show", "Coef.rrvglm", function(object)
-grc = function(y, Rank=1, Index.corner=2:(1+Rank), Structural.zero=1,
- summary.arg= FALSE, h.step=0.0001, ...) {
+ grc <- function(y, Rank = 1, Index.corner = 2:(1+Rank),
+ szero = 1,
+ summary.arg = FALSE, h.step = 0.0001, ...) {
- myrrcontrol = rrvglm.control(Rank=Rank, Index.corner=Index.corner,
- Structural.zero = Structural.zero, ...)
+ myrrcontrol = rrvglm.control(Rank = Rank, Index.corner = Index.corner,
+ szero = szero, ...)
object.save = y
if (is(y, "rrvglm")) {
y = object.save at y
@@ -2557,33 +2560,33 @@ grc = function(y, Rank=1, Index.corner=2:(1+Rank), Structural.zero=1,
if (length(dim(y)) != 2 || nrow(y) < 3 || ncol(y) < 3)
stop("y must be a matrix with >= 3 rows & columns, or a rrvglm() object")
- ei = function(i, n) diag(n)[,i,drop=FALSE]
- .grc.df = data.frame(Row2 = ei(2, nrow(y)))
+ ei = function(i, n) diag(n)[,i,drop = FALSE]
+ .grc.df = data.frame(Row.2 = ei(2, nrow(y)))
yn1 = if (length(dimnames(y)[[1]])) dimnames(y)[[1]] else
- paste("x2", 1:nrow(y), sep="")
+ paste("X2.", 1:nrow(y), sep = "")
warn.save = options()$warn
- options(warn=-3) # Suppress the warnings (hopefully, temporarily)
+ options(warn = -3) # Suppress the warnings (hopefully, temporarily)
if (any(!is.na(as.numeric(substring(yn1, 1, 1)))))
- yn1 = paste("x2", 1:nrow(y), sep="")
- options(warn=warn.save)
+ yn1 = paste("X2.", 1:nrow(y), sep = "")
+ options(warn = warn.save)
- Row = factor(1:nrow(y))
- modmat.row = model.matrix( ~ Row)
- Col = factor(1:ncol(y))
- modmat.col = model.matrix( ~ Col)
+ Row. = factor(1:nrow(y))
+ modmat.row = model.matrix( ~ Row.)
+ Col. = factor(1:ncol(y))
+ modmat.col = model.matrix( ~ Col.)
cms = list("(Intercept)" = matrix(1, ncol(y), 1))
for(ii in 2:nrow(y)) {
- cms[[paste("Row", ii, sep="")]] = matrix(1, ncol(y), 1)
- .grc.df[[paste("Row", ii, sep="")]] = modmat.row[,ii]
+ cms[[paste("Row.", ii, sep = "")]] = matrix(1, ncol(y), 1)
+ .grc.df[[paste("Row.", ii, sep = "")]] = modmat.row[,ii]
}
for(ii in 2:ncol(y)) {
- cms[[paste("Col", ii, sep="")]] = modmat.col[,ii,drop=FALSE]
- .grc.df[[paste("Col", ii, sep="")]] = rep(1, nrow(y))
+ cms[[paste("Col.", ii, sep = "")]] = modmat.col[,ii,drop = FALSE]
+ .grc.df[[paste("Col.", ii, sep = "")]] = rep(1, nrow(y))
}
for(ii in 2:nrow(y)) {
- cms[[yn1[ii]]] = diag(ncol(y))
+ cms[[yn1[ii]]] = diag(ncol(y))
.grc.df[[yn1[ii]]] = ei(ii, nrow(y))
}
@@ -2591,36 +2594,37 @@ grc = function(y, Rank=1, Index.corner=2:(1+Rank), Structural.zero=1,
as.character(1:nrow(y)),
dimnames(.grc.df)[[2]])
- str1 = "~ Row2"
- if (nrow(y)>2)
- for(ii in 3:nrow(y))
- str1 = paste(str1, paste("Row", ii, sep=""), sep=" + ")
+ str1 = "~ Row.2"
+ if (nrow(y) > 2)
+ for(ii in 3:nrow(y))
+ str1 = paste(str1, paste("Row.", ii, sep = ""), sep = " + ")
for(ii in 2:ncol(y))
- str1 = paste(str1, paste("Col", ii, sep=""), sep=" + ")
+ str1 = paste(str1, paste("Col.", ii, sep = ""), sep = " + ")
str2 = paste("y ", str1)
for(ii in 2:nrow(y))
- str2 = paste(str2, yn1[ii], sep=" + ")
+ str2 = paste(str2, yn1[ii], sep = " + ")
myrrcontrol$Norrr = as.formula(str1) # Overwrite this
- assign(".grc.df", .grc.df, envir = VGAMenv)
+ assign(".grc.df", .grc.df, envir = VGAM:::VGAMenv)
warn.save = options()$warn
- options(warn=-3) # Suppress the warnings (hopefully, temporarily)
+ options(warn = -3) # Suppress the warnings (hopefully, temporarily)
answer = if (is(object.save, "rrvglm")) object.save else
- rrvglm(as.formula(str2), fam=poissonff,
- constraints=cms, control=myrrcontrol, data=.grc.df)
- options(warn=warn.save)
+ rrvglm(as.formula(str2), fam = poissonff,
+ constraints = cms, control = myrrcontrol,
+ data = .grc.df)
+ options(warn = warn.save)
if (summary.arg) {
answer = as(answer, "rrvglm")
- answer = summary.rrvglm(answer, h.step=h.step)
+ answer = summary.rrvglm(answer, h.step = h.step)
} else {
answer = as(answer, "grc")
}
- if (exists(".grc.df", envir = VGAMenv))
- rm(".grc.df", envir = VGAMenv)
+ if (exists(".grc.df", envir = VGAM:::VGAMenv))
+ rm(".grc.df", envir = VGAM:::VGAMenv)
answer
}
@@ -2634,20 +2638,20 @@ summary.grc = function(object, ...) {
trplot.qrrvglm = function(object,
- whichSpecies=NULL,
- add=FALSE, plot.it=TRUE,
- label.sites=FALSE,
+ whichSpecies = NULL,
+ add = FALSE, plot.it = TRUE,
+ label.sites = FALSE,
sitenames = rownames(object at y),
axes.equal = TRUE,
cex=par()$cex,
- col=1:(nos*(nos-1)/2),
- log="",
- lty = rep(par()$lty, len=nos*(nos-1)/2),
- lwd = rep(par()$lwd, len=nos*(nos-1)/2),
- tcol= rep(par()$col, len=nos*(nos-1)/2),
+ col = 1:(nos*(nos-1)/2),
+ log = "",
+ lty = rep(par()$lty, len = nos*(nos-1)/2),
+ lwd = rep(par()$lwd, len = nos*(nos-1)/2),
+ tcol= rep(par()$col, len = nos*(nos-1)/2),
xlab = NULL, ylab = NULL,
- main="", # "Trajectory plot",
- type="b", check.ok=TRUE, ...) {
+ main = "", # "Trajectory plot",
+ type = "b", check.ok = TRUE, ...) {
coef.obj = Coef(object) # use defaults for those two arguments
if (coef.obj at Rank != 1) stop("object must be a rank-1 model")
fv = fitted(object)
@@ -2656,7 +2660,7 @@ trplot.qrrvglm = function(object,
M = object at misc$M #
nn = nrow(fv) # Number of sites
if (length(sitenames))
- sitenames = rep(sitenames, len=nn)
+ sitenames = rep(sitenames, len = nn)
sppNames = dimnames(object at y)[[2]]
if (!length(whichSpecies)) {
whichSpecies = sppNames[1:NOS]
@@ -2673,18 +2677,18 @@ trplot.qrrvglm = function(object,
stop("must have at least 2 species to be plotted")
cx1i = object at control$colx1.index
if (check.ok)
- if (!(length(cx1i)==1 && names(cx1i)=="(Intercept)"))
+ if (!(length(cx1i) == 1 && names(cx1i) == "(Intercept)"))
stop("trajectory plots allowable only for Norrr = ~ 1 models")
- first.spp = iam(1,1,M=M,both=TRUE,diag=FALSE)$row.index
- second.spp = iam(1,1,M=M,both=TRUE,diag=FALSE)$col.index
- myxlab = if (length(whichSpecies.numer)==2) {
+ first.spp = iam(1,1,M=M,both = TRUE,diag = FALSE)$row.index
+ second.spp = iam(1,1,M=M,both = TRUE,diag = FALSE)$col.index
+ myxlab = if (length(whichSpecies.numer) == 2) {
paste("Fitted value for",
if (is.character(whichSpecies.numer)) whichSpecies.numer[1] else
sppNames[whichSpecies.numer[1]])
} else "Fitted value for 'first' species"
myxlab = if (length(xlab)) xlab else myxlab
- myylab = if (length(whichSpecies.numer)==2) {
+ myylab = if (length(whichSpecies.numer) == 2) {
paste("Fitted value for",
if (is.character(whichSpecies.numer)) whichSpecies.numer[2] else
sppNames[whichSpecies.numer[2]])
@@ -2695,18 +2699,18 @@ trplot.qrrvglm = function(object,
fv[,whichSpecies.numer[first.spp]]
yyy = if (axes.equal) fv[,whichSpecies.numer] else
fv[,whichSpecies.numer[second.spp]]
- matplot(xxx, yyy, type="n", log=log, xlab=myxlab,
+ matplot(xxx, yyy, type = "n", log=log, xlab=myxlab,
ylab=myylab, main=main, ...)
}
- lwd = rep(lwd, len=nos*(nos-1)/2)
- col = rep(col, len=nos*(nos-1)/2)
- lty = rep(lty, len=nos*(nos-1)/2)
- tcol = rep(tcol, len=nos*(nos-1)/2)
+ lwd = rep(lwd, len = nos*(nos-1)/2)
+ col = rep(col, len = nos*(nos-1)/2)
+ lty = rep(lty, len = nos*(nos-1)/2)
+ tcol = rep(tcol, len = nos*(nos-1)/2)
oo = order(coef.obj at lv) # Sort by the latent variable
ii = 0
- col = rep(col, length=nos*(nos-1)/2)
+ col = rep(col, length = nos*(nos-1)/2)
species.names = NULL
if (plot.it)
for(i1 in seq(whichSpecies.numer)) {
@@ -2717,7 +2721,7 @@ trplot.qrrvglm = function(object,
cbind(sppNames[i1], sppNames[i2]))
matplot(fv[oo,whichSpecies.numer[i1]],
fv[oo,whichSpecies.numer[i2]],
- type=type, add=TRUE,
+ type=type, add = TRUE,
lty=lty[ii], lwd=lwd[ii], col=col[ii],
pch = if (label.sites) " " else "*" )
if (label.sites && length(sitenames))
@@ -2747,7 +2751,7 @@ vcovqrrvglm = function(object,
ITolerances = object at control$EqualTolerances,
MaxScale = c("predictors", "response"),
dispersion = rep(if (length(sobj at dispersion)) sobj at dispersion else 1,
- len=M), ...) {
+ len = M), ...) {
stop("this function is not yet completed")
if (mode(MaxScale) != "character" && mode(MaxScale) != "name")
@@ -2759,7 +2763,7 @@ vcovqrrvglm = function(object,
sobj = summary(object)
cobj = Coef(object, ITolerances = ITolerances, ...)
M = nrow(cobj at A)
- dispersion = rep(dispersion, len=M)
+ dispersion = rep(dispersion, len = M)
if (cobj at Rank != 1)
stop("object must be a rank 1 model")
@@ -2770,9 +2774,9 @@ vcovqrrvglm = function(object,
if ((length(object at control$colx1.index) != 1) ||
(names(object at control$colx1.index) != "(Intercept)"))
stop("Can only handle Norrr=~1 models")
- okvals=c(3*M,2*M+1) # Tries to correspond to EqualTol==c(FALSE,TRUE) resp.
+ okvals=c(3*M,2*M+1) # Tries to correspond to EqualTol == c(FALSE,TRUE) resp.
if (all(length(coef(object)) != okvals))
- stop("Can only handle intercepts-only model with EqualTolerances=FALSE")
+ stop("Can only handle intercepts-only model with EqualTolerances = FALSE")
answer = NULL
Cov.unscaled = array(NA, c(3,3,M), dimnames=list(
@@ -2784,21 +2788,22 @@ vcovqrrvglm = function(object,
M+ifelse(object at control$EqualTolerances, 1, spp))
vcov = Cov.unscaled[,,spp] =
sobj at cov.unscaled[index,index] # Order is A, D, B1
- se2Max = dvecMax[spp,,drop=FALSE] %*% vcov %*% cbind(dvecMax[spp,])
- se2Tol = dvecTol[spp,,drop=FALSE] %*% vcov %*% cbind(dvecTol[spp,])
- se2Opt = dvecOpt[spp,,drop=FALSE] %*% vcov %*% cbind(dvecOpt[spp,])
+ se2Max = dvecMax[spp,,drop = FALSE] %*% vcov %*% cbind(dvecMax[spp,])
+ se2Tol = dvecTol[spp,,drop = FALSE] %*% vcov %*% cbind(dvecTol[spp,])
+ se2Opt = dvecOpt[spp,,drop = FALSE] %*% vcov %*% cbind(dvecOpt[spp,])
answer = rbind(answer, dispersion[spp]^0.5 *
c(se2Opt=se2Opt, se2Tol=se2Tol, se2Max=se2Max))
}
- link.function = if (MaxScale=="predictors")
+ link.function = if (MaxScale == "predictors")
remove.arg(object at misc$predictors.names[1]) else ""
dimnames(answer) = list(dimnames(cobj at D)[[3]], c("Optimum", "Tolerance",
- if (nchar(link.function)) paste(link.function,"(Maximum)",sep="") else
- "Maximum"))
- NAthere = is.na(answer %*% rep(1, len=3))
+ if (nchar(link.function))
+ paste(link.function, "(Maximum)", sep = "") else
+ "Maximum"))
+ NAthere = is.na(answer %*% rep(1, len = 3))
answer[NAthere,] = NA # NA in tolerance means NA everywhere else
- new(Class="vcov.qrrvglm",
+ new(Class = "vcov.qrrvglm",
Cov.unscaled=Cov.unscaled,
dispersion=dispersion,
se=sqrt(answer))
@@ -2811,10 +2816,10 @@ setMethod("vcov", "rrvglm", function(object, ...)
setMethod("vcov", "qrrvglm", function(object, ...)
vcovqrrvglm(object, ...))
-setClass(Class="vcov.qrrvglm", representation(
- Cov.unscaled="array", # permuted cov.unscaled
- dispersion="numeric",
- se="matrix"))
+setClass(Class = "vcov.qrrvglm", representation(
+ Cov.unscaled = "array", # permuted cov.unscaled
+ dispersion = "numeric",
+ se = "matrix"))
@@ -2837,18 +2842,18 @@ setMethod("model.matrix", "qrrvglm", function(object, ...)
persp.qrrvglm = function(x, varlvI = FALSE, reference = NULL,
- plot.it=TRUE,
- xlim=NULL, ylim=NULL, zlim=NULL, # zlim ignored if Rank==1
+ plot.it = TRUE,
+ xlim = NULL, ylim = NULL, zlim = NULL, # zlim ignored if Rank == 1
gridlength = if (Rank == 1) 301 else c(51,51),
whichSpecies = NULL,
- xlab = if (Rank==1) "Latent Variable" else "Latent Variable 1",
- ylab = if (Rank==1) "Expected Value" else "Latent Variable 2",
- zlab="Expected value",
- labelSpecies = FALSE, # For Rank==1 only
- stretch = 1.05, # quick and dirty, Rank==1 only
- main="",
+ xlab = if (Rank == 1) "Latent Variable" else "Latent Variable 1",
+ ylab = if (Rank == 1) "Expected Value" else "Latent Variable 2",
+ zlab = "Expected value",
+ labelSpecies = FALSE, # For Rank == 1 only
+ stretch = 1.05, # quick and dirty, Rank == 1 only
+ main = "",
ticktype = "detailed",
- col = if (Rank==1) par()$col else "white",
+ col = if (Rank == 1) par()$col else "white",
llty=par()$lty, llwd=par()$lwd,
add1 = FALSE,
...) {
@@ -2861,31 +2866,33 @@ persp.qrrvglm = function(x, varlvI = FALSE, reference = NULL,
NOS = ncol(fv) # Number of species
M = object at misc$M #
- xlim = rep(if (length(xlim)) xlim else range(coef.obj at lv[,1]), length=2)
+ xlim = rep(if (length(xlim)) xlim else range(coef.obj at lv[,1]), length = 2)
if (!length(oylim)) {
- ylim = if (Rank==1) c(0, max(fv)*stretch) else
- rep(range(coef.obj at lv[,2]), length=2)
+ ylim = if (Rank == 1) c(0, max(fv)*stretch) else
+ rep(range(coef.obj at lv[,2]), length = 2)
}
- gridlength = rep(gridlength, length=Rank)
- lv1 = seq(xlim[1], xlim[2], length=gridlength[1])
- if (Rank==1) {
+ gridlength = rep(gridlength, length = Rank)
+ lv1 = seq(xlim[1], xlim[2], length = gridlength[1])
+ if (Rank == 1) {
m = cbind(lv1)
} else {
- lv2 = seq(ylim[1], ylim[2], length=gridlength[2])
+ lv2 = seq(ylim[1], ylim[2], length = gridlength[2])
m = expand.grid(lv1,lv2)
}
- if (dim(coef.obj at B1)[1] != 1 || dimnames(coef.obj at B1)[[1]] != "(Intercept)")
+ if (dim(coef.obj at B1)[1] != 1 ||
+ dimnames(coef.obj at B1)[[1]] != "(Intercept)")
stop("Norrr = ~ 1 is needed")
LP = coef.obj at A %*% t(cbind(m)) # M by n
LP = LP + c(coef.obj at B1) # Assumes \bix_1 = 1 (intercept only)
mm = as.matrix(m)
N = ncol(LP)
- for(j in 1:M) {
- for(i in 1:N) {
- LP[j,i] = LP[j,i] + mm[i,,drop=FALSE] %*% coef.obj at D[,,j] %*%
- t(mm[i,,drop=FALSE])
+ for(jay in 1:M) {
+ for(ii in 1:N) {
+ LP[jay, ii] = LP[jay, ii] +
+ mm[ii, , drop = FALSE] %*% coef.obj at D[,,jay] %*%
+ t(mm[ii, , drop = FALSE])
}
}
LP = t(LP) # n by M
@@ -2903,22 +2910,22 @@ persp.qrrvglm = function(x, varlvI = FALSE, reference = NULL,
whichSpecies = sppNames[whichSpecies.numer] # Convert to character
} else
whichSpecies.numer = match(whichSpecies, sppNames)
- if (Rank==1) {
+ if (Rank == 1) {
if (plot.it) {
if (!length(oylim))
ylim = c(0, max(fitvals[,whichSpecies.numer])*stretch) # A revision
- col = rep(col, len=length(whichSpecies.numer))
+ col = rep(col, len = length(whichSpecies.numer))
llty = rep(llty, leng=length(whichSpecies.numer))
llwd = rep(llwd, leng=length(whichSpecies.numer))
if (!add1)
- matplot(lv1, fitvals, xlab=xlab, ylab=ylab, type="n",
+ matplot(lv1, fitvals, xlab=xlab, ylab=ylab, type = "n",
main=main, xlim=xlim, ylim=ylim, ...)
for(j in 1:length(whichSpecies.numer)) {
ptr2 = whichSpecies.numer[j] # points to species column
lines(lv1, fitvals[,ptr2], col=col[j],
lty=llty[j], lwd=llwd[j], ...)
if (labelSpecies) {
- ptr1=(1:nrow(fitvals))[max(fitvals[,ptr2])==fitvals[,ptr2]]
+ ptr1=(1:nrow(fitvals))[max(fitvals[,ptr2]) == fitvals[,ptr2]]
ptr1 = ptr1[1]
text(lv1[ptr1], fitvals[ptr1,ptr2]+
(stretch-1)*diff(range(ylim)),
@@ -2945,8 +2952,8 @@ persp.qrrvglm = function(x, varlvI = FALSE, reference = NULL,
invisible(list(fitted=fitvals,
lv1grid=lv1,
- lv2grid = if (Rank==2) lv2 else NULL,
- maxfitted = if (Rank==2) maxfitted else NULL))
+ lv2grid = if (Rank == 2) lv2 else NULL,
+ maxfitted = if (Rank == 2) maxfitted else NULL))
}
if (!isGeneric("persp"))
@@ -2970,7 +2977,7 @@ lv.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
}
lv.rrvglm = function(object, ...) {
- ans = lvplot(object, plot.it=FALSE)
+ ans = lvplot(object, plot.it = FALSE)
if (ncol(ans) == 1) dimnames(ans) = list(dimnames(ans)[[1]], "lv")
ans
}
@@ -3059,7 +3066,7 @@ is.bell.vlm <-
is.bell.rrvglm <- function(object, ...) {
M = object at misc$M
ynames = object at misc$ynames
- ans = rep(FALSE, len=M)
+ ans = rep(FALSE, len = M)
if (length(ynames)) names(ans) = ynames
ans
}
diff --git a/R/family.survival.R b/R/family.survival.R
index 2a78d10..d1005ff 100644
--- a/R/family.survival.R
+++ b/R/family.survival.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -10,7 +11,7 @@
dcnormal1 = function(r1=0, r2=0, link.sd="loge",
- earg=list(),
+ earg =list(),
isd=NULL, zero=NULL)
{
if (!is.Numeric(r1, allow=1, integ=TRUE) || r1<0) stop("bad input for r1")
@@ -22,15 +23,15 @@
new("vglmff",
blurb=c("Univariate Normal distribution with double censoring\n\n",
"Links: ",
- "mean; ", namesof("sd", link.sd, earg=earg, tag= TRUE),
+ "mean; ", namesof("sd", link.sd, earg =earg, tag= TRUE),
"\n",
"Variance: sd^2"),
constraints=eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }) , list( .zero=zero))),
+ }) , list( .zero = zero))),
initialize=eval(substitute(expression({
predictors.names =
- c("mean", namesof("sd", .link.sd, earg=.earg, tag= FALSE))
+ c("mean", namesof("sd", .link.sd, earg =.earg, tag= FALSE))
if (ncol(y <- cbind(y)) != 1)
stop("the response must be a vector or a one-column matrix")
if (length(w) != n || !is.Numeric(w, integ=TRUE, posit=TRUE))
@@ -45,10 +46,10 @@
1.25 * sqrt( sum(w * junk$resid^2) / junk$df.residual )
}
etastart = cbind(mu=y,
- theta2eta(sd.y.est, .link.sd, earg=.earg))
+ theta2eta(sd.y.est, .link.sd, earg =.earg))
}
}) , list( .link.sd=link.sd, .r1=r1, .r2=r2, .isd=isd,
- .earg=earg ))),
+ .earg =earg ))),
inverse=function(eta, extra=NULL) eta[,1],
last=eval(substitute(expression({
misc$link = c(mu="identity", sd= .link.sd)
@@ -57,19 +58,20 @@
misc$r1 = .r1
misc$r2 = .r2
}) , list( .link.sd=link.sd, .r1=r1, .r2=r2,
- .earg=earg ))),
+ .earg =earg ))),
loglikelihood=eval(substitute(
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- sd = eta2theta(eta[,2], .link.sd, earg=.earg)
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ sd = eta2theta(eta[,2], .link.sd, earg =.earg)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
sum(w * (-log(sd) - 0.5 * ((y - mu)/sd)^2)) +
(if(.r1==0) 0 else {z1=min((y-mu)/sd); Fz1=pnorm(z1); .r1*log(Fz1)}) +
(if(.r2==0) 0 else {z2=max((y-mu)/sd); Fz2=pnorm(z2); .r2*log1p(-Fz2)})
} , list( .link.sd=link.sd, .r1=r1, .r2=r2,
- .earg=earg ))),
+ .earg =earg ))),
vfamily=c("dcnormal1"),
deriv=eval(substitute(expression({
- sd = eta2theta(eta[,2], .link.sd, earg=.earg)
+ sd = eta2theta(eta[,2], .link.sd, earg =.earg)
q1 = .r1 / extra$bign
q2 = .r2 / extra$bign
pee = 1 - q1 - q2 # 1 if r1==r2==0
@@ -84,10 +86,10 @@
dl.dsd = -1/sd + (y-mu)^2 / sd^3 +
((- .r1 * z1*fz1/Fz1 + .r2 * z2*fz2/(1-Fz2)) / sd) / (n*w)
dmu.deta = dtheta.deta(mu, "identity")
- dsd.deta = dtheta.deta(sd, .link.sd, earg=.earg)
+ dsd.deta = dtheta.deta(sd, .link.sd, earg =.earg)
cbind(w * dl.dmu * dmu.deta, w * dl.dsd * dsd.deta)
}) , list( .link.sd=link.sd, .r1=r1, .r2=r2,
- .earg=earg ))),
+ .earg =earg ))),
weight=expression({
wz = matrix(as.numeric(NA), n, dimm(M))
Q1 = ifelse(q1==0, 1, q1) # Saves division by 0 below; not elegant
@@ -170,6 +172,11 @@ rbisa = function(n, shape, scale=1) {
+
+
+
+
+
bisa = function(lshape = "loge", lscale = "loge",
eshape = list(), escale = list(),
ishape = NULL, iscale=1,
@@ -192,16 +199,17 @@ rbisa = function(n, shape, scale=1) {
new("vglmff",
blurb=c("Birnbaum-Saunders distribution\n\n",
"Links: ",
- namesof("shape", lshape, earg= eshape, tag= TRUE), "; ",
- namesof("scale", lscale, earg= escale, tag= TRUE)),
+ namesof("shape", lshape, earg = eshape, tag= TRUE), "; ",
+ namesof("scale", lscale, earg = escale, tag= TRUE)),
constraints=eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }) , list( .zero=zero))),
+ }) , list( .zero = zero))),
initialize=eval(substitute(expression({
if (ncol(y <- cbind(y)) != 1)
stop("the response must be a vector or a one-column matrix")
- predictors.names = c(namesof("shape", .lshape,earg= .eshape,tag=FALSE),
- namesof("scale", .lscale, tag=FALSE))
+ predictors.names =
+ c(namesof("shape", .lshape, earg = .eshape, tag = FALSE),
+ namesof("scale", .lscale, tag=FALSE))
if (!length(etastart)) {
scale.init = rep( .iscale, len=n)
shape.init = if (is.Numeric( .ishape)) rep( .ishape, len=n) else {
@@ -216,55 +224,57 @@ rbisa = function(n, shape, scale=1) {
sqrt(2*( pmax(ybar, scale.init+0.1) / scale.init - 1))
}
}
- etastart = cbind(theta2eta(shape.init, .lshape, earg= .eshape),
- theta2eta(scale.init, .lscale, earg= .escale))
+ etastart = cbind(theta2eta(shape.init, .lshape, earg = .eshape),
+ theta2eta(scale.init, .lscale, earg = .escale))
}
- }) , list( .lshape=lshape, .lscale=lscale,
+ }) , list( .lshape = lshape, .lscale = lscale,
.ishape=ishape, .iscale=iscale,
- .eshape=eshape, .escale=escale,
+ .eshape = eshape, .escale = escale,
.method.init=method.init ))),
inverse=eval(substitute(function(eta, extra=NULL) {
- sh = eta2theta(eta[,1], .lshape, earg= .eshape)
- sc = eta2theta(eta[,2], .lscale, earg= .escale)
+ sh = eta2theta(eta[,1], .lshape, earg = .eshape)
+ sc = eta2theta(eta[,2], .lscale, earg = .escale)
sc * (1 + sh^2 / 2)
- }, list( .lshape=lshape, .lscale=lscale,
- .eshape=eshape, .escale=escale ))),
+ }, list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale ))),
last=eval(substitute(expression({
misc$link = c(shape= .lshape, scale= .lscale)
misc$earg = list(shape= .eshape, scale= .escale)
misc$expected = TRUE
- }) , list( .lshape=lshape, .lscale=lscale,
- .eshape=eshape, .escale=escale ))),
+ }) , list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale ))),
loglikelihood=eval(substitute(
function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- sh = eta2theta(eta[,1], .lshape, earg= .eshape)
- sc = eta2theta(eta[,2], .lscale, earg= .escale)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ sh = eta2theta(eta[,1], .lshape, earg = .eshape)
+ sc = eta2theta(eta[,2], .lscale, earg = .escale)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
sum(w * dbisa(x=y, shape=sh, scale=sc, log = TRUE))
}
- } , list( .lshape=lshape, .lscale=lscale,
- .eshape=eshape, .escale=escale ))),
+ } , list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale ))),
vfamily=c("bisa"),
deriv=eval(substitute(expression({
- sh = eta2theta(eta[,1], .lshape, earg= .eshape)
- sc = eta2theta(eta[,2], .lscale, earg= .escale)
+ sh = eta2theta(eta[,1], .lshape, earg = .eshape)
+ sc = eta2theta(eta[,2], .lscale, earg = .escale)
dl.dsh = ((y/sc - 2 + sc/y) / sh^2 - 1) / sh
- dl.dsc = -0.5 / sc + 1/(y+sc) + sqrt(y) * ((y+sc)/y) *
+ dl.dsc = -0.5 / sc + 1/(y+sc) + sqrt(y) * ((y+sc)/y) *
(sqrt(y/sc) - sqrt(sc/y)) / (2 * sh^2 * sc^1.5)
- dsh.deta = dtheta.deta(sh, .lshape, earg= .eshape)
- dsc.deta = dtheta.deta(sc, .lscale, earg= .escale)
+ dsh.deta = dtheta.deta(sh, .lshape, earg = .eshape)
+ dsc.deta = dtheta.deta(sc, .lscale, earg = .escale)
w * cbind(dl.dsh * dsh.deta, dl.dsc * dsc.deta)
- }) , list( .lshape=lshape, .lscale=lscale,
- .eshape=eshape, .escale=escale ))),
+ }) , list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale ))),
weight=eval(substitute(expression({
wz = matrix(as.numeric(NA), n, M) # Diagonal!!
wz[,iam(1,1,M)] = 2 * dsh.deta^2 / sh^2
hfunction = function(alpha)
- alpha * sqrt(pi/2) - pi * exp(2/alpha^2) * (1-pnorm(2/alpha))
+ alpha * sqrt(pi/2) - pi * exp(2/alpha^2) *
+ pnorm(2/alpha, lower.tail = FALSE)
wz[,iam(2,2,M)] = dsc.deta^2 * (sh * hfunction(sh) / sqrt(2*pi) +
1) / (sh*sc)^2
w * wz
- }), list( .zero=zero ))))
+ }), list( .zero = zero ))))
}
diff --git a/R/family.ts.R b/R/family.ts.R
index a945e46..677c220 100644
--- a/R/family.ts.R
+++ b/R/family.ts.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/family.univariate.R b/R/family.univariate.R
index 117b8e9..19c2ea3 100644
--- a/R/family.univariate.R
+++ b/R/family.univariate.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -21,9 +22,8 @@
-
-getMaxMin = function(vov, objfun, y, x, w, extraargs=NULL, maximize=TRUE,
- abs.arg=FALSE) {
+getMaxMin = function(vov, objfun, y, x, w, extraargs = NULL, maximize = TRUE,
+ abs.arg = FALSE) {
if (!is.vector(vov)) stop("'vov' must be a vector")
objvals = vov
for(ii in 1:length(vov))
@@ -41,17 +41,17 @@ getMaxMin = function(vov, objfun, y, x, w, extraargs=NULL, maximize=TRUE,
- mccullagh89 = function(ltheta="rhobit", lnu="logoff",
- itheta=NULL, inu=NULL,
- etheta=list(),
- enu=if (lnu == "logoff") list(offset=0.5) else list(),
- zero=NULL)
+ mccullagh89 = function(ltheta = "rhobit", lnu = "logoff",
+ itheta = NULL, inu = NULL,
+ etheta = list(),
+ enu = if (lnu == "logoff") list(offset = 0.5) else list(),
+ zero = NULL)
{
if (mode(ltheta) != "character" && mode(ltheta) != "name")
ltheta = as.character(substitute(ltheta))
if (mode(lnu) != "character" && mode(lnu) != "name")
lnu = as.character(substitute(lnu))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(etheta)) etheta = list()
if (!is.list(enu)) enu = list()
@@ -62,73 +62,74 @@ getMaxMin = function(vov, objfun, y, x, w, extraargs=NULL, maximize=TRUE,
" Beta[nu+1/2, 1/2], ",
" -1 < y < 1, -1 < theta < 1, nu > -1/2\n",
"Links: ",
- namesof("theta", ltheta, earg=etheta), ", ",
- namesof("nu", lnu, earg=enu),
+ namesof("theta", ltheta, earg = etheta), ", ",
+ namesof("nu", lnu, earg = enu),
"\n",
"\n",
"Mean: nu*theta/(1+nu)"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
y = as.numeric(y)
if (any(y <= -1 | y >= 1))
stop("all y values must be in (-1,1)")
- predictors.names= c(namesof("theta", .ltheta, earg= .etheta,tag=FALSE),
- namesof("nu", .lnu, earg= .enu, tag=FALSE))
+ predictors.names =
+ c(namesof("theta", .ltheta, earg = .etheta, tag = FALSE),
+ namesof("nu", .lnu, earg = .enu, tag = FALSE))
if (!length(etastart)) {
- theta.init = if (length(.itheta)) rep(.itheta, length=n) else {
+ theta.init = if (length( .itheta)) rep( .itheta, length = n) else {
mccullagh89.aux = function(thetaval, y, x, w, extraargs)
mean((y-thetaval)*(thetaval^2-1)/(1-2*thetaval*y+thetaval^2))
theta.grid = seq(-0.9, 0.9, by=0.05)
try.this = getMaxMin(theta.grid, objfun=mccullagh89.aux,
- y=y, x=x, w=w, maximize=FALSE,
- abs.arg=TRUE)
- try.this = rep(try.this, len=n)
+ y=y, x=x, w=w, maximize = FALSE,
+ abs.arg = TRUE)
+ try.this = rep(try.this, len = n)
try.this
}
tmp = y / (theta.init-y)
tmp[tmp < -0.4] = -0.4
tmp[tmp > 10.0] = 10.0
- nu.init = rep(if (length(.inu)) .inu else tmp, length=n)
+ nu.init = rep(if (length( .inu)) .inu else tmp, length = n)
nu.init[!is.finite(nu.init)] = 0.4
- etastart = cbind(theta2eta(theta.init, .ltheta, earg=.etheta ),
- theta2eta(nu.init, .lnu, earg= .enu ))
+ etastart = cbind(theta2eta(theta.init, .ltheta, earg = .etheta ),
+ theta2eta(nu.init, .lnu, earg = .enu ))
}
}), list( .ltheta=ltheta, .lnu=lnu, .inu=inu, .itheta=itheta,
.etheta = etheta, .enu=enu ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- Theta = eta2theta(eta[,1], .ltheta, earg= .etheta )
- nu = eta2theta(eta[,2], .lnu, earg= .enu )
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ Theta = eta2theta(eta[,1], .ltheta, earg = .etheta )
+ nu = eta2theta(eta[,2], .lnu, earg = .enu )
nu*Theta/(1+nu)
}, list( .ltheta=ltheta, .lnu=lnu,
.etheta = etheta, .enu=enu ))),
last = eval(substitute(expression({
- misc$link = c("theta"= .ltheta, "nu"= .lnu)
- misc$earg = list(theta = .etheta, nu= .enu )
+ misc$link = c("theta" = .ltheta, "nu" = .lnu)
+ misc$earg = list("theta" = .etheta, "nu" = .enu )
}), list( .ltheta=ltheta, .lnu=lnu, .etheta = etheta, .enu=enu ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- Theta = eta2theta(eta[,1], .ltheta, earg= .etheta )
- nu = eta2theta(eta[,2], .lnu, earg= .enu )
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ Theta = eta2theta(eta[,1], .ltheta, earg = .etheta )
+ nu = eta2theta(eta[,2], .lnu, earg = .enu )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w * ((nu-0.5)*log1p(-y^2) - nu * log1p(-2*Theta*y + Theta^2) -
lbeta(nu+0.5,0.5 )))
}, list( .ltheta=ltheta, .lnu=lnu, .etheta = etheta, .enu=enu ))),
- vfamily=c("mccullagh89"),
- deriv=eval(substitute(expression({
- Theta = eta2theta(eta[,1], .ltheta, earg= .etheta )
- nu = eta2theta(eta[,2], .lnu, earg= .enu )
- dTheta.deta = dtheta.deta(Theta, .ltheta, earg= .etheta )
- dnu.deta = dtheta.deta(nu, .lnu, earg= .enu )
+ vfamily = c("mccullagh89"),
+ deriv = eval(substitute(expression({
+ Theta = eta2theta(eta[,1], .ltheta, earg = .etheta )
+ nu = eta2theta(eta[,2], .lnu, earg = .enu )
+ dTheta.deta = dtheta.deta(Theta, .ltheta, earg = .etheta )
+ dnu.deta = dtheta.deta(nu, .lnu, earg = .enu )
dl.dTheta = 2 * nu * (y-Theta) / (1 -2*Theta*y + Theta^2)
dl.dnu = log1p(-y^2) - log1p(-2*Theta*y + Theta^2) -
digamma(nu+0.5) + digamma(nu+1)
w * cbind(dl.dTheta * dTheta.deta, dl.dnu * dnu.deta)
}), list( .ltheta=ltheta, .lnu=lnu, .etheta = etheta, .enu=enu ))),
- weight=eval(substitute(expression({
+ weight = eval(substitute(expression({
d2l.dTheta2 = (2 * nu^2 / (1+nu)) / (1-Theta^2)
d2l.dnu2 = trigamma(nu+0.5) - trigamma(nu+1)
wz = matrix(as.numeric(NA), n, M) #diagonal matrix
@@ -141,18 +142,18 @@ getMaxMin = function(vov, objfun, y, x, w, extraargs=NULL, maximize=TRUE,
-hzeta.control <- function(save.weight=TRUE, ...)
+hzeta.control <- function(save.weight = TRUE, ...)
{
- list(save.weight=save.weight)
+ list(save.weight = save.weight)
}
- hzeta = function(link="loglog", earg=list(), ialpha=NULL, nsimEIM=100)
+ hzeta = function(link = "loglog", earg = list(), ialpha = NULL, nsimEIM = 100)
{
stopifnot(ialpha > 0)
- stopifnot(nsimEIM > 10, length(nsimEIM)==1, nsimEIM==round(nsimEIM))
+ stopifnot(nsimEIM > 10, length(nsimEIM) == 1, nsimEIM == round(nsimEIM))
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -163,58 +164,58 @@ hzeta.control <- function(save.weight=TRUE, ...)
"Haight's Zeta distribution f(y) = (2y-1)^(-alpha) - (2y+1)^(-alpha),\n",
" alpha>0, y=1,2,....\n\n",
"Link: ",
- namesof("alpha", link, earg=earg), "\n\n",
+ namesof("alpha", link, earg = earg), "\n\n",
"Mean: (1-2^(-alpha)) * zeta(alpha) if alpha>1",
"\n",
"Variance: (1-2^(1-alpha)) * zeta(alpha-1) - mean^2 if alpha>2"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
y = as.numeric(y)
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
if (any(y < 1))
stop("all y values must be in 1,2,3,....")
- predictors.names = namesof("alpha", .link, earg= .earg, tag=FALSE)
+ predictors.names = namesof("alpha", .link, earg = .earg, tag = FALSE)
if (!length(etastart)) {
a.init = if (length( .ialpha)) .ialpha else {
if ((meany <- weighted.mean(y,w)) < 1.5) 3.0 else
if (meany < 2.5) 1.4 else 1.1
}
- a.init = rep(a.init, length=n)
- etastart = theta2eta(a.init, .link, earg= .earg )
+ a.init = rep(a.init, length = n)
+ etastart = theta2eta(a.init, .link, earg = .earg )
}
- }), list( .link=link, .earg=earg, .ialpha=ialpha ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- alpha = eta2theta(eta, .link, earg= .earg )
+ }), list( .link = link, .earg = earg, .ialpha=ialpha ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ alpha = eta2theta(eta, .link, earg = .earg )
mu = (1-2^(-alpha)) * zeta(alpha)
mu[alpha <= 1] = Inf
mu
- }, list( .link=link, .earg=earg ))),
+ }, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = c(alpha= .link)
- misc$earg = list(alpha= .earg)
+ misc$link = c(alpha = .link)
+ misc$earg = list(alpha = .earg)
misc$nsimEIM = .nsimEIM
- }), list( .link=link, .earg=earg, .nsimEIM=nsimEIM ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- alpha = eta2theta(eta, .link, earg= .earg )
+ }), list( .link = link, .earg = earg, .nsimEIM = nsimEIM ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ alpha = eta2theta(eta, .link, earg = .earg )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dhzeta(x=y, alpha=alpha, log=TRUE))
+ sum(w * dhzeta(x=y, alpha=alpha, log = TRUE))
}
- }, list( .link=link, .earg=earg ))),
- vfamily=c("hzeta"),
- deriv=eval(substitute(expression({
- alpha = eta2theta(eta, .link, earg= .earg )
- dalpha.deta = dtheta.deta(alpha, .link, earg= .earg )
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("hzeta"),
+ deriv = eval(substitute(expression({
+ alpha = eta2theta(eta, .link, earg = .earg )
+ dalpha.deta = dtheta.deta(alpha, .link, earg = .earg )
d3 = deriv3(~ log((2*y-1)^(-alpha) - (2*y+1)^(-alpha)),
- "alpha", hessian=FALSE)
+ "alpha", hessian = FALSE)
eval.d3 = eval(d3)
dl.dalpha = attr(eval.d3, "gradient")
w * dl.dalpha * dalpha.deta
- }), list( .link=link, .earg=earg ))),
+ }), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
sd3 = deriv3(~ log((2*ysim-1)^(-alpha) - (2*ysim+1)^(-alpha)),
- "alpha", hessian=FALSE)
+ "alpha", hessian = FALSE)
run.var = 0
for(ii in 1:( .nsimEIM )) {
ysim = rhzeta(n, alpha=alpha)
@@ -226,11 +227,11 @@ hzeta.control <- function(save.weight=TRUE, ...)
}
wz = if (intercept.only)
matrix(colMeans(cbind(run.var)),
- n, dimm(M), byrow=TRUE) else cbind(run.var)
+ n, dimm(M), byrow = TRUE) else cbind(run.var)
wz = wz * dalpha.deta^2
w * wz
- }), list( .nsimEIM=nsimEIM ))))
+ }), list( .nsimEIM = nsimEIM ))))
}
@@ -242,13 +243,13 @@ dhzeta = function(x, alpha, log = FALSE)
stop("bad input for argument 'log'")
rm(log)
- if (!is.Numeric(alpha, posit=TRUE))
+ if (!is.Numeric(alpha, posit = TRUE))
stop("'alpha' must be numeric and have positive values")
nn = max(length(x), length(alpha))
- x = rep(x, len=nn); alpha = rep(alpha, len=nn)
+ x = rep(x, len = nn); alpha = rep(alpha, len = nn)
ox = !is.finite(x)
zero = ox | round(x) != x | x < 1
- ans = rep(0, len=nn)
+ ans = rep(0, len = nn)
ans[!zero] = (2*x[!zero]-1)^(-alpha[!zero]) - (2*x[!zero]+1)^(-alpha[!zero])
if (log.arg) log(ans) else ans
}
@@ -256,11 +257,11 @@ dhzeta = function(x, alpha, log = FALSE)
phzeta = function(q, alpha)
{
- if (!is.Numeric(alpha, posit=TRUE))
+ if (!is.Numeric(alpha, posit = TRUE))
stop("'alpha' must be numeric and have positive values")
nn = max(length(q), length(alpha))
- q = rep(q, len=nn)
- alpha = rep(alpha, len=nn)
+ q = rep(q, len = nn)
+ alpha = rep(alpha, len = nn)
oq = !is.finite(q)
zero = oq | q < 1
q = floor(q)
@@ -272,365 +273,373 @@ phzeta = function(q, alpha)
qhzeta = function(p, alpha)
{
- if (!is.Numeric(alpha, posit=TRUE))
+ if (!is.Numeric(alpha, posit = TRUE))
stop("'alpha' must be numeric and have positive values")
- if (!is.Numeric(p, posit=TRUE) || any(p >= 1))
+ if (!is.Numeric(p, posit = TRUE) || any(p >= 1))
stop("argument 'p' must have values inside the interval (0,1)")
nn = max(length(p), length(alpha))
- p = rep(p, len=nn)
- alpha = rep(alpha, len=nn)
+ p = rep(p, len = nn)
+ alpha = rep(alpha, len = nn)
ans = (((1 - p)^(-1/alpha) - 1) / 2) # p is in (0,1)
floor(ans+1)
}
rhzeta = function(n, alpha)
{
- if (!is.Numeric(alpha, posit=TRUE))
+ if (!is.Numeric(alpha, posit = TRUE))
stop("'alpha' must be numeric and have positive values")
- if (!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
stop("argument 'n' must be a positive integer")
ans = ((runif(n)^(-1/alpha) - 1) / 2)
floor(ans+1)
}
- dirmultinomial = function(lphi="logit", ephi = list(),
- iphi = 0.10, parallel= FALSE, zero="M")
-{
- if (mode(lphi) != "character" && mode(lphi) != "name")
- lphi = as.character(substitute(lphi))
- if (length(zero) &&
- !(is.Numeric(zero, integer=TRUE, posit=TRUE) || is.character(zero )))
- stop("bad input for argument 'zero'")
- if (!is.Numeric(iphi, positive=TRUE) || max(iphi) >= 1.0)
- stop("bad input for argument 'iphi'")
- if (!is.list(ephi)) ephi = list()
- new("vglmff",
- blurb = c("Dirichlet-multinomial distribution\n\n",
- "Links: ",
- "log(prob[1]/prob[M]), ..., log(prob[M-1]/prob[M]), ",
- namesof("phi", lphi, earg=ephi), "\n", "\n",
- "Mean: shape_j / sum_j(shape_j)"),
- constraints=eval(substitute(expression({
- .ZERO = .zero
- if (is.character(.ZERO)) .ZERO = eval(parse(text = .ZERO))
- .PARALLEL = .parallel
- if (is.logical(.PARALLEL) && .PARALLEL) {
- mycmatrix = if (length(.ZERO))
- stop("can only handle parallel=TRUE when zero=NULL") else
- cbind(rbind(matrix(1,M-1,1), 0), rbind(matrix(0,M-1,1), 1))
- } else
- mycmatrix = if (M==1) diag(1) else diag(M)
- constraints=cm.vgam(mycmatrix, x, .PARALLEL, constraints, int=TRUE)
- constraints = cm.zero.vgam(constraints, x, .ZERO, M)
- }), list( .parallel=parallel, .zero=zero ))),
- initialize=eval(substitute(expression({
- delete.zero.colns <- TRUE
- eval(process.categorical.data.vgam)
- y = as.matrix(y)
- ycount = as.matrix(y * w)
- M = ncol(y)
- if (max(abs(ycount - round(ycount ))) > 1.0e-6)
- warning("there appears to be non-integer responses")
- if (min(ycount) < 0)
- stop("all values of the response (matrix) must be non-negative")
- predictors.names =
- c(paste("log(prob[,",1:(M-1),"]/prob[,",M,"])", sep=""),
- namesof("phi", .lphi, short=TRUE))
- extra$n2 = w # aka omega, must be integer # as.vector(apply(y, 1, sum))
- if (!length(etastart)) {
- prob.init = colSums(ycount)
- prob.init = prob.init / sum(prob.init)
- prob.init = matrix(prob.init, n, M, byrow=TRUE)
- phi.init = rep( .iphi, len=n)
- etastart = cbind(log(prob.init[,-M]/prob.init[,M]),
- theta2eta(phi.init, .lphi, earg= .ephi ))
- }
- }), list( .lphi=lphi, .ephi=ephi, .iphi=iphi ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- M = if (is.matrix(eta)) ncol(eta) else 1
- temp = cbind(exp(eta[,-M]), 1)
- temp / as.vector(temp %*% rep(1, M))
- }, list( .ephi=ephi, .lphi=lphi ))),
- last = eval(substitute(expression({
- misc$link = c(rep("noLinkFunction", length=M-1), .lphi)
- names(misc$link) = c(paste("prob", 1:(M-1), sep=""), "phi")
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for(ii in 1:(M-1)) misc$earg[[ii]] = list()
- misc$earg[[M]] = .ephi
- misc$expected = TRUE
- if (intercept.only) {
- misc$shape=probs[1,]*(1/phi[1]-1) # phi & probs computed in @deriv
- }
- }), list( .ephi=ephi, .lphi=lphi ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- M = if (is.matrix(eta)) ncol(eta) else 1
- probs = cbind(exp(eta[,-M]), 1)
- probs = probs / as.vector(probs %*% rep(1, M))
- phi = eta2theta(eta[,M], .lphi, earg= .ephi )
- n = length(phi)
- ycount = as.matrix(y * w)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- ans = rep(0.0, len=n)
- omega = extra$n2
- for(jay in 1:M) {
- maxyj = max(ycount[,jay])
- loopOveri = n < maxyj
- if (loopOveri) {
- for(iii in 1:n) {
- rrr = 1:ycount[iii,jay] # a vector
- if (ycount[iii,jay] > 0)
- ans[iii] = ans[iii] + sum(log((1-phi[iii]) *
- probs[iii,jay] + (rrr-1)*phi[iii]))
-
- }
- } else {
- for(rrr in 1:maxyj) {
- index = (rrr <= ycount[,jay]) & (ycount[,jay] > 0)
- if (any(index))
- ans[index] = ans[index] + log((1-phi[index]) *
- probs[index,jay] + (rrr-1)*phi[index])
- }
- }
- } # end of jay loop
-
- maxomega = max(omega)
- loopOveri = n < maxomega
- if (loopOveri) {
- for(iii in 1:n) {
- rrr = 1:omega[iii]
- ans[iii]= ans[iii] - sum(log1p(-phi[iii] + (rrr-1)*phi[iii]))
- }
- } else {
- for(rrr in 1:maxomega) {
- ind8 = rrr <= omega
- ans[ind8] = ans[ind8] - log1p(-phi[ind8] + (rrr-1)*phi[ind8])
- }
- }
- sum(ans)
- }
- }, list( .ephi=ephi, .lphi=lphi ))),
- vfamily=c("dirmultinomial "),
- deriv=eval(substitute(expression({
- probs = cbind(exp(eta[,-M]), 1)
- probs = probs / as.vector(probs %*% rep(1, M))
- phi = eta2theta(eta[,M], .lphi, earg= .ephi )
- dl.dprobs = matrix(0.0, n, M-1)
- dl.dphi = rep(0.0, len=n)
- omega = extra$n2
- ycount = as.matrix(y * w)
+
+ dirmultinomial <- function(lphi = "logit", ephi = list(),
+ iphi = 0.10, parallel = FALSE, zero = "M")
+{
+
+ if (mode(lphi) != "character" && mode(lphi) != "name")
+ lphi <- as.character(substitute(lphi))
+ if (length(zero) &&
+ !(is.Numeric(zero, integer = TRUE, posit = TRUE) || is.character(zero )))
+ stop("bad input for argument 'zero'")
+ if (!is.Numeric(iphi, positive = TRUE) || max(iphi) >= 1.0)
+ stop("bad input for argument 'iphi'")
+ if (!is.list(ephi)) ephi <- list()
+
+ new("vglmff",
+ blurb = c("Dirichlet-multinomial distribution\n\n",
+ "Links: ",
+ "log(prob[1]/prob[M]), ..., log(prob[M-1]/prob[M]), ",
+ namesof("phi", lphi, earg = ephi), "\n", "\n",
+ "Mean: shape_j / sum_j(shape_j)"),
+ constraints = eval(substitute(expression({
+ .ZERO <- .zero
+ if (is.character( .ZERO)) .ZERO <- eval(parse(text = .ZERO))
+ .PARALLEL <- .parallel
+ if (is.logical( .PARALLEL) && .PARALLEL) {
+ mycmatrix <- if (length( .ZERO))
+ stop("can only handle parallel = TRUE when zero = NULL") else
+ cbind(rbind(matrix(1, M - 1, 1), 0), rbind(matrix(0, M - 1, 1), 1))
+ } else
+ mycmatrix <- if (M == 1) diag(1) else diag(M)
+ constraints <- cm.vgam(mycmatrix, x, .PARALLEL,
+ constraints, int = TRUE)
+ constraints <- cm.zero.vgam(constraints, x, .ZERO, M)
+ }), list( .parallel = parallel, .zero = zero ))),
+ initialize = eval(substitute(expression({
+ delete.zero.colns <- TRUE
+ eval(process.categorical.data.vgam)
+
+ y <- as.matrix(y)
+ ycount <- as.matrix(y * w)
+ M <- ncol(y)
+ if (max(abs(ycount - round(ycount ))) > 1.0e-6)
+ warning("there appears to be non-integer responses")
+ if (min(ycount) < 0)
+ stop("all values of the response (matrix) must be non-negative")
+ predictors.names <-
+ c(paste("log(prob[,", 1:(M-1), "]/prob[,", M, "])", sep = ""),
+ namesof("phi", .lphi, short = TRUE))
+ extra$n2 <- w # aka omega, must be integer # as.vector(apply(y, 1, sum))
+ if (!length(etastart)) {
+ prob.init <- colSums(ycount)
+ prob.init <- prob.init / sum(prob.init)
+ prob.init <- matrix(prob.init, n, M, byrow = TRUE)
+ phi.init <- rep( .iphi, len = n)
+ etastart <- cbind(log(prob.init[,-M]/prob.init[,M]),
+ theta2eta(phi.init, .lphi, earg = .ephi ))
+ }
+ }), list( .lphi = lphi, .ephi = ephi, .iphi=iphi ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ M <- if (is.matrix(eta)) ncol(eta) else 1
+ temp <- cbind(exp(eta[,-M]), 1)
+ temp / as.vector(temp %*% rep(1, M))
+ }, list( .ephi = ephi, .lphi = lphi ))),
+ last = eval(substitute(expression({
+ misc$link <- c(rep("noLinkFunction", length = M-1), .lphi)
+ names(misc$link) <- c(paste("prob", 1:(M-1), sep = ""), "phi")
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for(ii in 1:(M-1)) misc$earg[[ii]] <- list()
+ misc$earg[[M]] <- .ephi
+ misc$expected <- TRUE
+ if (intercept.only) {
+ misc$shape<-probs[1,]*(1/phi[1]-1) # phi & probs computed in @deriv
+ }
+ }), list( .ephi = ephi, .lphi = lphi ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ M = if (is.matrix(eta)) ncol(eta) else 1
+ probs <- cbind(exp(eta[,-M]), 1)
+ probs <- probs / as.vector(probs %*% rep(1, M))
+ phi <- eta2theta(eta[,M], .lphi, earg = .ephi )
+ n <- length(phi)
+ ycount <- as.matrix(y * w)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ ans <- rep(0.0, len = n)
+ omega <- extra$n2
for(jay in 1:M) {
- maxyj = max(ycount[,jay])
- loopOveri = n < maxyj
- if (loopOveri) {
- for(iii in 1:n) {
- rrr = 1:ycount[iii,jay]
- if (ycount[iii,jay] > 0) {
- PHI = phi[iii]
- dl.dphi[iii]=dl.dphi[iii] + sum((rrr-1-probs[iii,jay]) /
- ((1-PHI)*probs[iii,jay] + (rrr-1)*PHI))
-
- tmp9 = (1-PHI) / ((1-PHI)*probs[iii,jay] + (rrr-1)*PHI)
- if (jay < M) {
- dl.dprobs[iii,jay] = dl.dprobs[iii,jay] + sum(tmp9)
- } else {
- for(jay2 in 1:(M-1))
- dl.dprobs[iii,jay2]=dl.dprobs[iii,jay2]-sum(tmp9)
- }
- }
- }
- } else {
- for(rrr in 1:maxyj) {
- index = (rrr <= ycount[,jay]) & (ycount[,jay] > 0)
- PHI = phi[index]
- dl.dphi[index] = dl.dphi[index] + (rrr-1-probs[index,jay]) /
- ((1-PHI)*probs[index,jay] + (rrr-1)*PHI)
- tmp9 = (1-PHI) / ((1-PHI)*probs[index,jay] + (rrr-1)*PHI)
- if (jay < M) {
- dl.dprobs[index,jay] = dl.dprobs[index,jay] + tmp9
- } else {
- for(jay2 in 1:(M-1))
- dl.dprobs[index,jay2] = dl.dprobs[index,jay2] - tmp9
- }
- }
+ maxyj <- max(ycount[,jay])
+ loopOveri <- n < maxyj
+ if (loopOveri) {
+ for(iii in 1:n) {
+ rrr <- 1:ycount[iii,jay] # a vector
+ if (ycount[iii,jay] > 0)
+ ans[iii] <- ans[iii] + sum(log((1-phi[iii]) *
+ probs[iii,jay] + (rrr-1)*phi[iii]))
}
+ } else {
+ for(rrr in 1:maxyj) {
+ index <- (rrr <= ycount[,jay]) & (ycount[,jay] > 0)
+ if (any(index))
+ ans[index] <- ans[index] + log((1-phi[index]) *
+ probs[index,jay] + (rrr-1)*phi[index])
+ }
+ }
} # end of jay loop
- maxomega = max(omega)
- loopOveri = n < maxomega
+
+ maxomega <- max(omega)
+ loopOveri <- n < maxomega
if (loopOveri) {
- for(iii in 1:n) {
- rrr = 1:omega[iii]
- dl.dphi[iii]=dl.dphi[iii] - sum((rrr-2)/(1 + (rrr-2)*phi[iii]))
- }
+ for(iii in 1:n) {
+ rrr <- 1:omega[iii]
+ ans[iii]<- ans[iii] - sum(log1p(-phi[iii] + (rrr-1)*phi[iii]))
+ }
} else {
- for(rrr in 1:maxomega) {
- index = rrr <= omega
- dl.dphi[index]=dl.dphi[index] - (rrr-2)/(1 + (rrr-2)*phi[index])
- }
+ for(rrr in 1:maxomega) {
+ ind8 <- rrr <= omega
+ ans[ind8] <- ans[ind8] - log1p(-phi[ind8] + (rrr-1)*phi[ind8])
+ }
}
- dprobs.deta = probs[,-M] * (1 - probs[,-M]) # n x (M-1)
- dphi.deta = dtheta.deta(phi, .lphi, earg= .ephi )
- ans = cbind(dl.dprobs * dprobs.deta, dl.dphi * dphi.deta)
- ans
- }), list( .ephi=ephi, .lphi=lphi ))),
- weight=eval(substitute(expression({
- wz = matrix(0, n, dimm(M))
- loopOveri = n < maxomega
+ sum(ans)
+ }
+ }, list( .ephi = ephi, .lphi = lphi ))),
+ vfamily = c("dirmultinomial "),
+ deriv = eval(substitute(expression({
+ probs <- cbind(exp(eta[,-M]), 1)
+ probs <- probs / as.vector(probs %*% rep(1, M))
+ phi <- eta2theta(eta[,M], .lphi, earg = .ephi )
+ dl.dprobs <- matrix(0.0, n, M-1)
+ dl.dphi <- rep(0.0, len = n)
+ omega <- extra$n2
+ ycount <- as.matrix(y * w)
+ for(jay in 1:M) {
+ maxyj <- max(ycount[,jay])
+ loopOveri <- n < maxyj
if (loopOveri) {
- for(iii in 1:n) {
- rrr = 1:omega[iii] # A vector
- PHI = phi[iii]
- pYiM.ge.rrr = 1 - pbetabin.ab(q=rrr-1, size=omega[iii],
- shape1=probs[iii,M]*(1/PHI-1),
- shape2=(1-probs[iii,M])*(1/PHI-1)) # A vector
- denomM = ((1-PHI)*probs[iii,M] + (rrr-1)*PHI)^2 # A vector
- wz[iii,iam(M,M,M)] = wz[iii,iam(M,M,M)] +
- sum(probs[iii,M]^2 * pYiM.ge.rrr / denomM) -
- sum(1 / (1 + (rrr-2)*PHI)^2)
- for(jay in 1:(M-1)) {
- denomj = ((1-PHI)*probs[iii,jay] + (rrr-1)*PHI)^2
- pYij.ge.rrr = 1 - pbetabin.ab(q=rrr-1, size=omega[iii],
- shape1=probs[iii,jay]*(1/PHI-1),
- shape2=(1-probs[iii,jay])*(1/PHI-1))
- wz[iii,iam(jay,jay,M)] = wz[iii,iam(jay,jay,M)] +
- sum(pYij.ge.rrr / denomj) +
- sum(pYiM.ge.rrr / denomM)
- for(kay in jay:(M-1)) if (kay > jay) {
- wz[iii,iam(jay,kay,M)] = wz[iii,iam(jay,kay,M)] +
- sum(pYiM.ge.rrr / denomM)
- }
- wz[iii,iam(jay,M,M)] = wz[iii,iam(jay,M,M)] +
- sum(probs[iii,jay] * pYij.ge.rrr / denomj) -
- sum(probs[iii,M] * pYiM.ge.rrr / denomM)
- wz[iii,iam(M,M,M)] = wz[iii,iam(M,M,M)] +
- sum(probs[iii,jay]^2 * pYij.ge.rrr / denomj)
- } # end of jay loop
- } # end of iii loop
+ for(iii in 1:n) {
+ rrr <- 1:ycount[iii,jay]
+ if (ycount[iii,jay] > 0) {
+ PHI <- phi[iii]
+ dl.dphi[iii] <- dl.dphi[iii] +
+ sum((rrr-1-probs[iii,jay]) / ((1-PHI)*probs[iii,jay] + (rrr-1)*PHI))
+
+ tmp9 <- (1-PHI) / ((1-PHI)*probs[iii,jay] + (rrr-1)*PHI)
+ if (jay < M) {
+ dl.dprobs[iii,jay] <- dl.dprobs[iii,jay] + sum(tmp9)
+ } else {
+ for(jay2 in 1:(M-1))
+ dl.dprobs[iii,jay2]<-dl.dprobs[iii,jay2]-sum(tmp9)
+ }
+ }
+ }
} else {
- for(rrr in 1:maxomega) {
- ind5 = rrr <= omega
- PHI = phi[ind5]
- pYiM.ge.rrr = 1 - pbetabin.ab(q=rrr-1, size=omega[ind5],
- shape1=probs[ind5,M]*(1/PHI-1),
- shape2=(1-probs[ind5,M])*(1/PHI-1))
- denomM = ((1-PHI)*probs[ind5,M] + (rrr-1)*PHI)^2
- wz[ind5,iam(M,M,M)] = wz[ind5,iam(M,M,M)] +
- probs[ind5,M]^2 * pYiM.ge.rrr / denomM -
- 1 / (1 + (rrr-2)*PHI)^2
- for(jay in 1:(M-1)) {
- denomj = ((1-PHI)*probs[ind5,jay] + (rrr-1)*PHI)^2
- pYij.ge.rrr = 1 - pbetabin.ab(q=rrr-1, size=omega[ind5],
- shape1=probs[ind5,jay]*(1/PHI-1),
- shape2=(1-probs[ind5,jay])*(1/PHI-1))
- wz[ind5,iam(jay,jay,M)] = wz[ind5,iam(jay,jay,M)] +
- pYij.ge.rrr / denomj + pYiM.ge.rrr / denomM
- for(kay in jay:(M-1)) if (kay > jay) {
- wz[ind5,iam(jay,kay,M)] = wz[ind5,iam(jay,kay,M)] +
- pYiM.ge.rrr / denomM
- }
- wz[ind5,iam(jay,M,M)] = wz[ind5,iam(jay,M,M)] +
- probs[ind5,jay] * pYij.ge.rrr / denomj -
- probs[ind5,M] * pYiM.ge.rrr / denomM
- wz[ind5,iam(M,M,M)] = wz[ind5,iam(M,M,M)] +
- probs[ind5,jay]^2 * pYij.ge.rrr / denomj
- } # end of jay loop
- } # end of rrr loop
- }
-
- for(jay in 1:(M-1))
- for(kay in jay:(M-1))
- wz[,iam(jay,kay,M)] = wz[,iam(jay,kay,M)] * (1-phi)^2
- for(jay in 1:(M-1))
- wz[,iam(jay,M,M)] = wz[,iam(jay,M,M)] * (phi-1) / phi
- wz[,iam(M,M,M)] = wz[,iam(M,M,M)] / phi^2
-
- d1Thetas.deta = cbind(dprobs.deta, dphi.deta)
- index = iam(NA, NA, M, both = TRUE, diag = TRUE)
- wz = wz * d1Thetas.deta[,index$row] * d1Thetas.deta[,index$col]
- wz
- }), list( .ephi=ephi, .lphi=lphi ))))
+ for(rrr in 1:maxyj) {
+ index <- (rrr <= ycount[,jay]) & (ycount[,jay] > 0)
+ PHI <- phi[index]
+ dl.dphi[index] <- dl.dphi[index] + (rrr-1-probs[index,jay]) /
+ ((1-PHI)*probs[index,jay] + (rrr-1)*PHI)
+ tmp9 <- (1-PHI) / ((1-PHI)*probs[index,jay] + (rrr-1)*PHI)
+ if (jay < M) {
+ dl.dprobs[index,jay] <- dl.dprobs[index,jay] + tmp9
+ } else {
+ for(jay2 in 1:(M-1))
+ dl.dprobs[index,jay2] <- dl.dprobs[index,jay2] - tmp9
+ }
+ }
+ }
+ } # end of jay loop
+ maxomega <- max(omega)
+ loopOveri <- n < maxomega
+ if (loopOveri) {
+ for(iii in 1:n) {
+ rrr <- 1:omega[iii]
+ dl.dphi[iii]<-dl.dphi[iii] - sum((rrr-2)/(1 + (rrr-2)*phi[iii]))
+ }
+ } else {
+ for(rrr in 1:maxomega) {
+ index <- rrr <= omega
+ dl.dphi[index]<-dl.dphi[index] - (rrr-2)/(1 + (rrr-2)*phi[index])
+ }
+ }
+ dprobs.deta <- probs[,-M] * (1 - probs[,-M]) # n x (M-1)
+ dphi.deta <- dtheta.deta(phi, .lphi, earg = .ephi )
+ ans <- cbind(dl.dprobs * dprobs.deta, dl.dphi * dphi.deta)
+ ans
+ }), list( .ephi = ephi, .lphi = lphi ))),
+ weight = eval(substitute(expression({
+ wz <- matrix(0, n, dimm(M))
+ loopOveri <- n < maxomega
+ if (loopOveri) {
+ for(iii in 1:n) {
+ rrr <- 1:omega[iii] # A vector
+ PHI <- phi[iii]
+ pYiM.ge.rrr <- 1 - pbetabin.ab(q=rrr-1, size=omega[iii],
+ shape1<-probs[iii,M]*(1/PHI-1),
+ shape2<-(1-probs[iii,M])*(1/PHI-1)) # A vector
+ denomM <- ((1-PHI)*probs[iii,M] + (rrr-1)*PHI)^2 # A vector
+ wz[iii,iam(M,M,M)] <- wz[iii,iam(M,M,M)] +
+ sum(probs[iii,M]^2 * pYiM.ge.rrr / denomM) -
+ sum(1 / (1 + (rrr-2)*PHI)^2)
+ for(jay in 1:(M-1)) {
+ denomj <- ((1-PHI)*probs[iii,jay] + (rrr-1)*PHI)^2
+ pYij.ge.rrr <- 1 - pbetabin.ab(q=rrr-1, size=omega[iii],
+ shape1<-probs[iii,jay]*(1/PHI-1),
+ shape2<-(1-probs[iii,jay])*(1/PHI-1))
+ wz[iii,iam(jay,jay,M)] <- wz[iii,iam(jay,jay,M)] +
+ sum(pYij.ge.rrr / denomj) +
+ sum(pYiM.ge.rrr / denomM)
+ for(kay in jay:(M-1)) if (kay > jay) {
+ wz[iii,iam(jay,kay,M)] <- wz[iii,iam(jay,kay,M)] +
+ sum(pYiM.ge.rrr / denomM)
+ }
+ wz[iii,iam(jay,M,M)] <- wz[iii,iam(jay,M,M)] +
+ sum(probs[iii,jay] * pYij.ge.rrr / denomj) -
+ sum(probs[iii,M] * pYiM.ge.rrr / denomM)
+ wz[iii,iam(M,M,M)] <- wz[iii,iam(M,M,M)] +
+ sum(probs[iii,jay]^2 * pYij.ge.rrr / denomj)
+ } # end of jay loop
+ } # end of iii loop
+ } else {
+ for(rrr in 1:maxomega) {
+ ind5 <- rrr <= omega
+ PHI <- phi[ind5]
+ pYiM.ge.rrr <- 1 - pbetabin.ab(q=rrr-1, size=omega[ind5],
+ shape1<-probs[ind5,M]*(1/PHI-1),
+ shape2<-(1-probs[ind5,M])*(1/PHI-1))
+ denomM <- ((1-PHI)*probs[ind5,M] + (rrr-1)*PHI)^2
+ wz[ind5,iam(M,M,M)] <- wz[ind5,iam(M,M,M)] +
+ probs[ind5,M]^2 * pYiM.ge.rrr / denomM -
+ 1 / (1 + (rrr-2)*PHI)^2
+ for(jay in 1:(M-1)) {
+ denomj <- ((1-PHI)*probs[ind5,jay] + (rrr-1)*PHI)^2
+ pYij.ge.rrr <- 1 - pbetabin.ab(q=rrr-1, size=omega[ind5],
+ shape1<-probs[ind5,jay]*(1/PHI-1),
+ shape2<-(1-probs[ind5,jay])*(1/PHI-1))
+ wz[ind5,iam(jay,jay,M)] <- wz[ind5,iam(jay,jay,M)] +
+ pYij.ge.rrr / denomj + pYiM.ge.rrr / denomM
+ for(kay in jay:(M-1)) if (kay > jay) {
+ wz[ind5,iam(jay,kay,M)] <- wz[ind5,iam(jay,kay,M)] +
+ pYiM.ge.rrr / denomM
+ }
+ wz[ind5,iam(jay,M,M)] <- wz[ind5,iam(jay,M,M)] +
+ probs[ind5,jay] * pYij.ge.rrr / denomj -
+ probs[ind5,M] * pYiM.ge.rrr / denomM
+ wz[ind5,iam(M,M,M)] <- wz[ind5,iam(M,M,M)] +
+ probs[ind5,jay]^2 * pYij.ge.rrr / denomj
+ } # end of jay loop
+ } # end of rrr loop
+ }
+
+ for(jay in 1:(M-1))
+ for(kay in jay:(M-1))
+ wz[,iam(jay,kay,M)] <- wz[,iam(jay,kay,M)] * (1-phi)^2
+ for(jay in 1:(M-1))
+ wz[,iam(jay,M,M)] <- wz[,iam(jay,M,M)] * (phi-1) / phi
+ wz[,iam(M,M,M)] <- wz[,iam(M,M,M)] / phi^2
+
+ d1Thetas.deta <- cbind(dprobs.deta, dphi.deta)
+ index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
+ wz <- wz * d1Thetas.deta[,index$row] * d1Thetas.deta[,index$col]
+ wz
+ }), list( .ephi = ephi, .lphi = lphi ))))
}
-dirmul.old = function(link="loge", earg=list(), init.alpha = 0.01,
- parallel= FALSE, zero=NULL)
+
+
+
+dirmul.old = function(link = "loge", earg = list(), init.alpha = 0.01,
+ parallel = FALSE, zero = NULL)
{
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
- if (!is.Numeric(init.alpha, posit=TRUE))
+ if (!is.Numeric(init.alpha, posit = TRUE))
stop("'init.alpha' must contain positive values only")
if (!is.list(earg)) earg = list()
new("vglmff",
blurb = c("Dirichlet-Multinomial distribution\n\n",
"Links: ",
- namesof("shape1", link, earg=earg), ", ..., ",
- namesof("shapeM", link, earg=earg), "\n\n",
+ namesof("shape1", link, earg = earg), ", ..., ",
+ namesof("shapeM", link, earg = earg), "\n\n",
"Posterior mean: (n_j + shape_j)/(2*sum(n_j) + sum(shape_j))\n"),
- constraints=eval(substitute(expression({
- constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints, int= TRUE)
+ constraints = eval(substitute(expression({
+ constraints = cm.vgam(matrix(1, M, 1), x, .parallel,
+ constraints, int = TRUE)
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .parallel=parallel, .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .parallel = parallel, .zero = zero ))),
+ initialize = eval(substitute(expression({
y = as.matrix(y)
M = ncol(y)
if (any(y != round(y )))
stop("all y values must be integer-valued")
- predictors.names = namesof(paste("shape", 1:M, sep=""), .link,
- earg=.earg, short=TRUE)
+ predictors.names = namesof(paste("shape", 1:M, sep = ""),
+ .link, earg = .earg, short = TRUE)
extra$n2 = rowSums(y) # Nb. don't multiply by 2
extra$y = y
if (!length(etastart)) {
- yy = if (is.numeric(.init.alpha))
- matrix(.init.alpha, n, M, byrow= TRUE) else
+ yy = if (is.numeric( .init.alpha))
+ matrix( .init.alpha, n, M, byrow= TRUE) else
matrix(runif(n*M), n, M)
- etastart = theta2eta(yy, .link, earg=.earg)
+ etastart = theta2eta(yy, .link, earg = .earg)
}
- }), list( .link=link, .earg=earg, .init.alpha=init.alpha ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- shape = eta2theta(eta, .link, earg=.earg)
+ }), list( .link = link, .earg = earg, .init.alpha=init.alpha ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ shape = eta2theta(eta, .link, earg = .earg)
M = if (is.matrix(eta)) ncol(eta) else 1
- sumshape = as.vector(shape %*% rep(1, len=M))
+ sumshape = as.vector(shape %*% rep(1, len = M))
(extra$y + shape) / (extra$n2 + sumshape)
- }, list( .link=link, .earg=earg ))),
+ }, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = rep(.link, length=M)
- names(misc$link) = paste("shape", 1:M, sep="")
+ misc$link = rep( .link, length = M)
+ names(misc$link) = paste("shape", 1:M, sep = "")
misc$earg = vector("list", M)
names(misc$earg) = names(misc$link)
for(ii in 1:M) misc$earg[[ii]] = .earg
misc$pooled.weight = pooled.weight
- }), list( .link=link, .earg=earg ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- shape = eta2theta(eta, .link, earg=.earg)
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ shape = eta2theta(eta, .link, earg = .earg)
M = if (is.matrix(eta)) ncol(eta) else 1
- sumshape = as.vector(shape %*% rep(1, len=M))
+ sumshape = as.vector(shape %*% rep(1, len = M))
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w*(lgamma(sumshape) - lgamma(extra$n2 + sumshape ))) +
sum(w * (lgamma(y + shape) - lgamma(shape )))
- }, list( .link=link, .earg=earg ))),
- vfamily=c("dirmul.old"),
- deriv=eval(substitute(expression({
- shape = eta2theta(eta, .link, earg=.earg)
- sumshape = as.vector(shape %*% rep(1, len=M))
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("dirmul.old"),
+ deriv = eval(substitute(expression({
+ shape = eta2theta(eta, .link, earg = .earg)
+ sumshape = as.vector(shape %*% rep(1, len = M))
dl.dsh = digamma(sumshape) - digamma(extra$n2 + sumshape) +
digamma(y + shape) - digamma(shape)
- dsh.deta = dtheta.deta(shape, .link, earg=.earg)
+ dsh.deta = dtheta.deta(shape, .link, earg = .earg)
w * dl.dsh * dsh.deta
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
index = iam(NA, NA, M, both = TRUE, diag = TRUE)
wz = matrix(trigamma(sumshape)-trigamma(extra$n2 + sumshape),
nrow=n, ncol=dimm(M))
@@ -648,13 +657,13 @@ dirmul.old = function(link="loge", earg=list(), init.alpha = 0.01,
pooled.weight = FALSE
wz
- }), list( .link=link, .earg=earg ))))
+ }), list( .link = link, .earg = earg ))))
}
-rdiric = function(n, shape, dimension=NULL) {
+rdiric = function(n, shape, dimension = NULL) {
if (!is.numeric(dimension))
dimension = length(shape)
shape = rep(shape, len=dimension)
@@ -668,69 +677,69 @@ rdiric = function(n, shape, dimension=NULL) {
}
- dirichlet = function(link="loge", earg=list(), parallel= FALSE, zero=NULL)
+ dirichlet = function(link = "loge", earg = list(), parallel = FALSE, zero = NULL)
{
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(earg)) earg = list()
new("vglmff",
blurb = c("Dirichlet distribution\n\n",
"Links: ",
- namesof("shapej", link, earg=earg), "\n\n",
+ namesof("shapej", link, earg = earg), "\n\n",
"Mean: shape_j/(1 + sum(shape_j)), j=1,..,ncol(y)"),
- constraints=eval(substitute(expression({
- constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints, int= TRUE)
+ constraints = eval(substitute(expression({
+ constraints = cm.vgam(matrix(1, M, 1), x, .parallel, constraints, int= TRUE)
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .parallel=parallel, .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .parallel = parallel, .zero = zero ))),
+ initialize = eval(substitute(expression({
y = as.matrix(y)
M = ncol(y)
if (any(y <= 0) || any(y>=1))
stop("all y values must be > 0 and < 1")
- predictors.names = namesof(paste("shape", 1:M, sep=""), .link,
- earg=.earg, short=TRUE)
+ predictors.names = namesof(paste("shape", 1:M, sep = ""), .link,
+ earg = .earg, short = TRUE)
if (!length(etastart)) {
yy = matrix(t(y) %*% rep(1/nrow(y), nrow(y)), nrow(y), M, byrow= TRUE)
- etastart = theta2eta(yy, .link, earg= .earg )
+ etastart = theta2eta(yy, .link, earg = .earg )
}
- }), list( .link=link, .earg=earg ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- shape = eta2theta(eta, .link, earg= .earg )
+ }), list( .link = link, .earg = earg ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ shape = eta2theta(eta, .link, earg = .earg )
M = if (is.matrix(eta)) ncol(eta) else 1
sumshape = rowSums(shape)
shape / sumshape
- }, list( .link=link, .earg=earg ))),
+ }, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
misc$link = c(shape= .link)
- temp.names = paste("shape", 1:M, sep="")
- misc$link = rep( .link, len=M)
+ temp.names = paste("shape", 1:M, sep = "")
+ misc$link = rep( .link, len = M)
names(misc$link) = temp.names
misc$earg = vector("list", M)
names(misc$earg) = names(misc$link)
for(ii in 1:M) misc$earg[[ii]] = .earg
misc$expected = TRUE
- }), list( .link=link, .earg=earg ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- shape = eta2theta(eta, .link, earg= .earg )
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ shape = eta2theta(eta, .link, earg = .earg )
M = if (is.matrix(eta)) ncol(eta) else 1
- sumshape = as.vector(shape %*% rep(1, len=M))
+ sumshape = as.vector(shape %*% rep(1, len = M))
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w * (lgamma(sumshape) - lgamma(shape) + (shape-1)*log(y )))
- }, list( .link=link, .earg=earg ))),
- vfamily=c("dirichlet"),
- deriv=eval(substitute(expression({
- shape = eta2theta(eta, .link, earg= .earg )
- sumshape = as.vector(shape %*% rep(1, len=M))
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("dirichlet"),
+ deriv = eval(substitute(expression({
+ shape = eta2theta(eta, .link, earg = .earg )
+ sumshape = as.vector(shape %*% rep(1, len = M))
dl.dsh = digamma(sumshape) - digamma(shape) + log(y)
- dsh.deta = dtheta.deta(shape, .link, earg= .earg )
+ dsh.deta = dtheta.deta(shape, .link, earg = .earg )
w * dl.dsh * dsh.deta
- }), list( .link=link, .earg=earg ))),
- weight=expression({
+ }), list( .link = link, .earg = earg ))),
+ weight = expression({
index = iam(NA, NA, M, both = TRUE, diag = TRUE)
wz = matrix(trigamma(sumshape), nrow=n, ncol=dimm(M))
wz[,1:M] = wz[,1:M] - trigamma(shape)
@@ -760,7 +769,7 @@ rdiric = function(n, shape, dimension=NULL) {
if (any(special <- Re(x) <= 1)) {
ans <- x
- ans[special] <- Inf # For Re(x)==1
+ ans[special] <- Inf # For Re(x) == 1
special3 <- Re(x) < 1
ans[special3] <- NA # For 0 < Re(x) < 1
@@ -805,7 +814,7 @@ rdiric = function(n, shape, dimension=NULL) {
{
- if (!is.Numeric(deriv.arg, allow=1, integer=TRUE))
+ if (!is.Numeric(deriv.arg, allow = 1, integer = TRUE))
stop("'deriv.arg' must be a single non-negative integer")
if (deriv.arg < 0 || deriv.arg > 2)
stop("'deriv.arg' must be 0, 1, or 2")
@@ -819,7 +828,7 @@ rdiric = function(n, shape, dimension=NULL) {
ans = rep(as.numeric(NA), length(x))
nn = sum(ok) # Effective length (excludes x < 0 and x = 1 values)
if (nn)
- ans[ok] = dotC(name="vzetawr", as.double(x[ok]), ans=double(nn),
+ ans[ok] = dotC(name = "vzetawr", as.double(x[ok]), ans=double(nn),
as.integer(deriv.arg), as.integer(nn))$ans
@@ -838,15 +847,15 @@ dzeta = function(x, p, log = FALSE)
stop("bad input for argument 'log'")
rm(log)
- if (!is.Numeric(p, posit=TRUE)) # || min(p) <= 1
+ if (!is.Numeric(p, posit = TRUE)) # || min(p) <= 1
stop("'p' must be numeric and > 0")
LLL = max(length(p), length(x))
- x = rep(x, len=LLL); p = rep(p, len=LLL)
+ x = rep(x, len = LLL); p = rep(p, len = LLL)
ox = !is.finite(x)
zero = ox | round(x) != x | x < 1
if (any(zero)) warning("non-integer x and/or x < 1 or NAs")
- ans = rep(if (log.arg) log(0) else 0, len=LLL)
+ ans = rep(if (log.arg) log(0) else 0, len = LLL)
if (any(!zero)) {
if (log.arg) {
ans[!zero] = (-p[!zero]-1)*log(x[!zero]) - log(zeta(p[!zero]+1))
@@ -858,10 +867,10 @@ dzeta = function(x, p, log = FALSE)
ans
}
- zetaff = function(link="loge", earg=list(), init.p=NULL)
+ zetaff = function(link = "loge", earg = list(), init.p = NULL)
{
- if (length(init.p) && !is.Numeric(init.p, positi=TRUE))
+ if (length(init.p) && !is.Numeric(init.p, positi = TRUE))
stop("argument 'init.p' must be > 0")
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -871,10 +880,10 @@ dzeta = function(x, p, log = FALSE)
blurb = c("Zeta distribution ",
"f(y) = 1/(y^(p+1) zeta(p+1)), p>0, y=1,2,..\n\n",
"Link: ",
- namesof("p", link, earg=earg), "\n\n",
+ namesof("p", link, earg = earg), "\n\n",
"Mean: zeta(p) / zeta(p+1), provided p>1\n",
"Variance: zeta(p-1) / zeta(p+1) - mean^2, provided p>2"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
y = as.numeric(y)
if (any(y < 1))
stop("all y values must be in 1,2,3,...")
@@ -883,48 +892,48 @@ dzeta = function(x, p, log = FALSE)
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("p", .link, earg=.earg, tag=FALSE)
+ predictors.names = namesof("p", .link, earg = .earg, tag = FALSE)
if (!length(etastart)) {
zetaff.Loglikfun = function(pp, y, x, w, extraargs) {
- sum(w * dzeta(x=y, p=pp, log=TRUE))
+ sum(w * dzeta(x=y, p=pp, log = TRUE))
}
p.grid = seq(0.1, 3.0, len=19)
pp.init = if (length( .init.p )) .init.p else
getMaxMin(p.grid, objfun=zetaff.Loglikfun, y=y, x=x, w=w)
pp.init = rep(pp.init, length=length(y))
if ( .link == "loglog") pp.init[pp.init <= 1] = 1.2
- etastart = theta2eta(pp.init, .link, earg=.earg)
+ etastart = theta2eta(pp.init, .link, earg = .earg)
}
- }), list( .link=link, .earg=earg, .init.p=init.p ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- ans = pp = eta2theta(eta, .link, earg=.earg)
- ans[pp>1] = zeta(pp[pp>1]) / zeta(pp[pp>1]+1)
- ans[pp<=1] = NA
+ }), list( .link = link, .earg = earg, .init.p=init.p ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ ans <- pp <- eta2theta(eta, .link, earg = .earg)
+ ans[pp > 1] <- zeta(pp[pp > 1]) / zeta(pp[pp > 1] + 1)
+ ans[pp <= 1] <- NA
ans
- }, list( .link=link, .earg=earg ))),
+ }, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = c(pp= .link)
- misc$earg = list(pp = .earg)
- }), list( .link=link, .earg=earg ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- pp = eta2theta(eta, .link, earg=.earg)
+ misc$link <- c(pp = .link)
+ misc$earg <- list(pp = .earg)
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ pp = eta2theta(eta, .link, earg = .earg)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dzeta(x=y, p=pp, log=TRUE))
+ sum(w * dzeta(x=y, p=pp, log = TRUE))
}
- }, list( .link=link, .earg=earg ))),
- vfamily=c("zetaff"),
- deriv=eval(substitute(expression({
- pp = eta2theta(eta, .link, earg=.earg)
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("zetaff"),
+ deriv = eval(substitute(expression({
+ pp = eta2theta(eta, .link, earg = .earg)
fred1 = zeta(pp+1)
fred2 = zeta(pp+1, deriv=1)
dl.dpp = -log(y) - fred2 / fred1
- dpp.deta = dtheta.deta(pp, .link, earg=.earg)
+ dpp.deta = dtheta.deta(pp, .link, earg = .earg)
w * dl.dpp * dpp.deta
- }), list( .link=link, .earg=earg ))),
- weight=expression({
+ }), list( .link = link, .earg = earg ))),
+ weight = expression({
ed2l.dpp2 = zeta(pp+1, deriv=2) / fred1 - (fred2/fred1)^2
wz = w * dpp.deta^2 * ed2l.dpp2
wz
@@ -935,17 +944,17 @@ dzeta = function(x, p, log = FALSE)
gharmonic = function(n, s=1, lognexponent=0) {
- if (!is.Numeric(n, integ=TRUE, posit=TRUE))
+ if (!is.Numeric(n, integ = TRUE, posit = TRUE))
stop("bad input for argument 'n'")
- if (!is.Numeric(lognexponent, allow=1))
+ if (!is.Numeric(lognexponent, allow = 1))
stop("bad input for argument 'lognexponent'")
if (length(n) == 1 && length(s) == 1) {
if (lognexponent != 0) sum(log(1:n)^lognexponent * (1:n)^(-s)) else
sum((1:n)^(-s))
} else {
LEN = max(length(n), length(s))
- n = rep(n, len=LEN)
- ans = s = rep(s, len=LEN)
+ n = rep(n, len = LEN)
+ ans = s = rep(s, len = LEN)
if (lognexponent != 0) {
for(ii in 1:LEN)
ans[ii] = sum(log(1:n[ii])^lognexponent * (1:n[ii])^(-s[ii]))
@@ -956,7 +965,7 @@ gharmonic = function(n, s=1, lognexponent=0) {
}
}
-dzipf = function(x, N, s, log=FALSE)
+dzipf = function(x, N, s, log = FALSE)
{
if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
@@ -964,12 +973,12 @@ dzipf = function(x, N, s, log=FALSE)
if (!is.Numeric(x))
stop("bad input for argument 'x'")
- if (!is.Numeric(N, integ=TRUE, posit=TRUE))
+ if (!is.Numeric(N, integ = TRUE, posit = TRUE))
stop("bad input for argument 'N'")
- if (!is.Numeric(s, posit=TRUE))
+ if (!is.Numeric(s, posit = TRUE))
stop("bad input for argument 's'")
nn = max(length(x), length(N), length(s))
- x = rep(x, len=nn); N = rep(N, len=nn); s = rep(s, len=nn);
+ x = rep(x, len = nn); N = rep(N, len = nn); s = rep(s, len = nn);
ox = !is.finite(x)
zero = ox | round(x) != x | x < 1 | x > N
ans = (if (log.arg) log(0) else 0) * x
@@ -988,13 +997,13 @@ dzipf = function(x, N, s, log=FALSE)
pzipf = function(q, N, s) {
if (!is.Numeric(q))
stop("bad input for argument 'q'")
- if (!is.Numeric(N, integ=TRUE, posit=TRUE))
+ if (!is.Numeric(N, integ = TRUE, posit = TRUE))
stop("bad input for argument 'N'")
- if (!is.Numeric(s, posit=TRUE))
+ if (!is.Numeric(s, posit = TRUE))
stop("bad input for argument 's'")
nn = max(length(q), length(N), length(s))
- q = rep(q, len=nn); N = rep(N, len=nn); s = rep(s, len=nn);
+ q = rep(q, len = nn); N = rep(N, len = nn); s = rep(s, len = nn);
oq = !is.finite(q)
zeroOR1 = oq | q < 1 | q >= N
floorq = floor(q)
@@ -1007,13 +1016,13 @@ pzipf = function(q, N, s) {
}
- zipf = function(N=NULL, link="loge", earg=list(), init.s=NULL)
+ zipf = function(N = NULL, link = "loge", earg = list(), init.s = NULL)
{
if (length(N) &&
- (!is.Numeric(N, positi=TRUE, integ=TRUE, allow=1) || N <= 1))
+ (!is.Numeric(N, positi = TRUE, integ = TRUE, allow = 1) || N <= 1))
stop("bad input for argument 'N'")
enteredN = length(N)
- if (length(init.s) && !is.Numeric(init.s, positi=TRUE))
+ if (length(init.s) && !is.Numeric(init.s, positi = TRUE))
stop("argument 'init.s' must be > 0")
if (mode(link) != "character" && mode(link) != "name")
@@ -1022,68 +1031,68 @@ pzipf = function(q, N, s) {
new("vglmff",
blurb = c("Zipf distribution f(y;s) = y^(-s) / sum((1:N)^(-s)),",
- " s>0, y=1,2,...,N", ifelse(enteredN, paste("=",N,sep=""), ""),
+ " s>0, y=1,2,...,N", ifelse(enteredN, paste(" = ",N,sep = ""), ""),
"\n\n",
"Link: ",
- namesof("s", link, earg=earg),
+ namesof("s", link, earg = earg),
"\n\n",
"Mean: gharmonic(N,s-1) / gharmonic(N,s)"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
y = as.numeric(y)
if (any(y != round(y )))
stop("y must be integer-valued")
- predictors.names = namesof("s", .link, earg= .earg, tag=FALSE)
+ predictors.names = namesof("s", .link, earg = .earg, tag = FALSE)
NN = .N
- if (!is.Numeric(NN, allow=1, posit=TRUE, integ=TRUE))
+ if (!is.Numeric(NN, allow = 1, posit = TRUE, integ = TRUE))
NN = max(y)
if (max(y) > NN)
stop("maximum of the response is greater than argument 'N'")
if (any(y < 1))
- stop("all response values must be in 1,2,3,...,N(=", NN,")")
+ stop("all response values must be in 1,2,3,...,N( = ", NN,")")
extra$N = NN
if (!length(etastart)) {
llfun = function(ss, y, N, w) {
- sum(w * dzipf(x=y, N=extra$N, s=ss, log=TRUE))
+ sum(w * dzipf(x=y, N=extra$N, s=ss, log = TRUE))
}
ss.init = if (length( .init.s )) .init.s else
getInitVals(gvals=seq(0.1, 3.0, len=19), llfun=llfun,
y=y, N=extra$N, w=w)
ss.init = rep(ss.init, length=length(y))
if ( .link == "loglog") ss.init[ss.init <= 1] = 1.2
- etastart = theta2eta(ss.init, .link, earg= .earg)
+ etastart = theta2eta(ss.init, .link, earg = .earg)
}
- }), list( .link=link, .earg=earg, .init.s=init.s, .N=N ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- ss = eta2theta(eta, .link, earg= .earg)
+ }), list( .link = link, .earg = earg, .init.s = init.s, .N = N ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ ss = eta2theta(eta, .link, earg = .earg)
gharmonic(extra$N, s=ss - 1) / gharmonic(extra$N, s=ss)
- }, list( .link=link, .earg=earg ))),
+ }, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
misc$expected = FALSE
- misc$link = c(s= .link)
- misc$earg = list(s= .earg )
+ misc$link = c(s = .link)
+ misc$earg = list(s = .earg )
misc$N = extra$N
- }), list( .link=link, .earg=earg ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- ss = eta2theta(eta, .link, earg= .earg)
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ ss = eta2theta(eta, .link, earg = .earg)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dzipf(x=y, N=extra$N, s=ss, log=TRUE))
+ sum(w * dzipf(x=y, N=extra$N, s=ss, log = TRUE))
}
- }, list( .link=link, .earg=earg ))),
- vfamily=c("zipf"),
- deriv=eval(substitute(expression({
- ss = eta2theta(eta, .link, earg= .earg)
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("zipf"),
+ deriv = eval(substitute(expression({
+ ss = eta2theta(eta, .link, earg = .earg)
fred1 = gharmonic(extra$N, ss)
fred2 = gharmonic(extra$N, ss, lognexp=1)
dl.dss = -log(y) + fred2 / fred1
- dss.deta = dtheta.deta(ss, .link, earg= .earg)
- d2ss.deta2 = d2theta.deta2(ss, .link, earg= .earg)
+ dss.deta = dtheta.deta(ss, .link, earg = .earg)
+ d2ss.deta2 = d2theta.deta2(ss, .link, earg = .earg)
w * dl.dss * dss.deta
- }), list( .link=link, .earg=earg ))),
- weight=expression({
+ }), list( .link = link, .earg = earg ))),
+ weight = expression({
d2l.dss = gharmonic(extra$N, ss, lognexp=2) / fred1 - (fred2/fred1)^2
wz = w * (dss.deta^2 * d2l.dss - d2ss.deta2 * dl.dss)
wz
@@ -1092,55 +1101,55 @@ pzipf = function(q, N, s) {
-cauchy.control <- function(save.weight=TRUE, ...)
+cauchy.control <- function(save.weight = TRUE, ...)
{
- list(save.weight=save.weight)
+ list(save.weight = save.weight)
}
- cauchy = function(llocation="identity", lscale="loge",
- elocation=list(), escale=list(),
- ilocation=NULL, iscale=NULL,
+ cauchy = function(llocation = "identity", lscale = "loge",
+ elocation = list(), escale = list(),
+ ilocation = NULL, iscale = NULL,
iprobs = seq(0.2, 0.8, by=0.2),
- method.init=1, nsimEIM=NULL, zero=2)
+ method.init = 1, nsimEIM = NULL, zero = 2)
{
if (mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
method.init > 3)
stop("'method.init' must be 1 or 2 or 3")
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (length(nsimEIM) &&
- (!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 50))
- stop("'nsimEIM' should be an integer greater than 50")
- if (length(iscale) && !is.Numeric(iscale, posit=TRUE))
+ (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 50))
+ stop("argument 'nsimEIM' should be an integer greater than 50")
+ if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
stop("bad input for argument 'iscale'")
- if (!is.Numeric(iprobs, posit=TRUE) || max(iprobs) >= 1)
+ if (!is.Numeric(iprobs, posit = TRUE) || max(iprobs) >= 1)
stop("bad input for argument 'iprobs'")
new("vglmff",
blurb = c("Two parameter Cauchy distribution (location & scale unknown)\n\n",
"Link: ",
- namesof("location", llocation, earg=elocation), "\n",
- namesof("scale", lscale, earg=escale), "\n\n",
+ namesof("location", llocation, earg = elocation), "\n",
+ namesof("scale", lscale, earg = escale), "\n\n",
"Mean: NA\n",
"Variance: NA"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
predictors.names = c(
- namesof("location", .llocation, earg=.elocation, tag=FALSE),
- namesof("scale", .lscale, earg=.escale, tag=FALSE))
+ namesof("location", .llocation, earg = .elocation, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale, tag = FALSE))
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
if (!length(etastart)) {
- loc.init = if (length(.ilocation)) .ilocation else {
+ loc.init = if (length( .ilocation)) .ilocation else {
if ( .method.init == 2) median(rep(y,w)) else
if ( .method.init == 3) y else {
cauchy2.Loglikfun = function(loc, y, x, w, extraargs) {
@@ -1150,19 +1159,19 @@ cauchy.control <- function(save.weight=TRUE, ...)
btry = (qy - loc) / ztry
scal = median(btry, na.rm = TRUE)
if (scal <= 0) scal = 0.1
- sum(w * dcauchy(x=y, loc=loc, scale=scal, log=TRUE))
+ sum(w * dcauchy(x=y, loc=loc, scale=scal, log = TRUE))
}
loc.grid = c(quantile(y, probs=seq(0.1, 0.9, by=0.05)))
try.this = getMaxMin(loc.grid, objfun=cauchy2.Loglikfun,
y=y, x=x, w=w)
- try.this = rep(c(try.this), len=n)
+ try.this = rep(c(try.this), len = n)
try.this
}
}
- loc.init = rep(c(loc.init), len=n)
+ loc.init = rep(c(loc.init), len = n)
- sca.init = if (length(.iscale)) .iscale else {
+ sca.init = if (length( .iscale)) .iscale else {
iprobs = .iprobs
qy = quantile(rep(y,w), probs=iprobs)
ztry = tan(pi*(iprobs-0.5))
@@ -1172,52 +1181,52 @@ cauchy.control <- function(save.weight=TRUE, ...)
sca.init
}
- sca.init = rep(c(sca.init), len=n)
- if (.llocation == "loge") loc.init = abs(loc.init)+0.01
- etastart = cbind(theta2eta(loc.init, .llocation, earg=.elocation),
- theta2eta(sca.init, .lscale, earg=.escale))
- }
- }), list( .ilocation=ilocation, .elocation=elocation, .llocation=llocation,
- .iscale=iscale, .escale=escale, .lscale=lscale,
- .iprobs=iprobs, .method.init=method.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- eta2theta(eta[,1], .llocation, earg= .elocation)
- }, list( .llocation=llocation,
- .elocation=elocation ))),
+ sca.init = rep(c(sca.init), len = n)
+ if ( .llocation == "loge") loc.init = abs(loc.init)+0.01
+ etastart = cbind(theta2eta(loc.init, .llocation, earg = .elocation),
+ theta2eta(sca.init, .lscale, earg = .escale))
+ }
+ }), list( .ilocation = ilocation, .elocation = elocation, .llocation = llocation,
+ .iscale = iscale, .escale = escale, .lscale = lscale,
+ .iprobs=iprobs, .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[,1], .llocation, earg = .elocation)
+ }, list( .llocation = llocation,
+ .elocation = elocation ))),
last = eval(substitute(expression({
misc$expected = TRUE
- misc$link = c("location"= .llocation, "scale"=.lscale)
- misc$earg = list(location= .elocation, scale= .escale)
+ misc$link = c("location" = .llocation, "scale" =.lscale)
+ misc$earg = list("location" = .elocation, "scale" = .escale)
misc$method.init = .method.init
- }), list( .escale=escale, .elocation=elocation,
- .method.init=method.init,
- .llocation=llocation, .lscale=lscale ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- location = eta2theta(eta[,1], .llocation, earg=.elocation)
- myscale = eta2theta(eta[,2], .lscale, earg=.escale)
+ }), list( .escale = escale, .elocation = elocation,
+ .method.init = method.init,
+ .llocation = llocation, .lscale = lscale ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ location = eta2theta(eta[,1], .llocation, earg = .elocation)
+ myscale = eta2theta(eta[,2], .lscale, earg = .escale)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dcauchy(x=y, loc=location, sc=myscale, log=TRUE))
- }
- }, list( .escale=escale, .lscale=lscale,
- .elocation=elocation, .llocation=llocation ))),
- vfamily=c("cauchy"),
- deriv=eval(substitute(expression({
- location = eta2theta(eta[,1], .llocation, earg=.elocation)
- myscale = eta2theta(eta[,2], .lscale, earg=.escale)
- dlocation.deta = dtheta.deta(location, .llocation, earg=.elocation)
- dscale.deta = dtheta.deta(myscale, .lscale, earg=.escale)
+ sum(w * dcauchy(x=y, loc=location, sc=myscale, log = TRUE))
+ }
+ }, list( .escale = escale, .lscale = lscale,
+ .elocation = elocation, .llocation = llocation ))),
+ vfamily = c("cauchy"),
+ deriv = eval(substitute(expression({
+ location = eta2theta(eta[,1], .llocation, earg = .elocation)
+ myscale = eta2theta(eta[,2], .lscale, earg = .escale)
+ dlocation.deta = dtheta.deta(location, .llocation, earg = .elocation)
+ dscale.deta = dtheta.deta(myscale, .lscale, earg = .escale)
Z = (y-location) / myscale
dl.dlocation = 2 * Z / ((1 + Z^2) * myscale)
dl.dscale = (Z^2 - 1) / ((1 + Z^2) * myscale)
w * cbind(dl.dlocation * dlocation.deta,
dl.dscale * dscale.deta)
- }), list( .escale=escale, .lscale=lscale,
- .elocation=elocation, .llocation=llocation ))),
- weight=eval(substitute(expression({
+ }), list( .escale = escale, .lscale = lscale,
+ .elocation = elocation, .llocation = llocation ))),
+ weight = eval(substitute(expression({
run.varcov = 0
- ind1 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
+ ind1 = iam(NA, NA, M=M, both = TRUE, diag = TRUE)
dthetas.detas = cbind(dlocation.deta, dscale.deta)
if (length( .nsimEIM )) {
for(ii in 1:( .nsimEIM )) {
@@ -1232,7 +1241,7 @@ cauchy.control <- function(save.weight=TRUE, ...)
}
wz = if (intercept.only)
matrix(colMeans(run.varcov),
- n, ncol(run.varcov), byrow=TRUE) else run.varcov
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
wz = w * matrix(wz, n, dimm(M))
@@ -1243,8 +1252,8 @@ cauchy.control <- function(save.weight=TRUE, ...)
}
wz
- }), list( .escale=escale, .lscale=lscale, .nsimEIM=nsimEIM,
- .elocation=elocation, .llocation=llocation ))))
+ }), list( .escale = escale, .lscale = lscale, .nsimEIM = nsimEIM,
+ .elocation = elocation, .llocation = llocation ))))
}
@@ -1253,14 +1262,14 @@ cauchy.control <- function(save.weight=TRUE, ...)
- cauchy1 = function(scale.arg=1, llocation="identity",
- elocation=list(),
- ilocation=NULL, method.init=1)
+ cauchy1 = function(scale.arg=1, llocation = "identity",
+ elocation = list(),
+ ilocation = NULL, method.init = 1)
{
if (mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
- if (!is.Numeric(scale.arg, posit=TRUE)) stop("bad input for 'scale.arg'")
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ if (!is.Numeric(scale.arg, posit = TRUE)) stop("bad input for 'scale.arg'")
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
method.init > 3)
stop("'method.init' must be 1 or 2 or 3")
if (!is.list(elocation)) elocation = list()
@@ -1269,71 +1278,71 @@ cauchy.control <- function(save.weight=TRUE, ...)
blurb = c("One-parameter Cauchy distribution ",
"(location unknown, scale known)\n\n",
"Link: ",
- namesof("location", llocation, earg=elocation), "\n\n",
+ namesof("location", llocation, earg = elocation), "\n\n",
"Mean: NA\n",
"Variance: NA"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
predictors.names = namesof("location", .llocation,
- earg=.elocation, tag=FALSE)
+ earg = .elocation, tag = FALSE)
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
if (!length(etastart)) {
- loc.init = if (length(.ilocation)) .ilocation else {
+ loc.init = if (length( .ilocation)) .ilocation else {
if ( .method.init == 2) median(rep(y,w)) else
if ( .method.init == 3) y else {
cauchy1.Loglikfun = function(loc, y, x, w, extraargs) {
scal = extraargs
- sum(w * dcauchy(x=y, loc=loc, scale=scal, log=TRUE))
+ sum(w * dcauchy(x=y, loc=loc, scale=scal, log = TRUE))
}
loc.grid = quantile(y, probs=seq(0.1, 0.9, by=0.05))
try.this = getMaxMin(loc.grid, objfun=cauchy1.Loglikfun,
y=y, x=x, w=w, extraargs= .scale.arg)
- try.this = rep(try.this, len=n)
+ try.this = rep(try.this, len = n)
try.this
}
}
- loc.init = rep(loc.init, len=n)
- if (.llocation == "loge") loc.init = abs(loc.init)+0.01
- etastart = theta2eta(loc.init, .llocation, earg=.elocation)
- }
- }), list( .scale.arg=scale.arg, .ilocation=ilocation,
- .elocation=elocation, .llocation=llocation,
- .method.init=method.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- eta2theta(eta, .llocation, earg= .elocation)
- }, list( .llocation=llocation,
- .elocation=elocation ))),
+ loc.init = rep(loc.init, len = n)
+ if ( .llocation == "loge") loc.init = abs(loc.init)+0.01
+ etastart = theta2eta(loc.init, .llocation, earg = .elocation)
+ }
+ }), list( .scale.arg=scale.arg, .ilocation = ilocation,
+ .elocation = elocation, .llocation = llocation,
+ .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta, .llocation, earg = .elocation)
+ }, list( .llocation = llocation,
+ .elocation = elocation ))),
last = eval(substitute(expression({
misc$expected = TRUE
- misc$link = c("location"= .llocation)
- misc$earg = list(location= .elocation )
+ misc$link = c("location" = .llocation)
+ misc$earg = list("location" = .elocation )
misc$scale.arg = .scale.arg
- }), list( .scale.arg=scale.arg, .elocation=elocation,
- .llocation=llocation ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- location = eta2theta(eta, .llocation, earg=.elocation)
+ }), list( .scale.arg=scale.arg, .elocation = elocation,
+ .llocation = llocation ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ location = eta2theta(eta, .llocation, earg = .elocation)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dcauchy(x=y, loc=location, scale= .scale.arg, log=TRUE))
+ sum(w * dcauchy(x=y, loc=location, scale= .scale.arg, log = TRUE))
}
- }, list( .scale.arg=scale.arg, .elocation=elocation,
- .llocation=llocation ))),
- vfamily=c("cauchy1"),
- deriv=eval(substitute(expression({
- location = eta2theta(eta, .llocation, earg=.elocation)
+ }, list( .scale.arg=scale.arg, .elocation = elocation,
+ .llocation = llocation ))),
+ vfamily = c("cauchy1"),
+ deriv = eval(substitute(expression({
+ location = eta2theta(eta, .llocation, earg = .elocation)
temp = (y-location)/.scale.arg
dl.dlocation = 2 * temp / ((1 + temp^2) * .scale.arg)
- dlocation.deta = dtheta.deta(location, .llocation, earg=.elocation)
+ dlocation.deta = dtheta.deta(location, .llocation, earg = .elocation)
w * dl.dlocation * dlocation.deta
- }), list( .scale.arg=scale.arg, .elocation=elocation,
- .llocation=llocation ))),
- weight=eval(substitute(expression({
- wz = w * dlocation.deta^2 / (.scale.arg^2 * 2)
+ }), list( .scale.arg=scale.arg, .elocation = elocation,
+ .llocation = llocation ))),
+ weight = eval(substitute(expression({
+ wz = w * dlocation.deta^2 / ( .scale.arg^2 * 2)
wz
- }), list( .scale.arg=scale.arg, .elocation=elocation,
- .llocation=llocation ))))
+ }), list( .scale.arg=scale.arg, .elocation = elocation,
+ .llocation = llocation ))))
}
@@ -1341,15 +1350,15 @@ cauchy.control <- function(save.weight=TRUE, ...)
- logistic1 = function(llocation="identity",
- elocation=list(),
- scale.arg=1, method.init=1)
+ logistic1 = function(llocation = "identity",
+ elocation = list(),
+ scale.arg=1, method.init = 1)
{
if (mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
- if (!is.Numeric(scale.arg, allow=1, posit=TRUE))
+ if (!is.Numeric(scale.arg, allow = 1, posit = TRUE))
stop("'scale.arg' must be a single positive number")
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
method.init > 2)
stop("'method.init' must be 1 or 2")
if (!is.list(elocation)) elocation = list()
@@ -1358,53 +1367,53 @@ cauchy.control <- function(save.weight=TRUE, ...)
blurb = c("One-parameter logistic distribution ",
"(location unknown, scale known)\n\n",
"Link: ",
- namesof("location", llocation, earg=elocation), "\n\n",
+ namesof("location", llocation, earg = elocation), "\n\n",
"Mean: location", "\n",
"Variance: (pi*scale)^2 / 3"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
predictors.names = namesof("location", .llocation,
- earg= .elocation, tag=FALSE)
+ earg = .elocation, tag = FALSE)
if (!length(etastart)) {
location.init = if ( .method.init == 1) y else median(rep(y, w))
- location.init = rep(location.init, len=n)
- if (.llocation == "loge") location.init = abs(location.init) + 0.001
- etastart = theta2eta(location.init, .llocation, earg= .elocation)
- }
- }), list( .method.init=method.init, .llocation=llocation,
- .elocation=elocation ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- eta2theta(eta, .llocation, earg= .elocation)
- }, list( .llocation=llocation,
- .elocation=elocation ))),
+ location.init = rep(location.init, len = n)
+ if ( .llocation == "loge") location.init = abs(location.init) + 0.001
+ etastart = theta2eta(location.init, .llocation, earg = .elocation)
+ }
+ }), list( .method.init = method.init, .llocation = llocation,
+ .elocation = elocation ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta, .llocation, earg = .elocation)
+ }, list( .llocation = llocation,
+ .elocation = elocation ))),
last = eval(substitute(expression({
misc$expected = TRUE
- misc$link = c(location= .llocation)
- misc$earg = list(location= .elocation )
+ misc$link = c(location = .llocation)
+ misc$earg = list(location = .elocation )
misc$scale.arg = .scale.arg
- }), list( .llocation=llocation,
- .elocation=elocation, .scale.arg=scale.arg ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- location = eta2theta(eta, .llocation, earg= .elocation)
+ }), list( .llocation = llocation,
+ .elocation = elocation, .scale.arg=scale.arg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ location = eta2theta(eta, .llocation, earg = .elocation)
zedd = (y-location)/.scale.arg
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(w * dlogis(x=y, location = location,
scale = .scale.arg, log = TRUE))
}
- }, list( .llocation=llocation,
- .elocation=elocation, .scale.arg=scale.arg ))),
- vfamily=c("logistic1"),
- deriv=eval(substitute(expression({
- location = eta2theta(eta, .llocation, earg= .elocation)
+ }, list( .llocation = llocation,
+ .elocation = elocation, .scale.arg=scale.arg ))),
+ vfamily = c("logistic1"),
+ deriv = eval(substitute(expression({
+ location = eta2theta(eta, .llocation, earg = .elocation)
ezedd = exp(-(y-location)/.scale.arg)
dl.dlocation = (1 - ezedd) / ((1 + ezedd) * .scale.arg)
- dlocation.deta = dtheta.deta(location, .llocation, earg= .elocation)
+ dlocation.deta = dtheta.deta(location, .llocation, earg = .elocation)
w * dl.dlocation * dlocation.deta
- }), list( .llocation=llocation,
- .elocation=elocation, .scale.arg=scale.arg ))),
- weight=eval(substitute(expression({
- wz = w * dlocation.deta^2 / (.scale.arg^2 * 3)
+ }), list( .llocation = llocation,
+ .elocation = elocation, .scale.arg=scale.arg ))),
+ weight = eval(substitute(expression({
+ wz = w * dlocation.deta^2 / ( .scale.arg^2 * 3)
wz
}), list( .scale.arg=scale.arg ))))
}
@@ -1412,12 +1421,12 @@ cauchy.control <- function(save.weight=TRUE, ...)
- erlang = function(shape.arg, link="loge", earg=list(), method.init=1)
+ erlang = function(shape.arg, link = "loge", earg = list(), method.init = 1)
{
- if (!is.Numeric(shape.arg, allow=1, integer=TRUE, positi=TRUE))
+ if (!is.Numeric(shape.arg, allow = 1, integer = TRUE, positi = TRUE))
stop("'shape' must be a positive integer")
- if (!is.Numeric(method.init, allow=1, integer=TRUE, positi=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integer = TRUE, positi = TRUE) ||
method.init > 2)
stop("'method.init' must be 1 or 2")
@@ -1427,74 +1436,75 @@ cauchy.control <- function(save.weight=TRUE, ...)
new("vglmff",
blurb = c("Erlang distribution\n\n",
- "Link: ", namesof("scale", link, earg=earg), "\n", "\n",
+ "Link: ", namesof("scale", link, earg = earg), "\n", "\n",
"Mean: shape * scale", "\n",
"Variance: shape * scale^2"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(y <- as.matrix(y)) > 1)
stop("erlang cannot handle matrix responses yet")
if (any(y < 0))
stop("all y values must be >= 0")
- predictors.names = namesof("scale", .link, earg=.earg, tag=FALSE)
+ predictors.names =
+ namesof("scale", .link, earg = .earg, tag = FALSE)
if (!length(etastart)) {
- if (.method.init==1)
+ if ( .method.init == 1)
sc.init = y / .shape.arg
- if (.method.init==2) {
+ if ( .method.init==2) {
sc.init = median(y) / .shape.arg
- sc.init = rep(sc.init, length=n)
+ sc.init = rep(sc.init, length = n)
}
- etastart = theta2eta(sc.init, .link, earg=.earg)
+ etastart = theta2eta(sc.init, .link, earg = .earg)
}
- }), list( .link=link, .earg=earg,
- .shape.arg=shape.arg, .method.init=method.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- sc = eta2theta(eta, .link, earg=.earg)
+ }), list( .link = link, .earg = earg,
+ .shape.arg=shape.arg, .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ sc = eta2theta(eta, .link, earg = .earg)
.shape.arg * sc
- }, list( .link=link, .earg=earg, .shape.arg=shape.arg ))),
+ }, list( .link = link, .earg = earg, .shape.arg=shape.arg ))),
last = eval(substitute(expression({
misc$expected = TRUE
- misc$link = c(scale= .link)
- misc$earg = list(scale= .earg )
+ misc$link = c(scale = .link)
+ misc$earg = list(scale = .earg )
misc$shape.arg = .shape.arg
- }), list( .link=link, .earg=earg, .shape.arg=shape.arg ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- sc = eta2theta(eta, .link, earg=.earg)
+ }), list( .link = link, .earg = earg, .shape.arg=shape.arg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ sc = eta2theta(eta, .link, earg = .earg)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * ((.shape.arg - 1) * log(y) - y / sc - .shape.arg * log(sc) -
+ sum(w * (( .shape.arg - 1) * log(y) - y / sc - .shape.arg * log(sc) -
lgamma( .shape.arg )))
}
- }, list( .link=link, .earg=earg, .shape.arg=shape.arg ))),
- vfamily=c("erlang"),
- deriv=eval(substitute(expression({
- sc = eta2theta(eta, .link, earg=.earg)
+ }, list( .link = link, .earg = earg, .shape.arg=shape.arg ))),
+ vfamily = c("erlang"),
+ deriv = eval(substitute(expression({
+ sc = eta2theta(eta, .link, earg = .earg)
dl.dsc = (y / sc - .shape.arg) / sc
- dsc.deta = dtheta.deta(sc, .link, earg=.earg)
+ dsc.deta = dtheta.deta(sc, .link, earg = .earg)
w * dl.dsc * dsc.deta
- }), list( .link=link, .earg=earg, .shape.arg=shape.arg ))),
- weight=eval(substitute(expression({
+ }), list( .link = link, .earg = earg, .shape.arg=shape.arg ))),
+ weight = eval(substitute(expression({
ed2l.dsc2 = .shape.arg / sc^2
wz = w * dsc.deta^2 * ed2l.dsc2
wz
- }), list( .earg=earg, .shape.arg=shape.arg ))))
+ }), list( .earg = earg, .shape.arg=shape.arg ))))
}
-dbort = function(x, Qsize=1, a=0.5, log=FALSE) {
+dbort = function(x, Qsize=1, a=0.5, log = FALSE) {
if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
rm(log)
if (!is.Numeric(x)) stop("bad input for argument 'x'")
- if (!is.Numeric(Qsize, allow=1, integ=TRUE, posit=TRUE))
+ if (!is.Numeric(Qsize, allow = 1, integ = TRUE, posit = TRUE))
stop("bad input for argument 'Qsize'")
- if (!is.Numeric(a, posit=TRUE) || max(a) >= 1)
+ if (!is.Numeric(a, posit = TRUE) || max(a) >= 1)
stop("bad input for argument 'a'")
N = max(length(x), length(Qsize), length(a))
x = rep(x, len=N); Qsize = rep(Qsize, len=N); a = rep(a, len=N);
@@ -1512,11 +1522,11 @@ dbort = function(x, Qsize=1, a=0.5, log=FALSE) {
rbort = function(n, Qsize=1, a=0.5) {
- if (!is.Numeric(n, integ=TRUE, posit=TRUE, allow=1))
+ if (!is.Numeric(n, integ = TRUE, posit = TRUE, allow = 1))
stop("bad input for argument 'n'")
- if (!is.Numeric(Qsize, allow=1, integ=TRUE, posit=TRUE))
+ if (!is.Numeric(Qsize, allow = 1, integ = TRUE, posit = TRUE))
stop("bad input for argument 'Qsize'")
- if (!is.Numeric(a, posit=TRUE) || max(a) >= 1)
+ if (!is.Numeric(a, posit = TRUE) || max(a) >= 1)
stop("bad input for argument 'a'")
N = n
qsize = rep(Qsize, len=N); a = rep(a, len=N)
@@ -1533,25 +1543,25 @@ rbort = function(n, Qsize=1, a=0.5) {
}
- borel.tanner = function(Qsize=1, link="logit", earg=list(), method.init=1)
+ borel.tanner = function(Qsize=1, link = "logit", earg = list(), method.init = 1)
{
- if (!is.Numeric(Qsize, allow=1, integ=TRUE, posit=TRUE))
+ if (!is.Numeric(Qsize, allow = 1, integ = TRUE, posit = TRUE))
stop("bad input for argument 'Qsize'")
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
method.init > 4)
stop("'method.init' must be 1 or 2, 3 or 4")
new("vglmff",
blurb = c("Borel-Tanner distribution\n\n",
"Link: ",
- namesof("a", link, earg=earg), "\n\n",
+ namesof("a", link, earg = earg), "\n\n",
"Mean: Qsize/(1-a)",
"\n",
"Variance: Qsize*a / (1-a)^3"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
if (any(y < .Qsize))
@@ -1559,44 +1569,44 @@ rbort = function(n, Qsize=1, a=0.5) {
if (any(y != round(y)))
warning("response should be integer-valued")
- predictors.names = namesof("a", .link, earg=.earg, tag=FALSE)
+ predictors.names = namesof("a", .link, earg = .earg, tag = FALSE)
if (!length(etastart)) {
a.init = switch(as.character( .method.init ),
- "1"= 1 - .Qsize / (y+1/8),
- "2"= rep(1 - .Qsize / weighted.mean(y,w), len=n),
- "3"= rep(1 - .Qsize / median(y), len=n),
- "4"= rep(0.5, len=n))
- etastart = theta2eta(a.init, .link, earg=.earg)
- }
- }), list( .link=link, .earg=earg, .Qsize=Qsize,
- .method.init=method.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- a = eta2theta(eta, .link, earg=.earg)
+ "1" = 1 - .Qsize / (y+1/8),
+ "2" = rep(1 - .Qsize / weighted.mean(y,w), len = n),
+ "3" = rep(1 - .Qsize / median(y), len = n),
+ "4" = rep(0.5, len = n))
+ etastart = theta2eta(a.init, .link, earg = .earg)
+ }
+ }), list( .link = link, .earg = earg, .Qsize=Qsize,
+ .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ a = eta2theta(eta, .link, earg = .earg)
.Qsize / (1 - a)
- }, list( .link=link, .earg=earg, .Qsize=Qsize ))),
+ }, list( .link = link, .earg = earg, .Qsize=Qsize ))),
last = eval(substitute(expression({
misc$expected = TRUE
- misc$link = c(a= .link)
- misc$earg = list(a= .earg )
+ misc$link = c(a = .link)
+ misc$earg = list(a = .earg )
misc$Qsize = .Qsize
- }), list( .link=link, .earg=earg, .Qsize=Qsize ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- aa = eta2theta(eta, .link, earg=.earg)
+ }), list( .link = link, .earg = earg, .Qsize=Qsize ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa = eta2theta(eta, .link, earg = .earg)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dbort(x=y, Qsize= .Qsize, a=aa, log=TRUE))
+ sum(w * dbort(x=y, Qsize= .Qsize, a=aa, log = TRUE))
}
- }, list( .link=link, .earg=earg, .Qsize=Qsize ))),
- vfamily=c("borel.tanner"),
- deriv=eval(substitute(expression({
- a = eta2theta(eta, .link, earg=.earg)
+ }, list( .link = link, .earg = earg, .Qsize=Qsize ))),
+ vfamily = c("borel.tanner"),
+ deriv = eval(substitute(expression({
+ a = eta2theta(eta, .link, earg = .earg)
dl.da = (y- .Qsize)/a - y
- da.deta = dtheta.deta(a, .link, earg=.earg)
+ da.deta = dtheta.deta(a, .link, earg = .earg)
w * dl.da * da.deta
- }), list( .link=link, .earg=earg, .Qsize=Qsize ))),
- weight=eval(substitute(expression({
+ }), list( .link = link, .earg = earg, .Qsize=Qsize ))),
+ weight = eval(substitute(expression({
ed2l.da2 = .Qsize / (a*(1-a))
wz = w * da.deta^2 * ed2l.da2
wz
@@ -1611,7 +1621,7 @@ dfelix = function(x, a=0.25, log = FALSE) {
rm(log)
if (!is.Numeric(x)) stop("bad input for argument 'x'")
- if (!is.Numeric(a, posit=TRUE)) stop("bad input for argument 'a'")
+ if (!is.Numeric(a, posit = TRUE)) stop("bad input for argument 'a'")
N = max(length(x), length(a))
x = rep(x, len=N); a = rep(a, len=N);
@@ -1627,192 +1637,70 @@ dfelix = function(x, a=0.25, log = FALSE) {
- felix = function(link="elogit",
+ felix = function(link = "elogit",
earg=if (link == "elogit") list(min=0, max=0.5) else list(),
- method.init=1)
+ method.init = 1)
{
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
method.init > 4)
stop("'method.init' must be 1 or 2, 3 or 4")
new("vglmff",
blurb = c("Felix distribution\n\n",
"Link: ",
- namesof("a", link, earg=earg), "\n\n",
+ namesof("a", link, earg = earg), "\n\n",
"Mean: 1/(1-2*a)"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
if (any(y < 1) || any((y+1)/2 != round((y+1)/2)))
warning("response should be positive, odd and integer-valued")
- predictors.names = namesof("a", .link, earg=.earg, tag=FALSE)
+ predictors.names = namesof("a", .link, earg = .earg, tag = FALSE)
if (!length(etastart)) {
wymean = weighted.mean(y,w)
a.init = switch(as.character( .method.init ),
- "1"= (y-1+1/8) / (2*(y+1/8)+1/8),
- "2"= rep((wymean-1+1/8) / (2*(wymean+1/8)+1/8), len=n),
- "3"= rep((median(y)-1+1/8) / (2*(median(y)+1/8)+1/8), len=n),
- "4"= rep(0.25, len=n))
- etastart = theta2eta(a.init, .link, earg=.earg)
- }
- }), list( .link=link, .earg=earg,
- .method.init=method.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- a = eta2theta(eta, .link, earg=.earg)
+ "1" = (y-1+1/8) / (2*(y+1/8)+1/8),
+ "2" = rep((wymean-1+1/8) / (2*(wymean+1/8)+1/8), len = n),
+ "3" = rep((median(y)-1+1/8) / (2*(median(y)+1/8)+1/8), len = n),
+ "4" = rep(0.25, len = n))
+ etastart = theta2eta(a.init, .link, earg = .earg)
+ }
+ }), list( .link = link, .earg = earg,
+ .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ a = eta2theta(eta, .link, earg = .earg)
1 / (1 - 2*a)
- }, list( .link=link, .earg=earg ))),
+ }, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
misc$expected = TRUE
- misc$link = c(a= .link)
- misc$earg = list(a= .earg )
- }), list( .link=link, .earg=earg ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- aa = eta2theta(eta, .link, earg=.earg)
+ misc$link = c(a = .link)
+ misc$earg = list(a = .earg )
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa = eta2theta(eta, .link, earg = .earg)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(w * dfelix(x=y, a=aa, log = TRUE))
}
- }, list( .link=link, .earg=earg ))),
- vfamily=c("felix"),
- deriv=eval(substitute(expression({
- a = eta2theta(eta, .link, earg=.earg)
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("felix"),
+ deriv = eval(substitute(expression({
+ a = eta2theta(eta, .link, earg = .earg)
dl.da = (y- 1)/(2*a) - y
- da.deta = dtheta.deta(a, .link, earg=.earg)
+ da.deta = dtheta.deta(a, .link, earg = .earg)
w * dl.da * da.deta
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
ed2l.da2 = 1 / (a*(1-2*a))
wz = w * da.deta^2 * ed2l.da2
wz
- }), list( .link=link ))))
-}
-
-dsnorm = function(x, location=0, scale=1, shape=0, log=FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
-
- if (!is.Numeric(scale, posit=TRUE))
- stop("bad input for argument 'scale'")
- zedd = (x - location) / scale
- loglik = log(2) + dnorm(zedd, log=TRUE) + pnorm(shape * zedd, log.p=TRUE) -
- log(scale)
- if (log.arg) {
- loglik
- } else {
- exp(loglik)
- }
-}
-
-
-
-rsnorm = function(n, location=0, scale=1, shape=0) {
- if (!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
- stop("bad input for argument 'n'")
- if (!is.Numeric(scale, posit=TRUE))
- stop("bad input for argument 'scale'")
- if (!is.Numeric(shape)) stop("bad input for argument 'shape'")
- rho = shape / sqrt(1 + shape^2)
- u0 = rnorm(n)
- v = rnorm(n)
- u1 = rho*u0 + sqrt(1 - rho^2) * v
- location + scale * ifelse(u0 >= 0, u1, -u1)
-}
-
-
- skewnormal1 = function(lshape="identity", earg = list(), ishape=NULL,
- nsimEIM=NULL)
-{
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
- if (!is.list(earg)) earg = list()
- if (length(nsimEIM) &&
- (!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 10))
- stop("'nsimEIM' should be an integer greater than 10")
-
- new("vglmff",
- blurb = c("1-parameter Skew-normal distribution\n\n",
- "Link: ",
- namesof("shape", lshape, earg=earg), "\n",
- "Mean: shape * sqrt(2 / (pi * (1+shape^2 )))\n",
- "Variance: 1-mu^2"),
- initialize=eval(substitute(expression({
- y = cbind(y)
- if (ncol(y) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("shape", .lshape, earg=.earg, tag=FALSE)
- if (!length(etastart)) {
- init.shape = if (length( .ishape)) rep( .ishape, len=n) else {
- temp = y
- index = abs(y) < sqrt(2/pi)-0.01
- temp[!index] = y[!index]
- temp[index] = sign(y[index])/sqrt(2/(pi*y[index]*y[index])-1)
- temp
- }
- etastart = matrix(init.shape, n, ncol(y))
- }
- }), list( .lshape=lshape, .earg=earg, .ishape=ishape ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- alpha = eta2theta(eta, .lshape, earg=.earg)
- alpha * sqrt(2/(pi * (1+alpha^2 )))
- }, list( .earg=earg, .lshape=lshape ))),
- last = eval(substitute(expression({
- misc$link = c(shape= .lshape)
- misc$earg = list(shape= .earg )
- misc$nsimEIM = .nsimEIM
- misc$expected = (length( .nsimEIM ) > 0)
- }), list( .earg=earg, .lshape=lshape, .nsimEIM=nsimEIM ))),
- link=eval(substitute(function(mu, extra=NULL) {
- alpha = mu / sqrt(2/pi - mu^2)
- theta2eta(alpha, .lshape, earg=.earg)
- }, list( .earg=earg, .lshape=lshape ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- alpha = eta2theta(eta, .lshape, earg=.earg)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dsnorm(x=y, location=0, scale=1, shape=alpha, log=TRUE))
- }
- }, list( .earg=earg, .lshape=lshape ))),
- vfamily=c("skewnormal1"),
- deriv=eval(substitute(expression({
- alpha = eta2theta(eta, .lshape, earg=.earg)
- zedd = y*alpha
- tmp76 = pnorm(zedd)
- tmp86 = dnorm(zedd)
- dl.dshape = tmp86 * y / tmp76
- dshape.deta = dtheta.deta(alpha, .lshape, earg=.earg)
- w * dl.dshape * dshape.deta
- }), list( .earg=earg, .lshape=lshape ))),
- weight=eval(substitute(expression({
- if ( length( .nsimEIM )) {
- run.mean = 0
- for(ii in 1:(.nsimEIM)) {
- ysim = rsnorm(n, location = 0, scale = 1, shape = alpha)
- zedd = ysim*alpha
- tmp76 = pnorm(zedd)
- tmp86 = dnorm(zedd)
- d2l.dshape2 = -ysim*ysim*tmp86*(tmp76*zedd+tmp86)/tmp76^2
- rm(ysim)
- run.mean = ((ii-1) * run.mean + d2l.dshape2) / ii
- }
- if (intercept.only)
- run.mean = mean(run.mean)
- wz = -w * (dshape.deta^2) * run.mean
- } else {
- d2shape.deta2 = d2theta.deta2(alpha, .lshape, earg=.earg)
- d2l.dshape2 = -y*y * tmp86 * (tmp76 * zedd + tmp86) / tmp76^2
- wz = -(dshape.deta^2) * d2l.dshape2 - d2shape.deta2 * dl.dshape
- wz = w * wz
- }
- wz
- }), list( .earg=earg, .lshape=lshape, .nsimEIM=nsimEIM ))))
+ }), list( .link = link ))))
}
@@ -1820,27 +1708,27 @@ rsnorm = function(n, location=0, scale=1, shape=0) {
betaff = function(A=0, B=1,
- lmu = if (A == 0 & B == 1) "logit" else "elogit", lphi="loge",
+ lmu = if (A == 0 & B == 1) "logit" else "elogit", lphi = "loge",
emu = if (lmu == "elogit") list(min=A, max=B) else list(),
ephi=list(),
- imu=NULL, iphi=NULL, method.init=1, zero=NULL)
+ imu = NULL, iphi = NULL, method.init = 1, zero = NULL)
{
- if (!is.Numeric(A, allow=1) || !is.Numeric(B, allow=1) || A >= B)
+ if (!is.Numeric(A, allow = 1) || !is.Numeric(B, allow = 1) || A >= B)
stop("A must be < B, and both must be of length one")
- stdbeta = (A==0 && B==1)
+ stdbeta = (A == 0 && B == 1)
if (mode(lmu) != "character" && mode(lmu) != "name")
lmu = as.character(substitute(lmu))
if (mode(lphi) != "character" && mode(lphi) != "name")
lphi = as.character(substitute(lphi))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
- if (length(imu) && (!is.Numeric(imu, posit=TRUE) ||
+ if (length(imu) && (!is.Numeric(imu, posit = TRUE) ||
any(imu <= A) || any(imu >= B)))
stop("bad input for argument 'imu'")
- if (length(iphi) && !is.Numeric(iphi, posit=TRUE))
+ if (length(iphi) && !is.Numeric(iphi, posit = TRUE))
stop("bad input for argument 'iphi'")
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
method.init > 2)
stop("'method.init' must be 1 or 2")
@@ -1856,66 +1744,66 @@ rsnorm = function(n, location=0, scale=1, shape=0) {
B, "-", A, ")^(phi-1)),\n",
A," < y < ",B, ", ", A," < mu < ",B,
", mu = ", A, " + ", (B-A), " * mu1",
- ", phi > 0\n\n", sep=""),
+ ", phi > 0\n\n", sep = ""),
"Links: ",
- namesof("mu", lmu, earg=emu), ", ",
- namesof("phi", lphi, earg=ephi)),
- constraints=eval(substitute(expression({
+ namesof("mu", lmu, earg = emu), ", ",
+ namesof("phi", lphi, earg = ephi)),
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (min(y) <= .A || max(y) >= .B)
stop("data not within (A, B)")
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
- predictors.names = c(namesof("mu", .lmu, .emu, short=TRUE),
- namesof("phi", .lphi, .ephi, short=TRUE))
+ predictors.names = c(namesof("mu", .lmu, .emu, short = TRUE),
+ namesof("phi", .lphi, .ephi, short = TRUE))
if (!length(etastart)) {
- mu.init = if (is.Numeric(.imu)) .imu else
+ mu.init = if (is.Numeric( .imu)) .imu else
{if ( .method.init == 1) weighted.mean(y,w) else
median(rep(y,w))}
- mu1.init = (mu.init - .A) / (.B - .A) # In (0,1)
- phi.init = if (is.Numeric(.iphi)) .iphi else
- max(0.01, -1 + (.B-.A)^2 * mu1.init*(1-mu1.init)/var(y))
+ mu1.init = (mu.init - .A) / ( .B - .A) # In (0,1)
+ phi.init = if (is.Numeric( .iphi)) .iphi else
+ max(0.01, -1 + ( .B-.A)^2 * mu1.init*(1-mu1.init)/var(y))
etastart = matrix(0, n, 2)
- etastart[,1] = theta2eta(mu.init, .lmu, earg=.emu )
- etastart[,2] = theta2eta(phi.init, .lphi, earg=.ephi )
+ etastart[,1] = theta2eta(mu.init, .lmu, earg = .emu )
+ etastart[,2] = theta2eta(phi.init, .lphi, earg = .ephi )
}
- }), list( .lmu=lmu, .lphi=lphi, .imu=imu, .iphi=iphi,
- .A=A, .B=B, .emu=emu, .ephi=ephi, .method.init=method.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+ }), list( .lmu = lmu, .lphi = lphi, .imu=imu, .iphi=iphi,
+ .A = A, .B = B, .emu = emu, .ephi = ephi, .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
mu = eta2theta(eta[,1], .lmu, .emu )
mu
- }, list( .lmu=lmu, .emu=emu, .A=A, .B=B))),
+ }, list( .lmu = lmu, .emu = emu, .A = A, .B = B))),
last = eval(substitute(expression({
- misc$link = c(mu = .lmu, phi = .lphi)
- misc$limits = c(.A, .B)
- misc$earg = list(mu= .emu, phi= .ephi)
+ misc$link = c(mu = .lmu, phi = .lphi)
+ misc$earg = list(mu = .emu, phi = .ephi)
+ misc$limits = c( .A, .B)
misc$stdbeta = .stdbeta
- }), list( .lmu=lmu, .lphi=lphi, .A=A, .B=B, .emu=emu, .ephi=ephi,
+ }), list( .lmu = lmu, .lphi = lphi, .A = A, .B = B, .emu = emu, .ephi = ephi,
.stdbeta = stdbeta ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE, eta, extra=NULL){
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL){
mu = eta2theta(eta[,1], .lmu, .emu )
- m1u = if ( .stdbeta ) mu else (mu - .A) / (.B - .A)
+ m1u = if ( .stdbeta ) mu else (mu - .A) / ( .B - .A)
phi = eta2theta(eta[,2], .lphi, .ephi )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
shape1 = phi * m1u
shape2 = (1 - m1u) * phi
zedd = (y - .A) / ( .B - .A)
- sum(w * (dbeta(x=zedd, shape1=shape1, shape2=shape2, log=TRUE) -
+ sum(w * (dbeta(x=zedd, shape1=shape1, shape2=shape2, log = TRUE) -
log( abs( .B - .A ))))
}
- }, list( .lmu=lmu, .lphi=lphi, .A=A, .B=B, .emu=emu, .ephi=ephi,
+ }, list( .lmu = lmu, .lphi = lphi, .A = A, .B = B, .emu = emu, .ephi = ephi,
.stdbeta = stdbeta ))),
- vfamily="betaff",
- deriv=eval(substitute(expression({
+ vfamily = "betaff",
+ deriv = eval(substitute(expression({
mu = eta2theta(eta[,1], .lmu, .emu )
phi = eta2theta(eta[,2], .lphi, .ephi )
- m1u = if ( .stdbeta ) mu else (mu - .A) / (.B - .A)
+ m1u = if ( .stdbeta ) mu else (mu - .A) / ( .B - .A)
dmu.deta = dtheta.deta(mu, .lmu, .emu )
- dmu1.dmu = 1 / (.B - .A)
+ dmu1.dmu = 1 / ( .B - .A)
dphi.deta = dtheta.deta(phi, .lphi, .ephi )
temp1 = m1u*phi
temp2 = (1-m1u)*phi
@@ -1925,15 +1813,15 @@ rsnorm = function(n, location=0, scale=1, shape=0) {
mu*log(y) + (1-mu)*log1p(-y)
} else {
dl.dmu1 = phi*(digamma(temp2) - digamma(temp1) +
- log(y-.A) - log(.B-y))
+ log(y-.A) - log( .B-y))
dl.dphi = digamma(phi) - m1u*digamma(temp1) -
(1-m1u)*digamma(temp2) +
- m1u*log(y-.A) + (1-m1u)*log(.B-y) - log(.B -.A)
+ m1u*log(y-.A) + (1-m1u)*log( .B-y) - log( .B -.A)
}
w * cbind(dl.dmu1 * dmu1.dmu * dmu.deta, dl.dphi * dphi.deta)
- }), list( .lmu=lmu, .lphi=lphi, .A=A, .B=B, .emu=emu, .ephi=ephi,
+ }), list( .lmu = lmu, .lphi = lphi, .A = A, .B = B, .emu = emu, .ephi = ephi,
.stdbeta = stdbeta ))),
- weight=eval(substitute(expression({
+ weight = eval(substitute(expression({
d2l.dmu12 = phi^2 * (trigamma(temp1) + trigamma(temp2))
d2l.dphi2 = -trigamma(phi) + trigamma(temp1) * m1u^2 +
trigamma(temp2) * (1-m1u)^2
@@ -1943,7 +1831,7 @@ rsnorm = function(n, location=0, scale=1, shape=0) {
wz[,iam(2,2,M)] = d2l.dphi2 * dphi.deta^2
wz[,iam(1,2,M)] = d2l.dmu1phi * dmu1.dmu * dmu.deta * dphi.deta
w * wz
- }), list( .A=A, .B=B ))))
+ }), list( .A = A, .B = B ))))
}
@@ -1959,14 +1847,14 @@ rsnorm = function(n, location=0, scale=1, shape=0) {
lshape1 = as.character(substitute(lshape1))
if (mode(lshape2) != "character" && mode(lshape2) != "name")
lshape2 = as.character(substitute(lshape2))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
- if (length( i1 ) && !is.Numeric( i1, posit=TRUE))
+ if (length( i1 ) && !is.Numeric( i1, posit = TRUE))
stop("bad input for argument 'i1'")
- if (length( i2 ) && !is.Numeric( i2, posit=TRUE))
+ if (length( i2 ) && !is.Numeric( i2, posit = TRUE))
stop("bad input for argument 'i2'")
- if (!is.Numeric(A, allow=1) || !is.Numeric(B, allow=1) || A >= B)
+ if (!is.Numeric(A, allow = 1) || !is.Numeric(B, allow = 1) || A >= B)
stop("A must be < B, and both must be of length one")
stdbeta = (A == 0 && B == 1) # stdbeta == T iff standard beta distribution
if (!is.list(eshape1)) eshape1 = list()
@@ -1981,76 +1869,76 @@ rsnorm = function(n, location=0, scale=1, shape=0) {
paste("(y-",A,")^(shape1-1) * (",B,
"-y)^(shape2-1) / [B(shape1,shape2) * (",
B, "-", A, ")^(shape1+shape2-1)], ",
- A,"<=y<=",B," shape1>0, shape2>0\n\n", sep=""),
+ A,"<=y< = ",B," shape1>0, shape2>0\n\n", sep = ""),
"Links: ",
- namesof("shape1", lshape1, earg=eshape1), ", ",
- namesof("shape2", lshape2, earg=eshape2)),
- constraints=eval(substitute(expression({
- constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints, int= TRUE)
+ namesof("shape1", lshape1, earg = eshape1), ", ",
+ namesof("shape2", lshape2, earg = eshape2)),
+ constraints = eval(substitute(expression({
+ constraints = cm.vgam(matrix(1, M, 1), x, .parallel, constraints, int= TRUE)
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .parallel=parallel, .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .parallel = parallel, .zero = zero ))),
+ initialize = eval(substitute(expression({
if (min(y) <= .A || max(y) >= .B)
stop("data not within (A, B)")
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("shape1", .lshape1, earg= .eshape1, short=TRUE),
- namesof("shape2", .lshape2, earg= .eshape2, short=TRUE))
+ c(namesof("shape1", .lshape1, earg = .eshape1, short = TRUE),
+ namesof("shape2", .lshape2, earg = .eshape2, short = TRUE))
if (!length(etastart)) {
mu1d = mean(y, trim = .trim)
- uu = (mu1d - .A) / (.B - .A)
- DD = (.B - .A)^2
+ uu = (mu1d - .A) / ( .B - .A)
+ DD = ( .B - .A)^2
pinit = max(0.01, uu^2 * (1 - uu) * DD / var(y) - uu)
qinit = max(0.01, pinit * (1 - uu) / uu)
etastart = matrix(0, n, 2)
- etastart[,1] = theta2eta( pinit, .lshape1, earg= .eshape1 )
- etastart[,2] = theta2eta( qinit, .lshape2, earg= .eshape2 )
+ etastart[,1] = theta2eta( pinit, .lshape1, earg = .eshape1 )
+ etastart[,2] = theta2eta( qinit, .lshape2, earg = .eshape2 )
}
if (is.Numeric( .i1 ))
- etastart[,1] = theta2eta( .i1, .lshape1, earg= .eshape1 )
+ etastart[,1] = theta2eta( .i1, .lshape1, earg = .eshape1 )
if (is.Numeric( .i2 ))
- etastart[,2] = theta2eta( .i2, .lshape2, earg= .eshape2 )
+ etastart[,2] = theta2eta( .i2, .lshape2, earg = .eshape2 )
}), list( .lshape1 = lshape1, .lshape2 = lshape2,
.i1 = i1, .i2 = i2, .trim = trim, .A = A, .B = B,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- shapes = cbind(eta2theta(eta[,1], .lshape1, earg= .eshape1 ),
- eta2theta(eta[,2], .lshape2, earg= .eshape2 ))
- .A + (.B-.A) * shapes[,1] / (shapes[,1] + shapes[,2])
- }, list( .lshape1=lshape1, .lshape2=lshape2, .A=A, .B=B,
- .eshape1=eshape1, .eshape2=eshape2 ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ shapes = cbind(eta2theta(eta[,1], .lshape1, earg = .eshape1 ),
+ eta2theta(eta[,2], .lshape2, earg = .eshape2 ))
+ .A + ( .B-.A) * shapes[,1] / (shapes[,1] + shapes[,2])
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
last = eval(substitute(expression({
- misc$link = c(shape1 = .lshape1, shape2 = .lshape2)
- misc$limits = c(.A, .B)
- misc$earg = list(shape1= .eshape1, shape2= .eshape2)
- }), list( .lshape1=lshape1, .lshape2=lshape2, .A=A, .B=B,
- .eshape1=eshape1, .eshape2=eshape2 ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE, eta, extra=NULL){
- shapes = cbind(eta2theta(eta[,1], .lshape1, earg= .eshape1 ),
- eta2theta(eta[,2], .lshape2, earg= .eshape2 ))
+ misc$link = c(shape1 = .lshape1, shape2 = .lshape2)
+ misc$earg = list(shape1 = .eshape1, shape2 = .eshape2)
+ misc$limits = c( .A, .B)
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL){
+ shapes = cbind(eta2theta(eta[,1], .lshape1, earg = .eshape1 ),
+ eta2theta(eta[,2], .lshape2, earg = .eshape2 ))
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
zedd = (y - .A) / ( .B - .A)
sum(w * (dbeta(x=zedd, shape1=shapes[,1], shape2=shapes[,2],
- log=TRUE) - log( abs( .B - .A ))))
- }
- }, list( .lshape1=lshape1, .lshape2=lshape2, .A=A, .B=B,
- .eshape1=eshape1, .eshape2=eshape2 ))),
- vfamily="beta.ab",
- deriv=eval(substitute(expression({
- shapes = cbind(eta2theta(eta[,1], .lshape1, earg= .eshape1 ),
- eta2theta(eta[,2], .lshape2, earg= .eshape2 ))
- dshapes.deta = cbind(dtheta.deta(shapes[,1], .lshape1, earg= .eshape1),
- dtheta.deta(shapes[,2], .lshape2, earg= .eshape2))
- dl.dshapes = cbind(log(y-.A), log(.B-y)) - digamma(shapes) +
- digamma(shapes[,1] + shapes[,2]) - log(.B - .A)
+ log = TRUE) - log( abs( .B - .A ))))
+ }
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ vfamily = "beta.ab",
+ deriv = eval(substitute(expression({
+ shapes = cbind(eta2theta(eta[,1], .lshape1, earg = .eshape1 ),
+ eta2theta(eta[,2], .lshape2, earg = .eshape2 ))
+ dshapes.deta = cbind(dtheta.deta(shapes[,1], .lshape1, earg = .eshape1),
+ dtheta.deta(shapes[,2], .lshape2, earg = .eshape2))
+ dl.dshapes = cbind(log(y-.A), log( .B-y)) - digamma(shapes) +
+ digamma(shapes[,1] + shapes[,2]) - log( .B - .A)
w * dl.dshapes * dshapes.deta
- }), list( .lshape1=lshape1, .lshape2=lshape2, .A=A, .B=B,
- .eshape1=eshape1, .eshape2=eshape2 ))),
- weight=expression({
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ weight = expression({
temp2 = trigamma(shapes[,1]+shapes[,2])
d2l.dshape12 = temp2 - trigamma(shapes[,1])
d2l.dshape22 = temp2 - trigamma(shapes[,2])
@@ -2067,8 +1955,8 @@ rsnorm = function(n, location=0, scale=1, shape=0) {
- beta4 = function(link="loge", earg=list(),
- i1=2.3, i2=2.4, iA=NULL, iB=NULL)
+ beta4 = function(link = "loge", earg = list(),
+ i1=2.3, i2=2.4, iA = NULL, iB = NULL)
{
@@ -2081,68 +1969,68 @@ rsnorm = function(n, location=0, scale=1, shape=0) {
blurb = c("Four-parameter Beta distribution\n",
"(y-A)^(shape1-1) * (B-y)^(shape2-1), A < y < B \n\n",
"Links: ",
- namesof("shape1", link, earg=earg), ", ",
- namesof("shape2", link, earg=earg), ", ",
+ namesof("shape1", link, earg = earg), ", ",
+ namesof("shape2", link, earg = earg), ", ",
" A, B"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (!is.vector(y) || (is.matrix(y) && ncol(y) != 1))
stop("y must be a vector or a one-column matrix")
- if (length(.iA) && any(y < .iA))
+ if (length( .iA) && any(y < .iA))
stop("initial 'A' value out of range")
- if (length(.iB) && any(y > .iB))
+ if (length( .iB) && any(y > .iB))
stop("initial 'B' value out of range")
predictors.names = c(
- namesof("shape1", .link, earg=.earg, short=TRUE),
- namesof("shape2", .link, earg=.earg, short=TRUE), "A", "B")
+ namesof("shape1", .link, earg = .earg, short = TRUE),
+ namesof("shape2", .link, earg = .earg, short = TRUE), "A", "B")
my.range = diff(range(y))
if (!length(etastart)) {
- etastart = cbind(shape1= rep(.i1, len=length(y)),
+ etastart = cbind(shape1= rep( .i1, len = length(y)),
shape2= .i2,
- A = if (length(.iA)) .iA else min(y)-my.range/70,
- B = if (length(.iB)) .iB else max(y)+my.range/70)
+ A = if (length( .iA)) .iA else min(y)-my.range/70,
+ B = if (length( .iB)) .iB else max(y)+my.range/70)
}
- }), list( .i1=i1, .i2=i2, .iA=iA, .iB=iB, .link=link, .earg=earg ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- shapes = eta2theta(eta[,1:2], .link, earg=.earg)
+ }), list( .i1=i1, .i2=i2, .iA=iA, .iB=iB, .link = link, .earg = earg ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ shapes = eta2theta(eta[,1:2], .link, earg = .earg)
.A = eta[,3]
.B = eta[,4]
- .A + (.B-.A) * shapes[,1] / (shapes[,1] + shapes[,2])
- }, list( .link=link, .earg=earg ))),
+ .A + ( .B-.A) * shapes[,1] / (shapes[,1] + shapes[,2])
+ }, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = c(shape1 = .link, shape2 = .link,
- A="identity", B="identity")
+ misc$link = c(shape1 = .link, shape2 = .link,
+ A = "identity", B = "identity")
misc$earg = list(shape1 = .earg, shape2 = .earg,
- A=list(), B=list())
- }), list( .link=link, .earg=earg ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta,extra=NULL) {
- shapes = eta2theta(eta[,1:2], .link, earg=.earg)
+ A = list(), B = list())
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ shapes = eta2theta(eta[,1:2], .link, earg = .earg)
.A = eta[,3]
.B = eta[,4]
temp = lbeta(shapes[,1], shapes[,2])
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
- sum(w * ((shapes[,1]-1)*log(y-.A) + (shapes[,2]-1)*log(.B-y) - temp -
- (shapes[,1]+shapes[,2]-1)*log(.B-.A )))
- }, list( .link=link, .earg=earg ))),
- vfamily="beta4",
- deriv=eval(substitute(expression({
- shapes = eta2theta(eta[,1:2], .link, earg=.earg)
+ sum(w * ((shapes[,1]-1)*log(y-.A) + (shapes[,2]-1)*log( .B-y) - temp -
+ (shapes[,1]+shapes[,2]-1)*log( .B-.A )))
+ }, list( .link = link, .earg = earg ))),
+ vfamily = "beta4",
+ deriv = eval(substitute(expression({
+ shapes = eta2theta(eta[,1:2], .link, earg = .earg)
.A = eta[,3]
.B = eta[,4]
- dshapes.deta = dtheta.deta(shapes, .link, earg=.earg)
- rr1 = (.B - .A)
+ dshapes.deta = dtheta.deta(shapes, .link, earg = .earg)
+ rr1 = ( .B - .A)
temp3 = (shapes[,1] + shapes[,2] - 1)
temp1 = temp3 / rr1
- dl.dshapes = cbind(log(y-.A), log(.B-y)) - digamma(shapes) +
- digamma(shapes[,1] + shapes[,2]) - log(.B - .A)
+ dl.dshapes = cbind(log(y-.A), log( .B-y)) - digamma(shapes) +
+ digamma(shapes[,1] + shapes[,2]) - log( .B - .A)
dl.dA = -(shapes[,1]-1) / (y- .A) + temp1
- dl.dB = (shapes[,2]-1) / (.B - y) - temp1
+ dl.dB = (shapes[,2]-1) / ( .B - y) - temp1
w * cbind(dl.dshapes * dshapes.deta, dl.dA, dl.dB)
- }), list( .link=link, .earg=earg ))),
- weight=expression({
+ }), list( .link = link, .earg = earg ))),
+ weight = expression({
temp2 = trigamma(shapes[,1]+shapes[,2])
d2l.dshape12 = temp2 - trigamma(shapes[,1])
@@ -2180,112 +2068,122 @@ rsnorm = function(n, location=0, scale=1, shape=0) {
simple.exponential = function()
{
- new("vglmff",
- blurb = c("Simple Exponential distribution\n",
- "Link: log(rate)\n"),
- deviance= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- devy = -log(y) - 1
- devmu = -log(mu) - y/mu
- devi = 2 * (devy - devmu)
- if (residuals) sign(y - mu) * sqrt(abs(devi) * w) else sum(w * devi)
- },
- initialize=expression({
- predictors.names = "log(rate)"
- mustart = y + (y == 0) / 8
- }),
- inverse=function(eta, extra=NULL)
- exp(-eta),
- link=function(mu, extra=NULL)
- -log(mu),
- vfamily="simple.exponential",
- deriv=expression({
- rate = 1 / mu
- dl.drate = mu - y
- drate.deta = dtheta.deta(rate, "loge")
- w * dl.drate * drate.deta
- }),
- weight=expression({
- ed2l.drate2 = -1 / rate^2
- wz = -w * drate.deta^2 * ed2l.drate2
- wz
- }))
-}
-
-
- exponential = function(link="loge", earg=list(), location=0, expected=TRUE) {
- if (!is.Numeric(location, allow=1))
- stop("bad input for argument 'location'")
-
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
- if (!is.logical(expected) || length(expected) != 1)
- stop("bad input for argument 'expected'")
+ new("vglmff",
+ blurb = c("Simple Exponential distribution\n",
+ "Link: log(rate)\n"),
+ deviance= function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ devy = -log(y) - 1
+ devmu = -log(mu) - y/mu
+ devi = 2 * (devy - devmu)
+ if (residuals) sign(y - mu) * sqrt(abs(devi) * w) else sum(w * devi)
+ },
+ initialize=expression({
+ predictors.names = "log(rate)"
+ mustart = y + (y == 0) / 8
+ }),
+ inverse=function(eta, extra = NULL)
+ exp(-eta),
+ link=function(mu, extra = NULL)
+ -log(mu),
+ vfamily = "simple.exponential",
+ deriv=expression({
+ rate = 1 / mu
+ dl.drate = mu - y
+ drate.deta = dtheta.deta(rate, "loge")
+ w * dl.drate * drate.deta
+ }),
+ weight = expression({
+ ed2l.drate2 = -1 / rate^2
+ wz = -w * drate.deta^2 * ed2l.drate2
+ wz
+ }))
+}
+
+
+
+
+ exponential <- function(link = "loge", earg = list(),
+ location = 0, expected = TRUE) {
+ if (!is.Numeric(location, allow = 1))
+ stop("bad input for argument 'location'")
+
+ if (mode(link) != "character" && mode(link) != "name")
+ link <- as.character(substitute(link))
+ if (!is.list(earg)) earg = list()
+ if (!is.logical(expected) || length(expected) != 1)
+ stop("bad input for argument 'expected'")
- new("vglmff",
- blurb = c("Exponential distribution\n\n",
- "Link: ", namesof("rate", link, tag= TRUE), "\n",
+ new("vglmff",
+ blurb = c("Exponential distribution\n\n",
+ "Link: ", namesof("rate", link, tag = TRUE), "\n",
"Mean: ", "mu = ",
if (location == 0) "1/rate" else
paste(location, "+ 1/rate"), "\n",
"Variance: ",
- if (location==0) "Exponential: mu^2" else
- paste("(mu-", location, ")^2", sep="")),
- deviance=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta,extra=NULL) {
- devy = -log(y - .location) - 1
- devmu = -log(mu - .location) - (y - .location)/(mu - .location)
- devi = 2 * (devy - devmu)
- if (residuals)
- sign(y - mu) * sqrt(abs(devi) * w) else
- sum(w * devi)
- }, list( .location=location, .earg=earg ))),
- initialize=eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- extra$loc = .location # This is passed into, e.g., link, deriv etc.
- if (any(y <= extra$loc))
- stop("all responses must be greater than ", extra$loc)
- predictors.names = namesof("rate", .link, tag=FALSE)
- mu = y + (y == extra$loc) / 8
- if (!length(etastart))
- etastart = theta2eta(1/(mu-extra$loc), .link, earg=.earg)
- }), list( .location=location, .link=link, .earg=earg ))),
- inverse=eval(substitute(function(eta, extra=NULL)
- extra$loc + 1 / eta2theta(eta, .link, earg=.earg),
- list( .link=link, .earg=earg ))),
- last = eval(substitute(expression({
- misc$location = extra$loc
- misc$link = c(rate = .link)
- misc$earg = list(rate = .earg)
- misc$expected = .expected
- }), list( .link=link, .earg=earg, .expected=expected ))),
- link=eval(substitute(function(mu, extra=NULL)
- theta2eta(1/(mu-extra$loc), .link, earg=.earg),
- list( .link=link, .earg=earg ))),
- vfamily=c("exponential"),
- deriv=eval(substitute(expression({
- rate = 1 / (mu - extra$loc)
- dl.drate = mu - y
- drate.deta = dtheta.deta(rate, .link, earg=.earg)
- w * dl.drate * drate.deta
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
- d2l.drate2 = - ((mu-extra$loc)^2)
- wz = -(drate.deta^2) * d2l.drate2
- if (! .expected) {
- # Use the observed info matrix rather than the expected
- d2rate.deta2 = d2theta.deta2(rate, .link, earg=.earg)
- wz = wz - dl.drate * d2rate.deta2
- }
- w * wz
- }), list( .link=link, .expected=expected, .earg=earg ))))
+ if (location == 0) "Exponential: mu^2" else
+ paste("(mu - ", location, ")^2", sep = "")),
+ deviance = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ devy <- -log(y - .location) - 1
+ devmu <- -log(mu - .location) - (y - .location) / (mu - .location)
+ devi <- 2 * (devy - devmu)
+ if (residuals) {
+ sign(y - mu) * sqrt(abs(devi) * w)
+ } else {
+ sum(w * devi)
+ }
+ }, list( .location = location, .earg = earg ))),
+ initialize = eval(substitute(expression({
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+
+ extra$loc <- .location # Passed into, e.g., @link, @deriv etc.
+
+ if (any(y <= extra$loc))
+ stop("all responses must be greater than ", extra$loc)
+
+ predictors.names <- namesof("rate", .link, tag = FALSE)
+
+ if (length(mustart) + length(etastart) == 0)
+ mustart <- y + (y == extra$loc) / 8
+ if (!length(etastart))
+ etastart <- theta2eta(1 / (mustart - extra$loc),
+ .link, earg = .earg)
+ }), list( .location = location, .link = link, .earg = earg ))),
+ inverse = eval(substitute(function(eta, extra = NULL)
+ extra$loc + 1 / eta2theta(eta, .link, earg = .earg),
+ list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$location <- extra$loc
+ misc$link <- c(rate = .link)
+ misc$earg <- list(rate = .earg)
+ misc$expected <- .expected
+ }), list( .link = link, .earg = earg, .expected = expected ))),
+ link = eval(substitute(function(mu, extra = NULL)
+ theta2eta(1 / (mu - extra$loc), .link, earg = .earg),
+ list( .link = link, .earg = earg ))),
+ vfamily = c("exponential"),
+ deriv = eval(substitute(expression({
+ rate <- 1 / (mu - extra$loc)
+ dl.drate <- mu - y
+ drate.deta <- dtheta.deta(rate, .link, earg = .earg)
+ w * dl.drate * drate.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ d2l.drate2 <- -((mu-extra$loc)^2)
+ wz <- -(drate.deta^2) * d2l.drate2
+ if (! .expected) {
+ d2rate.deta2 <- d2theta.deta2(rate, .link, earg = .earg)
+ wz <- wz - dl.drate * d2rate.deta2
+ }
+ w * wz
+ }), list( .link = link, .expected = expected, .earg = earg ))))
}
- gamma1 = function(link="loge", earg=list())
+ gamma1 = function(link = "loge", earg = list())
{
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -2294,46 +2192,47 @@ rsnorm = function(n, location=0, scale=1, shape=0) {
new("vglmff",
blurb = c("1-parameter Gamma distribution\n",
"Link: ",
- namesof("shape", link, earg=earg, tag= TRUE), "\n",
+ namesof("shape", link, earg = earg, tag = TRUE), "\n",
"Mean: mu (=shape)\n",
"Variance: mu (=shape)"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (any(y <= 0))
stop("all responses must be positive")
M = if (is.matrix(y)) ncol(y) else 1
- temp.names = if (M == 1) "shape" else paste("shape", 1:M, sep="")
- predictors.names = namesof(temp.names, .link, earg=.earg, short=TRUE)
+ temp.names = if (M == 1) "shape" else paste("shape", 1:M, sep = "")
+ predictors.names =
+ namesof(temp.names, .link, earg = .earg, short = TRUE)
if (!length(etastart))
- etastart = cbind(theta2eta(y+1/8, .link, earg=.earg ))
- }), list( .link=link, .earg=earg ))),
- inverse=eval(substitute(function(eta, extra=NULL)
- eta2theta(eta, .link, earg=.earg)),
- list( .link=link, .earg=earg )),
+ etastart = cbind(theta2eta(y + 1/8, .link, earg = .earg ))
+ }), list( .link = link, .earg = earg ))),
+ inverse = eval(substitute(function(eta, extra = NULL)
+ eta2theta(eta, .link, earg = .earg)),
+ list( .link = link, .earg = earg )),
last = eval(substitute(expression({
- temp.names = if (M == 1) "shape" else paste("shape", 1:M, sep="")
- misc$link = rep( .link, length=M)
+ temp.names = if (M == 1) "shape" else paste("shape", 1:M, sep = "")
+ misc$link = rep( .link, length = M)
names(misc$link) = temp.names
misc$earg = vector("list", M)
names(misc$earg) = names(misc$link)
for(ii in 1:M) misc$earg[[ii]] = .earg
misc$expected = TRUE
- }), list( .link=link, .earg=earg ))),
- link=eval(substitute(function(mu, extra=NULL)
- theta2eta(mu, .link, earg=.earg)),
- list( .link=link, .earg=earg )),
- loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+ }), list( .link = link, .earg = earg ))),
+ link = eval(substitute(function(mu, extra = NULL)
+ theta2eta(mu, .link, earg = .earg)),
+ list( .link = link, .earg = earg )),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra = NULL)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dgamma(x=y, shape=mu, scale=1, log=TRUE))
+ sum(w * dgamma(x=y, shape=mu, scale=1, log = TRUE))
},
- vfamily=c("gamma1"),
- deriv=eval(substitute(expression({
+ vfamily = c("gamma1"),
+ deriv = eval(substitute(expression({
shape = mu
dl.dshape = log(y) - digamma(shape)
- dshape.deta = dtheta.deta(shape, .link, earg=.earg)
+ dshape.deta = dtheta.deta(shape, .link, earg = .earg)
w * dl.dshape * dshape.deta
- }), list( .link=link, .earg=earg ))),
- weight=expression({
+ }), list( .link = link, .earg = earg ))),
+ weight = expression({
d2l.dshape = -trigamma(shape)
wz = -(dshape.deta^2) * d2l.dshape
w * wz
@@ -2341,19 +2240,19 @@ rsnorm = function(n, location=0, scale=1, shape=0) {
}
- gamma2.ab = function(lrate="loge", lshape="loge",
- erate=list(), eshape=list(),
- irate=NULL, ishape=NULL, expected=TRUE, zero=2)
+ gamma2.ab = function(lrate = "loge", lshape = "loge",
+ erate = list(), eshape = list(),
+ irate = NULL, ishape = NULL, expected = TRUE, zero = 2)
{
if (mode(lrate) != "character" && mode(lrate) != "name")
lrate = as.character(substitute(lrate))
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
- if (length( irate) && !is.Numeric(irate, posit=TRUE))
+ if (length( irate) && !is.Numeric(irate, posit = TRUE))
stop("bad input for argument 'irate'")
- if (length( ishape) && !is.Numeric(ishape, posit=TRUE))
+ if (length( ishape) && !is.Numeric(ishape, posit = TRUE))
stop("bad input for argument 'ishape'")
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.logical(expected) || length(expected) != 1)
stop("bad input for argument 'expected'")
@@ -2363,68 +2262,69 @@ rsnorm = function(n, location=0, scale=1, shape=0) {
new("vglmff",
blurb = c("2-parameter Gamma distribution\n",
"Links: ",
- namesof("rate", lrate, earg=erate), ", ",
- namesof("shape", lshape, earg=eshape), "\n",
+ namesof("rate", lrate, earg = erate), ", ",
+ namesof("shape", lshape, earg = eshape), "\n",
"Mean: mu = shape/rate\n",
"Variance: (mu^2)/shape = shape/rate^2"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
# Error check
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
if (any(y <= 0))
stop("all responses must be positive")
- predictors.names = c(namesof("rate", .lrate, earg=.erate, tag=FALSE),
- namesof("shape", .lshape, earg=.eshape, tag=FALSE))
+ predictors.names =
+ c(namesof("rate", .lrate, earg = .erate, tag = FALSE),
+ namesof("shape", .lshape, earg = .eshape, tag = FALSE))
if (!length(etastart)) {
mymu = y + 0.167 * (y == 0)
junk = lsfit(x, y, wt = w, intercept = FALSE)
var.y.est = sum(w * junk$resid^2) / (nrow(x) - length(junk$coef))
init.shape = if (length( .ishape)) .ishape else mymu^2 / var.y.est
init.rate = if (length( .irate)) .irate else init.shape / mymu
- init.rate = rep(init.rate, len=n)
- init.shape = rep(init.shape, len=n)
+ init.rate = rep(init.rate, len = n)
+ init.shape = rep(init.shape, len = n)
if ( .lshape == "loglog")
init.shape[init.shape <= 1] = 3.1 #Hopefully value is big enough
- etastart = cbind(theta2eta(init.rate, .lrate, earg=.erate),
- theta2eta(init.shape, .lshape, earg=.eshape))
- }
- }), list( .lrate=lrate, .lshape=lshape, .irate=irate, .ishape=ishape,
- .erate=erate, .eshape=eshape ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- eta2theta(eta[,2], .lshape, earg=.eshape) / eta2theta(eta[,1], .lrate,
- earg=.erate)
- }, list( .lrate=lrate, .lshape=lshape,
- .erate=erate, .eshape=eshape ))),
+ etastart = cbind(theta2eta(init.rate, .lrate, earg = .erate),
+ theta2eta(init.shape, .lshape, earg = .eshape))
+ }
+ }), list( .lrate = lrate, .lshape = lshape, .irate=irate, .ishape = ishape,
+ .erate = erate, .eshape = eshape ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[,2], .lshape, earg = .eshape) / eta2theta(eta[,1], .lrate,
+ earg = .erate)
+ }, list( .lrate = lrate, .lshape = lshape,
+ .erate = erate, .eshape = eshape ))),
last = eval(substitute(expression({
- misc$link = c(rate= .lrate, shape= .lshape)
- misc$earg = list(rate= .erate, shape= .eshape)
- }), list( .lrate=lrate, .lshape=lshape,
- .erate=erate, .eshape=eshape ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
- rate = eta2theta(eta[,1], .lrate, earg=.erate)
- shape = eta2theta(eta[,2], .lshape, earg=.eshape)
+ misc$link = c(rate = .lrate, shape = .lshape)
+ misc$earg = list(rate = .erate, shape = .eshape)
+ }), list( .lrate = lrate, .lshape = lshape,
+ .erate = erate, .eshape = eshape ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ rate = eta2theta(eta[,1], .lrate, earg = .erate)
+ shape = eta2theta(eta[,2], .lshape, earg = .eshape)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dgamma(x=y, shape=shape, rate=rate, log=TRUE))
- }
- }, list( .lrate=lrate, .lshape=lshape,
- .erate=erate, .eshape=eshape ))),
- vfamily=c("gamma2.ab"),
- deriv=eval(substitute(expression({
- rate = eta2theta(eta[,1], .lrate, earg=.erate)
- shape = eta2theta(eta[,2], .lshape, earg=.eshape)
+ sum(w * dgamma(x=y, shape=shape, rate=rate, log = TRUE))
+ }
+ }, list( .lrate = lrate, .lshape = lshape,
+ .erate = erate, .eshape = eshape ))),
+ vfamily = c("gamma2.ab"),
+ deriv = eval(substitute(expression({
+ rate = eta2theta(eta[,1], .lrate, earg = .erate)
+ shape = eta2theta(eta[,2], .lshape, earg = .eshape)
dl.drate = mu - y
dl.dshape = log(y*rate) - digamma(shape)
- dratedeta = dtheta.deta(rate, .lrate, earg=.erate)
- dshape.deta = dtheta.deta(shape, .lshape, earg=.eshape)
+ dratedeta = dtheta.deta(rate, .lrate, earg = .erate)
+ dshape.deta = dtheta.deta(shape, .lshape, earg = .eshape)
w * cbind(dl.drate * dratedeta, dl.dshape * dshape.deta)
- }), list( .lrate=lrate, .lshape=lshape,
- .erate=erate, .eshape=eshape ))),
- weight=eval(substitute(expression({
+ }), list( .lrate = lrate, .lshape = lshape,
+ .erate = erate, .eshape = eshape ))),
+ weight = eval(substitute(expression({
d2l.dshape2 = -trigamma(shape)
d2l.drate2 = -shape/(rate^2)
d2l.drateshape = 1/rate
@@ -2433,32 +2333,33 @@ rsnorm = function(n, location=0, scale=1, shape=0) {
wz[,iam(2,2,M)] = -d2l.dshape2 * dshape.deta^2
wz[,iam(1,2,M)] = -d2l.drateshape * dratedeta * dshape.deta
if (! .expected) {
- d2ratedeta2 = d2theta.deta2(rate, .lrate, earg=.erate)
- d2shapedeta2 = d2theta.deta2(shape, .lshape, earg=.eshape)
+ d2ratedeta2 = d2theta.deta2(rate, .lrate, earg = .erate)
+ d2shapedeta2 = d2theta.deta2(shape, .lshape, earg = .eshape)
wz[,iam(1,1,M)] = wz[,iam(1,1,M)] - dl.drate * d2ratedeta2
wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - dl.dshape * d2shapedeta2
}
w * wz
- }), list( .lrate=lrate, .lshape=lshape,
- .erate=erate, .eshape=eshape, .expected=expected ))))
+ }), list( .lrate = lrate, .lshape = lshape,
+ .erate = erate, .eshape = eshape, .expected = expected ))))
}
- gamma2 = function(lmu="loge", lshape="loge",
- emu=list(), eshape=list(),
- method.init=1,
- deviance.arg=FALSE, ishape=NULL, zero=-2)
+ gamma2 = function(lmu = "loge", lshape = "loge",
+ emu = list(), eshape = list(),
+ method.init = 1,
+ deviance.arg = FALSE, ishape = NULL, zero = -2)
{
+
if (mode(lmu) != "character" && mode(lmu) != "name")
lmu = as.character(substitute(lmu))
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
- if (length(zero) && !is.Numeric(zero, integer=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE))
stop("bad input for argument 'zero'")
- if (length( ishape) && !is.Numeric(ishape, posit=TRUE))
+ if (length( ishape) && !is.Numeric(ishape, posit = TRUE))
stop("bad input for argument 'ishape'")
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
method.init > 2)
stop("'method.init' must be 1 or 2")
if (!is.list(emu)) emu = list()
@@ -2469,32 +2370,33 @@ rsnorm = function(n, location=0, scale=1, shape=0) {
blurb = c("2-parameter Gamma distribution",
" (McCullagh and Nelder 1989 parameterization)\n",
"Links: ",
- namesof("mu", lmu, earg=emu), ", ",
- namesof("shape", lshape, earg=eshape), "\n",
+ namesof("mu", lmu, earg = emu), ", ",
+ namesof("shape", lshape, earg = eshape), "\n",
"Mean: mu\n",
"Variance: (mu^2)/shape"),
- constraints=eval(substitute(expression({
- temp752 = .zero
- if (length(temp752) && all(temp752 == -2))
- temp752 = 2*(1:ncol(y))
- constraints = cm.zero.vgam(constraints, x, temp752, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ constraints = eval(substitute(expression({
+
+ dotzero <- .zero
+ Musual <- 2
+ eval(negzero.expression)
+ constraints = cm.zero.vgam(constraints, x, z_Index, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
assign("CQO.FastAlgorithm", ( .lmu == "loge" && .lshape == "loge"),
- envir = VGAMenv)
+ envir = VGAM:::VGAMenv)
if (any(function.name == c("cqo","cao")) &&
- is.Numeric( .zero, allow=1) && .zero != -2)
+ is.Numeric( .zero, allow = 1) && .zero != -2)
stop("argument zero=-2 is required")
y = as.matrix(y)
M = 2 * ncol(y)
NOS = ncoly = ncol(y) # Number of species
- temp1.names = if (NOS==1) "mu" else paste("mu", 1:NOS, sep="")
- temp2.names = if (NOS==1) "shape" else paste("shape", 1:NOS, sep="")
+ temp1.names = if (NOS == 1) "mu" else paste("mu", 1:NOS, sep = "")
+ temp2.names = if (NOS == 1) "shape" else paste("shape", 1:NOS, sep = "")
predictors.names =
- c(namesof(temp1.names, .lmu, earg=.emu, tag=FALSE),
- namesof(temp2.names, .lshape, earg=.eshape, tag=FALSE))
- predictors.names = predictors.names[interleave.VGAM(M, M=2)]
+ c(namesof(temp1.names, .lmu, earg = .emu, tag = FALSE),
+ namesof(temp2.names, .lshape, earg = .eshape, tag = FALSE))
+ predictors.names = predictors.names[interleave.VGAM(M, M = 2)]
# Error check
@@ -2516,67 +2418,69 @@ rsnorm = function(n, location=0, scale=1, shape=0) {
if ( .lshape == "loglog") init.shape[init.shape[,spp] <=
1,spp] = 3.1 # Hopefully value is big enough
}
- etastart = cbind(theta2eta(mymu, .lmu, earg=.emu ),
- theta2eta(init.shape, .lshape, earg=.eshape ))
- etastart = etastart[,interleave.VGAM(M, M=2),drop=FALSE]
- }
- }), list( .lmu=lmu, .lshape=lshape, .ishape=ishape, .zero=zero,
- .emu=emu, .eshape=eshape,
- .method.init=method.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+ etastart = cbind(theta2eta(mymu, .lmu, earg = .emu ),
+ theta2eta(init.shape, .lshape, earg = .eshape ))
+ etastart = etastart[,interleave.VGAM(M, M = 2), drop = FALSE]
+ }
+ }), list( .lmu = lmu, .lshape = lshape, .ishape = ishape, .zero = zero,
+ .emu = emu, .eshape = eshape,
+ .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
NOS = ncol(eta) / 2
- eta2theta(eta[,2*(1:NOS)-1,drop=FALSE], .lmu, earg=.emu )
- }, list( .lmu=lmu, .emu=emu ))),
+ eta2theta(eta[,2*(1:NOS)-1, drop = FALSE], .lmu, earg = .emu )
+ }, list( .lmu = lmu, .emu = emu ))),
last = eval(substitute(expression({
- if (exists("CQO.FastAlgorithm", envir = VGAMenv))
- rm("CQO.FastAlgorithm", envir = VGAMenv)
- tmp34 = c(rep( .lmu, length=NOS), rep( .lshape, length=NOS))
+ if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
+ rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
+ tmp34 = c(rep( .lmu, length = NOS),
+ rep( .lshape, length = NOS))
names(tmp34) =
- c(if (NOS == 1) "mu" else paste("mu", 1:NOS, sep=""),
- if (NOS == 1) "shape" else paste("shape", 1:NOS, sep=""))
- tmp34 = tmp34[interleave.VGAM(M, M=2)]
+ c(if (NOS == 1) "mu" else paste("mu", 1:NOS, sep = ""),
+ if (NOS == 1) "shape" else paste("shape", 1:NOS, sep = ""))
+ tmp34 = tmp34[interleave.VGAM(M, M = 2)]
misc$link = tmp34 # Already named
misc$earg = vector("list", M)
+ misc$Musual <- Musual
names(misc$earg) = names(misc$link)
for(ii in 1:NOS) {
misc$earg[[2*ii-1]] = .emu
misc$earg[[2*ii ]] = .eshape
}
misc$expected = TRUE
- }), list( .lmu=lmu, .lshape=lshape,
- .emu=emu, .eshape=eshape ))),
- link=eval(substitute(function(mu, extra=NULL) {
- temp = theta2eta(mu, .lmu, earg=.emu )
+ }), list( .lmu = lmu, .lshape = lshape,
+ .emu = emu, .eshape = eshape ))),
+ link = eval(substitute(function(mu, extra = NULL) {
+ temp = theta2eta(mu, .lmu, earg = .emu )
temp = cbind(temp, NA * temp)
- temp[,interleave.VGAM(ncol(temp), M=2),drop=FALSE]
- }, list( .lmu=lmu, .emu=emu ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
+ temp[,interleave.VGAM(ncol(temp), M = 2), drop = FALSE]
+ }, list( .lmu = lmu, .emu = emu ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
NOS = ncol(eta) / 2
- mymu = mu # eta2theta(eta[,2*(1:NOS)-1], .lmu, earg=.emu )
- shapemat = eta2theta(eta[,2*(1:NOS),drop=FALSE], .lshape, earg=.eshape )
+ mymu = mu # eta2theta(eta[,2*(1:NOS)-1], .lmu, earg = .emu )
+ shapemat = eta2theta(eta[,2*(1:NOS), drop = FALSE], .lshape, earg = .eshape )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(w * dgamma(x=y, shape=c(shapemat), scale=c(mymu/shapemat),
- log=TRUE))
+ log = TRUE))
}
- }, list( .lmu=lmu, .lshape=lshape,
- .emu=emu, .eshape=eshape ))),
- vfamily=c("gamma2"),
- deriv=eval(substitute(expression({
+ }, list( .lmu = lmu, .lshape = lshape,
+ .emu = emu, .eshape = eshape ))),
+ vfamily = c("gamma2"),
+ deriv = eval(substitute(expression({
NOS = ncol(eta) / 2
- mymu = eta2theta(eta[,2*(1:NOS)-1], .lmu, earg=.emu )
- shape = eta2theta(eta[,2*(1:NOS)], .lshape, earg=.eshape )
+ mymu = eta2theta(eta[,2*(1:NOS)-1], .lmu, earg = .emu )
+ shape = eta2theta(eta[,2*(1:NOS)], .lshape, earg = .eshape )
dl.dmu = shape * (y / mymu - 1) / mymu
dl.dshape = log(y) + log(shape) - log(mymu) + 1 - digamma(shape) -
y / mymu
- dmu.deta = dtheta.deta(mymu, .lmu, earg=.emu )
- dshape.deta = dtheta.deta(shape, .lshape, earg=.eshape )
+ dmu.deta = dtheta.deta(mymu, .lmu, earg = .emu )
+ dshape.deta = dtheta.deta(shape, .lshape, earg = .eshape )
myderiv = w * cbind(dl.dmu * dmu.deta, dl.dshape * dshape.deta)
- myderiv[,interleave.VGAM(M, M=2)]
- }), list( .lmu=lmu, .lshape=lshape,
- .emu=emu, .eshape=eshape ))),
- weight=eval(substitute(expression({
+ myderiv[,interleave.VGAM(M, M = 2)]
+ }), list( .lmu = lmu, .lshape = lshape,
+ .emu = emu, .eshape = eshape ))),
+ weight = eval(substitute(expression({
ed2l.dmu2 = shape / (mymu^2)
ed2l.dshape2 = trigamma(shape) - 1 / shape
wz = matrix(as.numeric(NA), n, M) #2=M; diagonal!
@@ -2584,31 +2488,31 @@ rsnorm = function(n, location=0, scale=1, shape=0) {
wz[,2*(1:NOS)-1] = ed2l.dmu2 * dmu.deta^2
wz[,2*(1:NOS)] = ed2l.dshape2 * dshape.deta^2
w * wz
- }), list( .lmu=lmu ))))
+ }), list( .lmu = lmu ))))
- if (deviance.arg) ans at deviance=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta,extra=NULL) {
+ if (deviance.arg) ans at deviance = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
NOS = ncol(eta) / 2
- temp300 = eta[,2*(1:NOS),drop=FALSE]
+ temp300 = eta[,2*(1:NOS), drop = FALSE]
if ( .lshape == "loge") {
bigval = 28
temp300[temp300 > bigval] = bigval
temp300[temp300 < -bigval] = -bigval
} else stop("can only handle the 'loge' link")
- shape = eta2theta(temp300, .lshape, earg=.eshape )
+ shape = eta2theta(temp300, .lshape, earg = .eshape )
devi = -2 * (log(y/mu) - y/mu + 1)
if (residuals) {
warning("not 100% sure about these deviance residuals!")
sign(y - mu) * sqrt(abs(devi) * w)
} else
sum(w * devi)
- }, list( .lshape=lshape )))
+ }, list( .lshape = lshape )))
ans
}
- geometric =function(link="logit", earg=list(), expected=TRUE,
+ geometric =function(link = "logit", earg = list(), expected = TRUE,
method.init= 1)
{
if (!is.logical(expected) || length(expected) != 1)
@@ -2616,72 +2520,74 @@ rsnorm = function(n, location=0, scale=1, shape=0) {
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
method.init > 3)
stop("'method.init' must be 1 or 2 or 3")
new("vglmff",
blurb = c("Geometric distribution (P[Y=y] = prob*(1-prob)^y, y=0,1,2,...)\n",
"Link: ",
- namesof("prob", link, earg=earg), "\n",
+ namesof("prob", link, earg = earg), "\n",
"Mean: mu = (1-prob)/prob\n",
"Variance: mu*(1+mu) = (1-prob)/prob^2"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a 1-column matrix")
if (any(y < 0)) stop("all responses must be >= 0")
if (any(y!=round(y ))) stop("response should be integer-valued")
- predictors.names = namesof("prob", .link, earg=.earg, tag=FALSE)
+ predictors.names = namesof("prob", .link, earg = .earg, tag = FALSE)
if (!length(etastart)) {
- prob.init = if ( .method.init == 3) 1 / (1 + y + 1/16) else
- if ( .method.init == 1) 1 / (1 + median(rep(y,w)) + 1/16) else
- 1 / (1 + weighted.mean(y,w) + 1/16)
- etastart = theta2eta(prob.init, .link, earg= .earg)
- }
- }), list( .link=link, .earg=earg, .method.init=method.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- prob = eta2theta(eta, .link, earg= .earg)
+ prob.init = if ( .method.init == 3)
+ 1 / (1 + y + 1/16) else
+ if ( .method.init == 1)
+ 1 / (1 + median(rep(y,w)) + 1/16) else
+ 1 / (1 + weighted.mean(y,w) + 1/16)
+ etastart = theta2eta(prob.init, .link, earg = .earg)
+ }
+ }), list( .link = link, .earg = earg, .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ prob = eta2theta(eta, .link, earg = .earg)
(1-prob)/prob
- }, list( .link=link, .earg=earg ))),
+ }, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = c(prob= .link)
- misc$earg = list(prob= .earg )
+ misc$link = c(prob = .link)
+ misc$earg = list(prob = .earg )
misc$expected = .expected
misc$method.init = .method.init
- }), list( .link=link, .earg=earg, .expected=expected, .method.init=method.init ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
- prob = eta2theta(eta, .link, earg= .earg)
+ }), list( .link = link, .earg = earg, .expected = expected, .method.init = method.init ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ prob = eta2theta(eta, .link, earg = .earg)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dgeom(x=y, prob=prob, log=TRUE))
+ sum(w * dgeom(x=y, prob=prob, log = TRUE))
}
- }, list( .link=link, .earg=earg ))),
- vfamily=c("geometric"),
- deriv=eval(substitute(expression({
- prob = eta2theta(eta, .link, earg= .earg)
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("geometric"),
+ deriv = eval(substitute(expression({
+ prob = eta2theta(eta, .link, earg = .earg)
dl.dprob = -y/(1-prob) + 1/prob
- dprobdeta = dtheta.deta(prob, .link, earg= .earg)
+ dprobdeta = dtheta.deta(prob, .link, earg = .earg)
w * cbind(dl.dprob * dprobdeta)
- }), list( .link=link, .earg=earg, .expected=expected ))),
- weight=eval(substitute(expression({
+ }), list( .link = link, .earg = earg, .expected = expected ))),
+ weight = eval(substitute(expression({
ed2l.dprob2 = if ( .expected ) 1 / (prob^2 * (1-prob)) else
y / (1-prob)^2 + 1 / prob^2
wz = ed2l.dprob2 * dprobdeta^2
- if ( !( .expected )) wz = wz - dl.dprob * d2theta.deta2(prob, .link, earg= .earg)
+ if ( !( .expected )) wz = wz - dl.dprob * d2theta.deta2(prob, .link, earg = .earg)
w * wz
- }), list( .link=link, .earg=earg, .expected=expected ))))
+ }), list( .link = link, .earg = earg, .expected = expected ))))
}
-dbetageom = function(x, shape1, shape2, log=FALSE) {
+dbetageom = function(x, shape1, shape2, log = FALSE) {
if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
rm(log)
if (!is.Numeric(x)) stop("bad input for argument 'x'")
- if (!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument 'shape1'")
- if (!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument 'shape2'")
+ if (!is.Numeric(shape1, pos = TRUE)) stop("bad input for argument 'shape1'")
+ if (!is.Numeric(shape2, pos = TRUE)) stop("bad input for argument 'shape2'")
N = max(length(x), length(shape1), length(shape2))
x = rep(x, len=N); shape1 = rep(shape1, len=N); shape2 = rep(shape2, len=N)
loglik = lbeta(1+shape1, shape2+abs(x)) - lbeta(shape1, shape2)
@@ -2695,10 +2601,10 @@ dbetageom = function(x, shape1, shape2, log=FALSE) {
}
-pbetageom = function(q, shape1, shape2, log.p=FALSE) {
+pbetageom = function(q, shape1, shape2, log.p = FALSE) {
if (!is.Numeric(q)) stop("bad input for argument 'q'")
- if (!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument 'shape1'")
- if (!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument 'shape2'")
+ if (!is.Numeric(shape1, pos = TRUE)) stop("bad input for argument 'shape1'")
+ if (!is.Numeric(shape2, pos = TRUE)) stop("bad input for argument 'shape2'")
N = max(length(q), length(shape1), length(shape2))
q = rep(q, len=N); shape1 = rep(shape1, len=N); shape2 = rep(shape2, len=N)
ans = q * 0 # Retains names(q)
@@ -2722,601 +2628,185 @@ pbetageom = function(q, shape1, shape2, log.p=FALSE) {
}
rbetageom = function(n, shape1, shape2) {
- if (!is.Numeric(n, integ=TRUE,allow=1)) stop("bad input for argument 'n'")
- if (!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument 'shape1'")
- if (!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument 'shape2'")
+ if (!is.Numeric(n, integ = TRUE,allow = 1)) stop("bad input for argument 'n'")
+ if (!is.Numeric(shape1, pos = TRUE)) stop("bad input for argument 'shape1'")
+ if (!is.Numeric(shape2, pos = TRUE)) stop("bad input for argument 'shape2'")
rgeom(n=n, prob = rbeta(n=n, shape1=shape1, shape2=shape2))
}
- tobit = function(Lower = 0, Upper = Inf, lmu="identity",
- lsd="loge", emu=list(), esd=list(), imethod=1, zero=2)
-{
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(lsd) != "character" && mode(lsd) != "name")
- lsd = as.character(substitute(lsd))
- if (!is.Numeric(imethod, allow=1, integer=TRUE, positi=TRUE) || imethod > 2)
- stop("imethod must be 1 or 2")
- if (length(Lower) != 1 || length(Upper) != 1 ||
- !is.numeric(Lower) || !is.numeric(Upper) || Lower >= Upper)
- stop("Lower and Upper must have length 1 and be numeric with Lower < Upper")
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(emu)) emu = list()
- if (!is.list(esd)) esd = list()
- new("vglmff",
- blurb = c("Tobit model\n\n",
- "Links: ", namesof("mu", lmu, earg=emu, tag= TRUE), "; ",
- namesof("sd", lsd, earg=esd, tag= TRUE), "\n",
- "Conditional variance: sd^2"),
- constraints=eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
- y = cbind(y)
- if (ncol(y)!=1)stop("the response must be a vector or a 1-column matrix")
- extra$censoredL = (y <= .Lower)
- extra$censoredU = (y >= .Upper)
- if (min(y) < .Lower) {
- warning("replacing response values less than the value ",
- .Lower, " by ", .Lower)
- y[y < .Lower] = .Lower
- }
- if (max(y) > .Upper) {
- warning("replacing response values greater than the value ",
- .Upper, " by ", .Upper)
- y[y > .Upper] = .Upper
- }
- predictors.names = c(namesof("mu", .lmu, earg=.emu, tag=FALSE),
- namesof("sd", .lsd, earg=.esd, tag=FALSE))
- if (!length(etastart)) {
- anyc = extra$censoredL | extra$censoredU
- i11 = if ( .imethod == 1) anyc else FALSE # can be all data
- junk = lm.wfit(x=cbind(x[!i11,]), y=y[!i11], w=w[!i11])
- sd.y.est = sqrt( sum(w[!i11] * junk$resid^2) / junk$df.residual )
- etastart = cbind(mu=y, rep(theta2eta(sd.y.est, .lsd, earg= .esd),
- length=n))
- if (any(anyc)) etastart[anyc,1] = x[anyc,,drop=FALSE] %*% junk$coeff
- }
- }), list( .Lower=Lower, .Upper=Upper, .lmu=lmu, .lsd=lsd,
- .emu=emu, .esd=esd, .imethod=imethod ))),
- inverse=eval(substitute( function(eta, extra=NULL) {
- eta2theta(eta[,1], .lmu, earg= .emu)
- }, list( .lmu=lmu, .emu=emu ))),
- last = eval(substitute(expression({
- misc$link = c("mu"= .lmu, "sd"= .lsd)
- misc$earg = list("mu"= .emu, "sd"= .esd)
- misc$expected = TRUE
- misc$Lower = .Lower
- misc$Upper = .Upper
- }), list( .lmu=lmu, .lsd=lsd,
- .emu=emu, .esd=esd,
- .Lower=Lower, .Upper=Upper ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- cenL = extra$censoredL
- cenU = extra$censoredU
- cen0 = !cenL & !cenU # uncensored obsns
- mum = eta2theta(eta[,1], .lmu, earg= .emu)
- sd = eta2theta(eta[,2], .lsd, earg= .esd)
- ell1 = -log(sd[cen0]) - 0.5 * ((y[cen0] - mum[cen0])/sd[cen0])^2
- ell2 = log1p(-pnorm((mum[cenL] - .Lower)/sd[cenL]))
- ell3 = log1p(-pnorm(( .Upper - mum[cenU])/sd[cenU]))
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w[cen0] * ell1) + sum(w[cenL] * ell2) + sum(w[cenU] * ell3)
- }, list( .lmu=lmu, .lsd=lsd,
- .emu=emu, .esd=esd,
- .Lower=Lower, .Upper=Upper ))),
- vfamily=c("tobit"),
- deriv=eval(substitute(expression({
- cenL = extra$censoredL
- cenU = extra$censoredU
- cen0 = !cenL & !cenU # uncensored obsns
- mum = eta2theta(eta[,1], .lmu, earg= .emu)
- sd = eta2theta(eta[,2], .lsd, earg= .esd)
- dl.dmu = (y-mum) / sd^2
- dl.dsd = (((y-mum)/sd)^2 - 1) / sd
- dmu.deta = dtheta.deta(mum, .lmu, earg= .emu)
- dsd.deta = dtheta.deta(sd, .lsd, earg= .esd)
- if (any(cenL)) {
- mumL = mum - .Lower
- temp21L = mumL[cenL] / sd[cenL]
- PhiL = pnorm(temp21L)
- phiL = dnorm(temp21L)
- fred21 = phiL / (1 - PhiL)
- dl.dmu[cenL] = -fred21 / sd[cenL]
- dl.dsd[cenL] = mumL[cenL] * fred21 / sd[cenL]^2
- rm(fred21)
- }
- if (any(cenU)) {
- mumU = .Upper - mum
- temp21U = mumU[cenU] / sd[cenU]
- PhiU = pnorm(temp21U)
- phiU = dnorm(temp21U)
- fred21 = phiU / (1 - PhiU)
- dl.dmu[cenU] = fred21 / sd[cenU] # Negated
- dl.dsd[cenU] = mumU[cenU] * fred21 / sd[cenU]^2
- rm(fred21)
- }
- w * cbind(dl.dmu * dmu.deta, dl.dsd * dsd.deta)
- }), list( .lmu=lmu, .lsd=lsd,
- .emu=emu, .esd=esd,
- .Lower=Lower, .Upper=Upper ))),
- weight=eval(substitute(expression({
- A1 = 1 - pnorm((mum - .Lower) / sd) # Lower
- A3 = 1 - pnorm(( .Upper - mum) / sd) # Upper
- A2 = 1 - A1 - A3 # Middle; uncensored
- wz = matrix(0, n, 3)
- wz[,iam(1,1,M)] = A2 * 1 / sd^2 # ed2l.dmu2
- wz[,iam(2,2,M)] = A2 * 2 / sd^2 # ed2l.dsd2
- mumL = mum - .Lower
- temp21L = mumL / sd
- PhiL = pnorm(temp21L)
- phiL = dnorm(temp21L)
- temp31L = ((1-PhiL) * sd)^2
- wz.cenL11 = phiL * (phiL - (1-PhiL)*temp21L) / temp31L
- wz.cenL22 = mumL * phiL * ((1-PhiL) * (2 - temp21L^2) +
- mumL * phiL / sd) / (sd * temp31L)
- wz.cenL12 = phiL * ((1-PhiL)*(temp21L^2 - 1) - temp21L*phiL) / temp31L
- wz.cenL11[!is.finite(wz.cenL11)] = 0
- wz.cenL22[!is.finite(wz.cenL22)] = 0
- wz.cenL12[!is.finite(wz.cenL12)] = 0
- wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + A1 * wz.cenL11
- wz[,iam(2,2,M)] = wz[,iam(2,2,M)] + A1 * wz.cenL22
- wz[,iam(1,2,M)] = A1 * wz.cenL12
- mumU = .Upper - mum # often Inf
- temp21U = mumU / sd # often Inf
- PhiU = pnorm(temp21U) # often 1
- phiU = dnorm(temp21U) # often 0
- temp31U = ((1-PhiU) * sd)^2 # often 0
- tmp8 = (1-PhiU)*temp21U
- wzcenU11 = phiU * (phiU - tmp8) / temp31U
- tmp9 = (1-PhiU) * (2 - temp21U^2)
- wzcenU22 = mumU * phiU * (tmp9 + mumU * phiU / sd) / (sd * temp31U)
- wzcenU12 = -phiU * ((1-PhiU)*(temp21U^2 - 1) - temp21U*phiU) / temp31U
- wzcenU11[!is.finite(wzcenU11)] = 0 # Needed when .Upper==Inf
- wzcenU22[!is.finite(wzcenU22)] = 0 # Needed when .Upper==Inf
- wzcenU12[!is.finite(wzcenU12)] = 0 # Needed when .Upper==Inf
- wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + A3 * wzcenU11
- wz[,iam(2,2,M)] = wz[,iam(2,2,M)] + A3 * wzcenU22
- wz[,iam(1,2,M)] = wz[,iam(1,2,M)] + A3 * wzcenU12
- wz[,iam(1,1,M)] = w * wz[,iam(1,1,M)] * dmu.deta^2
- wz[,iam(2,2,M)] = w * wz[,iam(2,2,M)] * dsd.deta^2
- wz[,iam(1,2,M)] = w * wz[,iam(1,2,M)] * dmu.deta * dsd.deta
- wz
- }), list( .lmu=lmu, .Lower=Lower, .Upper=Upper, .lsd=lsd ))))
+interleave.VGAM = function(L, M) c(matrix(1:L, nrow=M, byrow = TRUE))
+
+negbinomial.control <- function(save.weight = TRUE, ...)
+{
+ list(save.weight = save.weight)
}
+ negbinomial = function(lmu = "loge", lk = "loge",
+ emu =list(), ek=list(),
+ imu = NULL, ik = NULL,
+ quantile.probs = 0.75,
+ nsimEIM = 100, cutoff = 0.995, Maxiter = 5000,
+ deviance.arg = FALSE, method.init = 1,
+ parallel = FALSE,
+ shrinkage.init = 0.95, zero = -2)
+{
- normal1 = function(lmean="identity", lsd="loge",
- emean=list(), esd=list(), zero=NULL)
-{
- if (mode(lmean) != "character" && mode(lmean) != "name")
- lmean = as.character(substitute(lmean))
- if (mode(lsd) != "character" && mode(lsd) != "name")
- lsd = as.character(substitute(lsd))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(emean)) emean = list()
- if (!is.list(esd)) esd = list()
- new("vglmff",
- blurb = c("Univariate normal distribution\n\n",
- "Links: ",
- namesof("mean", lmean, earg=emean, tag= TRUE), "; ",
- namesof("sd", lsd, earg=esd, tag= TRUE),
- "\n",
- "Variance: sd^2"),
- constraints=eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
- predictors.names = c(namesof("mean", .lmean, earg=.emean, tag=FALSE),
- namesof("sd", .lsd, earg=.esd, tag=FALSE))
- if (ncol(y <- cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (!length(etastart)) {
- junk = lm.wfit(x=x, y=y, w=w)
- sd.y.est = sqrt( sum(w * junk$resid^2) / junk$df.residual )
- mean.init = if ( .lmean == "loge") pmax(1/1024, y) else y
- etastart = cbind(theta2eta(mean.init, .lmean, earg= .emean),
- theta2eta(sd.y.est, .lsd, earg= .esd))
- }
- }), list( .lmean=lmean, .lsd=lsd, .emean=emean, .esd=esd ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- eta2theta(eta[,1], .lmean, earg= .emean)
- }, list( .lmean=lmean, .emean=emean, .esd=esd ))),
- last = eval(substitute(expression({
- misc$link = c("mu"= .lmean, "sd"= .lsd)
- misc$earg = list("mu"= .emean, "sd"= .esd)
- misc$expected = TRUE
- }), list( .lmean=lmean, .lsd=lsd, .emean=emean, .esd=esd ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- sd = eta2theta(eta[,2], .lsd, earg= .esd)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dnorm(y, m=mu, sd=sd, log=TRUE))
- }
- }, list( .lsd=lsd, .emean=emean, .esd=esd ))),
- vfamily=c("normal1"),
- deriv=eval(substitute(expression({
- mymu = eta2theta(eta[,1], .lmean, earg= .emean)
- sd = eta2theta(eta[,2], .lsd, earg= .esd)
- dl.dmu = (y-mymu) / sd^2
- dl.dsd = -1/sd + (y-mymu)^2 / sd^3
- dmu.deta = dtheta.deta(mymu, .lmean, earg= .emean)
- dsd.deta = dtheta.deta(sd, .lsd, earg= .esd)
- cbind(w * dl.dmu * dmu.deta, w * dl.dsd * dsd.deta)
- }), list( .lmean=lmean, .lsd=lsd, .emean=emean, .esd=esd ))),
- weight=expression({
- wz = matrix(as.numeric(NA), n, 2) # diagonal matrix; y is one-column too
- ed2l.dmu2 = -1 / sd^2
- ed2l.dsd2 = -2 / sd^2
- wz[,iam(1,1,M)] = -w * ed2l.dmu2 * dmu.deta^2
- wz[,iam(2,2,M)] = -w * ed2l.dsd2 * dsd.deta^2
- wz
- }))
-}
+ if (length(imu) && !is.Numeric(imu, posit = TRUE))
+ stop("bad input for argument 'imu'")
+ if (length(ik) && !is.Numeric(ik, posit = TRUE))
+ stop("bad input for argument 'ik'")
+
+ if (!is.Numeric(cutoff, allow = 1) || cutoff<0.8 || cutoff>=1)
+ stop("range error in the argument 'cutoff'")
+ if (!is.Numeric(Maxiter, integ = TRUE, allow = 1) || Maxiter < 100)
+ stop("bad input for argument 'Maxiter'")
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+ method.init > 3) stop("argument 'method.init' must be 1 or 2 or 3")
+ if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
+ shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
+ if (!is.null(nsimEIM)) {
+ if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE))
+ stop("bad input for argument 'nsimEIM'")
+ if (nsimEIM <= 10)
+ warning("argument 'nsimEIM' should be an integer ",
+ "greater than 10, say")
+ }
+
+ if (mode(lmu) != "character" && mode(lmu) != "name")
+ lmu = as.character(substitute(lmu))
+ if (mode(lk) != "character" && mode(lk) != "name")
+ lk = as.character(substitute(lk))
+ if (!is.list(emu)) emu = list()
+ if (!is.list(ek)) ek = list()
+ if (!is.logical( parallel ) || length( parallel ) != 1)
+ stop("argument 'parallel' must be TRUE or FALSE")
+ if ( parallel && length(zero))
+ stop("need to set 'zero = NULL' when parallel = TRUE")
- lognormal = function(lmeanlog="identity", lsdlog="loge",
- emeanlog=list(), esdlog=list(), zero=NULL)
-{
- if (mode(lmeanlog) != "character" && mode(lmeanlog) != "name")
- lmeanlog = as.character(substitute(lmeanlog))
- if (mode(lsdlog) != "character" && mode(lsdlog) != "name")
- lsdlog = as.character(substitute(lsdlog))
- if (length(zero) && (!is.Numeric(zero, integer=TRUE, posit=TRUE) ||
- zero > 2)) stop("bad input for argument argument 'zero'")
- if (!is.list(emeanlog)) emeanlog = list()
- if (!is.list(esdlog)) esdlog = list()
- new("vglmff",
- blurb = c("Two-parameter (univariate) lognormal distribution\n\n",
- "Links: ", namesof("meanlog", lmeanlog, earg=emeanlog, tag= TRUE), ", ",
- namesof("sdlog", lsdlog, earg=esdlog, tag= TRUE)),
- constraints=eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (min(y) <= 0) stop("response must be positive")
- predictors.names = c(namesof("meanlog", .lmeanlog, earg=.emeanlog, tag=FALSE),
- namesof("sdlog", .lsdlog, earg=.esdlog, tag=FALSE))
- if (!length(etastart)) {
- junk = lm.wfit(x=x, y=log(y), w=w)
- sdlog.y.est = sqrt( sum(w * junk$resid^2) / junk$df.residual )
- etastart = cbind(
- meanlog= rep(theta2eta(log(median(y)), .lmeanlog, earg= .emeanlog), length=n),
- sdlog= rep(theta2eta(sdlog.y.est, .lsdlog, earg= .esdlog), length=n))
- }
- }), list( .lmeanlog = lmeanlog, .lsdlog=lsdlog,
- .emeanlog = emeanlog, .esdlog=esdlog ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- mulog = eta2theta(eta[,1], .lmeanlog, earg= .emeanlog)
- sdlog = eta2theta(eta[,2], .lsdlog, earg= .esdlog)
- exp(mulog + 0.5 * sdlog^2)
- }, list( .lmeanlog = lmeanlog, .lsdlog=lsdlog,
- .emeanlog = emeanlog, .esdlog=esdlog ))),
- last = eval(substitute(expression({
- misc$link = c("meanlog"= .lmeanlog, "sdlog"= .lsdlog)
- misc$earg = list("meanlog"= .emeanlog, "sdlog"= .esdlog)
- misc$expected = TRUE
- }), list( .lmeanlog = lmeanlog, .lsdlog=lsdlog,
- .emeanlog = emeanlog, .esdlog=esdlog ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- mulog = eta2theta(eta[,1], .lmeanlog, earg= .emeanlog)
- sdlog = eta2theta(eta[,2], .lsdlog, earg= .esdlog)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dlnorm(y, meanlog=mulog, sdlog=sdlog, log=TRUE))
- }
- }, list( .lmeanlog = lmeanlog, .lsdlog=lsdlog,
- .emeanlog = emeanlog, .esdlog=esdlog ))),
- vfamily=c("lognormal"),
- deriv=eval(substitute(expression({
- mulog = eta2theta(eta[,1], .lmeanlog, earg= .emeanlog)
- sdlog = eta2theta(eta[,2], .lsdlog, earg= .esdlog)
- dl.dmulog = (log(y)-mulog) / sdlog^2
- dl.dsdlog = -1/sdlog + (log(y)-mulog)^2 / sdlog^3
- dl.dlambda = (1 + (log(y)-mulog) / sdlog^2) / y
- dmulog.deta = dtheta.deta(mulog, .lmeanlog, earg= .emeanlog)
- dsdlog.deta = dtheta.deta(sdlog, .lsdlog, earg= .esdlog)
- w * cbind(dl.dmulog * dmulog.deta,
- dl.dsdlog * dsdlog.deta)
- }), list( .lmeanlog = lmeanlog, .lsdlog=lsdlog,
- .emeanlog = emeanlog, .esdlog=esdlog ))),
- weight=expression({
- wz = matrix(as.numeric(NA), n, 2) # Diagonal!
- ed2l.dmulog2 = 1 / sdlog^2
- ed2l.dsdlog2 = 2 * ed2l.dmulog2
- wz[,iam(1,1,M)] = ed2l.dmulog2 * dmulog.deta^2
- wz[,iam(2,2,M)] = ed2l.dsdlog2 * dsdlog.deta^2
- wz = w * wz
- wz
- }))
-}
-
-
-
-
-
-
- lognormal3 = function(lmeanlog="identity", lsdlog="loge",
- emeanlog=list(), esdlog=list(),
- powers.try = (-3):3,
- delta=NULL, zero=NULL)
-{
- if (length(delta) && !is.Numeric(delta, positive=TRUE))
- stop("bad input for argument argument 'delta'")
- if (mode(lmeanlog) != "character" && mode(lmeanlog) != "name")
- lmeanlog = as.character(substitute(lmeanlog))
- if (mode(lsdlog) != "character" && mode(lsdlog) != "name")
- lsdlog = as.character(substitute(lsdlog))
- if (length(zero) && (!is.Numeric(zero, integer=TRUE, posit=TRUE) ||
- zero > 3))
- stop("bad input for argument argument 'zero'")
- if (!is.list(emeanlog)) emeanlog = list()
- if (!is.list(esdlog)) esdlog = list()
-
- new("vglmff",
- blurb = c("Three-parameter (univariate) lognormal distribution\n\n",
- "Links: ",
- namesof("meanlog", lmeanlog, earg=emeanlog, tag= TRUE),
- "; ", namesof("sdlog", lsdlog, earg=esdlog, tag= TRUE),
- "; ", namesof("lambda", "identity", earg=list(), tag= TRUE)),
- constraints=eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("meanlog", .lmeanlog, earg=.emeanlog, tag=FALSE),
- namesof("sdlog", .lsdlog, earg=.esdlog, tag=FALSE), "lambda")
-
- if (!length(etastart)) {
- miny = min(y)
- if (length( .delta)) {
- lambda.init = rep(miny- .delta, length=n)
- } else {
- pvalue.vec = NULL
- powers.try = .powers.try
- for(delta in 10^powers.try) {
- pvalue.vec = c(pvalue.vec,
- shapiro.test(sample(log(y-miny+delta),
- size=min(5000, length(y ))))$p.value)
- }
- index.lambda=(1:length(powers.try))[pvalue.vec==max(pvalue.vec)]
- lambda.init = miny - 10^powers.try[index.lambda]
- }
- junk = lm.wfit(x=x, y=log(y-lambda.init), w=w)
- sdlog.y.est = sqrt( sum(w * junk$resid^2) / junk$df.residual )
- etastart = cbind(mu=log(median(y - lambda.init)),
- sdlog=rep(theta2eta(sdlog.y.est, .lsdlog, earg= .esdlog), length=n),
- lambda = lambda.init)
- }
- }), list( .lmeanlog=lmeanlog, .lsdlog=lsdlog,
- .emeanlog = emeanlog, .esdlog=esdlog,
- .delta = delta, .powers.try=powers.try ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- mymu = eta2theta(eta[,1], .lmeanlog, earg= .emeanlog)
- sdlog = eta2theta(eta[,2], .lsdlog, earg= .esdlog)
- lambda = eta2theta(eta[,3], "identity", earg=list())
- lambda + exp(mymu + 0.5 * sdlog^2)
- }, list( .lmeanlog=lmeanlog, .lsdlog=lsdlog,
- .emeanlog = emeanlog, .esdlog=esdlog ))),
- last = eval(substitute(expression({
- misc$link = c("meanlog"= .lmeanlog,"sdlog"= .lsdlog,"lambda"="identity")
- misc$earg = list("meanlog"= .emeanlog, "sdlog"= .esdlog,
- "lambda"=list())
- misc$expected = TRUE
- }), list( .lmeanlog=lmeanlog, .lsdlog=lsdlog,
- .emeanlog = emeanlog, .esdlog=esdlog ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- mymu = eta2theta(eta[,1], .lmeanlog, earg= .emeanlog)
- sdlog = eta2theta(eta[,2], .lsdlog, earg= .esdlog)
- lambda = eta2theta(eta[,3], "identity", earg=list())
- if (any(y < lambda))
- warning("bad 'y'")
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w*dlnorm(y-lambda, meanlog=mymu, sdlog=sdlog, log=TRUE))
- }
- }, list( .lmeanlog=lmeanlog, .lsdlog=lsdlog,
- .emeanlog = emeanlog, .esdlog=esdlog ))),
- vfamily=c("lognormal3"),
- deriv=eval(substitute(expression({
- mymu = eta2theta(eta[,1], .lmeanlog, earg= .emeanlog)
- sdlog = eta2theta(eta[,2], .lsdlog, earg= .esdlog)
- lambda = eta2theta(eta[,3], "identity", earg=list())
- if (any(y < lambda))
- warning("bad 'y'")
- dl.dmymu = (log(y-lambda)-mymu) / sdlog^2
- dl.dsdlog = -1/sdlog + (log(y-lambda)-mymu)^2 / sdlog^3
- dl.dlambda = (1 + (log(y-lambda)-mymu) / sdlog^2) / (y-lambda)
- dmymu.deta = dtheta.deta(mymu, .lmeanlog, earg= .emeanlog)
- dsdlog.deta = dtheta.deta(sdlog, .lsdlog, earg= .esdlog)
- dlambda.deta = dtheta.deta(lambda, "identity", earg=list())
- w * cbind(dl.dmymu * dmymu.deta,
- dl.dsdlog * dsdlog.deta,
- dl.dlambda * dlambda.deta)
- }), list( .lmeanlog=lmeanlog, .lsdlog=lsdlog,
- .emeanlog = emeanlog, .esdlog=esdlog ))),
- weight=expression({
- wz = matrix(0, n, dimm(M))
- ed2l.dmymu2 = 1 / sdlog^2
- ed2l.dsdlog = 2 / sdlog^2
- temp9 = exp(-mymu+sdlog^2 / 2)
- ed2l.dlambda2 = exp(2*(-mymu+sdlog^2)) * (1+sdlog^2) / sdlog^2
- wz[,iam(1,1,M)] = ed2l.dmymu2 * dmymu.deta^2
- wz[,iam(2,2,M)] = ed2l.dsdlog * dsdlog.deta^2
- wz[,iam(3,3,M)] = ed2l.dlambda2 * dlambda.deta^2
- wz[,iam(1,3,M)] = temp9 * dmymu.deta * dlambda.deta / sdlog^2
- wz[,iam(2,3,M)] = -2 * temp9 / sdlog * dsdlog.deta * dlambda.deta
- wz = w * wz
- wz
- }))
-}
-
-
-
-interleave.VGAM = function(L, M) c(matrix(1:L, nrow=M, byrow=TRUE))
-
-negbinomial.control <- function(save.weight=TRUE, ...)
-{
- list(save.weight=save.weight)
-}
-
- negbinomial = function(lmu = "loge", lk = "loge",
- emu =list(), ek=list(),
- imu = NULL, ik = NULL,
- quantile.probs = 0.75,
- nsimEIM=100, cutoff = 0.995, Maxiter=5000,
- deviance.arg=FALSE, method.init=1,
- shrinkage.init=0.95, zero = -2)
-{
-
-
-
-
- if (length(imu) && !is.Numeric(imu, posit=TRUE))
- stop("bad input for argument 'imu'")
- if (length(ik) && !is.Numeric(ik, posit=TRUE))
- stop("bad input for argument 'ik'")
- if (!is.Numeric(cutoff, allow=1) || cutoff<0.8 || cutoff>=1)
- stop("range error in the argument 'cutoff'")
- if (!is.Numeric(Maxiter, integ=TRUE, allow=1) || Maxiter < 100)
- stop("bad input for argument 'Maxiter'")
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 3) stop("argument 'method.init' must be 1 or 2 or 3")
- if (!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
- shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
- if (!is.null(nsimEIM)) {
- if (!is.Numeric(nsimEIM, allow=1, integ=TRUE))
- stop("bad input for argument 'nsimEIM'")
- if (nsimEIM <= 10)
- warning("'nsimEIM' should be an integer greater than 10, say")
- }
-
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(lk) != "character" && mode(lk) != "name")
- lk = as.character(substitute(lk))
- if (!is.list(emu)) emu = list()
- if (!is.list(ek)) ek = list()
-
- ans =
+ ans =
new("vglmff",
blurb = c("Negative-binomial distribution\n\n",
"Links: ",
- namesof("mu", lmu, earg=emu), ", ",
- namesof("k", lk, earg=ek), "\n",
+ namesof("mu", lmu, earg = emu), ", ",
+ namesof("k", lk, earg = ek), "\n",
"Mean: mu\n",
"Variance: mu * (1 + mu/k)"),
- constraints=eval(substitute(expression({
- temp752 = .zero
- if (length(temp752) && all(temp752 == -2))
- temp752 = 2*(1:ncol(y))
- constraints = cm.zero.vgam(constraints, x, temp752, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
- assign("CQO.FastAlgorithm",
- ( .lmu == "loge") && ( .lk == "loge"), envir = VGAMenv)
- if (any(function.name == c("cqo","cao")) &&
- is.Numeric( .zero, allow=1) && .zero != -2)
- stop("argument zero=-2 is required")
+ constraints = eval(substitute(expression({
+
+ dotzero <- .zero
+ Musual <- 2
+ eval(negzero.expression)
+
+ if ( .parallel && ncol(cbind(y)) > 1)
+ stop("univariate responses needed if parallel = TRUE")
+ constraints = cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
+ }), list( .parallel = parallel, .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ zero = .zero)
+ }, list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ assign("CQO.FastAlgorithm",
+ ( .lmu == "loge") && ( .lk == "loge"), envir = VGAM:::VGAMenv)
+ if (any(function.name == c("cqo","cao")) &&
+ is.Numeric( .zero, allow = 1) && .zero != -2)
+ stop("argument zero = -2 is required")
+
+ if (any(y < 0))
+ stop("negative values not allowed for the 'negbinomial' family")
+ if (any(round(y) != y))
+ stop("integer-values only allowed for the 'negbinomial' family")
+ y = as.matrix(y)
+ M = 2 * ncol(y)
+ NOS = ncoly = ncol(y) # Number of species
+ predictors.names =
+ c(namesof(if (NOS == 1) "mu" else
+ paste("mu", 1:NOS, sep = ""), .lmu, earg = .emu, tag = FALSE),
+ namesof(if (NOS == 1) "k" else
+ paste("k", 1:NOS, sep = ""), .lk, earg = .ek, tag = FALSE))
+ predictors.names = predictors.names[interleave.VGAM(M, M = 2)]
+
+ if (is.null( .nsimEIM )) {
+ save.weight <- control$save.weight <- FALSE
+ }
- if (any(y < 0))
- stop("negative values not allowed for the negbinomial family")
- if (any(round(y) != y))
- stop("integer-values only allowed for the negbinomial family")
- y = as.matrix(y)
- M = 2 * ncol(y)
- NOS = ncoly = ncol(y) # Number of species
- predictors.names = c(namesof(if (NOS == 1) "mu" else
- paste("mu", 1:NOS, sep=""), .lmu, earg=.emu, tag=FALSE),
- namesof(if (NOS == 1) "k" else paste("k",1:NOS,sep=""), .lk, earg=.ek,
- tag=FALSE))
- predictors.names = predictors.names[interleave.VGAM(M, M=2)]
+ if (is.numeric( .mu.init ))
+ MU.INIT <- matrix( .mu.init, nrow(y), ncol(y), byrow = TRUE)
- if (is.null( .nsimEIM)) {
- save.weight <- control$save.weight <- FALSE
- }
+ if (!length(etastart)) {
+ mu.init = y
+ for(iii in 1:ncol(y)) {
+ use.this = if ( .method.init == 1) {
+ weighted.mean(y[,iii], w) + 1/16
+ } else if ( .method.init == 3) {
+ c(quantile(y[,iii], probs = .quantile.probs) + 1/16)
+ } else {
+ median(y[,iii]) + 1/16
+ }
- if (!length(etastart)) {
- mu.init = y
- for(iii in 1:ncol(y)) {
- use.this = if ( .method.init == 1) {
- weighted.mean(y[,iii], w) + 1/16
- } else if ( .method.init == 3) {
- c(quantile(y[,iii], probs = .quantile.probs) + 1/16)
- } else {
- median(y[,iii]) + 1/16
- }
- if (is.numeric( .mu.init ))
- use.this = .mu.init
- medabsres = median(abs(y[,iii] - use.this)) + 1/32
- allowfun = function(z, maxtol=1) sign(z) * pmin(abs(z), maxtol)
- mu.init[,iii] = use.this +
- (1- .sinit)*allowfun(y[,iii] - use.this, maxtol=medabsres)
+ if (is.numeric( .mu.init )) {
+ mu.init[, iii] = MU.INIT[, iii]
+ } else {
+ medabsres = median(abs(y[,iii] - use.this)) + 1/32
+ allowfun = function(z, maxtol=1) sign(z)*pmin(abs(z), maxtol)
+ mu.init[,iii] = use.this + (1 - .sinit) * allowfun(y[,iii] -
+ use.this, maxtol=medabsres)
- mu.init[,iii] = abs(mu.init[,iii]) + 1 / 1024
+ mu.init[,iii] = abs(mu.init[,iii]) + 1 / 1024
+ }
- }
+ }
- if ( is.Numeric( .k.init )) {
- kay.init = matrix( .k.init, nr=n, nc=NOS, byrow=TRUE)
- } else {
- negbinomial.Loglikfun = function(kmat, y, x, w, extraargs) {
- mu = extraargs
- sum(w * dnbinom(x=y, mu=mu, size=kmat, log=TRUE))
- }
- k.grid = 2^((-7):7)
- k.grid = 2^(seq(-8, 8, length = 40))
- kay.init = matrix(0, nr=n, nc=NOS)
- for(spp. in 1:NOS) {
- kay.init[,spp.] = getMaxMin(k.grid,
- objfun=negbinomial.Loglikfun,
- y=y[,spp.], x=x, w=w,
- extraargs= mu.init[,spp.])
- }
+ if ( is.Numeric( .k.init )) {
+ kay.init = matrix( .k.init, nr=n, nc=NOS, byrow = TRUE)
+ } else {
+ negbinomial.Loglikfun = function(kmat, y, x, w, extraargs) {
+ mu = extraargs
+ sum(w * dnbinom(x=y, mu=mu, size=kmat, log = TRUE))
+ }
+ k.grid = 2^((-7):7)
+ k.grid = 2^(seq(-8, 8, length = 40))
+ kay.init = matrix(0, nr=n, nc=NOS)
+ for(spp. in 1:NOS) {
+ kay.init[,spp.] = getMaxMin(k.grid,
+ objfun=negbinomial.Loglikfun,
+ y=y[,spp.], x=x, w=w,
+ extraargs= mu.init[,spp.])
}
- etastart = cbind(theta2eta(mu.init, .lmu, earg= .emu),
- theta2eta(kay.init, .lk, earg= .ek))
- etastart = etastart[,interleave.VGAM(M, M=2),drop=FALSE]
- }
- }), list( .lmu=lmu, .lk=lk,
- .emu=emu, .ek=ek, .mu.init = imu,
- .k.init = ik, .quantile.probs = quantile.probs,
- .sinit=shrinkage.init, .nsimEIM=nsimEIM, .zero=zero,
- .method.init=method.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+ }
+ etastart = cbind(theta2eta(mu.init, .lmu, earg = .emu),
+ theta2eta(kay.init, .lk, earg = .ek))
+ etastart = etastart[,interleave.VGAM(M, M = 2), drop = FALSE]
+ }
+ }), list( .lmu = lmu, .lk = lk,
+ .emu = emu, .ek = ek, .mu.init = imu,
+ .k.init = ik, .quantile.probs = quantile.probs,
+ .sinit = shrinkage.init, .nsimEIM = nsimEIM, .zero = zero,
+ .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
NOS = ncol(eta) / 2
- eta2theta(eta[,2*(1:NOS)-1,drop=FALSE], .lmu, earg= .emu)
- }, list( .lmu=lmu, .emu=emu, .ek=ek ))),
+ eta2theta(eta[,2*(1:NOS)-1, drop = FALSE], .lmu, earg = .emu)
+ }, list( .lmu = lmu, .emu = emu, .ek = ek ))),
last = eval(substitute(expression({
- if (exists("CQO.FastAlgorithm", envir = VGAMenv))
- rm("CQO.FastAlgorithm", envir = VGAMenv)
- temp0303 = c(rep( .lmu, length=NOS), rep( .lk, length=NOS))
- names(temp0303) = c(if (NOS==1) "mu" else paste("mu", 1:NOS, sep=""),
- if (NOS==1) "k" else paste("k", 1:NOS, sep=""))
- temp0303 = temp0303[interleave.VGAM(M, M=2)]
+ if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
+ rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
+ temp0303 = c(rep( .lmu, length = NOS), rep( .lk, length = NOS))
+ names(temp0303) = c(if (NOS == 1) "mu" else paste("mu", 1:NOS, sep = ""),
+ if (NOS == 1) "k" else paste("k", 1:NOS, sep = ""))
+ temp0303 = temp0303[interleave.VGAM(M, M = 2)]
misc$link = temp0303 # Already named
misc$earg = vector("list", M)
names(misc$earg) = names(misc$link)
@@ -3328,54 +2818,59 @@ negbinomial.control <- function(save.weight=TRUE, ...)
misc$method.init = .method.init
misc$nsimEIM = .nsimEIM
misc$expected = TRUE
- }), list( .lmu=lmu, .lk=lk, .cutoff=cutoff,
- .emu=emu, .ek=ek, .nsimEIM=nsimEIM,
- .method.init=method.init ))),
- link=eval(substitute(function(mu, extra=NULL) {
- temp = theta2eta(mu, .lmu, earg= .emu)
- temp = cbind(temp, NA * temp)
- temp[, interleave.VGAM(ncol(temp), M=2), drop=FALSE]
- }, list( .lmu=lmu, .emu=emu, .ek=ek ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ misc$shrinkage.init = .sinit
+ }), list( .lmu = lmu, .lk = lk, .cutoff = cutoff,
+ .emu = emu, .ek = ek, .nsimEIM = nsimEIM,
+ .sinit = shrinkage.init,
+ .method.init = method.init ))),
+ link = eval(substitute(function(mu, extra = NULL) {
+ temp = theta2eta(mu, .lmu, earg = .emu)
+ kayy = theta2eta(if (is.numeric( .ik)) .ik else 1.0, .lk, earg = .ek)
+ kayy = 0 * temp + kayy # Right dimension now.
+ temp = cbind(temp, kayy)
+ temp[, interleave.VGAM(ncol(temp), M = 2), drop = FALSE]
+ }, list( .lmu = lmu, .emu = emu,
+ .lk = lk, .ek = ek, .ik = ik ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
NOS = ncol(eta) / 2
- temp300 = eta[,2*(1:NOS),drop=FALSE]
+ temp300 = eta[,2*(1:NOS), drop = FALSE]
if ( .lk == "loge") {
bigval = 28
temp300 = ifelse(temp300 > bigval, bigval, temp300)
temp300 = ifelse(temp300 < -bigval, -bigval, temp300)
}
- kmat = eta2theta(temp300, .lk, earg= .ek)
+ kmat = eta2theta(temp300, .lk, earg = .ek)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
- sum(w * dnbinom(x=y, mu=mu, size=kmat, log=TRUE))
- }, list( .lk=lk, .emu=emu, .ek=ek ))),
- vfamily=c("negbinomial"),
- deriv=eval(substitute(expression({
+ sum(w * dnbinom(x=y, mu=mu, size=kmat, log = TRUE))
+ }, list( .lk = lk, .emu = emu, .ek = ek ))),
+ vfamily = c("negbinomial"),
+ deriv = eval(substitute(expression({
NOS = ncol(eta) / 2
M = ncol(eta)
- temp3 = eta[,2*(1:NOS),drop=FALSE]
+ temp3 = eta[,2*(1:NOS), drop = FALSE]
bigval = 28
temp3 = ifelse(temp3 > bigval, bigval, temp3)
temp3 = ifelse(temp3 < -bigval, -bigval, temp3)
- kmat = eta2theta(temp3, .lk, earg= .ek)
+ kmat = eta2theta(temp3, .lk, earg = .ek)
dl.dmu = y/mu - (y+kmat)/(kmat+mu)
dl.dk = digamma(y+kmat) - digamma(kmat) - (y+kmat)/(mu+kmat) + 1 +
log(kmat/(kmat+mu))
- dmu.deta = dtheta.deta(mu, .lmu, earg= .emu)
- dk.deta = dtheta.deta(kmat, .lk, earg= .ek)
+ dmu.deta = dtheta.deta(mu, .lmu, earg = .emu)
+ dk.deta = dtheta.deta(kmat, .lk, earg = .ek)
dthetas.detas = cbind(dmu.deta, dk.deta)
myderiv = w * cbind(dl.dmu, dl.dk) * dthetas.detas
- myderiv[,interleave.VGAM(M, M=2)]
- }), list( .lmu=lmu, .lk=lk, .emu=emu, .ek=ek ))),
- weight=eval(substitute(expression({
+ myderiv[,interleave.VGAM(M, M = 2)]
+ }), list( .lmu = lmu, .lk = lk, .emu = emu, .ek = ek ))),
+ weight = eval(substitute(expression({
wz = matrix(as.numeric(NA), n, M) # wz is 'diagonal'
if (is.null( .nsimEIM)) {
- fred2 = dotFortran(name="enbin9", ans=double(n*NOS),
+ fred2 = dotFortran(name = "enbin9", ans=double(n*NOS),
as.double(kmat), as.double(mu), as.double( .cutoff ),
as.integer(n), ok=as.integer(1), as.integer(NOS),
- sumpdf=double(1), as.double(.Machine$double.eps),
+ sumpdf=double(1), as.double( .Machine$double.eps),
as.integer( .Maxiter ))
if (fred2$ok != 1)
stop("error in Fortran subroutine exnbin9")
@@ -3384,7 +2879,7 @@ negbinomial.control <- function(save.weight=TRUE, ...)
wz[,2*(1:NOS)] = dk.deta^2 * ed2l.dk2
} else {
run.varcov = matrix(0, n, NOS)
- ind1 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
+ ind1 = iam(NA, NA, M=M, both = TRUE, diag = TRUE)
for(ii in 1:( .nsimEIM )) {
ysim = rnbinom(n=n*NOS, mu=c(mu), size=c(kmat))
if (NOS > 1) dim(ysim) = c(n, NOS)
@@ -3395,47 +2890,47 @@ negbinomial.control <- function(save.weight=TRUE, ...)
run.varcov = cbind(run.varcov / .nsimEIM)
wz[,2*(1:NOS)] = if (intercept.only)
matrix(colMeans(run.varcov),
- n, ncol(run.varcov), byrow=TRUE) else run.varcov
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
wz[,2*(1:NOS)] = wz[,2*(1:NOS)] * dk.deta^2
}
ed2l.dmu2 = 1/mu - 1/(mu+kmat)
wz[,2*(1:NOS)-1] = dmu.deta^2 * ed2l.dmu2
w * wz
- }), list( .cutoff=cutoff, .Maxiter=Maxiter, .nsimEIM=nsimEIM ))))
+ }), list( .cutoff = cutoff, .Maxiter = Maxiter, .nsimEIM = nsimEIM ))))
- if (deviance.arg) ans at deviance=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta,extra=NULL) {
+ if (deviance.arg) ans at deviance = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
NOS = ncol(eta) / 2
- temp300 = eta[,2*(1:NOS),drop=FALSE]
+ temp300 = eta[,2*(1:NOS), drop = FALSE]
if ( .lk == "loge") {
bigval = 28
temp300[temp300 > bigval] = bigval
temp300[temp300 < -bigval] = -bigval
} else stop("can only handle the 'loge' link")
- k = eta2theta(temp300, .lk, earg= .ek)
+ k = eta2theta(temp300, .lk, earg = .ek)
devi = 2 * (y*log(ifelse(y < 1, 1, y)/mu) + (y+k)*log((mu+k)/(k+y)))
if (residuals)
sign(y - mu) * sqrt(abs(devi) * w) else
sum(w * devi)
- }, list( .lk=lk, .emu=emu, .ek=ek )))
+ }, list( .lk = lk, .emu = emu, .ek = ek )))
ans
}
- negbin.ab = function(link.alpha ="loge", link.k ="loge",
+ negbin.ab = function(link.alpha = "loge", link.k = "loge",
ealpha=list(), ek=list(),
k.init=1,
- zero=2,
+ zero = 2,
cutoff=0.995)
{
- if (!is.Numeric(cutoff, allow=1) || cutoff<0.8 || cutoff>=1)
+ if (!is.Numeric(cutoff, allow = 1) || cutoff<0.8 || cutoff>=1)
stop("range error in the argument cutoff")
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (mode(link.alpha) != "character" && mode(link.alpha) != "name")
@@ -3448,73 +2943,73 @@ negbinomial.control <- function(save.weight=TRUE, ...)
new("vglmff",
blurb = c("Negative-binomial distribution\n\n",
"Links: ",
- namesof("alpha", link.alpha, earg=ealpha), ", ",
- namesof("k", link.k, earg=ek),
+ namesof("alpha", link.alpha, earg = ealpha), ", ",
+ namesof("k", link.k, earg = ek),
"\n",
"Mean: alpha * k",
"\n",
"Variance: alpha * k * (1 + alpha)"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("alpha", .link.alpha, earg=.ealpha, tag=FALSE),
- namesof("k", .link.k, earg=.ek, tag=FALSE))
+ c(namesof("alpha", .link.alpha, earg = .ealpha, tag = FALSE),
+ namesof("k", .link.k, earg = .ek, tag = FALSE))
if (!length(etastart)) {
etastart = cbind(
- theta2eta((y + 1/8) / .k.init, .link.alpha, earg= .ealpha),
- theta2eta( .k.init, .link.k, earg= .ek))
+ theta2eta((y + 1/8) / .k.init, .link.alpha, earg = .ealpha),
+ theta2eta( .k.init, .link.k, earg = .ek))
}
}), list( .link.alpha=link.alpha, .link.k=link.k, .k.init=k.init,
- .ealpha=ealpha, .ek=ek ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- alpha = eta2theta(eta[,1], .link.alpha, earg= .ealpha)
- k = eta2theta(eta[,2], .link.k, earg= .ek)
+ .ealpha=ealpha, .ek = ek ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ alpha = eta2theta(eta[,1], .link.alpha, earg = .ealpha)
+ k = eta2theta(eta[,2], .link.k, earg = .ek)
alpha * k
}, list( .link.alpha=link.alpha, .link.k=link.k,
- .ealpha=ealpha, .ek=ek ))),
+ .ealpha=ealpha, .ek = ek ))),
last = eval(substitute(expression({
- misc$link = c(alpha= .link.alpha, k= .link.k)
- misc$earg = list(alpha= .ealpha, k= .ek )
- }), list( .link.alpha=link.alpha, .link.k=link.k,
- .ealpha=ealpha, .ek=ek ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- alpha = eta2theta(eta[,1], .link.alpha, earg= .ealpha)
- kvec = eta2theta(eta[,2], .link.k, earg= .ek)
+ misc$link = c(alpha = .link.alpha, k = .link.k)
+ misc$earg = list(alpha = .ealpha, k = .ek )
+ }), list( .link.alpha = link.alpha, .link.k = link.k,
+ .ealpha = ealpha, .ek = ek ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ alpha = eta2theta(eta[,1], .link.alpha, earg = .ealpha)
+ kvec = eta2theta(eta[,2], .link.k, earg = .ek)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dnbinom(x=y, mu=kvec*alpha, size=kvec, log=TRUE))
+ sum(w * dnbinom(x=y, mu=kvec*alpha, size=kvec, log = TRUE))
}
}, list( .link.alpha=link.alpha, .link.k=link.k,
- .ealpha=ealpha, .ek=ek ))),
- vfamily=c("negbin.ab"),
- deriv=eval(substitute(expression({
- alpha = eta2theta(eta[,1], .link.alpha, earg= .ealpha)
- k = eta2theta(eta[,2], .link.k, earg= .ek)
+ .ealpha=ealpha, .ek = ek ))),
+ vfamily = c("negbin.ab"),
+ deriv = eval(substitute(expression({
+ alpha = eta2theta(eta[,1], .link.alpha, earg = .ealpha)
+ k = eta2theta(eta[,2], .link.k, earg = .ek)
dl.dalpha = (y/alpha - k)/(1+alpha)
dl.dk = digamma(y+k) - digamma(k) - log1p(alpha)
- dalpha.deta = dtheta.deta(alpha, .link.alpha, earg= .ealpha)
- dk.deta = dtheta.deta(k, .link.k, earg= .ek)
+ dalpha.deta = dtheta.deta(alpha, .link.alpha, earg = .ealpha)
+ dk.deta = dtheta.deta(k, .link.k, earg = .ek)
cbind(w * dl.dalpha * dalpha.deta, w * dl.dk * dk.deta)
}), list( .link.alpha=link.alpha, .link.k=link.k,
- .ealpha=ealpha, .ek=ek ))),
- weight=eval(substitute(expression({
+ .ealpha=ealpha, .ek = ek ))),
+ weight = eval(substitute(expression({
wz = matrix(as.numeric(NA), n, dimm(M)) # 3==dimm(M)
ed2l.dalpha2 = k/(alpha*(1+alpha))
ed2l.dalphak = 1/(1+alpha) # Not -1/(1+alpha)
- fred = dotFortran(name="enbin8",
+ fred = dotFortran(name = "enbin8",
ans=double(n),
as.double(k),
as.double(1/(1+alpha)),
as.double( .cutoff ),
as.integer(n), ok=as.integer(1), as.integer(1),
- sumpdf=double(1), macheps=as.double(.Machine$double.eps))
+ sumpdf=double(1), macheps=as.double( .Machine$double.eps))
if (fred$ok != 1)
stop("error in Fortran subroutine enbin8")
ed2l.dk2 = -fred$ans
@@ -3524,8 +3019,8 @@ negbinomial.control <- function(save.weight=TRUE, ...)
wz[,iam(1,2,M)] = dk.deta * dalpha.deta * ed2l.dalphak
w * wz
- }), list( .cutoff=cutoff,
- .ealpha=ealpha, .ek=ek ))))
+ }), list( .cutoff = cutoff,
+ .ealpha=ealpha, .ek = ek ))))
}
@@ -3535,7 +3030,7 @@ nbmud = function(lmu = c("loge","identity","reciprocal"),
k.init = 1,
zero = -2,
cutoff = 0.995,
- deviance.arg=FALSE)
+ deviance.arg = FALSE)
{
ans = negbinomial(link.mu = lmu[1],
link.k = "reciprocal",
@@ -3551,21 +3046,21 @@ nbmud = function(lmu = c("loge","identity","reciprocal"),
if (FALSE)
- neg.binomial = function(link.p="logit", link.k="loge",
+ neg.binomial = function(link.p = "logit", link.k = "loge",
ep=list(), ek=list(),
- zero=2,
- ik=NULL,
+ zero = 2,
+ ik = NULL,
cutoff=0.995)
{
- if (!is.Numeric(cutoff, allow=1) || cutoff<0.8 || cutoff>=1)
+ if (!is.Numeric(cutoff, allow = 1) || cutoff<0.8 || cutoff>=1)
stop("range error in the argument cutoff")
if (mode(link.p) != "character" && mode(link.p) != "name")
link.p = as.character(substitute(link.p))
- if (link.p=="canonical")
+ if (link.p == "canonical")
link.p = "logc"
if (mode(link.k) != "character" && mode(link.k) != "name")
link.k = as.character(substitute(link.k))
@@ -3575,14 +3070,14 @@ nbmud = function(lmu = c("loge","identity","reciprocal"),
new("vglmff",
blurb = c("Negative-binomial distribution\n\n",
"Links: ",
- namesof("p", link.p, earg=ep), ", ",
- namesof("k", link.k, earg=ek), "; mu=k*(1-p)/p",
+ namesof("p", link.p, earg = ep), ", ",
+ namesof("k", link.k, earg = ek), "; mu=k*(1-p)/p",
"\n",
"Variance: mu(1 + mu/k)"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
y = as.numeric(y)
@@ -3592,8 +3087,8 @@ nbmud = function(lmu = c("loge","identity","reciprocal"),
stop("response must be integer-valued")
predictors.names =
- c(namesof("p", .link.p, earg=.ep, tag=FALSE),
- namesof("k", .link.k, earg=.ek, tag=FALSE))
+ c(namesof("p", .link.p, earg = .ep, tag = FALSE),
+ namesof("k", .link.k, earg = .ek, tag = FALSE))
@@ -3602,8 +3097,8 @@ nbmud = function(lmu = c("loge","identity","reciprocal"),
mu.adj = fitted(junk)
if (FALSE) {
- mu = rep(weighted.mean(y, w=w), len=length(y))
- mu = rep(median(rep(y+0.167, times=w)), len=length(y))
+ mu = rep(weighted.mean(y, w=w), len = length(y))
+ mu = rep(median(rep(y+0.167, times=w)), len = length(y))
k = mean(rep(mu^2 / (var.y.est - mu), w), trim=0.05)
k = rep(k, length(mu))
@@ -3616,55 +3111,55 @@ nbmud = function(lmu = c("loge","identity","reciprocal"),
if (length( .ik )) {
mu = median(rep(y, times=w))
- k = rep( .ik , len=length(y))
+ k = rep( .ik , len = length(y))
}
if (!length(etastart)) {
prob = k / (k + mu)
- etastart = cbind(theta2eta(prob, .link.p, earg= .ep),
- theta2eta(k, .link.k, earg= .ek))
+ etastart = cbind(theta2eta(prob, .link.p, earg = .ep),
+ theta2eta(k, .link.k, earg = .ek))
}
}), list( .link.p=link.p, .link.k=link.k, .ik=ik,
- .ep=ep, .ek=ek ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- prob = eta2theta(eta[,1], .link.p, earg= .ep)
- k = eta2theta(eta[,2], .link.k, earg= .ek)
+ .ep = ep, .ek = ek ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ prob = eta2theta(eta[,1], .link.p, earg = .ep)
+ k = eta2theta(eta[,2], .link.k, earg = .ek)
k * (1 - prob) / prob
}, list( .link.p=link.p, .link.k=link.k,
- .ep=ep, .ek=ek ))),
+ .ep = ep, .ek = ek ))),
last = eval(substitute(expression({
- misc$link = c(p= .link.p, k= .link.k )
- misc$earg = list(p= .ep, k= .ek )
+ misc$link = c(p = .link.p, k = .link.k )
+ misc$earg = list(p = .ep, k = .ek )
}), list( .link.p=link.p, .link.k=link.k,
- .ep=ep, .ek=ek ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- prob = eta2theta(eta[,1], .link.p, earg= .ep)
- k = eta2theta(eta[,2], .link.k, earg= .ek)
+ .ep = ep, .ek = ek ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ prob = eta2theta(eta[,1], .link.p, earg = .ep)
+ k = eta2theta(eta[,2], .link.k, earg = .ek)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w * (y * log1p(-prob) + k * log(prob) + lgamma(y+k) -
lgamma(k) - lgamma(y+1 )))
}, list( .link.p=link.p, .link.k=link.k,
- .ep=ep, .ek=ek ))),
- vfamily=c("neg.binomial"),
- deriv=eval(substitute(expression({
- prob = eta2theta(eta[,1], .link.p, earg= .ep)
- k = eta2theta(eta[,2], .link.k, earg= .ek)
+ .ep = ep, .ek = ek ))),
+ vfamily = c("neg.binomial"),
+ deriv = eval(substitute(expression({
+ prob = eta2theta(eta[,1], .link.p, earg = .ep)
+ k = eta2theta(eta[,2], .link.k, earg = .ek)
dl.dp = k/prob - y/(1-prob)
dl.dk = log(prob) + digamma(y+k) - digamma(k)
- dp.deta = dtheta.deta(prob, .link.p, earg= .ep)
- dk.deta = dtheta.deta(k, .link.k, earg= .ek)
+ dp.deta = dtheta.deta(prob, .link.p, earg = .ep)
+ dk.deta = dtheta.deta(k, .link.k, earg = .ek)
w * cbind(dl.dp * dp.deta, dl.dk * dk.deta)
}), list( .link.p=link.p, .link.k=link.k,
- .ep=ep, .ek=ek ))),
- weight=eval(substitute(expression({
+ .ep = ep, .ek = ek ))),
+ weight = eval(substitute(expression({
wz = matrix(as.numeric(NA), n, dimm(M)) # 3==dimm(M)
d2l.dpk = 1/prob
ed2l.dp2 = -k/(prob^2 * (1-prob)) # "e" for expected value
- fred = dotFortran(name="exnbin",
+ fred = dotFortran(name = "exnbin",
ans=double(n),
as.double(k),
as.double(prob),
@@ -3681,17 +3176,17 @@ nbmud = function(lmu = c("loge","identity","reciprocal"),
wz[,iam(2,2,M)] = dk.deta^2 * ed2l.dk2
wz = -w * wz
wz
- }), list( .cutoff=cutoff,
- .ep=ep, .ek=ek ))))
+ }), list( .cutoff = cutoff,
+ .ep = ep, .ek = ek ))))
}
if (FALSE)
- neg.binomial.k = function(k, link="logit", earg=list(), expected=TRUE, ...)
+ neg.binomial.k = function(k, link = "logit", earg = list(), expected = TRUE, ...)
{
- if (!is.Numeric(k, allow=1, posit=TRUE))
+ if (!is.Numeric(k, allow = 1, posit = TRUE))
stop("bad input for argument argument 'k'")
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -3699,16 +3194,16 @@ nbmud = function(lmu = c("loge","identity","reciprocal"),
new("vglmff",
blurb = c("Negative-binomial distribution with k known and p unknown\n",
- "(k=", k, ") ",
- if (k==1) "Geometric\n\n" else "\n\n",
+ "(k = ", k, ") ",
+ if (k == 1) "Geometric\n\n" else "\n\n",
"Links: ",
- namesof("p", link, earg=earg), "; p=",k,"/(",k,"+mu)",
+ namesof("p", link, earg = earg), "; p = ",k,"/(",k,"+mu)",
"\n",
"Variance: ",
- if (k==1) "Geometric: mu(1+mu)" else
- paste("mu(1 + mu/",k,")", sep="")),
- deviance=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta,extra=NULL) {
+ if (k == 1) "Geometric: mu(1+mu)" else
+ paste("mu(1 + mu/",k,")", sep = "")),
+ deviance = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
prob = .k / ( .k + mu)
devy = .k * log( .k / ( .k + y))
nz = y != 0
@@ -3718,46 +3213,47 @@ nbmud = function(lmu = c("loge","identity","reciprocal"),
if (residuals)
sign(y - mu) * sqrt(abs(devi) * w) else
sum(w * devi)
- }, list( .link=link, .earg=earg, .k=k ))),
- initialize=eval(substitute(expression({
- predictors.names = namesof("p", .link, earg=.ep, tag=FALSE)
+ }, list( .link = link, .earg = earg, .k = k ))),
+ initialize = eval(substitute(expression({
+ predictors.names = namesof("p", .link, earg = .ep, tag = FALSE)
mu = y + 0.167 * (y == 0)
if (!length(etastart)) {
prob = .k / ( .k + mu)
- etastart = theta2eta(prob, .link, earg= .earg)
+ etastart = theta2eta(prob, .link, earg = .earg)
}
- }), list( .link=link, .earg=earg, .k=k ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- prob = eta2theta(eta, .link, earg= .earg)
+ }), list( .link = link, .earg = earg, .k = k ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ prob = eta2theta(eta, .link, earg = .earg)
.k * (1 - prob) / prob
- }, list( .link=link, .earg=earg, .k=k ))),
+ }, list( .link = link, .earg = earg, .k = k ))),
last = eval(substitute(expression({
- misc$link = c(p = .link)
+ misc$link = c(p = .link)
+ misc$earg = list(p = .earg)
misc$k = .k
- }), list( .link=link, .earg=earg, .k=k ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- prob = eta2theta(eta, .link, earg= .earg)
+ }), list( .link = link, .earg = earg, .k = k ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ prob = eta2theta(eta, .link, earg = .earg)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w * (y * log1p(-prob) + .k * log(prob) + lgamma(y+ .k) -
lgamma( .k ) - lgamma(y+1 )))
- }, list( .link=link, .earg=earg, .k=k ))),
- vfamily=c("neg.binomial.k"),
- deriv=eval(substitute(expression({
+ }, list( .link = link, .earg = earg, .k = k ))),
+ vfamily = c("neg.binomial.k"),
+ deriv = eval(substitute(expression({
prob = .k / ( .k + mu)
- dp.deta = dtheta.deta(prob, .link, earg= .earg)
+ dp.deta = dtheta.deta(prob, .link, earg = .earg)
w * ( .k/prob - y/(1-prob)) * dp.deta
- }), list( .link=link, .earg=earg, .k=k ))),
- weight=eval(substitute(expression({
+ }), list( .link = link, .earg = earg, .k = k ))),
+ weight = eval(substitute(expression({
wz = dp.deta^2 * (y/(1 - prob)^2 + .k/prob^2)
if (! .expected) {
- d2pdeta2 = d2theta.deta2(prob, .link, earg= .earg)
+ d2pdeta2 = d2theta.deta2(prob, .link, earg = .earg)
wz = wz - d2pdeta2 * ( .k/prob - y/(1-prob))
}
w * wz
- }), list( .link=link, .earg=earg, .k=k, .expected=expected ))))
+ }), list( .link = link, .earg = earg, .k = k, .expected = expected ))))
}
@@ -3771,7 +3267,7 @@ nbmud = function(lmu = c("loge","identity","reciprocal"),
"Link: log(lambda)",
"\n",
"Variance: lambda"),
- deviance= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ deviance= function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
nz = y > 0
devi = - (y - mu)
devi[nz] = devi[nz] + y[nz] * log(y[nz]/mu[nz])
@@ -3786,22 +3282,22 @@ nbmud = function(lmu = c("loge","identity","reciprocal"),
if (!length(etastart))
etastart = log(mu)
}),
- inverse=function(eta, extra=NULL)
+ inverse=function(eta, extra = NULL)
exp(eta),
last = expression({
misc$link = c(lambda = "loge")
misc$earg = list(lambda = list())
}),
- link=function(mu, extra=NULL)
+ link=function(mu, extra = NULL)
log(mu),
- vfamily="simple.poisson",
+ vfamily = "simple.poisson",
deriv=expression({
lambda = mu
dl.dlambda = -1 + y/lambda
- dlambda.deta = dtheta.deta(theta=lambda, link="loge", earg= list())
+ dlambda.deta = dtheta.deta(theta=lambda, link = "loge", earg = list())
w * dl.dlambda * dlambda.deta
}),
- weight=expression({
+ weight = expression({
d2l.dlambda2 = 1 / lambda
w * d2l.dlambda2 * dlambda.deta^2
}))
@@ -3809,243 +3305,753 @@ nbmud = function(lmu = c("loge","identity","reciprocal"),
- studentt = function(link.df="loglog", earg=list(),
- idf=NULL, nsimEIM=300)
+
+
+
+
+
+
+
+
+
+
+ studentt <- function(ldf = "loglog", edf = list(), idf = NULL,
+ tol1 = 0.1,
+ method.init = 1)
{
- if (mode(link.df) != "character" && mode(link.df) != "name")
- link.df = as.character(substitute(link.df))
- if (!is.list(earg)) earg = list()
- if (!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 10)
- stop("'nsimEIM' should be an integer greater than 10")
- if (length(idf) && !is.Numeric(idf) && any(idf <= 1))
- stop("'idf' should be > 1")
+ ldof <- ldf
+ edof <- edf
+ idof <- idf
- new("vglmff",
- blurb = c("Student t-distribution\n\n",
+ if (mode(ldof) != "character" && mode(ldof) != "name")
+ ldof <- as.character(substitute(ldof))
+ if (!is.list(edof)) edof <- list()
+
+ if (length(idof))
+ if (!is.Numeric(idof) || any(idof <= 1))
+ stop("argument 'idf' should be > 1")
+
+ if (!is.Numeric(tol1, posit = TRUE))
+ stop("argument 'tol1' should be positive")
+
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+ method.init > 3)
+ stop("'method.init' must be 1 or 2 or 3")
+
+
+ new("vglmff",
+ blurb = c("Student t-distribution\n\n",
"Link: ",
- namesof("df", link.df, earg=earg),
- "\n",
+ namesof("df", ldof, earg = edof), "\n",
"Variance: df/(df-2) if df > 2\n"),
- initialize=eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("df", .link.df, earg=.earg, tag=FALSE)
- if (!length(etastart)) {
- init.df = if (length( .idf )) .idf else
- (2*var(y)/(var(y)-1)) # Should incorporate weights
- if (!is.Numeric(init.df) || init.df <= 1)
- init.df = 4
- etastart = rep(theta2eta(init.df, .link.df, earg= .earg),
- len=length(y))
- }
- }), list( .link.df=link.df, .earg=earg, .idf=idf ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- df = eta2theta(eta, .link.df, earg= .earg)
- ifelse(df > 1, 0, NA)
- }, list( .link.df=link.df, .earg=earg ))),
- last = eval(substitute(expression({
- misc$link = c(df = .plink )
- misc$earg = list(df = .earg )
- misc$simEIM = TRUE
- misc$nsimEIM = .nsimEIM
- }), list( .plink=link.df, .earg=earg, .nsimEIM=nsimEIM ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
- df = eta2theta(eta, .link.df, earg= .earg)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dt(x=y, df=df, log=TRUE))
- }
- }, list( .link.df=link.df, .earg=earg ))),
- vfamily=c("studentt"),
- deriv=eval(substitute(expression({
- df = eta2theta(eta, .link.df, earg= .earg)
- temp = 1/df
- temp1 = y^2 * temp
- dl.ddf = 0.5*(-temp -log1p(temp1) +(df+1)*y^2/(df^2 * (1+temp1)) +
- digamma((df+1)/2)-digamma(df/2))
- ddf.deta = dtheta.deta(theta=df, .link.df, earg= .earg)
- w * dl.ddf * ddf.deta
- }), list( .link.df=link.df, .earg=earg ))),
- weight=eval(substitute(expression({
- temp2 = (df+1)/2
- d2df.deta2 = d2theta.deta2(theta=df, .link.df, earg= .earg)
-
- run.mean = 0
- for(ii in 1:(.nsimEIM)) {
- ysim = rt(n, df=df)
- d2l.ddf2 = 0.5*temp^2 + trigamma(temp2)/4 +
- 0.5*ysim^2*temp/(df+ysim^2) - trigamma(df/2)/4 -
- 0.5*ysim^2*((1+temp)/(df+ysim^2) + temp^2)/(df+ysim^2)
- rm(ysim)
- run.mean = ((ii-1) * run.mean + d2l.ddf2) / ii
- }
- if (intercept.only)
- run.mean = mean(run.mean)
- wz = -w * ddf.deta^2 * run.mean
- wz
- }), list( .link.df=link.df, .earg=earg, .nsimEIM=nsimEIM ))))
-}
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ tol1 = .tol1 )
+ }, list( .tol1 = tol1 ))),
+ initialize = eval(substitute(expression({
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
-
- chisq = function(link = "loge", earg=list())
-{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+ predictors.names <- namesof("df", .ldof, earg = .edof, tag = FALSE)
- new("vglmff",
- blurb = c("Chi-squared distribution\n\n",
- "Link: ",
- namesof("df", link, earg=earg)),
- inverse =eval(substitute(function(eta,extra=NULL) {
- eta2theta(eta, .link, earg= .earg)
- }, list( .link = link, .earg=earg ))),
- initialize =eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("df", .link, earg=.earg, tag = FALSE)
- mu = y + 0.167 * (y == 0)
- }), list( .link = link, .earg=earg ))),
- last =eval(substitute(expression({
- misc$link = c(df = .link)
- misc$earg = list(df = .earg )
- }), list( .link = link, .earg=earg ))),
- link=eval(substitute(function(mu, extra = NULL) {
- theta2eta(mu, .link, earg= .earg)
- }, list( .link = link, .earg=earg ))),
- loglikelihood =eval(substitute(
- function(mu,y,w,residuals= FALSE,eta,extra=NULL) {
- df = eta2theta(eta, .link, earg= .earg)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * dchisq(x=y, df=df, ncp = 0, log = TRUE))
- }, list( .link = link, .earg=earg ))),
- vfamily="chisq",
- deriv=eval(substitute(expression({
- df = eta2theta(eta, .link, earg= .earg)
- dl.dv = (log(y/2) - digamma(df/2)) / 2
- dv.deta = dtheta.deta(df, .link, earg= .earg)
- w * dl.dv * dv.deta
- }), list( .link = link, .earg=earg ))),
- weight =eval(substitute(expression({
- ed2l.dv2 = -trigamma(df/2) / 4
- wz = -ed2l.dv2 * dv.deta^2
- wz * w
- }), list( .link = link, .earg=earg ))))
-}
+ if (!length(etastart)) {
+ init.df <- if (length( .idof )) .idof else {
+ VarY = var(y)
+ MadY = mad(y)
+ if (VarY <= (1 + .tol1 )) VarY = 1.12
+ if ( .method.init == 1) {
+ 2 * VarY / (VarY - 1)
+ } else if ( .method.init == 2) {
+ ifelse(MadY < 1.05, 30, ifelse(MadY > 1.2, 2, 5))
+ } else
+ 10
+ }
+ etastart <- rep(theta2eta(init.df, .ldof , earg = .edof ),
+ len = length(y))
+ }
+ }), list( .ldof = ldof, .edof = edof, .idof = idof,
+ .tol1 = tol1, .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ Dof <- eta2theta(eta, .ldof, earg = .edof)
+ ans <- 0 * eta
+ ans[Dof <= 1] <- NA
+ ans
+ }, list( .ldof = ldof, .edof = edof ))),
+ last = eval(substitute(expression({
+ misc$link <- c(df = .ldof )
+ misc$earg <- list(df = .edof )
+ misc$method.init <- .method.init
+ misc$expected = TRUE
+ }), list( .ldof = ldof,
+ .edof = edof, .method.init = method.init ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ Dof <- eta2theta(eta, .ldof, earg = .edof)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * dt(x = y, df = Dof, log = TRUE))
+ }
+ }, list( .ldof = ldof, .edof = edof ))),
+ vfamily = c("studentt"),
+ deriv = eval(substitute(expression({
+ Dof <- eta2theta(eta, .ldof, earg = .edof)
+ ddf.deta <- dtheta.deta(theta = Dof, .ldof, earg = .edof)
+
+ DDS <- function(df) digamma((df + 1) / 2) - digamma(df / 2)
+ DDSp <- function(df) 0.5 * (trigamma((df + 1) / 2) - trigamma(df / 2))
+
+ temp0 <- 1 / Dof
+ temp1 <- temp0 * y^2
+ dl.ddf <- 0.5 * (-temp0 - log1p(temp1) +
+ (Dof + 1) * y^2 / (Dof^2 * (1 + temp1)) + DDS(Dof))
+ w * dl.ddf * ddf.deta
+ }), list( .ldof = ldof, .edof = edof ))),
+ weight = eval(substitute(expression({
+ const2 = (Dof + 0) / (Dof + 3)
+ const2[!is.finite(Dof)] <- 1 # Handles Inf
+ tmp6 = DDS(Dof)
+ edl2.dnu2 <- 0.5 * (tmp6 * (const2 * tmp6 - 2 / (Dof + 1)) - DDSp(Dof))
+
+ wz <- w * edl2.dnu2 * ddf.deta^2
+ wz
+ }), list( .ldof = ldof, .edof = edof ))))
+}
-dsimplex = function(x, mu = 0.5, dispersion = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
- sigma = dispersion
- deeFun = function(y, mu)
- (((y - mu) / (mu * (1 - mu)))^2) / (y * (1 - y))
- logpdf = (-0.5 * log(2 * pi) - log(sigma) - 1.5 * log(x) -
- 1.5 * log1p(-x) - 0.5 * deeFun(x, mu) / sigma^2)
- logpdf[x <= 0.0] = -Inf # log(0.0)
- logpdf[x >= 1.0] = -Inf # log(0.0)
- logpdf[mu <= 0.0] = NaN
- logpdf[mu >= 1.0] = NaN
- logpdf[sigma <= 0.0] = NaN
- if (log.arg) logpdf else exp(logpdf)
-}
-rsimplex = function(n, mu = 0.5, dispersion = 1) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
- stop("bad input for argument 'n'") else n
- oneval <- (length(mu) == 1 && length(dispersion) == 1)
- answer = rep(0.0, len = use.n)
- mu = rep(mu, len = use.n); dispersion = rep(dispersion, len = use.n)
- Kay1 = 3 * (dispersion * mu * (1-mu))^2
+ Kayfun.studentt <- function(df, bigno = .Machine$double.eps^(-0.46)) {
+ ind1 <- is.finite(df)
- if (oneval) {
- Kay1 = Kay1[1] # Since oneval means there is only one unique value
- mymu = mu[1]
- myroots = polyroot(c(-mymu^2, Kay1+2*mymu^2, -3*Kay1+1-2*mymu, 2*Kay1))
- myroots = myroots[abs(Im(myroots)) < 0.00001]
- myroots = Re(myroots)
- myroots = myroots[myroots >= 0.0]
- myroots = myroots[myroots <= 1.0]
- pdfmax = dsimplex(myroots, mymu, dispersion[1])
- pdfmax = rep(max(pdfmax), len = use.n) # For multiple peaks
- } else {
- pdfmax = numeric(use.n)
- for (ii in 1:use.n) {
- myroots = polyroot(c(-mu[ii]^2, Kay1[ii]+2*mu[ii]^2,
- -3*Kay1[ii]+1-2*mu[ii], 2*Kay1[ii]))
- myroots = myroots[abs(Im(myroots)) < 0.00001]
- myroots = Re(myroots)
- myroots = myroots[myroots >= 0.0]
- myroots = myroots[myroots <= 1.0]
- pdfmax[ii] = max(dsimplex(myroots, mu[ii], dispersion[ii]))
- }
- }
+ const4 = dnorm(0)
+ ans <- df
- index = 1:use.n
- nleft = length(index)
- while (nleft > 0) {
- xx = runif(nleft) # , 0, 1
- yy = runif(nleft, max = pdfmax[index])
- newindex = (1:nleft)[yy < dsimplex(xx, mu[index], dispersion[index])]
- if (length(newindex)) {
- answer[index[newindex]] = xx[newindex]
- index = setdiff(index, index[newindex])
- nleft = nleft - length(newindex)
+ if (any(ind1))
+ ans[ind1] <- exp(lgamma((df[ind1] + 1) / 2) -
+ lgamma( df[ind1] / 2)) / sqrt(pi * df[ind1])
+ ans[df <= 0] = NaN
+ ind2 <- (df >= bigno)
+ if (any(ind2)) {
+ dff = df[ind2]
+ ans[ind2] <- const4 # 1 / const3 # for handling df = Inf
+ }
+ ans[!ind1] <- const4 # 1 / const3 # for handling df = Inf
+
+ ans
}
- }
- answer
-}
+ studentt3 <- function(llocation = "identity", elocation = list(),
+ lscale = "loge", escale = list(),
+ ldf = "loglog", edf = list(),
+ ilocation = NULL, iscale = NULL, idf = NULL,
+ method.init = 1,
+ zero = -(2:3))
+{
- simplex = function(lmu = "logit", lsigma = "loge",
- emu = list(), esigma = list(),
- imu = NULL, isigma = NULL,
- method.init = 1, shrinkage.init = 0.95,
- zero = 2) {
+ lloc <- llocation; lsca <- lscale; ldof <- ldf
+ eloc <- elocation; esca <- escale; edof <- edf
+ iloc <- ilocation; isca <- iscale; idof <- idf
+
+ if (mode(lloc) != "character" && mode(lloc) != "name")
+ lloc <- as.character(substitute(lloc))
+ if (!is.list(eloc)) eloc <- list()
+
+ if (mode(lsca) != "character" && mode(lsca) != "name")
+ lsca <- as.character(substitute(lsca))
+ if (!is.list(esca)) esca <- list()
+
+ if (mode(ldof) != "character" && mode(ldof) != "name")
+ ldof <- as.character(substitute(ldof))
+ if (!is.list(edof)) edof <- list()
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(lsigma) != "character" && mode(lsigma) != "name")
- lsigma = as.character(substitute(lsigma))
- if (!is.list(emu)) emu = list()
- if (!is.list(esigma)) esigma = list()
if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
- method.init > 3)
+ method.init > 3)
stop("'method.init' must be 1 or 2 or 3")
- if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
- shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument 'zero'")
+
+ if (length(iloc))
+ if (!is.Numeric(iloc))
+ stop("bad input in argument 'ilocation'")
+ if (length(isca))
+ if (!is.Numeric(isca, posit = TRUE))
+ stop("argument 'iscale' should be positive")
+ if (length(idof))
+ if (!is.Numeric(idof) || any(idof <= 1))
+ stop("argument 'idf' should be > 1")
new("vglmff",
- blurb = c("Univariate Simplex distribution \n",
- "f(y) = [2*pi*sigma^2*(y*(1-y))^3]^(-0.5) * \n",
- " exp[-0.5*(y-mu)^2 / (sigma^2 * y*(1-y)*mu^2*(1-mu)^2)],\n",
- " 0 < y < 1, 0 < mu < 1, sigma > 0\n",
- "Links: ",
- namesof("mu", lmu, earg = emu), ", ",
- namesof("sigma", lsigma, earg = esigma), "\n\n",
- "Mean: mu\n",
- "Variance function: V(mu) = mu^3 * (1-mu)^3"),
- constraints=eval(substitute(expression({
+ blurb = c("Student t-distribution\n\n",
+ "Link: ",
+ namesof("location", lloc, earg = eloc), ", ",
+ namesof("scale", lsca, earg = esca), ", ",
+ namesof("df", ldof, earg = edof), "\n",
+ "Variance: scale^2 * df / (df - 2) if df > 2\n"),
+ constraints = eval(substitute(expression({
+
+ dotzero <- .zero
+ Musual <- 3
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(Musual = 3,
+ zero = .zero)
+ }, list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ Musual <- 3
+ if (ncol(cbind(w)) != 1)
+ stop("prior weights must be a vector or a one-column matrix")
+
+ y <- as.matrix(y)
+ extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
+ extra$Musual <- Musual
+ M <- Musual * ncoly #
+
+ mynames1 <- paste("location", if (NOS > 1) 1:NOS else "", sep = "")
+ mynames2 <- paste("scale", if (NOS > 1) 1:NOS else "", sep = "")
+ mynames3 <- paste("df", if (NOS > 1) 1:NOS else "", sep = "")
+ predictors.names <-
+ c(namesof(mynames1, .lloc, earg = .eloc, tag = FALSE),
+ namesof(mynames2, .lsca, earg = .esca, tag = FALSE),
+ namesof(mynames3, .ldof, earg = .edof, tag = FALSE))
+ predictors.names <-
+ predictors.names[interleave.VGAM(Musual * NOS, M = Musual)]
+
+ if (!length(etastart)) {
+
+ init.loc <- if (length( .iloc )) .iloc else {
+ if ( .method.init == 2) apply(y, 2, median) else
+ if ( .method.init == 3) y else {
+ colSums(w * y) / sum(w)
+ }
+ }
+
+ sdvec <- apply(y, 2, sd)
+ init.sca <- if (length( .isca )) .isca else
+ sdvec / 2.3
+
+ sdvec <- rep(sdvec, len = max(length(sdvec), length(init.sca)))
+ init.sca <- rep(init.sca, len = max(length(sdvec), length(init.sca)))
+ ind9 <- (sdvec / init.sca <= (1 + 0.12))
+ sdvec[ind9] <- sqrt(1.12) * init.sca[ind9]
+ init.dof <- if (length( .idof )) .idof else
+ (2 * (sdvec / init.sca)^2) / ((sdvec / init.sca)^2 - 1)
+ if (!is.Numeric(init.dof) || init.dof <= 1)
+ init.dof <- rep(3, len = ncoly)
+
+ mat1 <- matrix(theta2eta(init.loc, .lloc, earg = .eloc), n, NOS,
+ byrow = TRUE)
+ mat2 <- matrix(theta2eta(init.sca, .lsca, earg = .esca), n, NOS,
+ byrow = TRUE)
+ mat3 <- matrix(theta2eta(init.dof, .ldof, earg = .edof), n, NOS,
+ byrow = TRUE)
+ etastart <- cbind(mat1, mat2, mat3)
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ }
+ }), list( .lloc = lloc, .eloc = eloc, .iloc = iloc,
+ .lsca = lsca, .esca = esca, .isca = isca,
+ .ldof = ldof, .edof = edof, .idof = idof,
+ .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ NOS <- extra$NOS
+ Musual <- extra$Musual
+ Loc <- eta2theta(eta[, Musual*(1:NOS)-2], .lloc, earg = .eloc)
+ Dof <- eta2theta(eta[, Musual*(1:NOS)-0], .ldof, earg = .edof)
+ Loc[Dof <= 1] <- NA
+ Loc
+ }, list( .lloc = lloc, .eloc = eloc,
+ .lsca = lsca, .esca = esca,
+ .ldof = ldof, .edof = edof ))),
+ last = eval(substitute(expression({
+ Musual <- extra$Musual
+ misc$link <- c(rep( .lloc, length = NOS),
+ rep( .lsca, length = NOS),
+ rep( .ldof, length = NOS))
+ misc$link <- misc$link[interleave.VGAM(Musual * NOS, M = Musual)]
+ temp.names <- c(mynames1, mynames2, mynames3)
+ temp.names <- temp.names[interleave.VGAM(Musual * NOS, M = Musual)]
+ names(misc$link) <- temp.names
+
+ misc$earg <- vector("list", Musual * NOS)
+ names(misc$earg) <- temp.names
+ for(ii in 1:NOS) {
+ misc$earg[[Musual*ii-2]] <- .eloc
+ misc$earg[[Musual*ii-1]] <- .esca
+ misc$earg[[Musual*ii ]] <- .edof
+ }
+
+ misc$Musual <- Musual
+ misc$method.init <- .method.init
+ misc$expected = TRUE
+ }), list( .lloc = lloc, .eloc = eloc,
+ .lsca = lsca, .esca = esca,
+ .ldof = ldof, .edof = edof,
+ .method.init = method.init ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ NOS <- extra$NOS
+ Musual <- extra$Musual
+ Loc <- eta2theta(eta[, Musual*(1:NOS)-2], .lloc, earg = .eloc)
+ Sca <- eta2theta(eta[, Musual*(1:NOS)-1], .lsca, earg = .esca)
+ Dof <- eta2theta(eta[, Musual*(1:NOS)-0], .ldof, earg = .edof)
+ zedd <- (y - Loc) / Sca
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * (dt(x = zedd, df = Dof, log = TRUE) - log(Sca)))
+ }
+ }, list( .lloc = lloc, .eloc = eloc,
+ .lsca = lsca, .esca = esca,
+ .ldof = ldof, .edof = edof ))),
+ vfamily = c("studentt3"),
+ deriv = eval(substitute(expression({
+ Musual <- extra$Musual
+ NOS <- extra$NOS
+ Loc <- eta2theta(eta[, Musual*(1:NOS)-2], .lloc, earg = .eloc)
+ Sca <- eta2theta(eta[, Musual*(1:NOS)-1], .lsca, earg = .esca)
+ Dof <- eta2theta(eta[, Musual*(1:NOS)-0], .ldof, earg = .edof)
+
+ dloc.deta <- cbind(dtheta.deta(theta = Loc, .lloc, earg = .eloc))
+ dsca.deta <- cbind(dtheta.deta(theta = Sca, .lsca, earg = .esca))
+ ddof.deta <- cbind(dtheta.deta(theta = Dof, .ldof, earg = .edof))
+
+ zedd <- (y - Loc) / Sca
+ temp0 <- 1 / Dof
+ temp1 <- temp0 * zedd^2
+ dl.dloc <- (Dof + 1) * zedd / (Sca * (Dof + zedd^2))
+ dl.dsca <- zedd * dl.dloc - 1 / Sca
+ dl.ddof <- 0.5 * (-temp0 - log1p(temp1) +
+ (Dof+1) * zedd^2 / (Dof^2 * (1 + temp1)) +
+ digamma((Dof+1)/2) - digamma(Dof/2))
+
+ ans <- w * cbind(dl.dloc * dloc.deta,
+ dl.dsca * dsca.deta,
+ dl.ddof * ddof.deta)
+ ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans
+ }), list( .lloc = lloc, .eloc = eloc,
+ .lsca = lsca, .esca = esca,
+ .ldof = ldof, .edof = edof ))),
+ weight = eval(substitute(expression({
+
+ const1 = (Dof + 1) / (Dof + 3)
+ const2 = (Dof + 0) / (Dof + 3)
+ const1[!is.finite(Dof)] <- 1 # Handles Inf
+ const2[!is.finite(Dof)] <- 1 # Handles Inf
+
+ const4 = dnorm(0)
+ ed2l.dlocat2 = const1 / (Sca * (Kayfun.studentt(Dof) / const4))^2
+ ed2l.dscale2 = 2 * const2 / Sca^2
+
+ DDS <- function(df) digamma((df + 1) / 2) - digamma(df / 2)
+ DDSp <- function(df) 0.5 * (trigamma((df + 1) / 2) - trigamma(df / 2))
+
+
+ tmp6 = DDS(Dof)
+ edl2.dnu2 <- 0.5 * (tmp6 * (const2 * tmp6 - 2 / (Dof + 1)) - DDSp(Dof))
+ ed2l.dshape2 <- cbind(edl2.dnu2) # cosmetic name change
+
+ ed2l.dshape.dlocat = cbind(0 * Sca)
+ ed2l.dshape.dscale = cbind((-1 / (Dof + 1.0) + const2 * DDS(Dof)) / Sca)
+
+ wz = matrix(0.0, n, dimm(M))
+ wz[, Musual*(1:NOS) - 2] = ed2l.dlocat2 * dloc.deta^2
+ wz[, Musual*(1:NOS) - 1] = ed2l.dscale2 * dsca.deta^2
+ wz[, Musual*(1:NOS) - 0] = ed2l.dshape2 * ddof.deta^2
+
+ for (ii in ((1:NOS) - 1)) {
+ ind3 = 1 + ii
+ wz[, iam(ii*Musual + 1, ii*Musual + 3, M = M)] <-
+ ed2l.dshape.dlocat[, ind3] *
+ dloc.deta[, ind3] * ddof.deta[, ind3]
+ wz[, iam(ii*Musual + 2, ii*Musual + 3, M = M)] <-
+ ed2l.dshape.dscale[, ind3] *
+ dsca.deta[, ind3] * ddof.deta[, ind3]
+ }
+
+ while (all(wz[, ncol(wz)] == 0))
+ wz <- wz[, -ncol(wz)]
+
+ w * wz
+ }), list( .lloc = lloc, .eloc = eloc,
+ .lsca = lsca, .esca = esca,
+ .ldof = ldof, .edof = edof ))))
+}
+
+
+
+
+
+ studentt2 <- function(df = Inf,
+ llocation = "identity", elocation = list(),
+ lscale = "loge", escale = list(),
+ ilocation = NULL, iscale = NULL,
+ method.init = 1,
+ zero = -2)
+{
+
+
+
+ lloc <- llocation; lsca <- lscale
+ eloc <- elocation; esca <- escale
+ iloc <- ilocation; isca <- iscale
+ doff <- df
+
+ if (mode(lloc) != "character" && mode(lloc) != "name")
+ lloc <- as.character(substitute(lloc))
+ if (!is.list(eloc)) eloc <- list()
+
+ if (mode(lsca) != "character" && mode(lsca) != "name")
+ lsca <- as.character(substitute(lsca))
+ if (!is.list(esca)) esca <- list()
+
+ if (is.finite(doff))
+ if (!is.Numeric(doff, posit = TRUE))
+ stop("argument 'df' must be positive")
+
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+ method.init > 3)
+ stop("argument 'method.init' must be 1 or 2 or 3")
+
+ if (length(iloc))
+ if (!is.Numeric(iloc))
+ stop("bad input in argument 'ilocation'")
+ if (length(isca))
+ if (!is.Numeric(isca, posit = TRUE))
+ stop("argument 'iscale' should be positive")
+
+
+ new("vglmff",
+ blurb = c("Student t-distribution\n\n",
+ "Link: ",
+ namesof("location", lloc, earg = eloc), ", ",
+ namesof("scale", lsca, earg = esca), "\n",
+ "Variance: scale^2 * df / (df - 2) if df > 2\n"),
+ constraints = eval(substitute(expression({
+
+ dotzero <- .zero
+ Musual <- 2
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ zero = .zero)
+ }, list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ Musual <- 2
+ if (ncol(cbind(w)) != 1)
+ stop("prior weights must be a vector or a one-column matrix")
+
+ y <- as.matrix(y)
+ extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
+ extra$Musual <- Musual
+ M <- Musual * ncoly #
+
+ mynames1 <- paste("location", if (NOS > 1) 1:NOS else "", sep = "")
+ mynames2 <- paste("scale", if (NOS > 1) 1:NOS else "", sep = "")
+ predictors.names <-
+ c(namesof(mynames1, .lloc, earg = .eloc, tag = FALSE),
+ namesof(mynames2, .lsca, earg = .esca, tag = FALSE))
+ predictors.names <-
+ predictors.names[interleave.VGAM(Musual * NOS, M = Musual)]
+
+ if (!length(etastart)) {
+
+ init.loc <- if (length( .iloc )) .iloc else {
+ if ( .method.init == 2) apply(y, 2, median) else
+ if ( .method.init == 3) y else {
+ colSums(w * y) / sum(w)
+ }
+ }
+
+ sdvec <- apply(y, 2, sd)
+ init.sca <- if (length( .isca )) .isca else
+ sdvec / 2.3
+
+ mat1 <- matrix(theta2eta(init.loc, .lloc, earg = .eloc), n, NOS,
+ byrow = TRUE)
+ mat2 <- matrix(theta2eta(init.sca, .lsca, earg = .esca), n, NOS,
+ byrow = TRUE)
+ etastart <- cbind(mat1, mat2)
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ }
+ }), list( .lloc = lloc, .eloc = eloc, .iloc = iloc,
+ .lsca = lsca, .esca = esca, .isca = isca,
+ .doff = doff,
+ .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ NOS <- extra$NOS
+ Musual <- extra$Musual
+ Loc <- eta2theta(eta[, Musual*(1:NOS) - 1], .lloc, earg = .eloc)
+ Dof <- matrix( .doff , nrow(cbind(Loc)), NOS, byrow = TRUE)
+ Loc[Dof <= 1] <- NA
+ Loc
+ }, list( .lloc = lloc, .eloc = eloc,
+ .lsca = lsca, .esca = esca,
+ .doff = doff ))),
+ last = eval(substitute(expression({
+ Musual <- extra$Musual
+ misc$link <- c(rep( .lloc, length = NOS),
+ rep( .lsca, length = NOS))
+ temp.names <- c(mynames1, mynames2)
+ temp.names <- temp.names[interleave.VGAM(Musual * NOS, M = Musual)]
+ names(misc$link) <- temp.names
+ misc$earg <- vector("list", Musual * NOS)
+ names(misc$earg) <- temp.names
+ for(ii in 1:NOS) {
+ misc$earg[[Musual*ii-1]] <- .eloc
+ misc$earg[[Musual*ii-0]] <- .esca
+ }
+
+ misc$Musual <- Musual
+ misc$simEIM <- TRUE
+ misc$df <- .doff
+ misc$method.init <- .method.init
+ misc$expected = TRUE
+ }), list( .lloc = lloc, .eloc = eloc,
+ .lsca = lsca, .esca = esca,
+ .doff = doff,
+ .method.init = method.init ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ NOS <- extra$NOS
+ Musual <- extra$Musual
+ Loc <- eta2theta(eta[, Musual*(1:NOS)-1], .lloc, earg = .eloc)
+ Sca <- eta2theta(eta[, Musual*(1:NOS)-0], .lsca, earg = .esca)
+ Dof <- matrix( .doff , nrow(cbind(Loc)), NOS, byrow = TRUE)
+ zedd <- (y - Loc) / Sca
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * (dt(x = zedd, df = Dof, log = TRUE) - log(Sca)))
+ }
+ }, list( .lloc = lloc, .eloc = eloc,
+ .lsca = lsca, .esca = esca,
+ .doff = doff ))),
+ vfamily = c("studentt2"),
+ deriv = eval(substitute(expression({
+ Musual <- extra$Musual
+ NOS <- extra$NOS
+ Loc <- eta2theta(eta[, Musual*(1:NOS)-1], .lloc, earg = .eloc)
+ Sca <- eta2theta(eta[, Musual*(1:NOS)-0], .lsca, earg = .esca)
+ Dof <- matrix( .doff , n, NOS, byrow = TRUE)
+
+ dlocat.deta <- dtheta.deta(theta = Loc, .lloc, earg = .eloc)
+ dscale.deta <- dtheta.deta(theta = Sca, .lsca, earg = .esca)
+
+ zedd <- (y - Loc) / Sca
+ temp0 <- 1 / Dof
+ temp1 <- temp0 * zedd^2
+ dl.dlocat <- (Dof + 1) * zedd / (Sca * (Dof + zedd^2))
+ dl.dlocat[!is.finite(Dof)] <- zedd / Sca # Adjust for df=Inf
+ dl.dscale <- zedd * dl.dlocat - 1 / Sca
+
+ ans <- w * cbind(dl.dlocat * dlocat.deta,
+ dl.dscale * dscale.deta)
+ ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans
+ }), list( .lloc = lloc, .eloc = eloc,
+ .lsca = lsca, .esca = esca,
+ .doff = doff ))),
+ weight = eval(substitute(expression({
+
+ const1 = (Dof + 1) / (Dof + 3)
+ const2 = (Dof + 0) / (Dof + 3)
+ const1[!is.finite( Dof )] <- 1 # Handles Inf
+ const2[!is.finite( Dof )] <- 1 # Handles Inf
+
+ const4 = dnorm(0)
+ ed2l.dlocat2 = const1 / (Sca * (Kayfun.studentt(Dof) / const4))^2
+
+ ed2l.dscale2 = 2.0 * const2 / Sca^2 # 2.0 seems to work
+
+ wz = matrix(as.numeric(NA), n, M) #2=M; diagonal!
+ wz[, Musual*(1:NOS) - 1] = ed2l.dlocat2 * dlocat.deta^2
+ wz[, Musual*(1:NOS) ] = ed2l.dscale2 * dscale.deta^2
+ w * wz
+ }), list( .lloc = lloc, .eloc = eloc,
+ .lsca = lsca, .esca = esca,
+ .doff = doff ))))
+}
+
+
+
+
+
+
+ chisq <- function(link = "loge", earg = list())
+{
+ if (mode(link) != "character" && mode(link) != "name")
+ link <- as.character(substitute(link))
+ if (!is.list(earg)) earg <- list()
+
+ new("vglmff",
+ blurb = c("Chi-squared distribution\n\n",
+ "Link: ",
+ namesof("df", link, earg = earg, tag = FALSE)),
+ initialize = eval(substitute(expression({
+ if (ncol(cbind(w)) != 1)
+ stop("argument 'weights' must be a vector or a one-column matrix")
+
+ y <- as.matrix(y)
+
+ extra$ncoly <- NOS <- ncol(y) # Number of species
+ mynames1 <- paste("df", if (NOS > 1) 1:NOS else "", sep = "")
+ predictors.names <- namesof(mynames1, .link, earg = .earg, tag = FALSE)
+
+ if (!length(mustart) && !length(etastart))
+ mustart <- y + (1 / 8) * (y == 0)
+ }), list( .link = link, .earg = earg ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta, .link, earg = .earg)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link <- c(df = .link)
+ misc$earg <- list(df = .earg )
+ }), list( .link = link, .earg = earg ))),
+ link = eval(substitute(function(mu, extra = NULL) {
+ theta2eta(mu, .link, earg = .earg)
+ }, list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ mydf <- eta2theta(eta, .link, earg = .earg)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(w * dchisq(x = y, df = mydf, ncp = 0, log = TRUE))
+ }, list( .link = link, .earg = earg ))),
+ vfamily = "chisq",
+ deriv = eval(substitute(expression({
+ mydf <- eta2theta(eta, .link, earg = .earg)
+ dl.dv <- (log(y / 2) - digamma(mydf / 2)) / 2
+ dv.deta <- dtheta.deta(mydf, .link, earg = .earg)
+ w * dl.dv * dv.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ ed2l.dv2 <- -trigamma(mydf / 2) / 4
+ wz <- -ed2l.dv2 * dv.deta^2
+ wz * w
+ }), list( .link = link, .earg = earg ))))
+}
+
+
+
+
+
+
+
+dsimplex = function(x, mu = 0.5, dispersion = 1, log = FALSE) {
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+ sigma = dispersion
+
+ deeFun = function(y, mu)
+ (((y - mu) / (mu * (1 - mu)))^2) / (y * (1 - y))
+ logpdf = (-0.5 * log(2 * pi) - log(sigma) - 1.5 * log(x) -
+ 1.5 * log1p(-x) - 0.5 * deeFun(x, mu) / sigma^2)
+ logpdf[x <= 0.0] = -Inf # log(0.0)
+ logpdf[x >= 1.0] = -Inf # log(0.0)
+ logpdf[mu <= 0.0] = NaN
+ logpdf[mu >= 1.0] = NaN
+ logpdf[sigma <= 0.0] = NaN
+ if (log.arg) logpdf else exp(logpdf)
+}
+
+
+rsimplex = function(n, mu = 0.5, dispersion = 1) {
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
+ stop("bad input for argument 'n'") else n
+
+ oneval <- (length(mu) == 1 && length(dispersion) == 1)
+ answer = rep(0.0, len = use.n)
+ mu = rep(mu, len = use.n); dispersion = rep(dispersion, len = use.n)
+ Kay1 = 3 * (dispersion * mu * (1-mu))^2
+
+ if (oneval) {
+ Kay1 = Kay1[1] # Since oneval means there is only one unique value
+ mymu = mu[1]
+ myroots = polyroot(c(-mymu^2, Kay1+2*mymu^2, -3*Kay1+1-2*mymu, 2*Kay1))
+ myroots = myroots[abs(Im(myroots)) < 0.00001]
+ myroots = Re(myroots)
+ myroots = myroots[myroots >= 0.0]
+ myroots = myroots[myroots <= 1.0]
+ pdfmax = dsimplex(myroots, mymu, dispersion[1])
+ pdfmax = rep(max(pdfmax), len = use.n) # For multiple peaks
+ } else {
+ pdfmax = numeric(use.n)
+ for (ii in 1:use.n) {
+ myroots = polyroot(c(-mu[ii]^2, Kay1[ii]+2*mu[ii]^2,
+ -3*Kay1[ii]+1-2*mu[ii], 2*Kay1[ii]))
+ myroots = myroots[abs(Im(myroots)) < 0.00001]
+ myroots = Re(myroots)
+ myroots = myroots[myroots >= 0.0]
+ myroots = myroots[myroots <= 1.0]
+ pdfmax[ii] = max(dsimplex(myroots, mu[ii], dispersion[ii]))
+ }
+ }
+
+ index = 1:use.n
+ nleft = length(index)
+ while (nleft > 0) {
+ xx = runif(nleft) # , 0, 1
+ yy = runif(nleft, max = pdfmax[index])
+ newindex = (1:nleft)[yy < dsimplex(xx, mu[index], dispersion[index])]
+ if (length(newindex)) {
+ answer[index[newindex]] = xx[newindex]
+ index = setdiff(index, index[newindex])
+ nleft = nleft - length(newindex)
+ }
+ }
+ answer
+}
+
+
+
+
+
+
+ simplex = function(lmu = "logit", lsigma = "loge",
+ emu = list(), esigma = list(),
+ imu = NULL, isigma = NULL,
+ method.init = 1, shrinkage.init = 0.95,
+ zero = 2) {
+
+
+ if (mode(lmu) != "character" && mode(lmu) != "name")
+ lmu = as.character(substitute(lmu))
+ if (mode(lsigma) != "character" && mode(lsigma) != "name")
+ lsigma = as.character(substitute(lsigma))
+ if (!is.list(emu)) emu = list()
+ if (!is.list(esigma)) esigma = list()
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+ method.init > 3)
+ stop("'method.init' must be 1 or 2 or 3")
+ if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
+ shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ stop("bad input for argument 'zero'")
+
+ new("vglmff",
+ blurb = c("Univariate Simplex distribution\n\n",
+ "f(y) = [2*pi*sigma^2*(y*(1-y))^3]^(-0.5) * \n",
+ " exp[-0.5*(y-mu)^2 / (sigma^2 * y*(1-y)*mu^2*(1-mu)^2)],\n",
+ " 0 < y < 1, 0 < mu < 1, sigma > 0\n\n",
+ "Links: ",
+ namesof("mu", lmu, earg = emu), ", ",
+ namesof("sigma", lsigma, earg = esigma), "\n\n",
+ "Mean: mu\n",
+ "Variance function: V(mu) = mu^3 * (1 - mu)^3"),
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
@@ -4138,15 +4144,15 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
- rig = function(lmu="identity", llambda="loge",
- emu=list(), elambda=list(), imu=NULL, ilambda=1)
+ rig = function(lmu = "identity", llambda = "loge",
+ emu = list(), elambda=list(), imu = NULL, ilambda=1)
{
if (mode(lmu) != "character" && mode(lmu) != "name")
lmu = as.character(substitute(lmu))
if (mode(llambda) != "character" && mode(llambda) != "name")
llambda = as.character(substitute(llambda))
- if (!is.Numeric(ilambda, posit=TRUE))
+ if (!is.Numeric(ilambda, posit = TRUE))
stop("bad input for 'ilambda'")
if (!is.list(emu)) emu = list()
if (!is.list(elambda)) elambda = list()
@@ -4157,69 +4163,69 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
" exp[-0.5*(lambda/y) * (y-mu)^2], ",
" 0 < y,\n",
"Links: ",
- namesof("mu", lmu, earg=emu), ", ",
- namesof("lambda", llambda, earg=elambda), "\n\n",
+ namesof("mu", lmu, earg = emu), ", ",
+ namesof("lambda", llambda, earg = elambda), "\n\n",
"Mean: mu"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
y = as.numeric(y)
if (any(y <= 0))
stop("all y values must be > 0")
predictors.names =
- c(namesof("mu", .lmu, earg=.emu, tag=FALSE),
- namesof("lambda", .llambda, earg=.elambda, tag=FALSE))
+ c(namesof("mu", .lmu, earg = .emu, tag = FALSE),
+ namesof("lambda", .llambda, earg = .elambda, tag = FALSE))
if (!length(etastart)) {
mu.init = rep(if (length( .imu)) .imu else
- median(y), length=n)
+ median(y), length = n)
lambda.init = rep(if (length( .ilambda )) .ilambda else
- sqrt(var(y)), length=n)
- etastart = cbind(theta2eta(mu.init, .lmu, earg= .emu),
- theta2eta(lambda.init, .llambda, earg= .elambda))
+ sqrt(var(y)), length = n)
+ etastart = cbind(theta2eta(mu.init, .lmu, earg = .emu),
+ theta2eta(lambda.init, .llambda, earg = .elambda))
}
- }), list( .lmu=lmu, .llambda=llambda,
- .emu=emu, .elambda=elambda,
+ }), list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda,
.imu=imu, .ilambda=ilambda ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- eta2theta(eta[,1], .lmu, earg= .emu)
- }, list( .lmu=lmu,
- .emu=emu, .elambda=elambda ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[,1], .lmu, earg = .emu)
+ }, list( .lmu = lmu,
+ .emu = emu, .elambda = elambda ))),
last = eval(substitute(expression({
- misc$d3 = d3 # because save.weights=FALSE
+ misc$d3 = d3 # because save.weights = FALSE
misc$link = c(mu= .lmu, lambda= .llambda)
misc$earg = list(mu= .emu, lambda= .elambda)
misc$pooled.weight = pooled.weight
- }), list( .lmu=lmu, .llambda=llambda,
- .emu=emu, .elambda=elambda ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- lambda = eta2theta(eta[,2], .llambda, earg= .elambda)
+ }), list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ lambda = eta2theta(eta[,2], .llambda, earg = .elambda)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w * (-0.5*log(y) + 0.5*log(lambda) - (0.5*lambda/y) * (y-mu)^2))
- }, list( .llambda=llambda,
- .emu=emu, .elambda=elambda ))),
- vfamily=c("rig"),
- deriv=eval(substitute(expression({
- if (iter==1) {
+ }, list( .llambda = llambda,
+ .emu = emu, .elambda = elambda ))),
+ vfamily = c("rig"),
+ deriv = eval(substitute(expression({
+ if (iter == 1) {
d3 = deriv3(~ w *
(-0.5*log(y) + 0.5*log(lambda) - (0.5*lambda/y) * (y-mu)^2),
c("mu", "lambda"), hessian= TRUE)
}
- lambda = eta2theta(eta[,2], .llambda, earg= .elambda)
+ lambda = eta2theta(eta[,2], .llambda, earg = .elambda)
eval.d3 = eval(d3)
dl.dthetas = attr(eval.d3, "gradient")
- dmu.deta = dtheta.deta(mu, .lmu, earg= .emu)
- dlambda.deta = dtheta.deta(lambda, .llambda, earg= .elambda)
+ dmu.deta = dtheta.deta(mu, .lmu, earg = .emu)
+ dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda)
dtheta.detas = cbind(dmu.deta, dlambda.deta)
dl.dthetas * dtheta.detas
- }), list( .lmu=lmu, .llambda=llambda,
- .emu=emu, .elambda=elambda ))),
- weight=eval(substitute(expression({
+ }), list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda ))),
+ weight = eval(substitute(expression({
d2l.dthetas2 = attr(eval.d3, "hessian")
wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
@@ -4228,8 +4234,8 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
wz[,iam(1,2,M)] = -d2l.dthetas2[,1,2] * dtheta.detas[,1] *
dtheta.detas[,2]
if (!.expected) {
- d2mudeta2 = d2theta.deta2(mu, .lmu, earg= .emu)
- d2lambda = d2theta.deta2(lambda, .llambda, earg= .elambda)
+ d2mudeta2 = d2theta.deta2(mu, .lmu, earg = .emu)
+ d2lambda = d2theta.deta2(lambda, .llambda, earg = .elambda)
wz[,iam(1,1,M)] = wz[,iam(1,1,M)] - dl.dthetas[,1] * d2mudeta2
wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - dl.dthetas[,2] * d2lambda
}
@@ -4244,15 +4250,15 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
pooled.weight = FALSE
wz
- }), list( .lmu=lmu, .llambda=llambda, .expected=FALSE,
- .emu=emu, .elambda=elambda ))))
+ }), list( .lmu = lmu, .llambda = llambda, .expected = FALSE,
+ .emu = emu, .elambda = elambda ))))
}
- hypersecant = function(link.theta="elogit",
- earg = if (link.theta=="elogit") list(min=-pi/2, max=pi/2) else list(),
- init.theta=NULL)
+ hypersecant = function(link.theta = "elogit",
+ earg = if (link.theta == "elogit") list(min=-pi/2, max=pi/2) else list(),
+ init.theta = NULL)
{
if (mode(link.theta) != "character" && mode(link.theta) != "name")
@@ -4264,42 +4270,42 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
"f(y) = exp(theta*y + log(cos(theta ))) / (2*cosh(pi*y/2))\n",
" for all y,\n",
"Link: ",
- namesof("theta", link.theta, earg=earg), "\n\n",
+ namesof("theta", link.theta, earg = earg), "\n\n",
"Mean: tan(theta)"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("theta", .link.theta, earg=.earg, tag=FALSE)
+ predictors.names = namesof("theta", .link.theta, earg = .earg, tag = FALSE)
if (!length(etastart)) {
theta.init = rep(if (length( .init.theta)) .init.theta else
- median(y), length=n)
- etastart = theta2eta(theta.init, .link.theta, earg= .earg)
+ median(y), length = n)
+ etastart = theta2eta(theta.init, .link.theta, earg = .earg)
}
- }), list( .link.theta=link.theta, .earg=earg,
+ }), list( .link.theta=link.theta, .earg = earg,
.init.theta=init.theta ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- theta = eta2theta(eta, .link.theta, earg= .earg)
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ theta = eta2theta(eta, .link.theta, earg = .earg)
tan(theta)
- }, list( .link.theta=link.theta, .earg=earg ))),
+ }, list( .link.theta=link.theta, .earg = earg ))),
last = eval(substitute(expression({
misc$link = c(theta= .link.theta )
misc$earg = list(theta= .earg )
misc$expected = TRUE
- }), list( .link.theta=link.theta, .earg=earg ))),
- loglikelihood=eval(substitute(function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- theta = eta2theta(eta, .link.theta, earg= .earg)
+ }), list( .link.theta=link.theta, .earg = earg ))),
+ loglikelihood = eval(substitute(function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ theta = eta2theta(eta, .link.theta, earg = .earg)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w * (theta*y + log(cos(theta)) - log(cosh(pi*y/2 ))))
- }, list( .link.theta=link.theta, .earg=earg ))),
- vfamily=c("hypersecant"),
- deriv=eval(substitute(expression({
- theta = eta2theta(eta, .link.theta, earg= .earg)
+ }, list( .link.theta=link.theta, .earg = earg ))),
+ vfamily = c("hypersecant"),
+ deriv = eval(substitute(expression({
+ theta = eta2theta(eta, .link.theta, earg = .earg)
dl.dthetas = y - tan(theta)
- dparam.deta = dtheta.deta(theta, .link.theta, earg= .earg)
+ dparam.deta = dtheta.deta(theta, .link.theta, earg = .earg)
w * dl.dthetas * dparam.deta
- }), list( .link.theta=link.theta, .earg=earg ))),
- weight=expression({
+ }), list( .link.theta=link.theta, .earg = earg ))),
+ weight = expression({
d2l.dthetas2 = 1 / cos(theta)^2
wz = w * d2l.dthetas2 * dparam.deta^2
wz
@@ -4308,9 +4314,9 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
- hypersecant.1 = function(link.theta="elogit",
+ hypersecant.1 = function(link.theta = "elogit",
earg=if (link.theta == "elogit") list(min=-pi/2, max=pi/2) else list(),
- init.theta=NULL)
+ init.theta = NULL)
{
if (mode(link.theta) != "character" && mode(link.theta) != "name")
@@ -4323,48 +4329,48 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
" (1-y)^(-0.5-theta/pi), ",
" 0 < y < 1,\n",
"Link: ",
- namesof("theta", link.theta, earg=earg), "\n\n",
+ namesof("theta", link.theta, earg = earg), "\n\n",
"Mean: 0.5 + theta/pi", "\n",
"Variance: (pi^2 - 4*theta^2) / (8*pi^2)"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
y = as.numeric(y)
if (any(y <= 0 | y >= 1))
stop("all y values must be in (0,1)")
- predictors.names = namesof("theta", .link.theta, earg=.earg, tag=FALSE)
+ predictors.names = namesof("theta", .link.theta, earg = .earg, tag = FALSE)
if (!length(etastart)) {
theta.init = rep(if (length( .init.theta)) .init.theta else
- median(y), length=n)
+ median(y), length = n)
- etastart = theta2eta(theta.init, .link.theta, earg= .earg)
+ etastart = theta2eta(theta.init, .link.theta, earg = .earg)
}
- }), list( .link.theta=link.theta, .earg=earg,
+ }), list( .link.theta=link.theta, .earg = earg,
.init.theta=init.theta ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- theta = eta2theta(eta, .link.theta, earg= .earg)
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ theta = eta2theta(eta, .link.theta, earg = .earg)
0.5 + theta/pi
- }, list( .link.theta=link.theta, .earg=earg ))),
+ }, list( .link.theta=link.theta, .earg = earg ))),
last = eval(substitute(expression({
misc$link = c(theta= .link.theta)
misc$earg = list(theta= .earg )
misc$expected = TRUE
- }), list( .link.theta=link.theta, .earg=earg ))),
- loglikelihood=eval(substitute(function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- theta = eta2theta(eta, .link.theta, earg= .earg)
+ }), list( .link.theta=link.theta, .earg = earg ))),
+ loglikelihood = eval(substitute(function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ theta = eta2theta(eta, .link.theta, earg = .earg)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w * (log(cos(theta)) + (-0.5+theta/pi)*log(y) +
(-0.5-theta/pi)*log1p(-y )))
- }, list( .link.theta=link.theta, .earg=earg ))),
- vfamily=c("hypersecant.1"),
- deriv=eval(substitute(expression({
- theta = eta2theta(eta, .link.theta, earg= .earg)
+ }, list( .link.theta=link.theta, .earg = earg ))),
+ vfamily = c("hypersecant.1"),
+ deriv = eval(substitute(expression({
+ theta = eta2theta(eta, .link.theta, earg = .earg)
dl.dthetas = -tan(theta) + log(y/(1-y)) / pi
- dparam.deta = dtheta.deta(theta, .link.theta, earg= .earg)
+ dparam.deta = dtheta.deta(theta, .link.theta, earg = .earg)
w * dl.dthetas * dparam.deta
- }), list( .link.theta=link.theta, .earg=earg ))),
- weight=expression({
+ }), list( .link.theta=link.theta, .earg = earg ))),
+ weight = expression({
d2l.dthetas2 = 1 / cos(theta)^2
wz = w * d2l.dthetas2 * dparam.deta^2
wz
@@ -4373,8 +4379,8 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
- leipnik = function(lmu="logit", llambda="loge",
- emu=list(), elambda=list(), imu=NULL, ilambda=NULL)
+ leipnik = function(lmu = "logit", llambda = "loge",
+ emu = list(), elambda=list(), imu = NULL, ilambda = NULL)
{
@@ -4393,64 +4399,64 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
" Beta[(lambda+1)/2, 1/2], ",
" 0 < y < 1, lambda > -1\n",
"Links: ",
- namesof("mu", lmu, earg=emu), ", ",
- namesof("lambda", llambda, earg=elambda), "\n\n",
+ namesof("mu", lmu, earg = emu), ", ",
+ namesof("lambda", llambda, earg = elambda), "\n\n",
"Mean: mu\n",
"Variance: mu*(1-mu)"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
y = as.numeric(y)
if (any(y <= 0 | y >= 1))
stop("all y values must be in (0,1)")
predictors.names =
- c(namesof("mu", .lmu, earg=.emu, tag=FALSE),
- namesof("lambda", .llambda, earg=.elambda, tag=FALSE))
+ c(namesof("mu", .lmu, earg = .emu, tag = FALSE),
+ namesof("lambda", .llambda, earg = .elambda, tag = FALSE))
if (!length(etastart)) {
mu.init = rep(if (length( .imu)) .imu else
- (y), length=n)
+ (y), length = n)
lambda.init = rep(if (length( .ilambda)) .ilambda else
- 1/var(y), length=n)
- etastart = cbind(theta2eta(mu.init, .lmu, earg= .emu),
- theta2eta(lambda.init, .llambda, earg= .elambda))
- }
- }), list( .lmu=lmu, .llambda=llambda, .imu=imu, .ilambda=ilambda,
- .emu=emu, .elambda=elambda ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- eta2theta(eta[,1], .lmu, earg= .emu)
- }, list( .lmu=lmu,
- .emu=emu, .elambda=elambda ))),
+ 1/var(y), length = n)
+ etastart = cbind(theta2eta(mu.init, .lmu, earg = .emu),
+ theta2eta(lambda.init, .llambda, earg = .elambda))
+ }
+ }), list( .lmu = lmu, .llambda = llambda, .imu=imu, .ilambda=ilambda,
+ .emu = emu, .elambda = elambda ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[,1], .lmu, earg = .emu)
+ }, list( .lmu = lmu,
+ .emu = emu, .elambda = elambda ))),
last = eval(substitute(expression({
misc$link = c(mu= .lmu, lambda= .llambda)
misc$earg = list(mu= .emu, lambda= .elambda)
misc$pooled.weight = pooled.weight
misc$expected = FALSE
- }), list( .lmu=lmu, .llambda=llambda,
- .emu=emu, .elambda=elambda ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- lambda = eta2theta(eta[,2], .llambda, earg= .elambda)
+ }), list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ lambda = eta2theta(eta[,2], .llambda, earg = .elambda)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w * (-0.5*log(y*(1-y)) - 0.5 * lambda *
log1p((y-mu)^2 / (y*(1-y ))) - lgamma((lambda+1)/2) +
lgamma(1+ lambda/2 )))
- }, list( .llambda=llambda,
- .emu=emu, .elambda=elambda ))),
- vfamily=c("leipnik"),
- deriv=eval(substitute(expression({
- lambda = eta2theta(eta[,2], .llambda, earg= .elambda)
+ }, list( .llambda = llambda,
+ .emu = emu, .elambda = elambda ))),
+ vfamily = c("leipnik"),
+ deriv = eval(substitute(expression({
+ lambda = eta2theta(eta[,2], .llambda, earg = .elambda)
dl.dthetas = w * cbind(dl.dmu = lambda*(y-mu) / (y*(1-y)+(y-mu)^2),
dl.dlambda= -0.5 * log1p((y-mu)^2 / (y*(1-y))) -
0.5*digamma((lambda+1)/2) +
0.5*digamma(1+lambda/2))
- dmu.deta = dtheta.deta(mu, .lmu, earg= .emu)
- dlambda.deta = dtheta.deta(lambda, .llambda, earg= .elambda)
+ dmu.deta = dtheta.deta(mu, .lmu, earg = .emu)
+ dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda)
dtheta.detas = cbind(dmu.deta, dlambda.deta)
dl.dthetas * dtheta.detas
- }), list( .lmu=lmu, .llambda=llambda,
- .emu=emu, .elambda=elambda ))),
- weight=eval(substitute(expression({
+ }), list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda ))),
+ weight = eval(substitute(expression({
if (is.R()) {
denominator = y*(1-y) + (y-mu)^2
d2l.dthetas2 = array(NA, c(n,2,2))
@@ -4469,8 +4475,8 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
wz[,iam(1,2,M)] = -d2l.dthetas2[,1,2] * dtheta.detas[,1] *
dtheta.detas[,2]
if (!.expected) {
- d2mudeta2 = d2theta.deta2(mu, .lmu, earg= .emu)
- d2lambda = d2theta.deta2(lambda, .llambda, earg= .elambda)
+ d2mudeta2 = d2theta.deta2(mu, .lmu, earg = .emu)
+ d2lambda = d2theta.deta2(lambda, .llambda, earg = .elambda)
wz[,iam(1,1,M)] = wz[,iam(1,1,M)] - dl.dthetas[,1] * d2mudeta2
wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - dl.dthetas[,2] * d2lambda
}
@@ -4485,27 +4491,27 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
pooled.weight = FALSE
wz
- }), list( .lmu=lmu, .llambda=llambda, .expected=FALSE,
- .emu=emu, .elambda=elambda ))))
+ }), list( .lmu = lmu, .llambda = llambda, .expected = FALSE,
+ .emu = emu, .elambda = elambda ))))
}
- invbinomial = function(lrho="elogit", llambda="loge",
+ invbinomial = function(lrho = "elogit", llambda = "loge",
erho=if (lrho == "elogit") list(min = 0.5, max = 1) else list(),
elambda=list(),
- irho=NULL,
- ilambda=NULL,
- zero=NULL)
+ irho = NULL,
+ ilambda = NULL,
+ zero = NULL)
{
if (mode(lrho) != "character" && mode(lrho) != "name")
lrho = as.character(substitute(lrho))
if (mode(llambda) != "character" && mode(llambda) != "name")
llambda = as.character(substitute(llambda))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(erho)) erho = list()
if (!is.list(elambda)) elambda = list()
@@ -4513,71 +4519,71 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
new("vglmff",
blurb = c("Inverse binomial distribution\n\n",
"Links: ",
- namesof("rho", lrho, earg=erho), ", ",
- namesof("lambda", llambda, earg=elambda), "\n",
+ namesof("rho", lrho, earg = erho), ", ",
+ namesof("lambda", llambda, earg = elambda), "\n",
"Mean: lambda*(1-rho)/(2*rho-1)\n",
"Variance: lambda*rho*(1-rho)/(2*rho-1)^3\n"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("rho", .lrho, earg=.erho, tag=FALSE),
- namesof("lambda", .llambda, earg=.elambda, tag=FALSE))
+ c(namesof("rho", .lrho, earg = .erho, tag = FALSE),
+ namesof("lambda", .llambda, earg = .elambda, tag = FALSE))
if (!length(etastart)) {
covarn = sd(y)^2 / weighted.mean(y, w)
temp1 = 0.5 + (1 + sqrt(1+8*covarn)) / (8*covarn)
temp2 = 0.5 + (1 - sqrt(1+8*covarn)) / (8*covarn)
init.rho = rep(if (length( .irho)) .irho else {
ifelse(temp1 > 0.5 && temp1 < 1, temp1, temp2)
- }, length=n)
+ }, length = n)
init.lambda = rep(if (length( .ilambda)) .ilambda else {
(2*init.rho-1) * weighted.mean(y, w) / (1-init.rho)
- }, length=n)
- etastart = cbind(theta2eta(init.rho, .lrho, earg= .erho),
- theta2eta(init.lambda, .llambda, earg= .elambda))
+ }, length = n)
+ etastart = cbind(theta2eta(init.rho, .lrho, earg = .erho),
+ theta2eta(init.lambda, .llambda, earg = .elambda))
}
- }), list( .llambda=llambda, .lrho=lrho,
- .elambda=elambda, .erho=erho,
+ }), list( .llambda = llambda, .lrho=lrho,
+ .elambda = elambda, .erho=erho,
.ilambda=ilambda, .irho=irho ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- rho = eta2theta(eta[,1], .lrho, earg= .erho)
- lambda = eta2theta(eta[,2], .llambda, earg= .elambda)
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ rho = eta2theta(eta[,1], .lrho, earg = .erho)
+ lambda = eta2theta(eta[,2], .llambda, earg = .elambda)
ifelse(rho > 0.5, lambda*(1-rho)/(2*rho-1), NA)
- }, list( .llambda=llambda, .lrho=lrho,
- .elambda=elambda, .erho=erho ))),
+ }, list( .llambda = llambda, .lrho=lrho,
+ .elambda = elambda, .erho=erho ))),
last = eval(substitute(expression({
misc$link = c(rho= .lrho, lambda= .llambda)
misc$earg = list(rho= .erho, lambda= .elambda)
misc$pooled.weight = pooled.weight
- }), list( .llambda=llambda, .lrho=lrho,
- .elambda=elambda, .erho=erho ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
- rho = eta2theta(eta[,1], .lrho, earg= .erho)
- lambda = eta2theta(eta[,2], .llambda, earg= .elambda)
+ }), list( .llambda = llambda, .lrho=lrho,
+ .elambda = elambda, .erho=erho ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ rho = eta2theta(eta[,1], .lrho, earg = .erho)
+ lambda = eta2theta(eta[,2], .llambda, earg = .elambda)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w*(log(lambda) - lgamma(2*y+lambda) - lgamma(y+1) -
lgamma(y+lambda+1) + y*log(rho) + y*log1p(-rho) +
lambda*log(rho)))
- }, list( .llambda=llambda, .lrho=lrho,
- .elambda=elambda, .erho=erho ))),
- vfamily=c("invbinomial"),
- deriv=eval(substitute(expression({
- rho = eta2theta(eta[,1], .lrho, earg= .erho)
- lambda = eta2theta(eta[,2], .llambda, earg= .elambda)
+ }, list( .llambda = llambda, .lrho=lrho,
+ .elambda = elambda, .erho=erho ))),
+ vfamily = c("invbinomial"),
+ deriv = eval(substitute(expression({
+ rho = eta2theta(eta[,1], .lrho, earg = .erho)
+ lambda = eta2theta(eta[,2], .llambda, earg = .elambda)
dl.drho = (y + lambda)/rho - y/(1-rho)
dl.dlambda = 1/lambda - digamma(2*y+lambda) - digamma(y+lambda+1) +
log(rho)
- drho.deta = dtheta.deta(rho, .lrho, earg= .erho)
- dlambda.deta = dtheta.deta(lambda, .llambda, earg= .elambda)
+ drho.deta = dtheta.deta(rho, .lrho, earg = .erho)
+ dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda)
w * cbind( dl.drho * drho.deta, dl.dlambda * dlambda.deta )
- }), list( .llambda=llambda, .lrho=lrho,
- .elambda=elambda, .erho=erho ))),
- weight=eval(substitute(expression({
+ }), list( .llambda = llambda, .lrho=lrho,
+ .elambda = elambda, .erho=erho ))),
+ weight = eval(substitute(expression({
ed2l.drho2 = (mu+lambda) / rho^2 + mu / (1-rho)^2
d2l.dlambda2 = 1/(lambda^2) + trigamma(2*y+lambda)+trigamma(y+lambda+1)
ed2l.dlambdarho = -1/rho
@@ -4586,8 +4592,8 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
wz[,iam(1,2,M)] = ed2l.dlambdarho * dlambda.deta * drho.deta
wz[,iam(2,2,M)] = d2l.dlambda2 * dlambda.deta^2
- d2rhodeta2 = d2theta.deta2(rho, .lrho, earg= .erho)
- d2lambda.deta2 = d2theta.deta2(lambda, .llambda, earg= .elambda)
+ d2rhodeta2 = d2theta.deta2(rho, .lrho, earg = .erho)
+ d2lambda.deta2 = d2theta.deta2(lambda, .llambda, earg = .elambda)
wz = w * wz
if (intercept.only) {
@@ -4599,18 +4605,18 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
pooled.weight = FALSE
wz
- }), list( .llambda=llambda, .lrho=lrho,
- .elambda=elambda, .erho=erho ))))
+ }), list( .llambda = llambda, .lrho=lrho,
+ .elambda = elambda, .erho=erho ))))
}
- genpoisson = function(llambda="elogit", ltheta="loge",
- elambda=if (llambda=="elogit") list(min=-1,max=1) else list(),
- etheta=list(),
- ilambda=NULL, itheta=NULL,
- use.approx=TRUE,
- method.init=1, zero=1)
+ genpoisson = function(llambda = "elogit", ltheta = "loge",
+ elambda=if (llambda == "elogit") list(min=-1,max=1) else list(),
+ etheta = list(),
+ ilambda = NULL, itheta = NULL,
+ use.approx = TRUE,
+ method.init = 1, zero=1)
{
@@ -4618,11 +4624,11 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
llambda = as.character(substitute(llambda))
if (mode(ltheta) != "character" && mode(ltheta) != "name")
ltheta = as.character(substitute(ltheta))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(elambda)) elambda = list()
if (!is.list(etheta)) etheta = list()
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
method.init > 2)
stop("'method.init' must be 1 or 2")
if (!is.logical(use.approx) || length(use.approx) != 1)
@@ -4631,19 +4637,19 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
new("vglmff",
blurb = c("Generalized Poisson distribution\n\n",
"Links: ",
- namesof("lambda", llambda, earg=elambda), ", ",
- namesof("theta", ltheta, earg=etheta), "\n",
+ namesof("lambda", llambda, earg = elambda), ", ",
+ namesof("theta", ltheta, earg = etheta), "\n",
"Mean: theta / (1-lambda)\n",
"Variance: theta / (1-lambda)^3"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("lambda", .llambda, earg=.elambda, tag=FALSE),
- namesof("theta", .ltheta, earg=.etheta, tag=FALSE))
+ c(namesof("lambda", .llambda, earg = .elambda, tag = FALSE),
+ namesof("theta", .ltheta, earg = .etheta, tag = FALSE))
init.lambda = if ( .method.init == 1)
1 - sqrt(weighted.mean(y,w) / var(y)) else 0.5
init.theta = if ( .method.init == 1)
@@ -4661,33 +4667,33 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
init.lambda = 0.9
if (!length(etastart)) {
lambda = rep(if (length( .ilambda)) .ilambda else
- init.lambda, length=n)
- theta = rep(if (length( .itheta)) .itheta else init.theta, length=n)
- etastart = cbind(theta2eta(lambda, .llambda, earg= .elambda),
- theta2eta(theta, .ltheta, earg= .etheta))
- }
- }), list( .ltheta=ltheta, .llambda=llambda,
- .etheta=etheta, .elambda=elambda,
- .method.init=method.init,
+ init.lambda, length = n)
+ theta = rep(if (length( .itheta)) .itheta else init.theta, length = n)
+ etastart = cbind(theta2eta(lambda, .llambda, earg = .elambda),
+ theta2eta(theta, .ltheta, earg = .etheta))
+ }
+ }), list( .ltheta=ltheta, .llambda = llambda,
+ .etheta=etheta, .elambda = elambda,
+ .method.init = method.init,
.itheta=itheta, .ilambda=ilambda )) ),
- inverse=eval(substitute(function(eta, extra=NULL) {
- lambda = eta2theta(eta[,1], .llambda, earg= .elambda)
- theta = eta2theta(eta[,2], .ltheta, earg= .etheta)
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ lambda = eta2theta(eta[,1], .llambda, earg = .elambda)
+ theta = eta2theta(eta[,2], .ltheta, earg = .etheta)
theta/(1-lambda)
- }, list( .ltheta=ltheta, .llambda=llambda,
- .etheta=etheta, .elambda=elambda ))),
+ }, list( .ltheta=ltheta, .llambda = llambda,
+ .etheta=etheta, .elambda = elambda ))),
last = eval(substitute(expression({
misc$link = c(lambda=.llambda, theta=.ltheta)
misc$earg = list(lambda=.elambda, theta=.etheta)
if (! .use.approx )
misc$pooled.weight = pooled.weight
- }), list( .ltheta=ltheta, .llambda=llambda,
+ }), list( .ltheta=ltheta, .llambda = llambda,
.use.approx = use.approx,
- .etheta=etheta, .elambda=elambda ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
- lambda = eta2theta(eta[,1], .llambda, earg= .elambda)
- theta = eta2theta(eta[,2], .ltheta, earg= .etheta)
+ .etheta=etheta, .elambda = elambda ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ lambda = eta2theta(eta[,1], .llambda, earg = .elambda)
+ theta = eta2theta(eta[,2], .ltheta, earg = .etheta)
index = (y == 0)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
@@ -4695,20 +4701,20 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
sum(w[!index] * (-y[!index]*lambda[!index]-theta[!index] +
(y[!index]-1)*log(theta[!index]+y[!index]*lambda[!index]) +
log(theta[!index]) - lgamma(y[!index]+1)) )
- }, list( .ltheta=ltheta, .llambda=llambda,
- .etheta=etheta, .elambda=elambda ))),
- vfamily=c("genpoisson"),
- deriv=eval(substitute(expression({
- lambda = eta2theta(eta[,1], .llambda, earg= .elambda)
- theta = eta2theta(eta[,2], .ltheta, earg= .etheta)
+ }, list( .ltheta=ltheta, .llambda = llambda,
+ .etheta=etheta, .elambda = elambda ))),
+ vfamily = c("genpoisson"),
+ deriv = eval(substitute(expression({
+ lambda = eta2theta(eta[,1], .llambda, earg = .elambda)
+ theta = eta2theta(eta[,2], .ltheta, earg = .etheta)
dl.dlambda = -y + y*(y-1)/(theta+y*lambda)
dl.dtheta = -1 + (y-1)/(theta+y*lambda) + 1/theta
- dTHETA.deta = dtheta.deta(theta, .ltheta, earg= .etheta)
- dlambda.deta = dtheta.deta(lambda, .llambda, earg= .elambda)
+ dTHETA.deta = dtheta.deta(theta, .ltheta, earg = .etheta)
+ dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda)
w * cbind( dl.dlambda * dlambda.deta, dl.dtheta * dTHETA.deta )
- }), list( .ltheta=ltheta, .llambda=llambda,
- .etheta=etheta, .elambda=elambda ))),
- weight=eval(substitute(expression({
+ }), list( .ltheta=ltheta, .llambda = llambda,
+ .etheta=etheta, .elambda = elambda ))),
+ weight = eval(substitute(expression({
wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
if ( .use.approx ) {
BBB = (theta+2)*(theta+2*lambda-theta*lambda)-(theta^2)*(1-lambda)
@@ -4727,8 +4733,8 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
wz[,iam(2,2,M)] = -d2l.dtheta2 * dTHETA.deta^2
wz[,iam(1,2,M)] = -d2l.dthetalambda * dTHETA.deta * dlambda.deta
- d2THETA.deta2 = d2theta.deta2(theta, .ltheta, earg= .etheta)
- d2lambdadeta2 = d2theta.deta2(lambda, .llambda, earg= .elambda)
+ d2THETA.deta2 = d2theta.deta2(theta, .ltheta, earg = .etheta)
+ d2lambdadeta2 = d2theta.deta2(lambda, .llambda, earg = .elambda)
wz[,iam(1,1,M)] = wz[,iam(1,1,M)] - dl.dlambda * d2lambdadeta2
wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - dl.dtheta * d2THETA.deta2
wz = w * wz
@@ -4743,9 +4749,9 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
pooled.weight = FALSE
}
wz
- }), list( .ltheta=ltheta, .llambda=llambda,
+ }), list( .ltheta=ltheta, .llambda = llambda,
.use.approx = use.approx,
- .etheta=etheta, .elambda=elambda ))))
+ .etheta=etheta, .elambda = elambda ))))
}
@@ -4753,13 +4759,13 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
-dlgamma = function(x, location=0, scale=1, k=1, log=FALSE) {
+dlgamma = function(x, location=0, scale=1, k=1, log = FALSE) {
if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
rm(log)
- if (!is.Numeric(scale, posit=TRUE)) stop("bad input for argument 'scale'")
- if (!is.Numeric(k, posit=TRUE)) stop("bad input for argument 'k'")
+ if (!is.Numeric(scale, posit = TRUE)) stop("bad input for argument 'scale'")
+ if (!is.Numeric(k, posit = TRUE)) stop("bad input for argument 'k'")
z = (x-location) / scale
if (log.arg) {
k * z - exp(z) - log(scale) - lgamma(k)
@@ -4768,29 +4774,29 @@ dlgamma = function(x, location=0, scale=1, k=1, log=FALSE) {
}
}
plgamma = function(q, location=0, scale=1, k=1) {
- if (!is.Numeric(scale, posit=TRUE)) stop("bad input for argument 'scale'")
- if (!is.Numeric(k, posit=TRUE)) stop("bad input for argument 'k'")
+ if (!is.Numeric(scale, posit = TRUE)) stop("bad input for argument 'scale'")
+ if (!is.Numeric(k, posit = TRUE)) stop("bad input for argument 'k'")
z = (q-location)/scale
pgamma(exp(z), k)
}
qlgamma = function(p, location=0, scale=1, k=1) {
- if (!is.Numeric(scale, posit=TRUE)) stop("bad input for argument 'scale'")
- if (!is.Numeric(k, posit=TRUE)) stop("bad input for argument 'k'")
+ if (!is.Numeric(scale, posit = TRUE)) stop("bad input for argument 'scale'")
+ if (!is.Numeric(k, posit = TRUE)) stop("bad input for argument 'k'")
q = qgamma(p, k)
location + scale * log(q)
}
rlgamma = function(n, location=0, scale=1, k=1) {
- if (!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
stop("bad input for argument 'n'")
- if (!is.Numeric(scale, posit=TRUE)) stop("bad input for argument 'scale'")
- if (!is.Numeric(k, posit=TRUE)) stop("bad input for argument 'k'")
+ if (!is.Numeric(scale, posit = TRUE)) stop("bad input for argument 'scale'")
+ if (!is.Numeric(k, posit = TRUE)) stop("bad input for argument 'k'")
y = rgamma(n, k)
location + scale * log(y)
}
- lgammaff = function(link="loge", earg=list(), init.k=NULL)
+ lgammaff = function(link = "loge", earg = list(), init.k = NULL)
{
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -4799,55 +4805,60 @@ rlgamma = function(n, location=0, scale=1, k=1) {
new("vglmff",
blurb = c("Log-gamma distribution f(y) = exp(ky - e^y)/gamma(k)), k>0\n\n",
"Link: ",
- namesof("k", link, earg=earg), "\n", "\n",
+ namesof("k", link, earg = earg), "\n", "\n",
"Mean: digamma(k)", "\n"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("k", .link, earg=.earg, tag=FALSE)
+ predictors.names = namesof("k", .link, earg = .earg, tag = FALSE)
if (!length(etastart)) {
- k.init = if (length( .init.k)) rep( .init.k, len=length(y)) else {
+ k.init = if (length( .init.k)) rep( .init.k, len = length(y)) else {
medy = median(y)
if (medy < 2) 5 else if (medy < 4) 20 else exp(0.7 * medy)
}
- etastart = theta2eta(k.init, .link, earg= .earg)
+ etastart = theta2eta(k.init, .link, earg = .earg)
}
- }), list( .link=link, .earg=earg, .init.k=init.k ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- k = eta2theta(eta, .link, earg= .earg)
+ }), list( .link = link, .earg = earg, .init.k=init.k ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ k = eta2theta(eta, .link, earg = .earg)
digamma(k)
- }, list( .link=link, .earg=earg ))),
+ }, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
misc$link = c(k= .link )
misc$earg = list(k= .earg )
misc$expected = TRUE
- }), list( .link=link, .earg=earg ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- kk = eta2theta(eta, .link, earg= .earg)
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ kk = eta2theta(eta, .link, earg = .earg)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dlgamma(x=y, location=0, scale=1, k=kk, log=TRUE))
+ sum(w * dlgamma(x=y, location=0, scale=1, k=kk, log = TRUE))
}
- }, list( .link=link, .earg=earg ))),
- vfamily=c("lgammaff"),
- deriv=eval(substitute(expression({
- k = eta2theta(eta, .link, earg= .earg)
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("lgammaff"),
+ deriv = eval(substitute(expression({
+ k = eta2theta(eta, .link, earg = .earg)
dl.dk = y - digamma(k)
- dk.deta = dtheta.deta(k, .link, earg= .earg)
+ dk.deta = dtheta.deta(k, .link, earg = .earg)
w * dl.dk * dk.deta
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
ed2l.dk2 = trigamma(k)
wz = w * dk.deta^2 * ed2l.dk2
wz
- }), list( .link=link, .earg=earg ))))
+ }), list( .link = link, .earg = earg ))))
}
- lgamma3ff = function(llocation="identity", lscale="loge", lshape="loge",
- elocation=list(), escale=list(), eshape=list(),
- ilocation=NULL, iscale=NULL, ishape=1, zero=NULL)
+
+
+
+
+
+ lgamma3ff = function(llocation = "identity", lscale = "loge", lshape = "loge",
+ elocation = list(), escale = list(), eshape = list(),
+ ilocation = NULL, iscale = NULL, ishape=1, zero = NULL)
{
if (mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
@@ -4855,9 +4866,9 @@ rlgamma = function(n, location=0, scale=1, k=1) {
lscale = as.character(substitute(lscale))
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
- if (length(iscale) && !is.Numeric(iscale, posit=TRUE))
+ if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
stop("bad input for argument 'iscale'")
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
@@ -4868,75 +4879,75 @@ rlgamma = function(n, location=0, scale=1, k=1) {
" f(y) = exp(k(y-a)/b - e^((y-a)/b))/(b*gamma(k)), ",
"location=a, scale=b>0, shape=k>0\n\n",
"Links: ",
- namesof("location", llocation, earg=elocation), ", ",
- namesof("scale", lscale, earg=escale), ", ",
- namesof("shape", lshape, earg=eshape), "\n\n",
+ namesof("location", llocation, earg = elocation), ", ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape", lshape, earg = eshape), "\n\n",
"Mean: a + b*digamma(k)", "\n"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("location", .llocation, earg=.elocation, tag=FALSE),
- namesof("scale", .lscale, earg=.escale, tag=FALSE),
- namesof("shape", .lshape, earg=.eshape, tag=FALSE))
+ c(namesof("location", .llocation, earg = .elocation, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale, tag = FALSE),
+ namesof("shape", .lshape, earg = .eshape, tag = FALSE))
if (!length(etastart)) {
- k.init = if (length( .ishape)) rep( .ishape, len=length(y)) else {
- rep(exp(median(y)), len=length(y))
+ k.init = if (length( .ishape)) rep( .ishape, len = length(y)) else {
+ rep(exp(median(y)), len = length(y))
}
- scale.init = if (length( .iscale)) rep( .iscale, len=length(y)) else {
- rep(sqrt(var(y) / trigamma(k.init)), len=length(y))
+ scale.init = if (length( .iscale)) rep( .iscale, len = length(y)) else {
+ rep(sqrt(var(y) / trigamma(k.init)), len = length(y))
}
- loc.init = if (length( .iloc)) rep( .iloc, len=length(y)) else {
- rep(median(y) - scale.init * digamma(k.init), len=length(y))
+ loc.init = if (length( .iloc)) rep( .iloc, len = length(y)) else {
+ rep(median(y) - scale.init * digamma(k.init), len = length(y))
}
- etastart = cbind(theta2eta(loc.init, .llocation, earg= .elocation),
- theta2eta(scale.init, .lscale, earg= .escale),
- theta2eta(k.init, .lshape, earg= .eshape))
- }
- }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation=elocation, .escale=escale, .eshape=eshape,
- .iloc=ilocation, .iscale=iscale, .ishape=ishape ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- eta2theta(eta[,1], .llocation, earg= .elocation) +
- eta2theta(eta[,2], .lscale, earg= .escale) *
- digamma(eta2theta(eta[,3], .lshape, earg= .eshape))
- }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation=elocation, .escale=escale, .eshape=eshape ))),
+ etastart = cbind(theta2eta(loc.init, .llocation, earg = .elocation),
+ theta2eta(scale.init, .lscale, earg = .escale),
+ theta2eta(k.init, .lshape, earg = .eshape))
+ }
+ }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape,
+ .iloc=ilocation, .iscale = iscale, .ishape = ishape ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[,1], .llocation, earg = .elocation) +
+ eta2theta(eta[,2], .lscale, earg = .escale) *
+ digamma(eta2theta(eta[,3], .lshape, earg = .eshape))
+ }, list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape ))),
last = eval(substitute(expression({
misc$link = c(location= .llocation, scale= .lscale, shape= .lshape)
misc$earg = list(location= .elocation, scale= .escale, shape= .eshape)
- }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation=elocation, .escale=escale, .eshape=eshape ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- aa = eta2theta(eta[,1], .llocation, earg= .elocation)
- bb = eta2theta(eta[,2], .lscale, earg= .escale)
- kk = eta2theta(eta[,3], .lshape, earg= .eshape)
+ }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa = eta2theta(eta[,1], .llocation, earg = .elocation)
+ bb = eta2theta(eta[,2], .lscale, earg = .escale)
+ kk = eta2theta(eta[,3], .lshape, earg = .eshape)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dlgamma(x=y, location=aa, scale=bb, k=kk, log=TRUE))
- }
- }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation=elocation, .escale=escale, .eshape=eshape ))),
- vfamily=c("lgamma3ff"),
- deriv=eval(substitute(expression({
- a = eta2theta(eta[,1], .llocation, earg= .elocation)
- b = eta2theta(eta[,2], .lscale, earg= .escale)
- k = eta2theta(eta[,3], .lshape, earg= .eshape)
+ sum(w * dlgamma(x=y, location=aa, scale=bb, k=kk, log = TRUE))
+ }
+ }, list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape ))),
+ vfamily = c("lgamma3ff"),
+ deriv = eval(substitute(expression({
+ a = eta2theta(eta[,1], .llocation, earg = .elocation)
+ b = eta2theta(eta[,2], .lscale, earg = .escale)
+ k = eta2theta(eta[,3], .lshape, earg = .eshape)
zedd = (y-a)/b
dl.da = (exp(zedd) - k) / b
dl.db = (zedd * (exp(zedd) - k) - 1) / b
dl.dk = zedd - digamma(k)
- da.deta = dtheta.deta(a, .llocation, earg= .elocation)
- db.deta = dtheta.deta(b, .lscale, earg= .escale)
- dk.deta = dtheta.deta(k, .lshape, earg= .eshape)
+ da.deta = dtheta.deta(a, .llocation, earg = .elocation)
+ db.deta = dtheta.deta(b, .lscale, earg = .escale)
+ dk.deta = dtheta.deta(k, .lshape, earg = .eshape)
w * cbind(dl.da * da.deta, dl.db * db.deta, dl.dk * dk.deta)
- }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation=elocation, .escale=escale, .eshape=eshape ))),
- weight=eval(substitute(expression({
+ }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape ))),
+ weight = eval(substitute(expression({
ed2l.da2 = k / b^2
ed2l.db2 = (1 + k*(trigamma(k+1) + (digamma(k+1))^2)) / b^2
ed2l.dk2 = trigamma(k)
@@ -4952,13 +4963,13 @@ rlgamma = function(n, location=0, scale=1, k=1) {
wz[,iam(2,3,M)] = ed2l.dbdk * db.deta * dk.deta
wz = w * wz
wz
- }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation=elocation, .escale=escale, .eshape=eshape ))))
+ }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape ))))
}
- prentice74 = function(llocation="identity", lscale="loge", lshape="identity",
- elocation=list(), escale=list(), eshape=list(),
- ilocation=NULL, iscale=NULL, ishape=NULL, zero=2:3)
+ prentice74 = function(llocation = "identity", lscale = "loge", lshape = "identity",
+ elocation = list(), escale = list(), eshape = list(),
+ ilocation = NULL, iscale = NULL, ishape = NULL, zero = 2:3)
{
if (mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
@@ -4966,9 +4977,9 @@ rlgamma = function(n, location=0, scale=1, k=1) {
lscale = as.character(substitute(lscale))
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
- if (length(iscale) && !is.Numeric(iscale, posit=TRUE))
+ if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
stop("bad input for argument 'iscale'")
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
@@ -4979,65 +4990,65 @@ rlgamma = function(n, location=0, scale=1, k=1) {
" f(y) = |q| * exp(w/q^2 - e^w) / (b*gamma(1/q^2)) ,\n",
"w=(y-a)*q/b + digamma(1/q^2), location=a, scale=b>0, shape=q\n\n",
"Links: ",
- namesof("location", llocation, earg=elocation), ", ",
- namesof("scale", lscale, earg=escale), ", ",
- namesof("shape", lshape, earg=eshape), "\n", "\n",
+ namesof("location", llocation, earg = elocation), ", ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape", lshape, earg = eshape), "\n", "\n",
"Mean: a", "\n"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("location", .llocation, earg=.elocation, tag=FALSE),
- namesof("scale", .lscale, earg=.escale, tag=FALSE),
- namesof("shape", .lshape, earg=.eshape, tag=FALSE))
+ c(namesof("location", .llocation, earg = .elocation, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale, tag = FALSE),
+ namesof("shape", .lshape, earg = .eshape, tag = FALSE))
if (!length(etastart)) {
sdy = sqrt(var(y))
- k.init = if (length( .ishape)) rep( .ishape, len=length(y)) else {
+ k.init = if (length( .ishape)) rep( .ishape, len = length(y)) else {
skewness = mean((y-mean(y))^3) / sdy^3 # <0 Left Skewed
- rep(-skewness, len=length(y))
+ rep(-skewness, len = length(y))
}
- scale.init = if (length( .iscale)) rep( .iscale, len=length(y)) else {
- rep(sdy, len=length(y))
+ scale.init = if (length( .iscale)) rep( .iscale, len = length(y)) else {
+ rep(sdy, len = length(y))
}
- loc.init = if (length( .iloc)) rep( .iloc, len=length(y)) else {
- rep(median(y), len=length(y))
+ loc.init = if (length( .iloc)) rep( .iloc, len = length(y)) else {
+ rep(median(y), len = length(y))
}
- etastart = cbind(theta2eta(loc.init, .llocation, earg= .elocation),
- theta2eta(scale.init, .lscale, earg= .escale),
- theta2eta(k.init, .lshape, earg= .eshape))
- }
- }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation=elocation, .escale=escale, .eshape=eshape,
- .iloc=ilocation, .iscale=iscale, .ishape=ishape ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- eta2theta(eta[,1], .llocation, earg= .elocation)
- }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation=elocation, .escale=escale, .eshape=eshape ))),
+ etastart = cbind(theta2eta(loc.init, .llocation, earg = .elocation),
+ theta2eta(scale.init, .lscale, earg = .escale),
+ theta2eta(k.init, .lshape, earg = .eshape))
+ }
+ }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape,
+ .iloc=ilocation, .iscale = iscale, .ishape = ishape ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[,1], .llocation, earg = .elocation)
+ }, list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape ))),
last = eval(substitute(expression({
misc$link = c(location= .llocation, scale= .lscale, shape= .lshape)
misc$earg = list(location= .elocation, scale= .escale, shape= .eshape)
- }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation=elocation, .escale=escale, .eshape=eshape ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- a = eta2theta(eta[,1], .llocation, earg= .elocation)
- b = eta2theta(eta[,2], .lscale, earg= .escale)
- k = eta2theta(eta[,3], .lshape, earg= .eshape)
+ }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ a = eta2theta(eta[,1], .llocation, earg = .elocation)
+ b = eta2theta(eta[,2], .lscale, earg = .escale)
+ k = eta2theta(eta[,3], .lshape, earg = .eshape)
tmp55 = k^(-2)
doubw = (y-a)*k/b + digamma(tmp55)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w*(log(abs(k)) -log(b) -lgamma(tmp55) + doubw*tmp55 -exp(doubw )))
- }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation=elocation, .escale=escale, .eshape=eshape ))),
- vfamily=c("prentice74"),
- deriv=eval(substitute(expression({
- a = eta2theta(eta[,1], .llocation, earg= .elocation)
- b = eta2theta(eta[,2], .lscale, earg= .escale)
- k = eta2theta(eta[,3], .lshape, earg= .eshape)
+ }, list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape ))),
+ vfamily = c("prentice74"),
+ deriv = eval(substitute(expression({
+ a = eta2theta(eta[,1], .llocation, earg = .elocation)
+ b = eta2theta(eta[,2], .lscale, earg = .escale)
+ k = eta2theta(eta[,3], .lshape, earg = .eshape)
tmp55 = k^(-2)
mustar = digamma(tmp55)
doubw = (y-a)*k/b + mustar
@@ -5046,13 +5057,13 @@ rlgamma = function(n, location=0, scale=1, k=1) {
dl.db = ((doubw - mustar) * (exp(doubw) - tmp55) - 1) / b
dl.dk = 1/k - 2 * (doubw - mustar) / k^3 - (exp(doubw) - tmp55) *
((doubw - mustar) / k - 2 * sigmastar2 / k^3)
- da.deta = dtheta.deta(a, .llocation, earg= .elocation)
- db.deta = dtheta.deta(b, .lscale, earg= .escale)
- dk.deta = dtheta.deta(k, .lshape, earg= .eshape)
+ da.deta = dtheta.deta(a, .llocation, earg = .elocation)
+ db.deta = dtheta.deta(b, .lscale, earg = .escale)
+ dk.deta = dtheta.deta(k, .lshape, earg = .eshape)
w * cbind(dl.da * da.deta, dl.db * db.deta, dl.dk * dk.deta)
- }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation=elocation, .escale=escale, .eshape=eshape ))),
- weight=eval(substitute(expression({
+ }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape ))),
+ weight = eval(substitute(expression({
ed2l.da2 = 1 / b^2
ed2l.db2 = (1 + sigmastar2*tmp55) / b^2
ed2l.dk2 = tmp55 - 3*sigmastar2*tmp55^2 + 4*sigmastar2*tmp55^4 *
@@ -5069,8 +5080,8 @@ rlgamma = function(n, location=0, scale=1, k=1) {
wz[,iam(2,3,M)] = ed2l.dbdk * db.deta * dk.deta
wz = w * wz
wz
- }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
- .elocation=elocation, .escale=escale, .eshape=eshape ))))
+ }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ .elocation = elocation, .escale = escale, .eshape = eshape ))))
}
@@ -5080,11 +5091,11 @@ dgengamma = function(x, scale=1, d=1, k=1, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- if (!is.Numeric(scale, posit=TRUE))
+ if (!is.Numeric(scale, posit = TRUE))
stop("bad input for argument 'scale'")
- if (!is.Numeric(d, posit=TRUE))
+ if (!is.Numeric(d, posit = TRUE))
stop("bad input for argument 'd'")
- if (!is.Numeric(k, posit=TRUE))
+ if (!is.Numeric(k, posit = TRUE))
stop("bad input for argument 'k'")
N = max(length(x), length(scale), length(d), length(k))
x = rep(x, len=N); scale = rep(scale, len=N);
@@ -5108,11 +5119,11 @@ dgengamma = function(x, scale=1, d=1, k=1, log = FALSE) {
pgengamma = function(q, scale=1, d=1, k=1) {
- if (!is.Numeric(scale, posit=TRUE))
+ if (!is.Numeric(scale, posit = TRUE))
stop("bad input for argument 'scale'")
- if (!is.Numeric(d, posit=TRUE))
+ if (!is.Numeric(d, posit = TRUE))
stop("bad input for argument 'd'")
- if (!is.Numeric(k, posit=TRUE))
+ if (!is.Numeric(k, posit = TRUE))
stop("bad input for argument 'k'")
z = (q/scale)^d
pgamma(z, k)
@@ -5120,11 +5131,11 @@ pgengamma = function(q, scale=1, d=1, k=1) {
qgengamma = function(p, scale=1, d=1, k=1) {
- if (!is.Numeric(scale, posit=TRUE))
+ if (!is.Numeric(scale, posit = TRUE))
stop("bad input for argument 'scale'")
- if (!is.Numeric(d, posit=TRUE))
+ if (!is.Numeric(d, posit = TRUE))
stop("bad input for argument 'd'")
- if (!is.Numeric(k, posit=TRUE))
+ if (!is.Numeric(k, posit = TRUE))
stop("bad input for argument 'k'")
q = qgamma(p, k)
scale * q^(1/d)
@@ -5132,13 +5143,13 @@ qgengamma = function(p, scale=1, d=1, k=1) {
rgengamma = function(n, scale=1, d=1, k=1) {
- if (!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
stop("bad input for 'n'")
- if (!is.Numeric(scale, posit=TRUE))
+ if (!is.Numeric(scale, posit = TRUE))
stop("bad input for 'scale'")
- if (!is.Numeric(d, posit=TRUE))
+ if (!is.Numeric(d, posit = TRUE))
stop("bad input for 'd'")
- if (!is.Numeric(k, posit=TRUE))
+ if (!is.Numeric(k, posit = TRUE))
stop("bad input for 'k'")
y = rgamma(n, k)
scale * y^(1/d)
@@ -5146,9 +5157,9 @@ rgengamma = function(n, scale=1, d=1, k=1) {
- gengamma = function(lscale="loge", ld="loge", lk="loge",
- escale=list(), ed=list(), ek=list(),
- iscale=NULL, id=NULL, ik=NULL, zero=NULL)
+ gengamma = function(lscale = "loge", ld = "loge", lk = "loge",
+ escale = list(), ed=list(), ek=list(),
+ iscale = NULL, id = NULL, ik = NULL, zero = NULL)
{
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
@@ -5156,9 +5167,9 @@ rgengamma = function(n, scale=1, d=1, k=1) {
ld = as.character(substitute(ld))
if (mode(lk) != "character" && mode(lk) != "name")
lk = as.character(substitute(lk))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
- if (length(iscale) && !is.Numeric(iscale, posit=TRUE))
+ if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
stop("bad input for argument 'iscale'")
if (!is.list(escale)) escale = list()
if (!is.list(ed)) ed = list()
@@ -5169,77 +5180,77 @@ rgengamma = function(n, scale=1, d=1, k=1) {
" f(y) = d * b^(-d*k) * y^(d*k-1) * exp(-(y/b)^d) / gamma(k),\n",
"scale=b>0, d>0, k>0, y>0\n\n",
"Links: ",
- namesof("scale", lscale, earg=escale), ", ",
- namesof("d", ld, earg=ed), ", ",
- namesof("k", lk, earg=ek), "\n", "\n",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("d", ld, earg = ed), ", ",
+ namesof("k", lk, earg = ek), "\n", "\n",
"Mean: b*k", "\n"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
if (any(y <= 0)) stop("response must be have positive values only")
predictors.names =
- c(namesof("scale", .lscale, earg=.escale, tag=FALSE),
- namesof("d", .ld, earg=.ed, tag=FALSE),
- namesof("k", .lk, earg=.ek, tag=FALSE))
+ c(namesof("scale", .lscale, earg = .escale, tag = FALSE),
+ namesof("d", .ld, earg = .ed, tag = FALSE),
+ namesof("k", .lk, earg = .ek, tag = FALSE))
if (!length(etastart)) {
- b.init = if (length( .iscale)) rep( .iscale, len=length(y)) else {
- rep(mean(y^2) / mean(y), len=length(y))
+ b.init = if (length( .iscale)) rep( .iscale, len = length(y)) else {
+ rep(mean(y^2) / mean(y), len = length(y))
}
- k.init = if (length( .ik)) rep( .ik, len=length(y)) else {
- rep(mean(y) / b.init, len=length(y))
+ k.init = if (length( .ik)) rep( .ik, len = length(y)) else {
+ rep(mean(y) / b.init, len = length(y))
}
- d.init = if (length( .id)) rep( .id, len=length(y)) else {
- rep(digamma(k.init) / mean(log(y/b.init)), len=length(y))
+ d.init = if (length( .id)) rep( .id, len = length(y)) else {
+ rep(digamma(k.init) / mean(log(y/b.init)), len = length(y))
}
- etastart = cbind(theta2eta(b.init, .lscale, earg= .escale),
- theta2eta(d.init, .ld, earg= .ed),
- theta2eta(k.init, .lk, earg= .ek))
- }
- }), list( .lscale=lscale, .ld=ld, .lk=lk,
- .escale=escale, .ed=ed, .ek=ek,
- .iscale=iscale, .id=id, .ik=ik ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- b = eta2theta(eta[,1], .lscale, earg= .escale)
- k = eta2theta(eta[,3], .lk, earg= .ek)
+ etastart = cbind(theta2eta(b.init, .lscale, earg = .escale),
+ theta2eta(d.init, .ld, earg = .ed),
+ theta2eta(k.init, .lk, earg = .ek))
+ }
+ }), list( .lscale = lscale, .ld = ld, .lk = lk,
+ .escale = escale, .ed = ed, .ek = ek,
+ .iscale = iscale, .id=id, .ik=ik ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ b = eta2theta(eta[,1], .lscale, earg = .escale)
+ k = eta2theta(eta[,3], .lk, earg = .ek)
b * k
- }, list( .ld=ld, .lscale=lscale, .lk=lk,
- .escale=escale, .ed=ed, .ek=ek ))),
+ }, list( .ld = ld, .lscale = lscale, .lk = lk,
+ .escale = escale, .ed = ed, .ek = ek ))),
last = eval(substitute(expression({
misc$link = c(scale= .lscale, d= .ld, k= .lk)
misc$earg = list(scale= .escale, d= .ed, k= .ek)
- }), list( .lscale=lscale, .ld=ld, .lk=lk,
- .escale=escale, .ed=ed, .ek=ek ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- b = eta2theta(eta[,1], .lscale, earg= .escale)
- d = eta2theta(eta[,2], .ld, earg= .ed)
- k = eta2theta(eta[,3], .lk, earg= .ek)
+ }), list( .lscale = lscale, .ld = ld, .lk = lk,
+ .escale = escale, .ed = ed, .ek = ek ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ b = eta2theta(eta[,1], .lscale, earg = .escale)
+ d = eta2theta(eta[,2], .ld, earg = .ed)
+ k = eta2theta(eta[,3], .lk, earg = .ek)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(w * dgengamma(x=y, scale=b, d=d, k=k, log = TRUE))
}
- }, list( .lscale=lscale, .ld=ld, .lk=lk,
- .escale=escale, .ed=ed, .ek=ek ))),
- vfamily=c("gengamma"),
- deriv=eval(substitute(expression({
- b = eta2theta(eta[,1], .lscale, earg= .escale)
- d = eta2theta(eta[,2], .ld, earg= .ed)
- k = eta2theta(eta[,3], .lk, earg= .ek)
+ }, list( .lscale = lscale, .ld = ld, .lk = lk,
+ .escale = escale, .ed = ed, .ek = ek ))),
+ vfamily = c("gengamma"),
+ deriv = eval(substitute(expression({
+ b = eta2theta(eta[,1], .lscale, earg = .escale)
+ d = eta2theta(eta[,2], .ld, earg = .ed)
+ k = eta2theta(eta[,3], .lk, earg = .ek)
tmp22 = (y/b)^d
tmp33 = log(y/b)
dl.db = d * (tmp22 - k) / b
dl.dd = 1/d + tmp33 * (k - tmp22)
dl.dk = d * tmp33 - digamma(k)
- db.deta = dtheta.deta(b, .lscale, earg= .escale)
- dd.deta = dtheta.deta(d, .ld, earg= .ed)
- dk.deta = dtheta.deta(k, .lk, earg= .ek)
+ db.deta = dtheta.deta(b, .lscale, earg = .escale)
+ dd.deta = dtheta.deta(d, .ld, earg = .ed)
+ dk.deta = dtheta.deta(k, .lk, earg = .ek)
w * cbind(dl.db * db.deta, dl.dd * dd.deta, dl.dk * dk.deta)
- }), list( .lscale=lscale, .ld=ld, .lk=lk,
- .escale=escale, .ed=ed, .ek=ek ))),
- weight=eval(substitute(expression({
+ }), list( .lscale = lscale, .ld = ld, .lk = lk,
+ .escale = escale, .ed = ed, .ek = ek ))),
+ weight = eval(substitute(expression({
ed2l.db2 = k * (d/b)^2
ed2l.dd2 = (1 + k * (trigamma(k+1) + (digamma(k+1))^2)) / d^2
ed2l.dk2 = trigamma(k)
@@ -5255,17 +5266,17 @@ rgengamma = function(n, scale=1, d=1, k=1) {
wz[,iam(2,3,M)] = ed2l.dddk * dd.deta * dk.deta
wz = w * wz
wz
- }), list( .lscale=lscale, .ld=ld, .lk=lk,
- .escale=escale, .ed=ed, .ek=ek ))))
+ }), list( .lscale = lscale, .ld = ld, .lk = lk,
+ .escale = escale, .ed = ed, .ek = ek ))))
}
-dlog = function(x, prob, log=FALSE) {
+dlog = function(x, prob, log = FALSE) {
if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
rm(log)
- if (!is.Numeric(prob, posit=TRUE) || max(prob) >= 1)
+ if (!is.Numeric(prob, posit = TRUE) || max(prob) >= 1)
stop("bad input for argument 'prob'")
N = max(length(x), length(prob))
if (length(x) != N)
@@ -5274,7 +5285,7 @@ dlog = function(x, prob, log=FALSE) {
prob = rep(prob, len=N)
ox = !is.finite(x)
zero = ox | round(x) != x | x < 1
- ans = rep(0.0, len=length(x))
+ ans = rep(0.0, len = length(x))
if (log.arg) {
ans[ zero] = log(0.0)
ans[!zero] = x[!zero] * log(prob[!zero]) - log(x[!zero]) -
@@ -5290,9 +5301,9 @@ dlog = function(x, prob, log=FALSE) {
-plog = function(q, prob, log.p=FALSE) {
+plog = function(q, prob, log.p = FALSE) {
if (!is.Numeric(q)) stop("bad input for argument 'q'")
- if (!is.Numeric(prob, posit=TRUE) || max(prob) >= 1)
+ if (!is.Numeric(prob, posit = TRUE) || max(prob) >= 1)
stop("bad input for argument 'prob'")
N = max(length(q), length(prob))
q = rep(q, len=N); prob = rep(prob, len=N);
@@ -5316,7 +5327,7 @@ plog = function(q, prob, log.p=FALSE) {
seqq = sequence(floorq)
seqp = rep(prob, floorq)
onevector = (seqp^seqq / seqq) / (-log1p(-seqp))
- rlist = dotC(name="tyee_C_cum8sum",
+ rlist = dotC(name = "tyee_C_cum8sum",
as.double(onevector), answer=double(N),
as.integer(N), as.double(seqq),
as.integer(length(onevector)), notok=integer(1))
@@ -5333,9 +5344,9 @@ plog = function(q, prob, log.p=FALSE) {
if (FALSE)
-plog = function(q, prob, log.p=FALSE) {
+plog = function(q, prob, log.p = FALSE) {
if (!is.Numeric(q)) stop("bad input for argument 'q'")
- if (!is.Numeric(prob, posit=TRUE) || max(prob) >= 1)
+ if (!is.Numeric(prob, posit = TRUE) || max(prob) >= 1)
stop("bad input for argument 'prob'")
N = max(length(q), length(prob))
q = rep(q, len=N); prob = rep(prob, len=N);
@@ -5364,14 +5375,14 @@ plog = function(q, prob, log.p=FALSE) {
rlog = function(n, prob, Smallno=1.0e-6) {
- if (!is.Numeric(n, posit=TRUE, integ=TRUE))
+ if (!is.Numeric(n, posit = TRUE, integ = TRUE))
stop("bad input for argument 'n'")
- if (!is.Numeric(prob, allow=1, posit=TRUE) || max(prob) >= 1)
+ if (!is.Numeric(prob, allow = 1, posit = TRUE) || max(prob) >= 1)
stop("bad input for argument 'prob'")
- if (!is.Numeric(Smallno, posit=TRUE, allow=1) || Smallno > 0.01 ||
+ if (!is.Numeric(Smallno, posit = TRUE, allow = 1) || Smallno > 0.01 ||
Smallno < 2 * .Machine$double.eps)
stop("bad input for argument 'Smallno'")
- ans = rep(0.0, len=n)
+ ans = rep(0.0, len = n)
ptr1 = 1; ptr2 = 0
a = -1 / log1p(-prob)
@@ -5403,10 +5414,10 @@ rlog = function(n, prob, Smallno=1.0e-6) {
- logff = function(link="logit", earg=list(), init.c=NULL)
+ logff = function(link = "logit", earg = list(), init.c = NULL)
{
if (length(init.c) &&
- (!is.Numeric(init.c, posit=TRUE) || max(init.c) >= 1))
+ (!is.Numeric(init.c, posit = TRUE) || max(init.c) >= 1))
stop("init.c must be in (0,1)")
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -5415,12 +5426,12 @@ rlog = function(n, prob, Smallno=1.0e-6) {
new("vglmff",
blurb = c("Logarithmic distribution f(y) = a * c^y / y, y=1,2,3,...,\n",
" 0 < c < 1, a = -1 / log(1-c) \n\n",
- "Link: ", namesof("c", link, earg=earg), "\n", "\n",
+ "Link: ", namesof("c", link, earg = earg), "\n", "\n",
"Mean: a * c / (1 - c)", "\n"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("c", .link, earg=.earg, tag=FALSE)
+ predictors.names = namesof("c", .link, earg = .earg, tag = FALSE)
if (!length(etastart)) {
llfun = function(cc, y, w) {
a = -1 / log1p(-cc)
@@ -5429,53 +5440,53 @@ rlog = function(n, prob, Smallno=1.0e-6) {
c.init = if (length( .init.c )) .init.c else
getInitVals(gvals=seq(0.05, 0.95, len=9), llfun=llfun, y=y, w=w)
c.init = rep(c.init, length=length(y))
- etastart = theta2eta(c.init, .link, earg= .earg)
+ etastart = theta2eta(c.init, .link, earg = .earg)
}
- }), list( .link=link, .earg=earg, .init.c=init.c ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- cc = eta2theta(eta, .link, earg= .earg)
+ }), list( .link = link, .earg = earg, .init.c=init.c ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ cc = eta2theta(eta, .link, earg = .earg)
a = -1 / log1p(-cc)
a * cc / (1-cc)
- }, list( .link=link, .earg=earg ))),
+ }, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
misc$link = c(c= .link)
misc$earg = list(c= .earg)
misc$expected = TRUE
- }), list( .link=link, .earg=earg ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- cc = eta2theta(eta, .link, earg= .earg)
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ cc = eta2theta(eta, .link, earg = .earg)
a = -1 / log1p(-cc)
if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dlog(x=y, prob=-expm1(-1/a), log=TRUE))
+ sum(w * dlog(x=y, prob=-expm1(-1/a), log = TRUE))
}
- }, list( .link=link, .earg=earg ))),
- vfamily=c("logff"),
- deriv=eval(substitute(expression({
- cc = eta2theta(eta, .link, earg= .earg)
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("logff"),
+ deriv = eval(substitute(expression({
+ cc = eta2theta(eta, .link, earg = .earg)
a = -1 / log1p(-cc)
dl.dc = 1 / ((1-cc) * log1p(-cc)) + y / cc
- dc.deta = dtheta.deta(cc, .link, earg= .earg)
+ dc.deta = dtheta.deta(cc, .link, earg = .earg)
w * dl.dc * dc.deta
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
ed2l.dc2 = a * (1 - a * cc) / (cc * (1-cc)^2)
wz = w * dc.deta^2 * ed2l.dc2
wz
- }), list( .link=link, .earg=earg ))))
+ }), list( .link = link, .earg = earg ))))
}
- levy = function(delta=NULL, link.gamma="loge",
- earg=list(), idelta=NULL, igamma=NULL)
+ levy = function(delta = NULL, link.gamma = "loge",
+ earg = list(), idelta = NULL, igamma = NULL)
{
- delta.known = is.Numeric(delta, allow=1)
+ delta.known = is.Numeric(delta, allow = 1)
if (mode(link.gamma) != "character" && mode(link.gamma) != "name")
link.gamma = as.character(substitute(link.gamma))
if (!is.list(earg)) earg = list()
@@ -5485,22 +5496,22 @@ rlog = function(n, prob, Smallno=1.0e-6) {
"(y-delta)^(-3/2) * \n",
" exp(-gamma / (2*(y-delta ))),\n",
" delta < y, gamma > 0",
- if (delta.known) paste(", delta = ", delta, ",", sep=""),
+ if (delta.known) paste(", delta = ", delta, ",", sep = ""),
"\n\n",
if (delta.known) "Link: " else "Links: ",
- namesof("gamma", link.gamma, earg=earg),
+ namesof("gamma", link.gamma, earg = earg),
if (! delta.known)
- c(", ", namesof("delta", "identity", earg=list())),
+ c(", ", namesof("delta", "identity", earg = list())),
"\n\n",
"Mean: NA",
"\n"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("gamma", .link.gamma, earg=.earg, tag=FALSE),
+ c(namesof("gamma", .link.gamma, earg = .earg, tag = FALSE),
if ( .delta.known) NULL else
- namesof("delta", "identity", earg=list(), tag=FALSE))
+ namesof("delta", "identity", earg = list(), tag = FALSE))
if (!length(etastart)) {
delta.init = if ( .delta.known) {
@@ -5515,60 +5526,60 @@ rlog = function(n, prob, Smallno=1.0e-6) {
gamma.init = if (length( .igamma)) .igamma else
median(y - delta.init) # = 1/median(1/(y-delta.init))
gamma.init = rep(gamma.init, length=length(y))
- etastart = cbind(theta2eta(gamma.init, .link.gamma, earg= .earg),
+ etastart = cbind(theta2eta(gamma.init, .link.gamma, earg = .earg),
if ( .delta.known) NULL else delta.init)
}
- }), list( .link.gamma=link.gamma, .earg=earg,
+ }), list( .link.gamma=link.gamma, .earg = earg,
.delta.known=delta.known,
.delta=delta,
.idelta=idelta,
.igamma=igamma ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+ inverse = eval(substitute(function(eta, extra = NULL) {
eta = as.matrix(eta)
- mygamma = eta2theta(eta[,1], .link.gamma, earg= .earg)
+ mygamma = eta2theta(eta[,1], .link.gamma, earg = .earg)
delta = if ( .delta.known) .delta else eta[,2]
NA * mygamma
- }, list( .link.gamma=link.gamma, .earg=earg,
+ }, list( .link.gamma=link.gamma, .earg = earg,
.delta.known=delta.known,
.delta=delta ))),
last = eval(substitute(expression({
- misc$link = if ( .delta.known) NULL else c(delta="identity")
+ misc$link = if ( .delta.known) NULL else c(delta = "identity")
misc$link = c(gamma = .link.gamma, misc$link)
misc$earg = if ( .delta.known) list(gamma = .earg) else
list(gamma = .earg, delta=list())
if ( .delta.known)
misc$delta = .delta
- }), list( .link.gamma=link.gamma, .earg=earg,
+ }), list( .link.gamma=link.gamma, .earg = earg,
.delta.known=delta.known,
.delta=delta ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
eta = as.matrix(eta)
- mygamma = eta2theta(eta[,1], .link.gamma, earg= .earg)
+ mygamma = eta2theta(eta[,1], .link.gamma, earg = .earg)
delta = if ( .delta.known) .delta else eta[,2]
if (residuals) stop("loglikelihood residuals not implemented yet") else
sum(w * 0.5 * (log(mygamma) -3*log(y-delta) - mygamma / (y-delta )))
}, list( .link.gamma=link.gamma, .earg = earg,
.delta.known=delta.known,
.delta=delta ))),
- vfamily=c("levy"),
- deriv=eval(substitute(expression({
+ vfamily = c("levy"),
+ deriv = eval(substitute(expression({
eta = as.matrix(eta)
- mygamma = eta2theta(eta[,1], .link.gamma, earg= .earg)
+ mygamma = eta2theta(eta[,1], .link.gamma, earg = .earg)
delta = if ( .delta.known) .delta else eta[,2]
if (! .delta.known)
dl.ddelta = (3 - mygamma / (y-delta)) / (2 * (y-delta))
dl.dgamma = 0.5 * (1 / mygamma - 1 / (y-delta))
- dgamma.deta = dtheta.deta(mygamma, .link.gamma, earg= .earg)
+ dgamma.deta = dtheta.deta(mygamma, .link.gamma, earg = .earg)
w * cbind(dl.dgamma * dgamma.deta,
if ( .delta.known) NULL else dl.ddelta)
- }), list( .link.gamma=link.gamma, .earg=earg,
+ }), list( .link.gamma=link.gamma, .earg = earg,
.delta.known=delta.known,
.delta=delta ))),
- weight=eval(substitute(expression({
+ weight = eval(substitute(expression({
wz = matrix(as.numeric(NA), n, dimm(M)) # M = if (delta is known) 1 else 2
wz[,iam(1,1,M)] = 1 * dgamma.deta^2
if (! .delta.known) {
@@ -5577,7 +5588,7 @@ rlog = function(n, prob, Smallno=1.0e-6) {
}
wz = w * wz / (2 * mygamma^2)
wz
- }), list( .link.gamma=link.gamma, .earg=earg,
+ }), list( .link.gamma=link.gamma, .earg = earg,
.delta.known=delta.known,
.delta=delta ))))
}
@@ -5587,11 +5598,11 @@ rlog = function(n, prob, Smallno=1.0e-6) {
if (FALSE)
stoppa = function(y0,
- link.alpha="loge",
- link.theta="loge", ealpha=list(), etheta=list(),
- ialpha=NULL,
+ link.alpha = "loge",
+ link.theta = "loge", ealpha=list(), etheta = list(),
+ ialpha = NULL,
itheta=1.0,
- zero=NULL)
+ zero = NULL)
{
if (!is.Numeric(y0, allo=1) || y0 <= 0)
stop("y0 must be a positive value")
@@ -5600,7 +5611,7 @@ rlog = function(n, prob, Smallno=1.0e-6) {
link.alpha = as.character(substitute(link.alpha))
if (mode(link.theta) != "character" && mode(link.theta) != "name")
link.theta = as.character(substitute(link.theta))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(ealpha)) ealpha = list()
if (!is.list(etheta)) etheta = list()
@@ -5608,68 +5619,68 @@ rlog = function(n, prob, Smallno=1.0e-6) {
new("vglmff",
blurb = c("Stoppa distribution\n\n",
"Links: ",
- namesof("alpha", link.alpha, earg=ealpha), ", ",
- namesof("theta", link.theta, earg=etheta), "\n",
+ namesof("alpha", link.alpha, earg = ealpha), ", ",
+ namesof("theta", link.theta, earg = etheta), "\n",
"Mean: theta*y0*beta(1-1/alpha, theta)"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
predictors.names =
- c(namesof("alpha", .link.alpha, earg=.ealpha, tag=FALSE),
- namesof("theta", .link.theta, earg=.etheta, tag=FALSE))
+ c(namesof("alpha", .link.alpha, earg = .ealpha, tag = FALSE),
+ namesof("theta", .link.theta, earg = .etheta, tag = FALSE))
y0 = .y0
if (min(y) < y0) stop("y0 must lie in the interval (0, min(y))")
if (!length( .ialpha) || !length( .itheta)) {
- qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
+ qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
init.theta = if (length( .itheta)) .itheta else 1
xvec = log1p(-qvec^(1/init.theta))
- fit0 = lsfit(x=xvec, y=log(quantile(y, qvec))-log(y0), intercept=FALSE)
+ fit0 = lsfit(x=xvec, y=log(quantile(y, qvec))-log(y0), intercept = FALSE)
}
extra$y0 = y0
if (!length(etastart)) {
alpha = rep(if (length( .ialpha)) .ialpha else
- -1/fit0$coef[1], length=n)
- theta = rep(if (length( .itheta)) .itheta else 1.0, length=n)
- etastart = cbind(theta2eta(alpha, .link.alpha, earg= .ealpha),
- theta2eta(theta, .link.theta, earg= .etheta))
+ -1/fit0$coef[1], length = n)
+ theta = rep(if (length( .itheta)) .itheta else 1.0, length = n)
+ etastart = cbind(theta2eta(alpha, .link.alpha, earg = .ealpha),
+ theta2eta(theta, .link.theta, earg = .etheta))
}
}), list( .link.theta=link.theta, .link.alpha=link.alpha,
.y0=y0,
.itheta=itheta, .ialpha=ialpha ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- alpha = eta2theta(eta[,1], .link.alpha, earg= .ealpha)
- theta = eta2theta(eta[,2], .link.theta, earg= .etheta)
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ alpha = eta2theta(eta[,1], .link.alpha, earg = .ealpha)
+ theta = eta2theta(eta[,2], .link.theta, earg = .etheta)
theta * extra$y0 * beta(1-1/alpha, theta)
}, list( .link.theta=link.theta, .link.alpha=link.alpha ))),
last = eval(substitute(expression({
misc$link = c(alpha= .link.alpha, theta= .link.theta)
}), list( .link.theta=link.theta, .link.alpha=link.alpha ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
- alpha = eta2theta(eta[,1], .link.alpha, earg= .ealpha)
- theta = eta2theta(eta[,2], .link.theta, earg= .etheta)
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ alpha = eta2theta(eta[,1], .link.alpha, earg = .ealpha)
+ theta = eta2theta(eta[,2], .link.theta, earg = .etheta)
if (residuals) stop("loglikelihood residuals not implemented yet") else
sum(w*(log(theta*alpha) + alpha*log(extra$y0) -(alpha+1)*log(y)+
(theta-1) * log1p(-(y/extra$y0)^(-alpha))))
}, list( .link.theta=link.theta, .link.alpha=link.alpha ))),
- vfamily=c("stoppa"),
- deriv=eval(substitute(expression({
- alpha = eta2theta(eta[,1], .link.alpha, earg= .ealpha)
- theta = eta2theta(eta[,2], .link.theta, earg= .etheta)
+ vfamily = c("stoppa"),
+ deriv = eval(substitute(expression({
+ alpha = eta2theta(eta[,1], .link.alpha, earg = .ealpha)
+ theta = eta2theta(eta[,2], .link.theta, earg = .etheta)
temp8 = (y / extra$y0)^(-alpha)
temp8a = log(temp8)
temp8b = log1p(-temp8)
dl.dalpha = 1/alpha - log(y/extra$y0) + (theta-1) * temp8 *
log(y / extra$y0) / (1-temp8)
dl.dtheta = 1/theta + temp8b
- dalpha.deta = dtheta.deta(alpha, .link.alpha, earg= .ealpha)
- dTHETA.deta = dtheta.deta(theta, .link.theta, earg= .etheta)
+ dalpha.deta = dtheta.deta(alpha, .link.alpha, earg = .ealpha)
+ dTHETA.deta = dtheta.deta(theta, .link.theta, earg = .etheta)
w * cbind( dl.dalpha * dalpha.deta, dl.dtheta * dTHETA.deta )
}), list( .link.theta=link.theta, .link.alpha=link.alpha ))),
- weight=eval(substitute(expression({
+ weight = eval(substitute(expression({
ed2l.dalpha = 1/alpha^2 + theta * (2 * log(extra$y0) * (digamma(2)-
digamma(theta+4)) - (trigamma(1) +
trigamma(theta+3)) / alpha^3) / (alpha *
@@ -5693,7 +5704,7 @@ dlino = function(x, shape1, shape2, lambda=1, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- loglik = dbeta(x=x, shape1=shape1, shape2=shape2, log=TRUE) +
+ loglik = dbeta(x=x, shape1=shape1, shape2=shape2, log = TRUE) +
shape1 * log(lambda) -
(shape1+shape2) * log1p(-(1-lambda)*x)
if (log.arg) loglik else exp(loglik)
@@ -5701,21 +5712,21 @@ dlino = function(x, shape1, shape2, lambda=1, log = FALSE) {
plino = function(q, shape1, shape2, lambda=1) {
if (!is.Numeric(q)) stop("bad input for 'q'")
- if (!is.Numeric(shape1, posit=TRUE))
+ if (!is.Numeric(shape1, posit = TRUE))
stop("bad input for argument 'shape1'")
- if (!is.Numeric(shape2, posit=TRUE))
+ if (!is.Numeric(shape2, posit = TRUE))
stop("bad input for argument 'shape2'")
- if (!is.Numeric(lambda, posit=TRUE))
+ if (!is.Numeric(lambda, posit = TRUE))
stop("bad input for argument 'lambda'")
pbeta(q=lambda*q/(1 - (1-lambda)*q), shape1=shape1, shape2=shape2)
}
qlino = function(p, shape1, shape2, lambda=1) {
- if (!is.Numeric(p, posit=TRUE) || any(p >= 1))
+ if (!is.Numeric(p, posit = TRUE) || any(p >= 1))
stop("bad input for argument 'p'")
- if (!is.Numeric(shape1, posit=TRUE))
+ if (!is.Numeric(shape1, posit = TRUE))
stop("bad input for argument 'shape1'")
- if (!is.Numeric(lambda, posit=TRUE))
+ if (!is.Numeric(lambda, posit = TRUE))
stop("bad input for argument 'lambda'")
Y = qbeta(p=p, shape1=shape1, shape2=shape2)
Y / (lambda + (1-lambda)*Y)
@@ -5723,13 +5734,13 @@ qlino = function(p, shape1, shape2, lambda=1) {
rlino = function(n, shape1, shape2, lambda=1) {
- if (!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
stop("bad input for argument 'n'")
- if (!is.Numeric(shape1, posit=TRUE))
+ if (!is.Numeric(shape1, posit = TRUE))
stop("bad input for argument 'shape1'")
- if (!is.Numeric(shape2, posit=TRUE))
+ if (!is.Numeric(shape2, posit = TRUE))
stop("bad input for argument 'shape2'")
- if (!is.Numeric(lambda, posit=TRUE))
+ if (!is.Numeric(lambda, posit = TRUE))
stop("bad input for argument 'lambda'")
Y = rbeta(n=n, shape1=shape1, shape2=shape2)
Y / (lambda + (1-lambda)*Y)
@@ -5737,11 +5748,11 @@ rlino = function(n, shape1, shape2, lambda=1) {
- lino = function(lshape1="loge",
- lshape2="loge",
- llambda="loge",
+ lino = function(lshape1 = "loge",
+ lshape2 = "loge",
+ llambda = "loge",
eshape1=list(), eshape2=list(), elambda=list(),
- ishape1=NULL, ishape2=NULL, ilambda=1, zero=NULL)
+ ishape1 = NULL, ishape2 = NULL, ilambda=1, zero = NULL)
{
if (mode(lshape1) != "character" && mode(lshape1) != "name")
lshape1 = as.character(substitute(lshape1))
@@ -5749,9 +5760,9 @@ rlino = function(n, shape1, shape2, lambda=1) {
lshape2 = as.character(substitute(lshape2))
if (mode(llambda) != "character" && mode(llambda) != "name")
llambda = as.character(substitute(llambda))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
- if (!is.Numeric(ilambda, positive=TRUE))
+ if (!is.Numeric(ilambda, positive = TRUE))
stop("bad input for argument 'ilambda'")
if (!is.list(eshape1)) eshape1 = list()
if (!is.list(eshape2)) eshape2 = list()
@@ -5760,84 +5771,84 @@ rlino = function(n, shape1, shape2, lambda=1) {
new("vglmff",
blurb = c("Generalized Beta distribution (Libby and Novick, 1982)\n\n",
"Links: ",
- namesof("shape1", lshape1, earg=eshape1), ", ",
- namesof("shape2", lshape2, earg=eshape2), ", ",
- namesof("lambda", llambda, earg=elambda), "\n",
+ namesof("shape1", lshape1, earg = eshape1), ", ",
+ namesof("shape2", lshape2, earg = eshape2), ", ",
+ namesof("lambda", llambda, earg = elambda), "\n",
"Mean: something complicated"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
predictors.names =
- c(namesof("shape1", .lshape1, earg=.eshape1, tag=FALSE),
- namesof("shape2", .lshape2, earg=.eshape2, tag=FALSE),
- namesof("lambda", .llambda, earg=.elambda, tag=FALSE))
+ c(namesof("shape1", .lshape1, earg = .eshape1, tag = FALSE),
+ namesof("shape2", .lshape2, earg = .eshape2, tag = FALSE),
+ namesof("lambda", .llambda, earg = .elambda, tag = FALSE))
if (min(y) <= 0 || max(y) >= 1)
stop("values of the response must be between 0 and 1 (0,1)")
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
if (!length(etastart)) {
lambda.init = rep(if (length( .ilambda )) .ilambda else 1,
- length=n)
+ length = n)
sh1.init = if (length( .ishape1 ))
- rep( .ishape1, length=n) else NULL
+ rep( .ishape1, length = n) else NULL
sh2.init = if (length( .ishape2 ))
- rep( .ishape2, length=n) else NULL
+ rep( .ishape2, length = n) else NULL
txY.init = lambda.init * y / (1+lambda.init*y - y)
mean1 = mean(txY.init)
mean2 = mean(1/txY.init)
if (!is.Numeric(sh1.init))
- sh1.init = rep((mean2 - 1) / (mean2 - 1/mean1), length=n)
+ sh1.init = rep((mean2 - 1) / (mean2 - 1/mean1), length = n)
if (!is.Numeric(sh2.init))
- sh2.init = rep(sh1.init * (1-mean1) / mean1, length=n)
- etastart = cbind(theta2eta(sh1.init, .lshape1, earg= .eshape1),
- theta2eta(sh2.init, .lshape2, earg= .eshape2),
- theta2eta(lambda.init, .llambda, earg= .elambda))
+ sh2.init = rep(sh1.init * (1-mean1) / mean1, length = n)
+ etastart = cbind(theta2eta(sh1.init, .lshape1, earg = .eshape1),
+ theta2eta(sh2.init, .lshape2, earg = .eshape2),
+ theta2eta(lambda.init, .llambda, earg = .elambda))
}
- }), list( .lshape1=lshape1, .lshape2=lshape2, .llambda=llambda,
- .eshape1=eshape1, .eshape2=eshape2, .elambda=elambda,
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
+ .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda,
.ishape1=ishape1, .ishape2=ishape2, .ilambda=ilambda ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- sh1 = eta2theta(eta[,1], .lshape1, earg= .eshape1)
- sh2 = eta2theta(eta[,2], .lshape2, earg= .eshape2)
- lambda = eta2theta(eta[,3], .llambda, earg= .elambda)
- rep(as.numeric(NA), length=nrow(eta))
- }, list( .lshape1=lshape1, .lshape2=lshape2, .llambda=llambda,
- .eshape1=eshape1, .eshape2=eshape2, .elambda=elambda ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ sh1 = eta2theta(eta[,1], .lshape1, earg = .eshape1)
+ sh2 = eta2theta(eta[,2], .lshape2, earg = .eshape2)
+ lambda = eta2theta(eta[,3], .llambda, earg = .elambda)
+ rep(as.numeric(NA), length = nrow(eta))
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
+ .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
last = eval(substitute(expression({
misc$link = c(shape1 = .lshape1, shape2 = .lshape2, lambda = .llambda)
misc$earg =list(shape1 = .eshape1, shape2 = .eshape2, lambda = .elambda)
- }), list( .lshape1=lshape1, .lshape2=lshape2, .llambda=llambda,
- .eshape1=eshape1, .eshape2=eshape2, .elambda=elambda ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
- sh1 = eta2theta(eta[,1], .lshape1, earg= .eshape1)
- sh2 = eta2theta(eta[,2], .lshape2, earg= .eshape2)
- lambda = eta2theta(eta[,3], .llambda, earg= .elambda)
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
+ .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ sh1 = eta2theta(eta[,1], .lshape1, earg = .eshape1)
+ sh2 = eta2theta(eta[,2], .lshape2, earg = .eshape2)
+ lambda = eta2theta(eta[,3], .llambda, earg = .elambda)
if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dlino(y, shape1=sh1, shape2=sh2, lambda=lambda, log=TRUE))
- }
- }, list( .lshape1=lshape1, .lshape2=lshape2, .llambda=llambda,
- .eshape1=eshape1, .eshape2=eshape2, .elambda=elambda ))),
- vfamily=c("lino"),
- deriv=eval(substitute(expression({
- sh1 = eta2theta(eta[,1], .lshape1, earg= .eshape1)
- sh2 = eta2theta(eta[,2], .lshape2, earg= .eshape2)
- lambda = eta2theta(eta[,3], .llambda, earg= .elambda)
+ sum(w * dlino(y, shape1=sh1, shape2=sh2, lambda=lambda, log = TRUE))
+ }
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
+ .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
+ vfamily = c("lino"),
+ deriv = eval(substitute(expression({
+ sh1 = eta2theta(eta[,1], .lshape1, earg = .eshape1)
+ sh2 = eta2theta(eta[,2], .lshape2, earg = .eshape2)
+ lambda = eta2theta(eta[,3], .llambda, earg = .elambda)
temp1 = log1p(-(1-lambda) * y)
temp2 = digamma(sh1+sh2)
dl.dsh1 = log(lambda) + log(y) - digamma(sh1) + temp2 - temp1
dl.dsh2 = log1p(-y) - digamma(sh2) + temp2 - temp1
dl.dlambda = sh1/lambda - (sh1+sh2) * y / (1 - (1-lambda) * y)
- dsh1.deta = dtheta.deta(sh1, .lshape1, earg= .eshape1)
- dsh2.deta = dtheta.deta(sh2, .lshape2, earg= .eshape2)
- dlambda.deta = dtheta.deta(lambda, .llambda, earg= .elambda)
+ dsh1.deta = dtheta.deta(sh1, .lshape1, earg = .eshape1)
+ dsh2.deta = dtheta.deta(sh2, .lshape2, earg = .eshape2)
+ dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda)
w * cbind( dl.dsh1 * dsh1.deta,
dl.dsh2 * dsh2.deta,
dl.dlambda * dlambda.deta)
- }), list( .lshape1=lshape1, .lshape2=lshape2, .llambda=llambda,
- .eshape1=eshape1, .eshape2=eshape2, .elambda=elambda ))),
- weight=eval(substitute(expression({
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
+ .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
+ weight = eval(substitute(expression({
temp3 = trigamma(sh1+sh2)
ed2l.dsh1 = trigamma(sh1) - temp3
ed2l.dsh2 = trigamma(sh2) - temp3
@@ -5854,22 +5865,22 @@ rlino = function(n, shape1, shape2, lambda=1) {
wz[,iam(2,3,M)] = ed2l.dsh2lambda * dsh2.deta * dlambda.deta
wz = w * wz
wz
- }), list( .lshape1=lshape1, .lshape2=lshape2, .llambda=llambda,
- .eshape1=eshape1, .eshape2=eshape2, .elambda=elambda ))))
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
+ .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))))
}
- genbetaII= function(link.a="loge",
- link.scale="loge",
- link.p="loge",
- link.q="loge",
- earg.a=list(), earg.scale=list(),
+ genbetaII= function(link.a = "loge",
+ link.scale = "loge",
+ link.p = "loge",
+ link.q = "loge",
+ earg.a=list(), earg.scale = list(),
earg.p=list(), earg.q=list(),
- init.a=NULL,
- init.scale=NULL,
+ init.a = NULL,
+ init.scale = NULL,
init.p=1.0,
init.q=1.0,
- zero=NULL)
+ zero = NULL)
{
if (mode(link.a) != "character" && mode(link.a) != "name")
@@ -5880,7 +5891,7 @@ rlino = function(n, shape1, shape2, lambda=1) {
link.p = as.character(substitute(link.p))
if (mode(link.q) != "character" && mode(link.q) != "name")
link.q = as.character(substitute(link.q))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(earg.a)) earg.a = list()
if (!is.list(earg.scale)) earg.scale = list()
@@ -5890,54 +5901,54 @@ rlino = function(n, shape1, shape2, lambda=1) {
new("vglmff",
blurb = c("Generalized Beta II distribution\n\n",
"Links: ",
- namesof("a", link.a, earg=earg.a), ", ",
- namesof("scale", link.scale, earg=earg.scale), ", ",
- namesof("p", link.p, earg=earg.p), ", ",
- namesof("q", link.q, earg=earg.q), "\n",
+ namesof("a", link.a, earg = earg.a), ", ",
+ namesof("scale", link.scale, earg = earg.scale), ", ",
+ namesof("p", link.p, earg = earg.p), ", ",
+ namesof("q", link.q, earg = earg.q), "\n",
"Mean: scale*gamma(p + 1/a)*gamma(q - 1/a)/(gamma(p)*gamma(q))"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
predictors.names =
- c(namesof("a", .link.a, earg=.earg.a, tag=FALSE),
- namesof("scale", .link.scale, earg=.earg.scale, tag=FALSE),
- namesof("p", .link.p, earg=.earg.p, tag=FALSE),
- namesof("q", .link.q, earg=.earg.q, tag=FALSE))
-
- if (!length(.init.a) || !length(.init.scale)) {
- qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
- init.q = if (length(.init.q)) .init.q else 1
+ c(namesof("a", .link.a, earg = .earg.a, tag = FALSE),
+ namesof("scale", .link.scale, earg = .earg.scale, tag = FALSE),
+ namesof("p", .link.p, earg = .earg.p, tag = FALSE),
+ namesof("q", .link.q, earg = .earg.q, tag = FALSE))
+
+ if (!length( .init.a) || !length( .init.scale )) {
+ qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
+ init.q = if (length( .init.q)) .init.q else 1
xvec = log( (1-qvec)^(-1/ init.q ) - 1 )
fit0 = lsfit(x=xvec, y=log(quantile(y, qvec )))
}
if (!length(etastart)) {
- aa = rep(if (length(.init.a)) .init.a else 1/fit0$coef[2],
- length=n)
- scale = rep(if (length(.init.scale)) .init.scale else
- exp(fit0$coef[1]), length=n)
- qq = rep(if (length(.init.q)) .init.q else 1.0, length=n)
- parg = rep(if (length(.init.p)) .init.p else 1.0, length=n)
- etastart = cbind(theta2eta(aa, .link.a, earg= .earg.a),
- theta2eta(scale, .link.scale, earg= .earg.scale),
- theta2eta(parg, .link.p, earg= .earg.p),
- theta2eta(qq, .link.q, earg= .earg.q))
- }
- }), list( .link.a=link.a, .link.scale=link.scale,
+ aa = rep(if (length( .init.a)) .init.a else 1/fit0$coef[2],
+ length = n)
+ scale = rep(if (length( .init.scale )) .init.scale else
+ exp(fit0$coef[1]), length = n)
+ qq = rep(if (length( .init.q)) .init.q else 1.0, length = n)
+ parg = rep(if (length( .init.p)) .init.p else 1.0, length = n)
+ etastart = cbind(theta2eta(aa, .link.a, earg = .earg.a),
+ theta2eta(scale, .link.scale, earg = .earg.scale),
+ theta2eta(parg, .link.p, earg = .earg.p),
+ theta2eta(qq, .link.q, earg = .earg.q))
+ }
+ }), list( .link.a = link.a, .link.scale = link.scale,
.link.p=link.p, .link.q=link.q,
- .earg.a=earg.a, .earg.scale=earg.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale,
.earg.p=earg.p, .earg.q=earg.q,
- .init.a=init.a, .init.scale=init.scale,
+ .init.a = init.a, .init.scale = init.scale,
.init.p=init.p, .init.q=init.q ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
- parg = eta2theta(eta[,3], .link.p, earg= .earg.p)
- qq = eta2theta(eta[,4], .link.q, earg= .earg.q)
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
+ scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
+ parg = eta2theta(eta[,3], .link.p, earg = .earg.p)
+ qq = eta2theta(eta[,4], .link.q, earg = .earg.q)
scale*gamma(parg + 1/aa)*gamma(qq-1/aa)/(gamma(parg)*gamma(qq))
- }, list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale,
+ }, list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale,
.earg.p=earg.p, .earg.q=earg.q,
.link.p=link.p, .link.q=link.q ))),
last = eval(substitute(expression({
@@ -5945,30 +5956,30 @@ rlino = function(n, shape1, shape2, lambda=1) {
p= .link.p, q= .link.q)
misc$earg = list(a= .earg.a, scale= .earg.scale,
p= .earg.p, q= .earg.q)
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale,
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale,
.earg.p=earg.p, .earg.q=earg.q,
.link.p=link.p, .link.q=link.q ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
- aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
- parg = eta2theta(eta[,3], .link.p, earg= .earg.p)
- qq = eta2theta(eta[,4], .link.q, earg= .earg.q)
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
+ scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
+ parg = eta2theta(eta[,3], .link.p, earg = .earg.p)
+ qq = eta2theta(eta[,4], .link.q, earg = .earg.q)
if (residuals) stop("loglikelihood residuals not implemented yet") else {
sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
-lbeta(parg, qq) - (parg+qq)*log1p((y/scale)^aa)))
}
- }, list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale,
+ }, list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale,
.earg.p=earg.p, .earg.q=earg.q,
.link.p=link.p, .link.q=link.q ))),
- vfamily=c("genbetaII"),
- deriv=eval(substitute(expression({
- aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
- parg = eta2theta(eta[,3], .link.p, earg= .earg.p)
- qq = eta2theta(eta[,4], .link.q, earg= .earg.q)
+ vfamily = c("genbetaII"),
+ deriv = eval(substitute(expression({
+ aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
+ scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
+ parg = eta2theta(eta[,3], .link.p, earg = .earg.p)
+ qq = eta2theta(eta[,4], .link.q, earg = .earg.q)
temp1 = log(y/scale)
temp2 = (y/scale)^aa
@@ -5981,17 +5992,17 @@ rlino = function(n, shape1, shape2, lambda=1) {
dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
dl.dp = aa * temp1 + temp3 - temp3a - temp4
dl.dq = temp3 - temp3b - temp4
- da.deta = dtheta.deta(aa, .link.a, earg= .earg.a)
- dscale.deta = dtheta.deta(scale, .link.scale, earg= .earg.scale)
- dp.deta = dtheta.deta(parg, .link.p, earg= .earg.p)
- dq.deta = dtheta.deta(qq, .link.q, earg= .earg.q)
+ da.deta = dtheta.deta(aa, .link.a, earg = .earg.a)
+ dscale.deta = dtheta.deta(scale, .link.scale, earg = .earg.scale)
+ dp.deta = dtheta.deta(parg, .link.p, earg = .earg.p)
+ dq.deta = dtheta.deta(qq, .link.q, earg = .earg.q)
w * cbind( dl.da * da.deta, dl.dscale * dscale.deta,
dl.dp * dp.deta, dl.dq * dq.deta )
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale,
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale,
.earg.p=earg.p, .earg.q=earg.q,
.link.p=link.p, .link.q=link.q ))),
- weight=eval(substitute(expression({
+ weight = eval(substitute(expression({
temp5 = trigamma(parg + qq)
temp5a = trigamma(parg)
temp5b = trigamma(qq)
@@ -6021,72 +6032,72 @@ rlino = function(n, shape1, shape2, lambda=1) {
wz[,iam(3,4,M)] = ed2l.dpq * dp.deta * dq.deta
wz = w * wz
wz
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale,
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale,
.earg.p=earg.p, .earg.q=earg.q,
.link.p=link.p, .link.q=link.q ))))
}
-rsinmad = function(n, a, scale=1, q.arg)
+rsinmad <- function(n, a, scale=1, q.arg)
qsinmad(runif(n), a, scale, q.arg)
-rlomax = function(n, scale=1, q.arg)
+rlomax <- function(n, scale=1, q.arg)
rsinmad(n, a=1, scale, q.arg)
-rfisk = function(n, a, scale=1)
+rfisk <- function(n, a, scale=1)
rsinmad(n, a, scale, q.arg=1)
-rparalogistic = function(n, a, scale=1)
+rparalogistic <- function(n, a, scale=1)
rsinmad(n, a, scale, a)
-rdagum = function(n, a, scale=1, p.arg)
+rdagum <- function(n, a, scale=1, p.arg)
qdagum(runif(n), a, scale=1, p.arg)
-rinvlomax = function(n, scale=1, p.arg)
+rinvlomax <- function(n, scale=1, p.arg)
rdagum(n, a=1, scale, p.arg)
-rinvparalogistic = function(n, a, scale=1)
+rinvparalogistic <- function(n, a, scale=1)
rdagum(n, a, scale, a)
-qsinmad = function(p, a, scale=1, q.arg) {
+qsinmad <- function(p, a, scale=1, q.arg) {
bad = (p < 0) | (p > 1)
ans = NA * p
- a = rep(a, len=length(p))[!bad]
- scale = rep(scale, len=length(p))[!bad]
- q = rep(q.arg, len=length(p))[!bad]
+ a = rep(a, len = length(p))[!bad]
+ scale = rep(scale, len = length(p))[!bad]
+ q = rep(q.arg, len = length(p))[!bad]
xx = p[!bad]
ans[!bad] = scale* ((1 - xx)^(-1/q) - 1)^(1/a)
ans
}
-qlomax = function(p, scale=1, q.arg)
+qlomax <- function(p, scale=1, q.arg)
qsinmad(p, a=1, scale, q.arg)
-qfisk = function(p, a, scale=1)
+qfisk <- function(p, a, scale=1)
qsinmad(p, a, scale, q.arg=1)
-qparalogistic = function(p, a, scale=1)
+qparalogistic <- function(p, a, scale=1)
qsinmad(p, a, scale, a)
-qdagum = function(p, a, scale=1, p.arg) {
+qdagum <- function(p, a, scale=1, p.arg) {
bad = (p < 0) | (p > 1)
ans = NA * p
- a = rep(a, len=length(p))[!bad]
- scale = rep(scale, len=length(p))[!bad]
- p.arg = rep(p.arg, len=length(p))[!bad]
+ a = rep(a, len = length(p))[!bad]
+ scale = rep(scale, len = length(p))[!bad]
+ p.arg = rep(p.arg, len = length(p))[!bad]
xx = p[!bad]
ans[!bad] = scale* (xx^(-1/p.arg) - 1)^(-1/a)
ans
}
-qinvlomax = function(p, scale=1, p.arg)
+qinvlomax <- function(p, scale=1, p.arg)
qdagum(p, a=1, scale, p.arg)
-qinvparalogistic = function(p, a, scale=1)
+qinvparalogistic <- function(p, a, scale=1)
qdagum(p, a, scale, a)
@@ -6094,11 +6105,11 @@ qinvparalogistic = function(p, a, scale=1)
-psinmad = function(q, a, scale=1, q.arg) {
+psinmad <- function(q, a, scale=1, q.arg) {
zero = q <= 0
- a = rep(a, len=length(q))[!zero]
- scale = rep(scale, len=length(q))[!zero]
- q.arg = rep(q.arg, len=length(q))[!zero]
+ a = rep(a, len = length(q))[!zero]
+ scale = rep(scale, len = length(q))[!zero]
+ q.arg = rep(q.arg, len = length(q))[!zero]
ans = 0 * q
xx = q[!zero]
ans[!zero] = 1 - (1 + (xx/scale)^a)^(-q.arg)
@@ -6116,62 +6127,64 @@ pparalogistic = function(q, a, scale=1)
-pdagum = function(q, a, scale=1, p.arg) {
- zero = q <= 0
- a = rep(a, len=length(q))[!zero]
- scale = rep(scale, len=length(q))[!zero]
- p = rep(p.arg, len=length(q))[!zero]
- ans = 0 * q
- xx = q[!zero]
- ans[!zero] = (1 + (xx/scale)^(-a))^(-p)
+pdagum <- function(q, a, scale=1, p.arg) {
+ zero <- q <= 0
+ a <- rep(a, len = length(q))[!zero]
+ scale <- rep(scale, len = length(q))[!zero]
+ p <- rep(p.arg, len = length(q))[!zero]
+ ans <- 0 * q
+ xx <- q[!zero]
+ ans[!zero] <- (1 + (xx/scale)^(-a))^(-p)
ans
}
-pinvlomax = function(q, scale=1, p.arg)
+pinvlomax <- function(q, scale=1, p.arg)
pdagum(q, a=1, scale, p.arg)
-pinvparalogistic = function(q, a, scale=1)
+pinvparalogistic <- function(q, a, scale=1)
pdagum(q, a, scale, a)
-dsinmad = function(x, a, scale=1, q.arg, log=FALSE) {
+dsinmad <- function(x, a, scale=1, q.arg, log = FALSE) {
if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
rm(log)
- LLL = max(length(x), length(a), length(scale), length(q.arg))
- x = rep(x, len=LLL); a = rep(a, len=LLL)
- scale = rep(scale, len=LLL); q.arg = rep(q.arg, len=LLL)
-
- Loglik = rep(log(0), len=LLL)
- xok = (x > 0) # Avoids evaluating log(x) if x is negative.
- Loglik[xok] = log(a[xok]) + log(q.arg[xok]) + (a[xok]-1)*log(x[xok]) -
+ LLL <- max(length(x), length(a), length(scale), length(q.arg))
+ x <- rep(x, len = LLL);
+ a <- rep(a, len = LLL)
+ scale <- rep(scale, len = LLL);
+ q.arg <- rep(q.arg, len = LLL)
+
+ Loglik <- rep(log(0), len = LLL)
+ xok <- (x > 0) # Avoids evaluating log(x) if x is negative.
+ Loglik[xok] <- log(a[xok]) + log(q.arg[xok]) + (a[xok]-1)*log(x[xok]) -
a[xok]*log(scale[xok]) -
(1+q.arg[xok]) * log1p((x[xok]/scale[xok])^a[xok])
if (log.arg) Loglik else exp(Loglik)
}
-dlomax = function(x, scale=1, q.arg, log=FALSE)
+dlomax <- function(x, scale=1, q.arg, log = FALSE)
dsinmad(x, a=1, scale, q.arg, log=log)
-dfisk = function(x, a, scale=1, log=FALSE)
+dfisk <- function(x, a, scale=1, log = FALSE)
dsinmad(x, a, scale, q.arg=1, log=log)
-dparalogistic = function(x, a, scale=1, log=FALSE)
+dparalogistic <- function(x, a, scale=1, log = FALSE)
dsinmad(x, a, scale, a, log=log)
-ddagum = function(x, a, scale=1, p.arg, log=FALSE) {
+ddagum <- function(x, a, scale=1, p.arg, log = FALSE) {
if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
rm(log)
LLL = max(length(x), length(a), length(scale), length(p.arg))
- x = rep(x, len=LLL); a = rep(a, len=LLL)
- scale = rep(scale, len=LLL); p.arg = rep(p.arg, len=LLL)
+ x = rep(x, len = LLL); a = rep(a, len = LLL)
+ scale = rep(scale, len = LLL); p.arg = rep(p.arg, len = LLL)
- Loglik = rep(log(0), len=LLL)
+ Loglik = rep(log(0), len = LLL)
xok = (x > 0) # Avoids evaluating log(x) if x is negative.
Loglik[xok] = log(a[xok]) + log(p.arg[xok]) +
(a[xok]*p.arg[xok]-1)*log(x[xok]) -
@@ -6182,22 +6195,22 @@ ddagum = function(x, a, scale=1, p.arg, log=FALSE) {
}
-dinvlomax = function(x, scale=1, p.arg, log=FALSE)
+dinvlomax <- function(x, scale=1, p.arg, log = FALSE)
ddagum(x, a=1, scale, p.arg, log=log)
-dinvparalogistic = function(x, a, scale=1, log=FALSE)
+dinvparalogistic <- function(x, a, scale=1, log = FALSE)
ddagum(x, a, scale, a, log=log)
- sinmad = function(link.a="loge",
- link.scale="loge",
- link.q="loge",
- earg.a=list(), earg.scale=list(), earg.q=list(),
- init.a=NULL,
- init.scale=NULL,
+ sinmad = function(link.a = "loge",
+ link.scale = "loge",
+ link.q = "loge",
+ earg.a=list(), earg.scale = list(), earg.q=list(),
+ init.a = NULL,
+ init.scale = NULL,
init.q=1.0,
- zero=NULL)
+ zero = NULL)
{
if (mode(link.a) != "character" && mode(link.a) != "name")
@@ -6206,7 +6219,7 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
link.scale = as.character(substitute(link.scale))
if (mode(link.q) != "character" && mode(link.q) != "name")
link.q = as.character(substitute(link.q))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(earg.a)) earg.a = list()
if (!is.list(earg.scale)) earg.scale = list()
@@ -6215,78 +6228,78 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
new("vglmff",
blurb = c("Singh-Maddala distribution\n\n",
"Links: ",
- namesof("a", link.a, earg=earg.a), ", ",
- namesof("scale", link.scale, earg=earg.scale), ", ",
- namesof("q", link.q, earg=earg.q), "\n",
+ namesof("a", link.a, earg = earg.a), ", ",
+ namesof("scale", link.scale, earg = earg.scale), ", ",
+ namesof("q", link.q, earg = earg.q), "\n",
"Mean: scale*gamma(1 + 1/a)*gamma(q - 1/a)/gamma(q)"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("a", .link.a, earg=.earg.a, tag=FALSE),
- namesof("scale", .link.scale, earg=.earg.scale, tag=FALSE),
- namesof("q", .link.q, earg=.earg.q, tag=FALSE))
+ c(namesof("a", .link.a, earg = .earg.a, tag = FALSE),
+ namesof("scale", .link.scale, earg = .earg.scale, tag = FALSE),
+ namesof("q", .link.q, earg = .earg.q, tag = FALSE))
parg = 1
- if (!length(.init.a) || !length(.init.scale)) {
- qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
- init.q = if (length(.init.q)) .init.q else 1
+ if (!length( .init.a) || !length( .init.scale )) {
+ qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
+ init.q = if (length( .init.q)) .init.q else 1
xvec = log( (1-qvec)^(-1/ init.q ) - 1 )
fit0 = lsfit(x=xvec, y=log(quantile(y, qvec )))
}
if (!length(etastart)) {
- aa = rep(if (length(.init.a)) .init.a else 1/fit0$coef[2],
- length=n)
- scale = rep(if (length(.init.scale)) .init.scale else
- exp(fit0$coef[1]), length=n)
- qq = rep(if (length(.init.q)) .init.q else 1.0, length=n)
- etastart = cbind(theta2eta(aa, .link.a, earg= .earg.a),
- theta2eta(scale, .link.scale, earg= .earg.scale),
- theta2eta(qq, .link.q, earg= .earg.q))
- }
- }), list( .link.a=link.a, .link.scale=link.scale,
+ aa = rep(if (length( .init.a)) .init.a else 1/fit0$coef[2],
+ length = n)
+ scale = rep(if (length( .init.scale )) .init.scale else
+ exp(fit0$coef[1]), length = n)
+ qq = rep(if (length( .init.q)) .init.q else 1.0, length = n)
+ etastart = cbind(theta2eta(aa, .link.a, earg = .earg.a),
+ theta2eta(scale, .link.scale, earg = .earg.scale),
+ theta2eta(qq, .link.q, earg = .earg.q))
+ }
+ }), list( .link.a = link.a, .link.scale = link.scale,
.link.q=link.q,
- .earg.a=earg.a, .earg.scale=earg.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale,
.earg.q=earg.q,
- .init.a=init.a, .init.scale=init.scale,
+ .init.a = init.a, .init.scale = init.scale,
.init.q=init.q ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
- qq = eta2theta(eta[,3], .link.q, earg= .earg.q)
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
+ scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
+ qq = eta2theta(eta[,3], .link.q, earg = .earg.q)
scale*gamma(1 + 1/aa)*gamma(qq-1/aa)/(gamma(qq))
- }, list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale,
+ }, list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale,
.earg.q=earg.q,
.link.q=link.q ))),
last = eval(substitute(expression({
misc$link = c(a= .link.a, scale= .link.scale, q= .link.q)
misc$earg = list(a= .earg.a, scale= .earg.scale, q= .earg.q)
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale,
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale,
.earg.q=earg.q,
.link.q=link.q ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
- aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
+ scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
parg = 1
- qq = eta2theta(eta[,3], .link.q, earg= .earg)
+ qq = eta2theta(eta[,3], .link.q, earg = .earg)
if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dsinmad(x=y, a=aa, scale=scale, q.arg=qq, log=TRUE))
- }
- }, list( .link.a=link.a, .link.scale=link.scale, .link.q=link.q,
- .earg.a=earg.a, .earg.scale=earg.scale, .earg.q=earg.q ))),
- vfamily=c("sinmad"),
- deriv=eval(substitute(expression({
- aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
+ sum(w * dsinmad(x=y, a=aa, scale=scale, q.arg=qq, log = TRUE))
+ }
+ }, list( .link.a = link.a, .link.scale = link.scale, .link.q=link.q,
+ .earg.a = earg.a, .earg.scale = earg.scale, .earg.q=earg.q ))),
+ vfamily = c("sinmad"),
+ deriv = eval(substitute(expression({
+ aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
+ scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
parg = 1
- qq = eta2theta(eta[,3], .link.q, earg= .earg.q)
+ qq = eta2theta(eta[,3], .link.q, earg = .earg.q)
temp1 = log(y/scale)
temp2 = (y/scale)^aa
@@ -6296,16 +6309,16 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
dl.dq = digamma(parg + qq) - temp3b - log1p(temp2)
- da.deta = dtheta.deta(aa, .link.a, earg= .earg.a)
- dscale.deta = dtheta.deta(scale, .link.scale, earg= .earg.scale)
- dq.deta = dtheta.deta(qq, .link.q, earg= .earg.q)
+ da.deta = dtheta.deta(aa, .link.a, earg = .earg.a)
+ dscale.deta = dtheta.deta(scale, .link.scale, earg = .earg.scale)
+ dq.deta = dtheta.deta(qq, .link.q, earg = .earg.q)
w * cbind( dl.da * da.deta, dl.dscale * dscale.deta,
dl.dq * dq.deta )
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale,
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale,
.earg.q=earg.q,
.link.q=link.q ))),
- weight=eval(substitute(expression({
+ weight = eval(substitute(expression({
ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
(temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
(parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
@@ -6324,21 +6337,21 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
wz[,iam(2,3,M)] = ed2l.dscaleq * dscale.deta * dq.deta
wz = w * wz
wz
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale,
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale,
.earg.q=earg.q,
.link.q=link.q ))))
}
- dagum = function(link.a="loge",
- link.scale="loge",
- link.p="loge",
- earg.a=list(), earg.scale=list(), earg.p=list(),
- init.a=NULL,
- init.scale=NULL,
+ dagum = function(link.a = "loge",
+ link.scale = "loge",
+ link.p = "loge",
+ earg.a=list(), earg.scale = list(), earg.p=list(),
+ init.a = NULL,
+ init.scale = NULL,
init.p=1.0,
- zero=NULL)
+ zero = NULL)
{
if (mode(link.a) != "character" && mode(link.a) != "name")
@@ -6347,86 +6360,88 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
link.scale = as.character(substitute(link.scale))
if (mode(link.p) != "character" && mode(link.p) != "name")
link.p = as.character(substitute(link.p))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument 'zero'")
if (!is.list(earg.a)) earg.a = list()
if (!is.list(earg.scale)) earg.scale = list()
if (!is.list(earg.p)) earg.p = list()
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ stop("bad input for argument 'zero'")
+
new("vglmff",
blurb = c("Dagum distribution\n\n",
"Links: ",
- namesof("a", link.a, earg=earg.a), ", ",
- namesof("scale", link.scale, earg=earg.scale), ", ",
- namesof("p", link.p, earg=earg.p), "\n",
+ namesof("a", link.a, earg = earg.a), ", ",
+ namesof("scale", link.scale, earg = earg.scale), ", ",
+ namesof("p", link.p, earg = earg.p), "\n",
"Mean: scale*gamma(p + 1/a)*gamma(1 - 1/a)/gamma(p)"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("a", .link.a, earg=.earg.a, tag=FALSE),
- namesof("scale", .link.scale, earg=.earg.scale, tag=FALSE),
- namesof("p", .link.p, earg=.earg.p, tag=FALSE))
- if (!length(.init.a) || !length(.init.scale)) {
- qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
- init.p = if (length(.init.p)) .init.p else 1
+ predictors.names <-
+ c(namesof("a", .link.a, earg = .earg.a, tag = FALSE),
+ namesof("scale", .link.scale, earg = .earg.scale, tag = FALSE),
+ namesof("p", .link.p, earg = .earg.p, tag = FALSE))
+
+ if (!length( .init.a) || !length( .init.scale )) {
+ qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
+ init.p = if (length( .init.p)) .init.p else 1
xvec = log( qvec^(-1/ init.p ) - 1 )
fit0 = lsfit(x=xvec, y=log(quantile(y, qvec )))
}
if (!length(etastart)) {
- parg = rep(if (length(.init.p)) .init.p else 1.0, length=n)
- aa = rep(if (length(.init.a)) .init.a else -1/fit0$coef[2],
- length=n)
- scale = rep(if (length(.init.scale)) .init.scale else
- exp(fit0$coef[1]), length=n)
- etastart = cbind(theta2eta(aa, .link.a, earg= .earg.a),
- theta2eta(scale, .link.scale, earg= .earg.scale),
- theta2eta(parg, .link.p, earg= .earg.p))
- }
- }), list( .link.a=link.a, .link.scale=link.scale,
+ parg = rep(if (length( .init.p)) .init.p else 1.0, length = n)
+ aa = rep(if (length( .init.a)) .init.a else -1/fit0$coef[2],
+ length = n)
+ scale = rep(if (length( .init.scale )) .init.scale else
+ exp(fit0$coef[1]), length = n)
+ etastart = cbind(theta2eta(aa, .link.a, earg = .earg.a),
+ theta2eta(scale, .link.scale, earg = .earg.scale),
+ theta2eta(parg, .link.p, earg = .earg.p))
+ }
+ }), list( .link.a = link.a, .link.scale = link.scale,
.link.p=link.p,
- .earg.a=earg.a, .earg.scale=earg.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale,
.earg.p=earg.p,
- .init.a=init.a, .init.scale=init.scale,
+ .init.a = init.a, .init.scale = init.scale,
.init.p=init.p ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
- parg = eta2theta(eta[,3], .link.p, earg= .earg.p)
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
+ scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
+ parg = eta2theta(eta[,3], .link.p, earg = .earg.p)
qq = 1
scale*gamma(parg + 1/aa)*gamma(qq-1/aa)/(gamma(parg)*gamma(qq))
- }, list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale,
+ }, list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale,
.earg.p=earg.p,
.link.p=link.p ))),
last = eval(substitute(expression({
misc$link = c(a= .link.a, scale= .link.scale, p= .link.p )
misc$earg = list(a= .earg.a, scale= .earg.scale, p= .earg.p)
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale,
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale,
.earg.p=earg.p,
.link.p=link.p ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
- aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
- parg = eta2theta(eta[,3], .link.p, earg= .earg.p)
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
+ scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
+ parg = eta2theta(eta[,3], .link.p, earg = .earg.p)
qq = 1
if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * ddagum(x=y, a=aa, scale=scale, p.arg=parg, log=TRUE))
- }
- }, list( .link.a=link.a, .link.scale=link.scale, .link.p=link.p,
- .earg.a=earg.a, .earg.scale=earg.scale, .earg.p=earg.p ))),
- vfamily=c("dagum"),
- deriv=eval(substitute(expression({
- aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
- parg = eta2theta(eta[,3], .link.p, earg= .earg.p)
+ sum(w * ddagum(x=y, a=aa, scale=scale, p.arg=parg, log = TRUE))
+ }
+ }, list( .link.a = link.a, .link.scale = link.scale, .link.p=link.p,
+ .earg.a = earg.a, .earg.scale = earg.scale, .earg.p=earg.p ))),
+ vfamily = c("dagum"),
+ deriv = eval(substitute(expression({
+ aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
+ scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
+ parg = eta2theta(eta[,3], .link.p, earg = .earg.p)
qq = 1
temp1 = log(y/scale)
@@ -6437,16 +6452,16 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
dl.dp = aa * temp1 + digamma(parg + qq) - temp3a - log1p(temp2)
- da.deta = dtheta.deta(aa, .link.a, earg= .earg.a)
- dscale.deta = dtheta.deta(scale, .link.scale, earg= .earg.scale)
- dp.deta = dtheta.deta(parg, .link.p, earg= .earg.p)
+ da.deta = dtheta.deta(aa, .link.a, earg = .earg.a)
+ dscale.deta = dtheta.deta(scale, .link.scale, earg = .earg.scale)
+ dp.deta = dtheta.deta(parg, .link.p, earg = .earg.p)
w * cbind( dl.da * da.deta, dl.dscale * dscale.deta,
dl.dp * dp.deta )
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale,
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale,
.earg.p=earg.p,
.link.p=link.p ))),
- weight=eval(substitute(expression({
+ weight = eval(substitute(expression({
ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
(temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
(parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
@@ -6465,17 +6480,17 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
wz[,iam(2,3,M)] = ed2l.dscalep * dscale.deta * dp.deta
wz = w * wz
wz
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale,
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale,
.earg.p=earg.p,
.link.p=link.p ))))
}
- betaII = function(link.scale="loge", link.p="loge", link.q="loge",
- earg.scale=list(), earg.p=list(), earg.q=list(),
- init.scale=NULL, init.p=1.0, init.q=1.0, zero=NULL)
+ betaII = function(link.scale = "loge", link.p = "loge", link.q = "loge",
+ earg.scale = list(), earg.p=list(), earg.q=list(),
+ init.scale = NULL, init.p=1.0, init.q=1.0, zero = NULL)
{
if (mode(link.scale) != "character" && mode(link.scale) != "name")
@@ -6484,7 +6499,7 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
link.p = as.character(substitute(link.p))
if (mode(link.q) != "character" && mode(link.q) != "name")
link.q = as.character(substitute(link.q))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(earg.scale)) earg.scale = list()
if (!is.list(earg.p)) earg.p = list()
@@ -6493,79 +6508,79 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
new("vglmff",
blurb = c("Beta II distribution\n\n",
"Links: ",
- namesof("scale", link.scale, earg=earg.scale), ", ",
- namesof("p", link.p, earg=earg.p), ", ",
- namesof("q", link.q, earg=earg.q), "\n",
+ namesof("scale", link.scale, earg = earg.scale), ", ",
+ namesof("p", link.p, earg = earg.p), ", ",
+ namesof("q", link.q, earg = earg.q), "\n",
"Mean: scale*gamma(p + 1)*gamma(q - 1)/(gamma(p)*gamma(q))"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("scale", .link.scale, earg=.earg.scale, tag=FALSE),
- namesof("p", .link.p, earg=.earg.p, tag=FALSE),
- namesof("q", .link.q, earg=.earg.q, tag=FALSE))
+ c(namesof("scale", .link.scale, earg = .earg.scale, tag = FALSE),
+ namesof("p", .link.p, earg = .earg.p, tag = FALSE),
+ namesof("q", .link.q, earg = .earg.q, tag = FALSE))
- if (!length(.init.scale)) {
- qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
- init.q = if (length(.init.q)) .init.q else 1
+ if (!length( .init.scale )) {
+ qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
+ init.q = if (length( .init.q)) .init.q else 1
xvec = log( (1-qvec)^(-1/ init.q ) - 1 )
fit0 = lsfit(x=xvec, y=log(quantile(y, qvec )))
}
if (!length(etastart)) {
- scale = rep(if (length(.init.scale)) .init.scale else
- exp(fit0$coef[1]), length=n)
- qq = rep(if (length(.init.q)) .init.q else 1.0, length=n)
- parg = rep(if (length(.init.p)) .init.p else 1.0, length=n)
- etastart = cbind(theta2eta(scale, .link.scale, earg= .earg.scale),
- theta2eta(parg, .link.p, earg= .earg.p),
- theta2eta(qq, .link.q, earg= .earg.q))
- }
- }), list( .link.scale=link.scale,
+ scale = rep(if (length( .init.scale )) .init.scale else
+ exp(fit0$coef[1]), length = n)
+ qq = rep(if (length( .init.q)) .init.q else 1.0, length = n)
+ parg = rep(if (length( .init.p)) .init.p else 1.0, length = n)
+ etastart = cbind(theta2eta(scale, .link.scale, earg = .earg.scale),
+ theta2eta(parg, .link.p, earg = .earg.p),
+ theta2eta(qq, .link.q, earg = .earg.q))
+ }
+ }), list( .link.scale = link.scale,
.link.p=link.p, .link.q=link.q,
- .earg.scale=earg.scale,
+ .earg.scale = earg.scale,
.earg.p=earg.p, .earg.q=earg.q,
- .init.scale=init.scale,
+ .init.scale = init.scale,
.init.p=init.p, .init.q=init.q ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+ inverse = eval(substitute(function(eta, extra = NULL) {
aa = 1
- scale = eta2theta(eta[,1], .link.scale, earg= .earg.scale)
- parg = eta2theta(eta[,2], .link.p, earg= .earg.p)
- qq = eta2theta(eta[,3], .link.q, earg= .earg.q)
+ scale = eta2theta(eta[,1], .link.scale, earg = .earg.scale)
+ parg = eta2theta(eta[,2], .link.p, earg = .earg.p)
+ qq = eta2theta(eta[,3], .link.q, earg = .earg.q)
scale*gamma(parg + 1/aa)*gamma(qq-1/aa)/(gamma(parg)*gamma(qq))
- }, list( .link.scale=link.scale,
- .earg.scale=earg.scale,
+ }, list( .link.scale = link.scale,
+ .earg.scale = earg.scale,
.earg.p=earg.p, .earg.q=earg.q,
.link.p=link.p, .link.q=link.q ))),
last = eval(substitute(expression({
misc$link = c(scale= .link.scale, p= .link.p, q= .link.q)
misc$earg = list(scale= .earg.scale, p= .earg.p, q= .earg.q)
- }), list( .link.scale=link.scale,
- .earg.scale=earg.scale,
+ }), list( .link.scale = link.scale,
+ .earg.scale = earg.scale,
.earg.p=earg.p, .earg.q=earg.q,
.link.p=link.p, .link.q=link.q ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
aa = 1
- scale = eta2theta(eta[,1], .link.scale, earg= .earg.scale)
- parg = eta2theta(eta[,2], .link.p, earg= .earg.p)
- qq = eta2theta(eta[,3], .link.q, earg= .earg.q)
+ scale = eta2theta(eta[,1], .link.scale, earg = .earg.scale)
+ parg = eta2theta(eta[,2], .link.p, earg = .earg.p)
+ qq = eta2theta(eta[,3], .link.q, earg = .earg.q)
if (residuals) stop("loglikelihood residuals not implemented yet") else
sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
(-lbeta(parg, qq)) - (parg+qq)*log1p((y/scale)^aa)))
- }, list( .link.scale=link.scale,
- .earg.scale=earg.scale,
+ }, list( .link.scale = link.scale,
+ .earg.scale = earg.scale,
.earg.p=earg.p, .earg.q=earg.q,
.link.p=link.p, .link.q=link.q ))),
- vfamily=c("betaII"),
- deriv=eval(substitute(expression({
+ vfamily = c("betaII"),
+ deriv = eval(substitute(expression({
aa = 1
- scale = eta2theta(eta[,1], .link.scale, earg= .earg.scale)
- parg = eta2theta(eta[,2], .link.p, earg= .earg.p)
- qq = eta2theta(eta[,3], .link.q, earg= .earg.q)
+ scale = eta2theta(eta[,1], .link.scale, earg = .earg.scale)
+ parg = eta2theta(eta[,2], .link.p, earg = .earg.p)
+ qq = eta2theta(eta[,3], .link.q, earg = .earg.q)
temp1 = log(y/scale)
temp2 = (y/scale)^aa
@@ -6577,16 +6592,16 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
dl.dp = aa * temp1 + temp3 - temp3a - temp4
dl.dq = temp3 - temp3b - temp4
- dscale.deta = dtheta.deta(scale, .link.scale, earg= .earg.scale)
- dp.deta = dtheta.deta(parg, .link.p, earg= .earg.p)
- dq.deta = dtheta.deta(qq, .link.q, earg= .earg.q)
+ dscale.deta = dtheta.deta(scale, .link.scale, earg = .earg.scale)
+ dp.deta = dtheta.deta(parg, .link.p, earg = .earg.p)
+ dq.deta = dtheta.deta(qq, .link.q, earg = .earg.q)
w * cbind( dl.dscale * dscale.deta,
dl.dp * dp.deta, dl.dq * dq.deta )
- }), list( .link.scale=link.scale,
- .earg.scale=earg.scale,
+ }), list( .link.scale = link.scale,
+ .earg.scale = earg.scale,
.earg.p=earg.p, .earg.q=earg.q,
.link.p=link.p, .link.q=link.q ))),
- weight=eval(substitute(expression({
+ weight = eval(substitute(expression({
temp5 = trigamma(parg + qq)
ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
ed2l.dp = trigamma(parg) - temp5
@@ -6603,27 +6618,27 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
wz[,iam(2,3,M)] = ed2l.dpq * dp.deta * dq.deta
wz = w * wz
wz
- }), list( .link.scale=link.scale,
- .earg.scale=earg.scale,
+ }), list( .link.scale = link.scale,
+ .earg.scale = earg.scale,
.earg.p=earg.p, .earg.q=earg.q,
.link.p=link.p, .link.q=link.q ))))
}
- lomax = function(link.scale="loge",
- link.q="loge",
- earg.scale=list(), earg.q=list(),
- init.scale=NULL,
+ lomax = function(link.scale = "loge",
+ link.q = "loge",
+ earg.scale = list(), earg.q=list(),
+ init.scale = NULL,
init.q=1.0,
- zero=NULL)
+ zero = NULL)
{
if (mode(link.scale) != "character" && mode(link.scale) != "name")
link.scale = as.character(substitute(link.scale))
if (mode(link.q) != "character" && mode(link.q) != "name")
link.q = as.character(substitute(link.q))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(earg.scale)) earg.scale = list()
if (!is.list(earg.q)) earg.q = list()
@@ -6631,76 +6646,76 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
new("vglmff",
blurb = c("Lomax distribution\n\n",
"Links: ",
- namesof("scale", link.scale, earg=earg.scale), ", ",
- namesof("q", link.q, earg=earg.q), "\n",
+ namesof("scale", link.scale, earg = earg.scale), ", ",
+ namesof("q", link.q, earg = earg.q), "\n",
"Mean: scale/(q-1)"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("scale", .link.scale, earg=.earg.scale, tag=FALSE),
- namesof("q", .link.q, earg=.earg.q, tag=FALSE))
+ c(namesof("scale", .link.scale, earg = .earg.scale, tag = FALSE),
+ namesof("q", .link.q, earg = .earg.q, tag = FALSE))
aa = parg = 1
- if (!length(.init.scale)) {
- qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
- init.q = if (length(.init.q)) .init.q else 1
+ if (!length( .init.scale )) {
+ qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
+ init.q = if (length( .init.q)) .init.q else 1
xvec = log( (1-qvec)^(-1/ init.q ) - 1 )
fit0 = lsfit(x=xvec, y=log(quantile(y, qvec )))
}
if (!length(etastart)) {
- qq = rep(if (length(.init.q)) .init.q else 1.0, length=n)
- scale = rep(if (length(.init.scale)) .init.scale else
- exp(fit0$coef[1]), length=n)
- etastart = cbind(theta2eta(scale, .link.scale, earg= .earg.scale),
- theta2eta(qq, .link.q, earg= .earg.q))
- }
- }), list( .link.scale=link.scale, .link.q=link.q,
- .earg.scale=earg.scale, .earg.q=earg.q,
- .init.scale=init.scale, .init.q=init.q ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- scale = eta2theta(eta[,1], .link.scale, earg= .earg.scale)
- qq = eta2theta(eta[,2], .link.q, earg= .earg.q)
+ qq = rep(if (length( .init.q)) .init.q else 1.0, length = n)
+ scale = rep(if (length( .init.scale )) .init.scale else
+ exp(fit0$coef[1]), length = n)
+ etastart = cbind(theta2eta(scale, .link.scale, earg = .earg.scale),
+ theta2eta(qq, .link.q, earg = .earg.q))
+ }
+ }), list( .link.scale = link.scale, .link.q=link.q,
+ .earg.scale = earg.scale, .earg.q=earg.q,
+ .init.scale = init.scale, .init.q=init.q ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ scale = eta2theta(eta[,1], .link.scale, earg = .earg.scale)
+ qq = eta2theta(eta[,2], .link.q, earg = .earg.q)
scale/(qq-1)
- }, list( .link.scale=link.scale, .link.q=link.q,
- .earg.scale=earg.scale, .earg.q=earg.q ))),
+ }, list( .link.scale = link.scale, .link.q=link.q,
+ .earg.scale = earg.scale, .earg.q=earg.q ))),
last = eval(substitute(expression({
misc$link = c(scale= .link.scale, q= .link.q)
misc$earg = list(scale= .earg.scale, q= .earg.q)
- }), list( .link.scale=link.scale, .link.q=link.q,
- .earg.scale=earg.scale, .earg.q=earg.q ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
+ }), list( .link.scale = link.scale, .link.q=link.q,
+ .earg.scale = earg.scale, .earg.q=earg.q ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
aa = 1
- scale = eta2theta(eta[,1], .link.scale, earg= .earg.scale)
+ scale = eta2theta(eta[,1], .link.scale, earg = .earg.scale)
parg = 1
- qq = eta2theta(eta[,2], .link.q, earg= .earg.q)
+ qq = eta2theta(eta[,2], .link.q, earg = .earg.q)
if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dlomax(x=y, scale=scale, q.arg=qq, log=TRUE))
+ sum(w * dlomax(x=y, scale=scale, q.arg=qq, log = TRUE))
}
- }, list( .link.scale=link.scale, .link.q=link.q,
- .earg.scale=earg.scale, .earg.q=earg.q ))),
- vfamily=c("lomax"),
- deriv=eval(substitute(expression({
+ }, list( .link.scale = link.scale, .link.q=link.q,
+ .earg.scale = earg.scale, .earg.q=earg.q ))),
+ vfamily = c("lomax"),
+ deriv = eval(substitute(expression({
aa = 1
- scale = eta2theta(eta[,1], .link.scale, earg= .earg.scale)
+ scale = eta2theta(eta[,1], .link.scale, earg = .earg.scale)
parg = 1
- qq = eta2theta(eta[,2], .link.q, earg= .earg.q)
+ qq = eta2theta(eta[,2], .link.q, earg = .earg.q)
temp2 = (y/scale)^aa
dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
dl.dq = digamma(parg + qq) - digamma(qq) - log1p(temp2)
- dscale.deta = dtheta.deta(scale, .link.scale, earg= .earg.scale)
- dq.deta = dtheta.deta(qq, .link.q, earg= .earg.q)
+ dscale.deta = dtheta.deta(scale, .link.scale, earg = .earg.scale)
+ dq.deta = dtheta.deta(qq, .link.q, earg = .earg.q)
w * cbind( dl.dscale * dscale.deta,
dl.dq * dq.deta )
- }), list( .link.scale=link.scale, .link.q=link.q,
- .earg.scale=earg.scale, .earg.q=earg.q ))),
- weight=eval(substitute(expression({
+ }), list( .link.scale = link.scale, .link.q=link.q,
+ .earg.scale = earg.scale, .earg.q=earg.q ))),
+ weight = eval(substitute(expression({
ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
ed2l.dq = 1/qq^2
ed2l.dscaleq = -aa * parg / (scale*(parg+qq))
@@ -6710,24 +6725,24 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
wz[,iam(1,2,M)] = ed2l.dscaleq * dscale.deta * dq.deta
wz = w * wz
wz
- }), list( .link.scale=link.scale, .link.q=link.q,
- .earg.scale=earg.scale, .earg.q=earg.q ))))
+ }), list( .link.scale = link.scale, .link.q=link.q,
+ .earg.scale = earg.scale, .earg.q=earg.q ))))
}
- fisk = function(link.a="loge",
- link.scale="loge",
- earg.a=list(), earg.scale=list(),
- init.a=NULL,
- init.scale=NULL,
- zero=NULL)
+ fisk = function(link.a = "loge",
+ link.scale = "loge",
+ earg.a=list(), earg.scale = list(),
+ init.a = NULL,
+ init.scale = NULL,
+ zero = NULL)
{
if (mode(link.a) != "character" && mode(link.a) != "name")
link.a = as.character(substitute(link.a))
if (mode(link.scale) != "character" && mode(link.scale) != "name")
link.scale = as.character(substitute(link.scale))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(earg.a)) earg.a = list()
if (!is.list(earg.scale)) earg.scale = list()
@@ -6735,62 +6750,62 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
new("vglmff",
blurb = c("Fisk distribution\n\n",
"Links: ",
- namesof("a", link.a, earg=earg.a), ", ",
- namesof("scale", link.scale, earg=earg.scale), "\n",
+ namesof("a", link.a, earg = earg.a), ", ",
+ namesof("scale", link.scale, earg = earg.scale), "\n",
"Mean: scale * gamma(1 + 1/a) * gamma(1 - 1/a)"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
predictors.names =
- c(namesof("a", .link.a, earg=.earg.a, tag=FALSE),
- namesof("scale", .link.scale, earg=.earg.scale, tag=FALSE))
+ c(namesof("a", .link.a, earg = .earg.a, tag = FALSE),
+ namesof("scale", .link.scale, earg = .earg.scale, tag = FALSE))
qq = parg = 1
- if (!length(.init.scale)) {
- qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
+ if (!length( .init.scale )) {
+ qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
xvec = log( 1/qvec - 1 )
fit0 = lsfit(x=xvec, y=log(quantile(y, qvec )))
}
if (!length(etastart)) {
- aa = rep(if (length(.init.a)) .init.a else -1/fit0$coef[2],
- length=n)
- scale = rep(if (length(.init.scale)) .init.scale else
- exp(fit0$coef[1]), length=n)
- etastart = cbind(theta2eta(aa, .link.a, earg= .earg.a),
- theta2eta(scale, .link.scale, earg= .earg.scale))
- }
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale,
- .init.a=init.a, .init.scale=init.scale ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
+ aa = rep(if (length( .init.a)) .init.a else -1/fit0$coef[2],
+ length = n)
+ scale = rep(if (length( .init.scale )) .init.scale else
+ exp(fit0$coef[1]), length = n)
+ etastart = cbind(theta2eta(aa, .link.a, earg = .earg.a),
+ theta2eta(scale, .link.scale, earg = .earg.scale))
+ }
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale,
+ .init.a = init.a, .init.scale = init.scale ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
+ scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
qq = 1
scale*gamma(1 + 1/aa)*gamma(1-1/aa)
- }, list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale ))),
+ }, list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale ))),
last = eval(substitute(expression({
misc$link = c(a= .link.a, scale= .link.scale)
misc$earg = list(a= .earg.a, scale= .earg.scale)
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale
))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
- aa = eta2theta(eta[,1], .link.a, earg= .earg)
- scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa = eta2theta(eta[,1], .link.a, earg = .earg)
+ scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
parg = qq = 1
if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dfisk(x=y, a=aa, scale=scale, log=TRUE))
- }
- }, list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale ))),
- vfamily=c("fisk"),
- deriv=eval(substitute(expression({
- aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
+ sum(w * dfisk(x=y, a=aa, scale=scale, log = TRUE))
+ }
+ }, list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale ))),
+ vfamily = c("fisk"),
+ deriv = eval(substitute(expression({
+ aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
+ scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
parg = qq = 1
temp1 = log(y/scale)
@@ -6800,12 +6815,12 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
- da.deta = dtheta.deta(aa, .link.a, earg= .earg.a)
- dscale.deta = dtheta.deta(scale, .link.scale, earg= .earg.scale)
+ da.deta = dtheta.deta(aa, .link.a, earg = .earg.a)
+ dscale.deta = dtheta.deta(scale, .link.scale, earg = .earg.scale)
w * cbind( dl.da * da.deta, dl.dscale * dscale.deta )
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale ))),
- weight=eval(substitute(expression({
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale ))),
+ weight = eval(substitute(expression({
ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
(temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
(parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
@@ -6818,24 +6833,24 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
wz = w * wz
wz
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale ))))
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale ))))
}
- invlomax = function(link.scale="loge",
- link.p="loge",
- earg.scale=list(), earg.p=list(),
- init.scale=NULL,
+ invlomax = function(link.scale = "loge",
+ link.p = "loge",
+ earg.scale = list(), earg.p=list(),
+ init.scale = NULL,
init.p=1.0,
- zero=NULL)
+ zero = NULL)
{
if (mode(link.scale) != "character" && mode(link.scale) != "name")
link.scale = as.character(substitute(link.scale))
if (mode(link.p) != "character" && mode(link.p) != "name")
link.p = as.character(substitute(link.p))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(earg.scale)) earg.scale = list()
if (!is.list(earg.p)) earg.p = list()
@@ -6843,80 +6858,80 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
new("vglmff",
blurb = c("Inverse Lomax distribution\n\n",
"Links: ",
- namesof("scale", link.scale, earg=earg.scale), ", ",
- namesof("p", link.p, earg=earg.p), "\n",
+ namesof("scale", link.scale, earg = earg.scale), ", ",
+ namesof("p", link.p, earg = earg.p), "\n",
"Mean: does not exist"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("scale", .link.scale, earg=.earg.scale, tag=FALSE),
- namesof("p", .link.p, earg=.earg.p, tag=FALSE))
+ c(namesof("scale", .link.scale, earg = .earg.scale, tag = FALSE),
+ namesof("p", .link.p, earg = .earg.p, tag = FALSE))
qq = aa = 1
- if (!length(.init.scale)) {
- qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
- init.p = if (length(.init.p)) .init.p else 1
+ if (!length( .init.scale )) {
+ qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
+ init.p = if (length( .init.p)) .init.p else 1
xvec = log( qvec^(-1/ init.p ) - 1 )
fit0 = lsfit(x=xvec, y=log(quantile(y, qvec )))
}
if (!length(etastart)) {
- scale = rep(if (length(.init.scale)) .init.scale else
- exp(fit0$coef[1]), length=n)
- parg = rep(if (length(.init.p)) .init.p else 1.0, length=n)
- etastart = cbind(theta2eta(scale, .link.scale, earg= .earg.scale),
- theta2eta(parg, .link.p, earg= .earg.p))
+ scale = rep(if (length( .init.scale )) .init.scale else
+ exp(fit0$coef[1]), length = n)
+ parg = rep(if (length( .init.p)) .init.p else 1.0, length = n)
+ etastart = cbind(theta2eta(scale, .link.scale, earg = .earg.scale),
+ theta2eta(parg, .link.p, earg = .earg.p))
}
- }), list( .link.scale=link.scale,
+ }), list( .link.scale = link.scale,
.link.p=link.p,
- .earg.scale=earg.scale,
+ .earg.scale = earg.scale,
.earg.p=earg.p,
- .init.scale=init.scale,
+ .init.scale = init.scale,
.init.p=init.p ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- rep(as.numeric(NA), len=nrow(eta))
- }, list( .link.scale=link.scale,
- .earg.scale=earg.scale,
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ rep(as.numeric(NA), len = nrow(eta))
+ }, list( .link.scale = link.scale,
+ .earg.scale = earg.scale,
.earg.p=earg.p,
.link.p=link.p ))),
last = eval(substitute(expression({
misc$link = c(scale= .link.scale, p= .link.p )
misc$earg = list(scale= .earg.scale, p= .earg.p )
- }), list( .link.scale=link.scale,
- .earg.scale=earg.scale,
+ }), list( .link.scale = link.scale,
+ .earg.scale = earg.scale,
.earg.p=earg.p,
.link.p=link.p ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
aa = qq = 1
- scale = eta2theta(eta[,1], .link.scale, earg= .earg.scale)
- parg = eta2theta(eta[,2], .link.p, earg= .earg.p)
+ scale = eta2theta(eta[,1], .link.scale, earg = .earg.scale)
+ parg = eta2theta(eta[,2], .link.p, earg = .earg.p)
if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dinvlomax(x=y, scale=scale, p.arg=parg, log=TRUE))
+ sum(w * dinvlomax(x=y, scale=scale, p.arg=parg, log = TRUE))
}
- }, list( .link.scale=link.scale, .link.p=link.p,
- .earg.scale=earg.scale, .earg.p=earg.p ))),
- vfamily=c("invlomax"),
- deriv=eval(substitute(expression({
+ }, list( .link.scale = link.scale, .link.p=link.p,
+ .earg.scale = earg.scale, .earg.p=earg.p ))),
+ vfamily = c("invlomax"),
+ deriv = eval(substitute(expression({
aa = qq = 1
- scale = eta2theta(eta[,1], .link.scale, earg= .earg.scale)
- parg = eta2theta(eta[,2], .link.p, earg= .earg.p)
+ scale = eta2theta(eta[,1], .link.scale, earg = .earg.scale)
+ parg = eta2theta(eta[,2], .link.p, earg = .earg.p)
temp1 = log(y/scale)
temp2 = (y/scale)^aa
dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
dl.dp = aa * temp1 + digamma(parg + qq) - digamma(parg) - log1p(temp2)
- dscale.deta = dtheta.deta(scale, .link.scale, earg= .earg.scale)
- dp.deta = dtheta.deta(parg, .link.p, earg= .earg.p)
+ dscale.deta = dtheta.deta(scale, .link.scale, earg = .earg.scale)
+ dp.deta = dtheta.deta(parg, .link.p, earg = .earg.p)
w * cbind( dl.dscale * dscale.deta,
dl.dp * dp.deta )
- }), list( .link.scale=link.scale, .link.p=link.p,
- .earg.scale=earg.scale, .earg.p=earg.p ))),
- weight=eval(substitute(expression({
+ }), list( .link.scale = link.scale, .link.p=link.p,
+ .earg.scale = earg.scale, .earg.p=earg.p ))),
+ weight = eval(substitute(expression({
ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
ed2l.dp = 1/parg^2
ed2l.dscalep = aa * qq / (scale*(parg+qq))
@@ -6926,24 +6941,24 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
wz[,iam(1,2,M)] = ed2l.dscalep * dscale.deta * dp.deta
wz = w * wz
wz
- }), list( .link.scale=link.scale, .link.p=link.p,
- .earg.scale=earg.scale, .earg.p=earg.p ))))
+ }), list( .link.scale = link.scale, .link.p=link.p,
+ .earg.scale = earg.scale, .earg.p=earg.p ))))
}
- paralogistic = function(link.a="loge",
- link.scale="loge",
- earg.a=list(), earg.scale=list(),
+ paralogistic = function(link.a = "loge",
+ link.scale = "loge",
+ earg.a=list(), earg.scale = list(),
init.a=1.0,
- init.scale=NULL,
- zero=NULL)
+ init.scale = NULL,
+ zero = NULL)
{
if (mode(link.a) != "character" && mode(link.a) != "name")
link.a = as.character(substitute(link.a))
if (mode(link.scale) != "character" && mode(link.scale) != "name")
link.scale = as.character(substitute(link.scale))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(earg.a)) earg.a = list()
if (!is.list(earg.scale)) earg.scale = list()
@@ -6951,66 +6966,66 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
new("vglmff",
blurb = c("Paralogistic distribution\n\n",
"Links: ",
- namesof("a", link.a, earg=earg.a), ", ",
- namesof("scale", link.scale, earg=earg.scale), "\n",
+ namesof("a", link.a, earg = earg.a), ", ",
+ namesof("scale", link.scale, earg = earg.scale), "\n",
"Mean: scale*gamma(1 + 1/a)*gamma(a - 1/a)/gamma(a)"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("a", .link.a, earg=.earg.a, tag=FALSE),
- namesof("scale", .link.scale, earg=.earg.scale, tag=FALSE))
+ c(namesof("a", .link.a, earg = .earg.a, tag = FALSE),
+ namesof("scale", .link.scale, earg = .earg.scale, tag = FALSE))
parg = 1
- if (!length(.init.a) || !length(.init.scale)) {
- qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
- init.a = if (length(.init.a)) .init.a else 1
+ if (!length( .init.a) || !length( .init.scale )) {
+ qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
+ init.a = if (length( .init.a)) .init.a else 1
xvec = log( (1-qvec)^(-1/ init.a ) - 1 )
fit0 = lsfit(x=xvec, y=log(quantile(y, qvec )))
}
if (!length(etastart)) {
- aa = rep(if (length(.init.a)) .init.a else 1/fit0$coef[2],
- length=n)
- scale = rep(if (length(.init.scale)) .init.scale else
- exp(fit0$coef[1]), length=n)
- etastart = cbind(theta2eta(aa, .link.a, earg= .earg.a),
- theta2eta(scale, .link.scale, earg= .earg.scale))
- }
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale,
- .init.a=init.a, .init.scale=init.scale
+ aa = rep(if (length( .init.a)) .init.a else 1/fit0$coef[2],
+ length = n)
+ scale = rep(if (length( .init.scale )) .init.scale else
+ exp(fit0$coef[1]), length = n)
+ etastart = cbind(theta2eta(aa, .link.a, earg = .earg.a),
+ theta2eta(scale, .link.scale, earg = .earg.scale))
+ }
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale,
+ .init.a = init.a, .init.scale = init.scale
))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
+ scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
qq = aa
scale*gamma(1 + 1/aa)*gamma(qq-1/aa)/(gamma(qq))
- }, list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale ))),
+ }, list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale ))),
last = eval(substitute(expression({
misc$link = c(a= .link.a, scale= .link.scale)
misc$earg = list(a= .earg.a, scale= .earg.scale )
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
- aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
+ scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
parg = 1
qq = aa
if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dparalogistic(x=y, a=aa, scale=scale, log=TRUE))
- }
- }, list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale ))),
- vfamily=c("paralogistic"),
- deriv=eval(substitute(expression({
- aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
+ sum(w * dparalogistic(x=y, a=aa, scale=scale, log = TRUE))
+ }
+ }, list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale ))),
+ vfamily = c("paralogistic"),
+ deriv = eval(substitute(expression({
+ aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
+ scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
parg = 1
qq = aa
@@ -7021,12 +7036,12 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
- da.deta = dtheta.deta(aa, .link.a, earg= .earg.a)
- dscale.deta = dtheta.deta(scale, .link.scale, earg= .earg.scale)
+ da.deta = dtheta.deta(aa, .link.a, earg = .earg.a)
+ dscale.deta = dtheta.deta(scale, .link.scale, earg = .earg.scale)
w * cbind( dl.da * da.deta, dl.dscale * dscale.deta)
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale ))),
- weight=eval(substitute(expression({
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale ))),
+ weight = eval(substitute(expression({
ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
(temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
(parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
@@ -7039,24 +7054,24 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
wz = w * wz
wz
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale ))))
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale ))))
}
- invparalogistic = function(link.a="loge",
- link.scale="loge",
- earg.a=list(), earg.scale=list(),
+ invparalogistic = function(link.a = "loge",
+ link.scale = "loge",
+ earg.a=list(), earg.scale = list(),
init.a=1.0,
- init.scale=NULL,
- zero=NULL)
+ init.scale = NULL,
+ zero = NULL)
{
if (mode(link.a) != "character" && mode(link.a) != "name")
link.a = as.character(substitute(link.a))
if (mode(link.scale) != "character" && mode(link.scale) != "name")
link.scale = as.character(substitute(link.scale))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(earg.a)) earg.a = list()
if (!is.list(earg.scale)) earg.scale = list()
@@ -7064,66 +7079,67 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
new("vglmff",
blurb = c("Inverse paralogistic distribution\n\n",
"Links: ",
- namesof("a", link.a, earg=earg.a), ", ",
- namesof("scale", link.scale, earg=earg.scale), "\n",
+ namesof("a", link.a, earg = earg.a), ", ",
+ namesof("scale", link.scale, earg = earg.scale), "\n",
"Mean: scale*gamma(a + 1/a)*gamma(1 - 1/a)/gamma(a)"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("a", .link.a, earg=.earg.a, tag=FALSE),
- namesof("scale", .link.scale, earg=.earg.scale, tag=FALSE))
+ c(namesof("a", .link.a, earg = .earg.a, tag = FALSE),
+ namesof("scale", .link.scale, earg = .earg.scale, tag = FALSE))
- if (!length(.init.a) || !length(.init.scale)) {
- qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
- init.p = if (length(.init.a)) .init.a else 1
+ if (!length( .init.a) || !length( .init.scale )) {
+ qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
+ init.p = if (length( .init.a)) .init.a else 1
xvec = log( qvec^(-1/ init.p ) - 1 )
fit0 = lsfit(x=xvec, y=log(quantile(y, qvec )))
}
qq = 1
if (!length(etastart)) {
- aa = rep(if (length(.init.a)) .init.a else -1/fit0$coef[2],
- length=n)
- scale = rep(if (length(.init.scale)) .init.scale else
- exp(fit0$coef[1]), length=n)
- etastart = cbind(theta2eta(aa, .link.a, earg= .earg.a),
- theta2eta(scale, .link.scale, earg= .earg.scale))
- }
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale,
- .init.a=init.a, .init.scale=init.scale ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
+ aa = rep(if (length( .init.a)) .init.a else -1/fit0$coef[2],
+ length = n)
+ scale = rep(if (length( .init.scale )) .init.scale else
+ exp(fit0$coef[1]), length = n)
+ etastart = cbind(theta2eta(aa, .link.a, earg = .earg.a),
+ theta2eta(scale, .link.scale, earg = .earg.scale))
+ }
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale,
+ .init.a = init.a, .init.scale = init.scale ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
+ scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
parg = aa
qq = 1
- scale*gamma(parg + 1/aa)*gamma(qq-1/aa)/(gamma(parg)*gamma(qq))
- }, list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale ))),
+ scale * gamma(parg + 1/aa) *
+ gamma(qq - 1/aa) / (gamma(parg) * gamma(qq))
+ }, list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale ))),
last = eval(substitute(expression({
misc$link = c(a= .link.a, scale= .link.scale )
misc$earg = list(a= .earg.a, scale= .earg.scale )
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
- aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
+ scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
parg = aa
qq = 1
if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dinvparalogistic(x=y, a=aa, scale=scale, log=TRUE))
- }
- }, list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale ))),
- vfamily=c("invparalogistic"),
- deriv=eval(substitute(expression({
- aa = eta2theta(eta[,1], .link.a, earg= .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg= .earg.scale)
+ sum(w * dinvparalogistic(x=y, a=aa, scale=scale, log = TRUE))
+ }
+ }, list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale ))),
+ vfamily = c("invparalogistic"),
+ deriv = eval(substitute(expression({
+ aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
+ scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
parg = aa
qq = 1
@@ -7134,12 +7150,12 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
- da.deta = dtheta.deta(aa, .link.a, earg= .earg.a)
- dscale.deta = dtheta.deta(scale, .link.scale, earg= .earg.scale)
+ da.deta = dtheta.deta(aa, .link.a, earg = .earg.a)
+ dscale.deta = dtheta.deta(scale, .link.scale, earg = .earg.scale)
w * cbind( dl.da * da.deta, dl.dscale * dscale.deta )
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale ))),
- weight=eval(substitute(expression({
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale ))),
+ weight = eval(substitute(expression({
ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
(temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
(parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
@@ -7152,16 +7168,16 @@ dinvparalogistic = function(x, a, scale=1, log=FALSE)
wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
wz = w * wz
wz
- }), list( .link.a=link.a, .link.scale=link.scale,
- .earg.a=earg.a, .earg.scale=earg.scale ))))
+ }), list( .link.a = link.a, .link.scale = link.scale,
+ .earg.a = earg.a, .earg.scale = earg.scale ))))
}
if (FALSE)
- genlognormal = function(link.sigma="loge", link.r="loge",
+ genlognormal = function(link.sigma = "loge", link.r = "loge",
esigma=list(), er=list(),
- init.sigma=1, init.r=1, zero=NULL)
+ init.sigma=1, init.r=1, zero = NULL)
{
warning("2/4/04; doesn't work, possibly because first derivs are ",
"not continuous (sign() is used). Certainly, the derivs wrt ",
@@ -7173,7 +7189,7 @@ warning("2/4/04; doesn't work, possibly because first derivs are ",
link.sigma = as.character(substitute(link.sigma))
if (mode(link.r) != "character" && mode(link.r) != "name")
link.r = as.character(substitute(link.r))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(esigma)) esigma = list()
if (!is.list(er)) er = list()
@@ -7181,59 +7197,60 @@ warning("2/4/04; doesn't work, possibly because first derivs are ",
new("vglmff",
blurb = c("Three-parameter generalized lognormal distribution\n\n",
"Links: ",
- "loc; ", namesof("sigma", link.sigma, earg=esigma, tag= TRUE),
- ", ", namesof("r", link.r, earg=er, tag= TRUE)),
- constraints=eval(substitute(expression({
+ "loc; ",
+ namesof("sigma", link.sigma, earg = esigma, tag = TRUE), ", ",
+ namesof("r", link.r, earg = er, tag = TRUE)),
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("loc", "identity", earg= list(), tag=FALSE),
- namesof("sigma", .link.sigma, earg=.esigma, tag=FALSE),
- namesof("r", .link.r, earg=.er, tag=FALSE))
+ c(namesof("loc", "identity", earg = list(), tag = FALSE),
+ namesof("sigma", .link.sigma, earg = .esigma, tag = FALSE),
+ namesof("r", .link.r, earg = .er, tag = FALSE))
- if (!length(.init.sigma) || !length(.init.r)) {
- init.r = if (length(.init.r)) .init.r else 1
+ if (!length( .init.sigma) || !length( .init.r)) {
+ init.r = if (length( .init.r)) .init.r else 1
sigma.init = (0.5 * sum(abs(log(y) - mean(log(y )))^init.r))^(1/init.r)
}
if (any(y <= 0)) stop("y must be positive")
if (!length(etastart)) {
sigma.init = rep(if (length( .init.sigma)) .init.sigma else
- sigma.init, len=n)
+ sigma.init, len = n)
r.init = if (length( .init.r)) .init.r else init.r
- etastart = cbind(mu=rep(log(median(y)), len=n),
+ etastart = cbind(mu=rep(log(median(y)), len = n),
sigma=sigma.init,
r = r.init)
}
}), list( .link.sigma=link.sigma, .link.r=link.r,
.init.sigma=init.sigma, .init.r=init.r ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- mymu = eta2theta(eta[,1], "identity", earg=list())
- sigma = eta2theta(eta[,2], .link.sigma, earg= .esigma)
- r = eta2theta(eta[,3], .link.r, earg= .er)
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ mymu = eta2theta(eta[,1], "identity", earg = list())
+ sigma = eta2theta(eta[,2], .link.sigma, earg = .esigma)
+ r = eta2theta(eta[,3], .link.r, earg = .er)
r
}, list( .link.sigma=link.sigma, .link.r=link.r ))),
last = eval(substitute(expression({
- misc$link = c(loc="identity", "sigma"= .link.sigma, r= .link.r )
+ misc$link = c(loc = "identity", "sigma" = .link.sigma, r = .link.r )
misc$expected = TRUE
}), list( .link.sigma=link.sigma, .link.r=link.r ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- mymu = eta2theta(eta[,1], "identity", earg=list())
- sigma = eta2theta(eta[,2], .link.sigma, earg= .esigma)
- r = eta2theta(eta[,3], .link.r, earg= .er)
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ mymu = eta2theta(eta[,1], "identity", earg = list())
+ sigma = eta2theta(eta[,2], .link.sigma, earg = .esigma)
+ r = eta2theta(eta[,3], .link.r, earg = .er)
temp89 = (abs(log(y)-mymu)/sigma)^r
if (residuals) stop("loglikelihood residuals not implemented yet") else
sum(w * (-log(r^(1/r) * sigma) - lgamma(1+1/r) - temp89/r))
}, list( .link.sigma=link.sigma, .link.r=link.r ))),
- vfamily=c("genlognormal3"),
- deriv=eval(substitute(expression({
- mymu = eta2theta(eta[,1], "identity", earg=list())
- sigma = eta2theta(eta[,2], .link.sigma, earg= .esigma)
- r = eta2theta(eta[,3], .link.r, earg= .er)
+ vfamily = c("genlognormal3"),
+ deriv = eval(substitute(expression({
+ mymu = eta2theta(eta[,1], "identity", earg = list())
+ sigma = eta2theta(eta[,2], .link.sigma, earg = .esigma)
+ r = eta2theta(eta[,3], .link.r, earg = .er)
ss = 1 + 1/r
temp33 = (abs(log(y)-mymu)/sigma)
temp33r1 = temp33^(r-1)
@@ -7242,14 +7259,14 @@ warning("2/4/04; doesn't work, possibly because first derivs are ",
dl.dr = (log(r) - 1 + digamma(ss) + temp33*temp33r1)/r^2 -
temp33r1 * log(temp33r1) / r
- dmymu.deta = dtheta.deta(mymu, "identity", earg=list())
- dsigma.deta = dtheta.deta(sigma, .link.sigma, earg= .esigma)
- dr.deta = dtheta.deta(r, .link.r, earg= .er)
+ dmymu.deta = dtheta.deta(mymu, "identity", earg = list())
+ dsigma.deta = dtheta.deta(sigma, .link.sigma, earg = .esigma)
+ dr.deta = dtheta.deta(r, .link.r, earg = .er)
w * cbind(dl.dmymu * dmymu.deta,
dl.dsigma * dsigma.deta,
dl.dr * dr.deta)
}), list( .link.sigma=link.sigma, .link.r=link.r ))),
- weight=expression({
+ weight = expression({
wz = matrix(0, n, 6) # 5 will have small savings of 1 column
B = log(r) + digamma(ss)
ed2l.dmymu2 = (r-1) * gamma(1-1/r) / (sigma^2 * r^(2/r) * gamma(ss))
@@ -7266,7 +7283,7 @@ warning("2/4/04; doesn't work, possibly because first derivs are ",
}
- betaprime = function(link="loge", earg=list(), i1=2, i2=NULL, zero=NULL)
+ betaprime = function(link = "loge", earg = list(), i1=2, i2 = NULL, zero = NULL)
{
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -7277,59 +7294,59 @@ warning("2/4/04; doesn't work, possibly because first derivs are ",
"y^(shape1-1) * (1+y)^(-shape1-shape2) / Beta(shape1,shape2),",
" y>0, shape1>0, shape2>0\n\n",
"Links: ",
- namesof("shape1", link, earg=earg), ", ",
- namesof("shape2", link, earg=earg), "\n",
+ namesof("shape1", link, earg = earg), ", ",
+ namesof("shape2", link, earg = earg), "\n",
"Mean: shape1/(shape2-1) provided shape2>1"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(y <- as.matrix(y)) > 1)
stop("betaprime cannot handle matrix responses yet")
if (min(y) <= 0)
stop("response must be positive")
- predictors.names = c(namesof("shape1", .link, earg=.earg, short=TRUE),
- namesof("shape2", .link, earg=.earg, short=TRUE))
- if (is.numeric(.i1) && is.numeric(.i2)) {
- vec = c(.i1, .i2)
- vec = c(theta2eta(vec[1], .link, earg= .earg),
- theta2eta(vec[2], .link, earg= .earg))
+ predictors.names = c(namesof("shape1", .link, earg = .earg, short = TRUE),
+ namesof("shape2", .link, earg = .earg, short = TRUE))
+ if (is.numeric( .i1) && is.numeric( .i2)) {
+ vec = c( .i1, .i2)
+ vec = c(theta2eta(vec[1], .link, earg = .earg),
+ theta2eta(vec[2], .link, earg = .earg))
etastart = matrix(vec, n, 2, byrow= TRUE)
}
if (!length(etastart)) {
- init1 = if (length( .i1)) rep( .i1, len=n) else rep(1, len=n)
- init2 = if (length( .i2)) rep( .i2, len=n) else 1 + init1 / (y + 0.1)
- etastart = matrix(theta2eta(c(init1, init2), .link, earg= .earg),
- n,2,byrow=TRUE)
- }
- }), list( .link=link, .earg=earg, .i1=i1, .i2=i2 ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- shapes = eta2theta(eta, .link, earg= .earg)
+ init1 = if (length( .i1)) rep( .i1, len = n) else rep(1, len = n)
+ init2 = if (length( .i2)) rep( .i2, len = n) else 1 + init1 / (y + 0.1)
+ etastart = matrix(theta2eta(c(init1, init2), .link, earg = .earg),
+ n,2,byrow = TRUE)
+ }
+ }), list( .link = link, .earg = earg, .i1=i1, .i2=i2 ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ shapes = eta2theta(eta, .link, earg = .earg)
ifelse(shapes[,2] > 1, shapes[,1]/(shapes[,2]-1), NA)
- }, list( .link=link, .earg=earg ))),
+ }, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
misc$link = c(shape1 = .link, shape2 = .link)
misc$earg = list(shape1 = .earg, shape2 = .earg)
- }), list( .link=link, .earg=earg ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE, eta, extra=NULL){
- shapes = eta2theta(eta, .link, earg= .earg)
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL){
+ shapes = eta2theta(eta, .link, earg = .earg)
if (residuals) stop("loglikelihood residuals not implemented yet") else {
sum(w *((shapes[,1]-1) * log(y) - lbeta(shapes[,1], shapes[,2]) -
(shapes[,2]+shapes[,1]) * log1p(y)))
}
- }, list( .link=link, .earg=earg ))),
- vfamily="betaprime",
- deriv=eval(substitute(expression({
- shapes = eta2theta(eta, .link, earg= .earg)
- dshapes.deta = dtheta.deta(shapes, .link, earg= .earg)
+ }, list( .link = link, .earg = earg ))),
+ vfamily = "betaprime",
+ deriv = eval(substitute(expression({
+ shapes = eta2theta(eta, .link, earg = .earg)
+ dshapes.deta = dtheta.deta(shapes, .link, earg = .earg)
dl.dshapes = cbind(log(y) - log1p(y) - digamma(shapes[,1]) +
digamma(shapes[,1]+shapes[,2]),
- log1p(y) - digamma(shapes[,2]) +
digamma(shapes[,1]+shapes[,2]))
w * dl.dshapes * dshapes.deta
- }), list( .link=link, .earg=earg ))),
- weight=expression({
+ }), list( .link = link, .earg = earg ))),
+ weight = expression({
temp2 = trigamma(shapes[,1]+shapes[,2])
d2l.dshape12 = temp2 - trigamma(shapes[,1])
d2l.dshape22 = temp2 - trigamma(shapes[,2])
@@ -7355,8 +7372,8 @@ dmaxwell = function(x, a, log = FALSE) {
rm(log)
L = max(length(x), length(a))
- x = rep(x, len=L); a = rep(a, len=L);
- logdensity = rep(log(0), len=L)
+ x = rep(x, len = L); a = rep(a, len = L);
+ logdensity = rep(log(0), len = L)
xok = (x > 0)
logdensity[xok] = 0.5 * log(2/pi) + 1.5 * log(a[xok]) +
2 * log(x[xok]) - 0.5 * a[xok] * x[xok]^2
@@ -7366,19 +7383,19 @@ dmaxwell = function(x, a, log = FALSE) {
pmaxwell = function(q, a) {
if (any(a <= 0)) stop("argument 'a' must be positive")
L = max(length(q), length(a))
- q = rep(q, len=L); a = rep(a, len=L);
+ q = rep(q, len = L); a = rep(a, len = L);
ifelse(q > 0, erf(q*sqrt(a/2)) - q*exp(-0.5*a*q^2) * sqrt(2*a/pi), 0)
}
rmaxwell = function(n, a) {
- if (!is.Numeric(n, posit=TRUE, allow=1))
+ if (!is.Numeric(n, posit = TRUE, allow = 1))
stop("bad input for argument 'n'")
if (any(a <= 0)) stop("argument 'a' must be positive")
sqrt(2 * rgamma(n=n, 1.5) / a)
}
qmaxwell = function(p, a) {
- if (!is.Numeric(p, posit=TRUE) || any(p>=1))
+ if (!is.Numeric(p, posit = TRUE) || any(p>=1))
stop("bad input for argument 'p'")
if (any(a <= 0)) stop("argument 'a' must be positive")
N = max(length(p), length(a)); p = rep(p, len=N); a = rep(a, len=N)
@@ -7386,7 +7403,7 @@ qmaxwell = function(p, a) {
}
- maxwell = function(link="loge", earg=list()) {
+ maxwell = function(link = "loge", earg = list()) {
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
@@ -7394,43 +7411,43 @@ qmaxwell = function(p, a) {
new("vglmff",
blurb = c("Maxwell distribution f(y) = sqrt(2/pi) * a^(3/2) * y^2 *",
" exp(-0.5*a*y^2), y>0, a>0\n",
- "Link: ", namesof("a", link, earg=earg), "\n", "\n",
+ "Link: ", namesof("a", link, earg = earg), "\n", "\n",
"Mean: sqrt(8 / (a * pi))"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("a", .link, earg=.earg, tag=FALSE)
+ predictors.names = namesof("a", .link, earg = .earg, tag = FALSE)
if (!length(etastart)) {
a.init = rep(8 / (pi*(y+0.1)^2), length=length(y))
- etastart = theta2eta(a.init, .link, earg= .earg)
+ etastart = theta2eta(a.init, .link, earg = .earg)
}
- }), list( .link=link, .earg=earg ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- a = eta2theta(eta, .link, earg= .earg)
+ }), list( .link = link, .earg = earg ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ a = eta2theta(eta, .link, earg = .earg)
sqrt(8 / (a * pi))
- }, list( .link=link, .earg=earg ))),
+ }, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
misc$link = c(a= .link)
misc$earg = list(a = .earg)
- }), list( .link=link, .earg=earg ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- aa = eta2theta(eta, .link, earg= .earg)
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa = eta2theta(eta, .link, earg = .earg)
if (residuals) stop("loglikelihood residuals not implemented yet") else
sum(w * dmaxwell(x=y, a=aa, log = TRUE))
- }, list( .link=link, .earg=earg ))),
- vfamily=c("maxwell"),
- deriv=eval(substitute(expression({
- a = eta2theta(eta, .link, earg= .earg)
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("maxwell"),
+ deriv = eval(substitute(expression({
+ a = eta2theta(eta, .link, earg = .earg)
dl.da = 1.5 / a - 0.5 * y^2
- da.deta = dtheta.deta(a, .link, earg= .earg)
+ da.deta = dtheta.deta(a, .link, earg = .earg)
w * dl.da * da.deta
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
ed2l.da2 = 1.5 / a^2
wz = w * da.deta^2 * ed2l.da2
wz
- }), list( .link=link, .earg=earg ))))
+ }), list( .link = link, .earg = earg ))))
}
@@ -7441,12 +7458,12 @@ dnaka = function(x, shape, scale=1, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
L = max(length(x), length(shape), length(scale))
- x = rep(x, len=L); shape = rep(shape, len=L); scale = rep(scale, len=L);
+ x = rep(x, len = L); shape = rep(shape, len = L); scale = rep(scale, len = L);
- logdensity = rep(log(0), len=L)
+ logdensity = rep(log(0), len = L)
xok = (x > 0)
logdensity[xok] = dgamma(x=x[xok]^2, shape=shape[xok],
- scale=scale[xok]/shape[xok], log=TRUE) +
+ scale=scale[xok]/shape[xok], log = TRUE) +
log(2) + log(x[xok])
if (log.arg) logdensity else exp(logdensity)
}
@@ -7455,26 +7472,26 @@ dnaka = function(x, shape, scale=1, log = FALSE) {
pnaka = function(q, shape, scale=1) {
if (!is.Numeric(q))
stop("bad input for argument 'q'")
- if (!is.Numeric(shape, posit=TRUE))
+ if (!is.Numeric(shape, posit = TRUE))
stop("bad input for argument 'shape'")
- if (!is.Numeric(scale, posit=TRUE))
+ if (!is.Numeric(scale, posit = TRUE))
stop("bad input for argument 'scale'")
L = max(length(q), length(shape), length(scale))
- q = rep(q, len=L); shape = rep(shape, len=L); scale = rep(scale, len=L);
+ q = rep(q, len = L); shape = rep(shape, len = L); scale = rep(scale, len = L);
ifelse(q <= 0, 0, pgamma(shape * q^2 / scale, shape))
}
qnaka = function(p, shape, scale=1, ...) {
- if (!is.Numeric(p, posit=TRUE) || max(p) >= 1)
+ if (!is.Numeric(p, posit = TRUE) || max(p) >= 1)
stop("bad input for argument 'p'")
- if (!is.Numeric(shape, posit=TRUE))
+ if (!is.Numeric(shape, posit = TRUE))
stop("bad input for argument 'shape'")
- if (!is.Numeric(scale, posit=TRUE))
+ if (!is.Numeric(scale, posit = TRUE))
stop("bad input for argument 'scale'")
L = max(length(p), length(shape), length(scale))
- p = rep(p, len=L); shape = rep(shape, len=L); scale = rep(scale, len=L);
- ans = rep(0.0, len=L)
+ p = rep(p, len = L); shape = rep(shape, len = L); scale = rep(scale, len = L);
+ ans = rep(0.0, len = L)
myfun = function(x, shape, scale=1, p)
pnaka(q=x, shape=shape, scale=scale) - p
for(ii in 1:L) {
@@ -7490,16 +7507,16 @@ qnaka = function(p, shape, scale=1, ...) {
rnaka = function(n, shape, scale=1, Smallno=1.0e-6) {
- if (!is.Numeric(n, posit=TRUE, integ=TRUE))
+ if (!is.Numeric(n, posit = TRUE, integ = TRUE))
stop("bad input for argument 'n'")
- if (!is.Numeric(scale, posit=TRUE, allow=1))
+ if (!is.Numeric(scale, posit = TRUE, allow = 1))
stop("bad input for argument 'scale'")
- if (!is.Numeric(shape, posit=TRUE, allow=1))
+ if (!is.Numeric(shape, posit = TRUE, allow = 1))
stop("bad input for argument 'shape'")
- if (!is.Numeric(Smallno, posit=TRUE, allow=1) || Smallno > 0.01 ||
+ if (!is.Numeric(Smallno, posit = TRUE, allow = 1) || Smallno > 0.01 ||
Smallno < 2 * .Machine$double.eps)
stop("bad input for argument 'Smallno'")
- ans = rep(0.0, len=n)
+ ans = rep(0.0, len = n)
ptr1 = 1; ptr2 = 0
ymax = dnaka(x=sqrt(scale*(1 - 0.5/shape)), shape=shape, scale=scale)
@@ -7525,13 +7542,13 @@ rnaka = function(n, shape, scale=1, Smallno=1.0e-6) {
- nakagami = function(lshape="loge", lscale="loge",
- eshape=list(), escale=list(), ishape=NULL, iscale=1) {
+ nakagami = function(lshape = "loge", lscale = "loge",
+ eshape = list(), escale = list(), ishape = NULL, iscale=1) {
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
- if (!is.null(iscale) && !is.Numeric(iscale, positi=TRUE))
+ if (!is.null(iscale) && !is.Numeric(iscale, positi = TRUE))
stop("argument 'iscale' must be a positive number or NULL")
if (!is.list(eshape)) eshape = list()
if (!is.list(escale)) escale = list()
@@ -7543,165 +7560,176 @@ rnaka = function(n, shape, scale=1, Smallno=1.0e-6) {
" ",
"y>0, shape>0, scale>0\n",
"Links: ",
- namesof("shape", lshape, earg=eshape), ", ",
- namesof("scale", lscale, earg=escale),
+ namesof("shape", lshape, earg = eshape), ", ",
+ namesof("scale", lscale, earg = escale),
"\n",
"\n",
"Mean: sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
- predictors.names = c(namesof("shape", .lshape, earg=.eshape, tag=FALSE),
- namesof("scale", .lscale, earg=.escale, tag=FALSE))
+ predictors.names = c(namesof("shape", .lshape, earg = .eshape, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale, tag = FALSE))
if (!length(etastart)) {
- init2 = if (is.Numeric( .iscale, posit=TRUE))
- rep( .iscale, len=n) else rep(1, len=n)
- init1 = if (is.Numeric( .ishape, posit=TRUE))
- rep( .ishape, len=n) else
- rep(init2 / (y+1/8)^2, len=n)
- etastart = cbind(theta2eta(init1, .lshape, earg= .eshape),
- theta2eta(init2, .lscale, earg= .escale))
- }
- }), list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape,
- .ishape=ishape, .iscale=iscale ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- shape = eta2theta(eta[,1], .lshape, earg= .eshape)
- scale = eta2theta(eta[,2], .lscale, earg= .escale)
+ init2 = if (is.Numeric( .iscale, posit = TRUE))
+ rep( .iscale, len = n) else rep(1, len = n)
+ init1 = if (is.Numeric( .ishape, posit = TRUE))
+ rep( .ishape, len = n) else
+ rep(init2 / (y+1/8)^2, len = n)
+ etastart = cbind(theta2eta(init1, .lshape, earg = .eshape),
+ theta2eta(init2, .lscale, earg = .escale))
+ }
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape,
+ .ishape = ishape, .iscale = iscale ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ shape = eta2theta(eta[,1], .lshape, earg = .eshape)
+ scale = eta2theta(eta[,2], .lscale, earg = .escale)
sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)
- }, list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape ))),
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
last = eval(substitute(expression({
misc$link = c(shape= .lshape, scale= .lscale)
misc$earg = list(shape = .eshape, scale = .escale)
misc$expected = TRUE
- }), list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- shape = eta2theta(eta[,1], .lshape, earg= .eshape)
- scale = eta2theta(eta[,2], .lscale, earg= .escale)
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ shape = eta2theta(eta[,1], .lshape, earg = .eshape)
+ scale = eta2theta(eta[,2], .lscale, earg = .escale)
if (residuals) stop("loglikelihood residuals not implemented yet") else
- sum(w * dnaka(x=y, shape=shape, scale=scale, log=TRUE))
- }, list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape ))),
- vfamily=c("nakagami"),
- deriv=eval(substitute(expression({
- shape = eta2theta(eta[,1], .lshape, earg= .eshape)
- Scale = eta2theta(eta[,2], .lscale, earg= .escale)
+ sum(w * dnaka(x=y, shape=shape, scale=scale, log = TRUE))
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
+ vfamily = c("nakagami"),
+ deriv = eval(substitute(expression({
+ shape = eta2theta(eta[,1], .lshape, earg = .eshape)
+ Scale = eta2theta(eta[,2], .lscale, earg = .escale)
dl.dshape = 1 + log(shape/Scale) - digamma(shape) +
2 * log(y) - y^2 / Scale
dl.dscale = -shape/Scale + shape * (y/Scale)^2
- dshape.deta = dtheta.deta(shape, .lshape, earg= .eshape)
- dscale.deta = dtheta.deta(Scale, .lscale, earg= .escale)
+ dshape.deta = dtheta.deta(shape, .lshape, earg = .eshape)
+ dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
w * cbind(dl.dshape * dshape.deta, dl.dscale * dscale.deta)
- }), list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape ))),
- weight=eval(substitute(expression({
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
+ weight = eval(substitute(expression({
d2l.dshape2 = trigamma(shape) - 1/shape
d2l.dscale2 = shape / Scale^2
wz = matrix(as.numeric(NA), n, M) # diagonal
wz[,iam(1,1,M)] = d2l.dshape2 * dshape.deta^2
wz[,iam(2,2,M)] = d2l.dscale2 * dscale.deta^2
w * wz
- }), list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape ))))
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))))
}
-drayleigh = function(x, a, log=FALSE) {
- if (!is.logical(log.arg <- log))
+drayleigh = function(x, scale = 1, log = FALSE) {
+ if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
- rm(log)
+ rm(log)
- L = max(length(x), length(a))
- x = rep(x, len=L); a = rep(a, len=L);
- logdensity = rep(log(0), len=L)
- xok = (x > 0)
- logdensity[xok] = log(x[xok]) - 0.5 * (x[xok]/a[xok])^2 - 2*log(a[xok])
- if (log.arg) logdensity else exp(logdensity)
+ L = max(length(x), length(scale))
+ x = rep(x, len = L); scale = rep(scale, len = L);
+ logdensity = rep(log(0), len = L)
+ xok = (x > 0)
+ logdensity[xok] = log(x[xok]) - 0.5 * (x[xok]/scale[xok])^2 -
+ 2*log(scale[xok])
+ if (log.arg) logdensity else exp(logdensity)
}
-prayleigh = function(q, a) {
- if (any(a <= 0)) stop("argument 'a' must be positive")
- L = max(length(q), length(a))
- q = rep(q, len=L); a = rep(a, len=L);
- ifelse(q > 0, 1 - exp(-0.5*(q/a)^2), 0)
+
+prayleigh = function(q, scale = 1) {
+ if (any(scale <= 0))
+ stop("argument 'scale' must be positive")
+ L = max(length(q), length(scale))
+ q = rep(q, len = L); scale = rep(scale, len = L);
+ ifelse(q > 0, -expm1(-0.5*(q/scale)^2), 0)
}
-qrayleigh = function(p, a) {
- if (any(a <= 0)) stop("argument 'a' must be positive")
- if (any(p <= 0) || any(p >= 1)) stop("argument 'p' must be between 0 and 1")
- a * sqrt(-2 * log1p(-p))
+
+qrayleigh = function(p, scale = 1) {
+ if (any(scale <= 0))
+ stop("argument 'scale' must be positive")
+ if (any(p <= 0) || any(p >= 1))
+ stop("argument 'p' must be between 0 and 1")
+ scale * sqrt(-2 * log1p(-p))
}
-rrayleigh = function(n, a) {
- if (!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE))
- stop("bad input for argument 'n'")
- if (any(a <= 0)) stop("a must be positive")
- a * sqrt(-2 * log(runif(n )))
+
+rrayleigh = function(n, scale = 1) {
+ if (any(scale <= 0))
+ stop("argument 'scale' must be positive")
+ scale * sqrt(-2 * log(runif(n)))
}
- rayleigh = function(link="loge", earg=list(), nrfs=1/3+0.01) {
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
- if (!is.Numeric(nrfs, allow=1) || nrfs<0 || nrfs > 1)
+ rayleigh = function(lscale = "loge", escale = list(), nrfs = 1 / 3 + 0.01) {
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if (!is.list(escale)) escale = list()
+ if (!is.Numeric(nrfs, allow = 1) || nrfs<0 || nrfs > 1)
stop("bad input for 'nrfs'")
new("vglmff",
- blurb = c("Rayleigh distribution f(y) = y*exp(-0.5*(y/a)^2)/a^2, y>0, a>0\n",
+ blurb = c("Rayleigh distribution\n\n",
+ "f(y) = y*exp(-0.5*(y/scale)^2)/scale^2, y>0, scale>0\n\n",
"Link: ",
- namesof("a", link, earg=earg), "\n\n",
- "Mean: a * sqrt(pi / 2)"),
- initialize=eval(substitute(expression({
+ namesof("scale", lscale, earg = escale), "\n\n",
+ "Mean: scale * sqrt(pi / 2)"),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("a", .link, earg=.earg, tag=FALSE)
+
+ predictors.names =
+ namesof("scale", .lscale, earg = .escale, tag = FALSE)
+
if (!length(etastart)) {
- a.init = (y+1/8) / sqrt(pi/2)
- etastart = theta2eta(a.init, .link, earg= .earg)
- }
- }), list( .link=link, .earg=earg ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- a = eta2theta(eta, .link, earg= .earg)
- a * sqrt(pi/2)
- }, list( .link=link, .earg=earg ))),
+ b.init = (y + 1/8) / sqrt(pi/2)
+ etastart = theta2eta(b.init, .lscale, earg = .escale)
+ }
+ }), list( .lscale = lscale, .escale = escale ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ Scale = eta2theta(eta, .lscale, earg = .escale)
+ Scale * sqrt(pi/2)
+ }, list( .lscale = lscale, .escale = escale ))),
last = eval(substitute(expression({
- misc$link = c(a= .link)
- misc$earg = list(a = .earg)
+ misc$link = c(scale = .lscale)
+ misc$earg = list(scale = .escale)
misc$nrfs = .nrfs
- }), list( .link=link, .earg=earg, .nrfs=nrfs ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- a = eta2theta(eta, .link, earg= .earg)
+ }), list( .lscale = lscale, .escale = escale, .nrfs = nrfs ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ Scale = eta2theta(eta, .lscale, earg = .escale)
if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * drayleigh(x=y, a=a, log=TRUE))
- }
- }, list( .link=link, .earg=earg ))),
- vfamily=c("rayleigh"),
- deriv=eval(substitute(expression({
- a = eta2theta(eta, .link, earg= .earg)
- dl.da = ((y/a)^2 - 2) / a
- da.deta = dtheta.deta(a, .link, earg= .earg)
- w * dl.da * da.deta
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
- d2l.da2 = (3 * (y/a)^2 - 2) / a^2
- ed2l.da2 = 4 / a^2
- wz = w * da.deta^2 * ((1- .nrfs) * d2l.da2 + .nrfs * ed2l.da2)
+ sum(w * drayleigh(x=y, scale = Scale, log = TRUE))
+ }
+ }, list( .lscale = lscale, .escale = escale ))),
+ vfamily = c("rayleigh"),
+ deriv = eval(substitute(expression({
+ Scale = eta2theta(eta, .lscale, earg = .escale)
+ dl.dScale = ((y/Scale)^2 - 2) / Scale
+ dScale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
+ w * dl.dScale * dScale.deta
+ }), list( .lscale = lscale, .escale = escale ))),
+ weight = eval(substitute(expression({
+ d2l.dScale2 = (3 * (y/Scale)^2 - 2) / Scale^2
+ ed2l.dScale2 = 4 / Scale^2
+ wz = w * dScale.deta^2 *
+ ((1 - .nrfs) * d2l.dScale2 + .nrfs * ed2l.dScale2)
wz
- }), list( .link=link, .earg=earg, .nrfs=nrfs ))))
+ }), list( .lscale = lscale, .escale = escale, .nrfs = nrfs ))))
}
-dparetoIV = function(x, location=0, scale=1, inequality=1, shape=1, log=FALSE) {
+dparetoIV = function(x, location=0, scale=1, inequality=1, shape=1, log = FALSE) {
if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
rm(log)
@@ -7723,11 +7751,11 @@ dparetoIV = function(x, location=0, scale=1, inequality=1, shape=1, log=FALSE) {
pparetoIV = function(q, location=0, scale=1, inequality=1, shape=1) {
if (!is.Numeric(q)) stop("bad input for argument 'q'")
- if (!is.Numeric(scale, posit=TRUE))
+ if (!is.Numeric(scale, posit = TRUE))
stop("bad input for argument 'scale'")
- if (!is.Numeric(inequality, posi=TRUE))
+ if (!is.Numeric(inequality, posi = TRUE))
stop("bad input for argument 'inequality'")
- if (!is.Numeric(shape, posit=TRUE))
+ if (!is.Numeric(shape, posit = TRUE))
stop("bad input for argument 'shape'")
N = max(length(q), length(location), length(scale), length(inequality),
length(shape))
@@ -7742,29 +7770,29 @@ pparetoIV = function(q, location=0, scale=1, inequality=1, shape=1) {
}
qparetoIV = function(p, location=0, scale=1, inequality=1, shape=1) {
- if (!is.Numeric(p, posit=TRUE) || any(p >= 1))
+ if (!is.Numeric(p, posit = TRUE) || any(p >= 1))
stop("bad input for argument 'p'")
- if (!is.Numeric(scale, posit=TRUE))
+ if (!is.Numeric(scale, posit = TRUE))
stop("bad input for argument 'scale'")
- if (!is.Numeric(inequality, posi=TRUE))
+ if (!is.Numeric(inequality, posi = TRUE))
stop("bad input for argument 'inequality'")
- if (!is.Numeric(shape, posit=TRUE))
+ if (!is.Numeric(shape, posit = TRUE))
stop("bad input for argument 'shape'")
location + scale * (-1 + (1-p)^(-1/shape))^inequality
}
rparetoIV = function(n, location=0, scale=1, inequality=1, shape=1) {
- if (!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
stop("bad input for argument n")
- if (!is.Numeric(scale, posit=TRUE)) stop("bad input for argument 'scale'")
- if (!is.Numeric(inequality, posi=TRUE))
+ if (!is.Numeric(scale, posit = TRUE)) stop("bad input for argument 'scale'")
+ if (!is.Numeric(inequality, posi = TRUE))
stop("bad input for argument 'inequality'")
- if (!is.Numeric(shape, posit=TRUE)) stop("bad input for argument 'shape'")
+ if (!is.Numeric(shape, posit = TRUE)) stop("bad input for argument 'shape'")
location + scale * (-1 + runif(n)^(-1/shape))^inequality
}
-dparetoIII = function(x, location=0, scale=1, inequality=1, log=FALSE)
+dparetoIII = function(x, location=0, scale=1, inequality=1, log = FALSE)
dparetoIV(x=x, location=location, scale=scale, inequality=inequality,
shape=1, log=log)
@@ -7782,7 +7810,7 @@ rparetoIII = function(n, location=0, scale=1, inequality=1)
-dparetoII = function(x, location=0, scale=1, shape=1, log=FALSE)
+dparetoII = function(x, location=0, scale=1, shape=1, log = FALSE)
dparetoIV(x=x, location=location, scale=scale, inequality=1, shape=shape,
log=log)
@@ -7811,12 +7839,12 @@ rparetoI = function(n, scale=1, shape=1)
paretoIV = function(location=0,
- lscale="loge",
- linequality="loge",
- lshape="loge",
- escale=list(), einequality=list(), eshape=list(),
- iscale=1, iinequality=1, ishape=NULL,
- method.init=1) {
+ lscale = "loge",
+ linequality = "loge",
+ lshape = "loge",
+ escale = list(), einequality=list(), eshape = list(),
+ iscale=1, iinequality=1, ishape = NULL,
+ method.init = 1) {
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if (mode(linequality) != "character" && mode(linequality) != "name")
@@ -7831,7 +7859,7 @@ rparetoI = function(n, scale=1, shape=1)
stop("argument 'iinequality' must be positive")
if (is.Numeric(ishape) && any(ishape <= 0))
stop("argument 'ishape' must be positive")
- if (!is.Numeric(method.init, allow=1, integ=TRUE) || method.init>2)
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE) || method.init>2)
stop("bad input for argument 'method.init'")
if (linequality == "nloge" && location != 0)
warning("The Burr distribution has 'location=0' and 'linequality=nloge'")
@@ -7844,22 +7872,22 @@ rparetoI = function(n, scale=1, shape=1)
")/scale)^(1/inequality)]^(-shape),",
"\n", " y > ",
location, ", scale > 0, inequality > 0, shape > 0,\n",
- "Links: ", namesof("scale", lscale, earg=escale ), ", ",
- namesof("inequality", linequality, earg=einequality ), ", ",
- namesof("shape", lshape, earg=eshape ), "\n",
+ "Links: ", namesof("scale", lscale, earg = escale ), ", ",
+ namesof("inequality", linequality, earg = einequality ), ", ",
+ namesof("shape", lshape, earg = eshape ), "\n",
"Mean: location + scale * NA"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("scale", .lscale, earg=.escale, tag=FALSE),
- namesof("inequality", .linequality, earg=.einequality, tag=FALSE),
- namesof("shape", .lshape, earg=.eshape, tag=FALSE))
+ c(namesof("scale", .lscale, earg = .escale, tag = FALSE),
+ namesof("inequality", .linequality, earg = .einequality, tag = FALSE),
+ namesof("shape", .lshape, earg = .eshape, tag = FALSE))
extra$location = location = .location
if (any(y <= location))
stop("the response must have values > than the 'location' argument")
if (!length(etastart)) {
- inequality.init = if (length(.iinequality)) .iinequality else 1
+ inequality.init = if (length( .iinequality)) .iinequality else 1
scale.init = if (length( .iscale)) .iscale else 1
shape.init = if (length( .ishape)) .ishape else NULL
if (!length(shape.init)) {
@@ -7874,64 +7902,64 @@ rparetoI = function(n, scale=1, shape=1)
shape.init = max(0.01, (2*A2-A1)/(A1-A2))
}
etastart=cbind(
- theta2eta(rep(scale.init, len=n), .lscale, earg= .escale),
- theta2eta(rep(inequality.init, len=n), .linequality, earg= .einequality),
- theta2eta(rep(shape.init, len=n), .lshape, earg= .eshape))
- }
- }), list( .location=location, .lscale=lscale,
- .linequality=linequality, .lshape=lshape, .method.init=method.init,
- .escale=escale, .einequality=einequality, .eshape=eshape,
- .iscale=iscale, .iinequality=iinequality, .ishape=ishape ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+ theta2eta(rep(scale.init, len = n), .lscale, earg = .escale),
+ theta2eta(rep(inequality.init, len = n), .linequality, earg = .einequality),
+ theta2eta(rep(shape.init, len = n), .lshape, earg = .eshape))
+ }
+ }), list( .location = location, .lscale = lscale,
+ .linequality=linequality, .lshape = lshape, .method.init = method.init,
+ .escale = escale, .einequality=einequality, .eshape = eshape,
+ .iscale = iscale, .iinequality=iinequality, .ishape = ishape ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
location = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg= .escale)
- inequality = eta2theta(eta[,2], .linequality, earg= .einequality)
- shape = eta2theta(eta[,3], .lshape, earg= .eshape)
+ Scale = eta2theta(eta[,1], .lscale, earg = .escale)
+ inequality = eta2theta(eta[,2], .linequality, earg = .einequality)
+ shape = eta2theta(eta[,3], .lshape, earg = .eshape)
location + Scale * NA
- }, list( .lscale=lscale, .linequality=linequality, .lshape=lshape,
- .escale=escale, .einequality=einequality, .eshape=eshape ))),
+ }, list( .lscale = lscale, .linequality=linequality, .lshape = lshape,
+ .escale = escale, .einequality=einequality, .eshape = eshape ))),
last = eval(substitute(expression({
- misc$link=c("scale"= .lscale, "inequality"= .linequality,
- "shape"= .lshape)
+ misc$link=c("scale" = .lscale, "inequality" = .linequality,
+ "shape" = .lshape)
misc$earg = list(scale = .escale, inequality= .einequality,
shape = .eshape)
misc$location = extra$location # Use this for prediction
- }), list( .lscale=lscale, .linequality=linequality, .lshape=lshape,
- .escale=escale, .einequality=einequality, .eshape=eshape ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ }), list( .lscale = lscale, .linequality=linequality, .lshape = lshape,
+ .escale = escale, .einequality=einequality, .eshape = eshape ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
location = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg= .escale)
- inequality = eta2theta(eta[,2], .linequality, earg= .einequality)
- shape = eta2theta(eta[,3], .lshape, earg= .eshape)
+ Scale = eta2theta(eta[,1], .lscale, earg = .escale)
+ inequality = eta2theta(eta[,2], .linequality, earg = .einequality)
+ shape = eta2theta(eta[,3], .lshape, earg = .eshape)
zedd = (y - location) / Scale
if (residuals) stop("loglikelihood residuals not implemented yet") else {
sum(w * dparetoIV(x=y, location=location, scale=Scale,
- inequality=inequality, shape=shape, log=TRUE))
+ inequality=inequality, shape=shape, log = TRUE))
}
- }, list( .lscale=lscale, .linequality=linequality, .lshape=lshape,
- .escale=escale, .einequality=einequality, .eshape=eshape ))),
- vfamily=c("paretoIV"),
- deriv=eval(substitute(expression({
+ }, list( .lscale = lscale, .linequality=linequality, .lshape = lshape,
+ .escale = escale, .einequality=einequality, .eshape = eshape ))),
+ vfamily = c("paretoIV"),
+ deriv = eval(substitute(expression({
location = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg= .escale)
- inequality = eta2theta(eta[,2], .linequality, earg= .einequality)
- shape = eta2theta(eta[,3], .lshape, earg= .eshape)
+ Scale = eta2theta(eta[,1], .lscale, earg = .escale)
+ inequality = eta2theta(eta[,2], .linequality, earg = .einequality)
+ shape = eta2theta(eta[,3], .lshape, earg = .eshape)
zedd = (y - location) / Scale
temp100 = 1 + zedd^(1/inequality)
dl.dscale = (shape - (1+shape) / temp100) / (inequality * Scale)
dl.dinequality = ((log(zedd) * (shape - (1+shape)/temp100)) /
inequality - 1) / inequality
dl.dshape = -log(temp100) + 1/shape
- dscale.deta = dtheta.deta(Scale, .lscale, earg= .escale)
- dinequality.deta = dtheta.deta(inequality, .linequality, earg= .einequality)
- dshape.deta = dtheta.deta(shape, .lshape, earg= .eshape)
+ dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
+ dinequality.deta = dtheta.deta(inequality, .linequality, earg = .einequality)
+ dshape.deta = dtheta.deta(shape, .lshape, earg = .eshape)
w * cbind(dl.dscale * dscale.deta,
dl.dinequality * dinequality.deta,
dl.dshape * dshape.deta)
- }), list( .lscale=lscale, .linequality=linequality, .lshape=lshape,
- .escale=escale, .einequality=einequality, .eshape=eshape ))),
- weight=eval(substitute(expression({
+ }), list( .lscale = lscale, .linequality=linequality, .lshape = lshape,
+ .escale = escale, .einequality=einequality, .eshape = eshape ))),
+ weight = eval(substitute(expression({
temp200 = digamma(shape) - digamma(1) - 1
d2scale.deta2 = shape / ((inequality*Scale)^2 * (shape+2))
d2inequality.deta2 = (shape * (temp200^2 + trigamma(shape) + trigamma(1)
@@ -7948,18 +7976,18 @@ rparetoI = function(n, scale=1, shape=1)
wz[,iam(1,3,M)] = dscale.deta * dshape.deta * d2ss.deta2
wz[,iam(2,3,M)] = dinequality.deta * dshape.deta * d2is.deta2
w * wz
- }), list( .lscale=lscale, .linequality=linequality, .lshape=lshape,
- .escale=escale, .einequality=einequality, .eshape=eshape ))))
+ }), list( .lscale = lscale, .linequality=linequality, .lshape = lshape,
+ .escale = escale, .einequality=einequality, .eshape = eshape ))))
}
paretoIII = function(location=0,
- lscale="loge",
- linequality="loge",
- escale=list(), einequality=list(),
- iscale=NULL, iinequality=NULL) {
+ lscale = "loge",
+ linequality = "loge",
+ escale = list(), einequality=list(),
+ iscale = NULL, iinequality = NULL) {
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if (mode(linequality) != "character" && mode(linequality) != "name")
@@ -7979,89 +8007,89 @@ rparetoI = function(n, scale=1, shape=1)
"\n", " y > ",
location, ", scale > 0, inequality > 0, \n",
"Links: ",
- namesof("scale", lscale, earg=escale ), ", ",
- namesof("inequality", linequality, earg=einequality ), "\n",
+ namesof("scale", lscale, earg = escale ), ", ",
+ namesof("inequality", linequality, earg = einequality ), "\n",
"Mean: location + scale * NA"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("the response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("scale", .lscale, earg=.escale, tag=FALSE),
- namesof("inequality", .linequality, earg=.einequality, tag=FALSE))
+ c(namesof("scale", .lscale, earg = .escale, tag = FALSE),
+ namesof("inequality", .linequality, earg = .einequality, tag = FALSE))
extra$location = location = .location
if (any(y <= location))
stop("the response must have values > than the 'location' argument")
if (!length(etastart)) {
- inequality.init = if (length(.iinequality)) .iinequality else NULL
+ inequality.init = if (length( .iinequality)) .iinequality else NULL
scale.init = if (length( .iscale)) .iscale else NULL
if (!length(inequality.init) || !length(scale.init)) {
probs = (1:4)/5
ytemp = quantile(x=log(y-location), probs=probs)
- fittemp = lsfit(x=logit(probs), y=ytemp, int=TRUE)
+ fittemp = lsfit(x=logit(probs), y=ytemp, int = TRUE)
if (!length(inequality.init))
inequality.init = max(fittemp$coef["X"], 0.01)
if (!length(scale.init))
scale.init = exp(fittemp$coef["Intercept"])
}
etastart=cbind(
- theta2eta(rep(scale.init, len=n), .lscale, earg= .escale),
- theta2eta(rep(inequality.init, len=n), .linequality,
- earg= .einequality))
- }
- }), list( .location=location, .lscale=lscale, .linequality=linequality,
- .escale=escale, .einequality=einequality,
- .iscale=iscale, .iinequality=iinequality ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+ theta2eta(rep(scale.init, len = n), .lscale, earg = .escale),
+ theta2eta(rep(inequality.init, len = n), .linequality,
+ earg = .einequality))
+ }
+ }), list( .location = location, .lscale = lscale, .linequality=linequality,
+ .escale = escale, .einequality=einequality,
+ .iscale = iscale, .iinequality=iinequality ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
location = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg= .escale)
- inequality = eta2theta(eta[,2], .linequality, earg= .einequality)
+ Scale = eta2theta(eta[,1], .lscale, earg = .escale)
+ inequality = eta2theta(eta[,2], .linequality, earg = .einequality)
location + Scale * NA
- }, list( .lscale=lscale, .linequality=linequality,
- .escale=escale, .einequality=einequality ))),
+ }, list( .lscale = lscale, .linequality=linequality,
+ .escale = escale, .einequality=einequality ))),
last = eval(substitute(expression({
- misc$link=c("scale"= .lscale, "inequality"= .linequality)
+ misc$link=c("scale" = .lscale, "inequality" = .linequality)
misc$earg = list(scale = .escale, inequality= .einequality)
misc$location = extra$location # Use this for prediction
- }), list( .lscale=lscale, .linequality=linequality,
- .escale=escale, .einequality=einequality ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ }), list( .lscale = lscale, .linequality=linequality,
+ .escale = escale, .einequality=einequality ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
location = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg= .escale)
- inequality = eta2theta(eta[,2], .linequality, earg= .einequality)
+ Scale = eta2theta(eta[,1], .lscale, earg = .escale)
+ inequality = eta2theta(eta[,2], .linequality, earg = .einequality)
zedd = (y - location) / Scale
if (residuals) stop("loglikelihood residuals not implemented yet") else {
sum(w * dparetoIII(x=y, location=location, scale=Scale,
- inequality=inequality, log=TRUE))
+ inequality=inequality, log = TRUE))
}
- }, list( .lscale=lscale, .linequality=linequality,
- .escale=escale, .einequality=einequality ))),
- vfamily=c("paretoIII"),
- deriv=eval(substitute(expression({
+ }, list( .lscale = lscale, .linequality=linequality,
+ .escale = escale, .einequality=einequality ))),
+ vfamily = c("paretoIII"),
+ deriv = eval(substitute(expression({
location = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg= .escale)
- inequality = eta2theta(eta[,2], .linequality, earg= .einequality)
+ Scale = eta2theta(eta[,1], .lscale, earg = .escale)
+ inequality = eta2theta(eta[,2], .linequality, earg = .einequality)
shape = 1
zedd = (y - location) / Scale
temp100 = 1 + zedd^(1/inequality)
dl.dscale = (shape - (1+shape) / temp100) / (inequality * Scale)
dl.dinequality = ((log(zedd) * (shape - (1+shape)/temp100)) /
inequality - 1) / inequality
- dscale.deta = dtheta.deta(Scale, .lscale, earg= .escale)
- dinequality.deta = dtheta.deta(inequality, .linequality, earg= .einequality)
+ dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
+ dinequality.deta = dtheta.deta(inequality, .linequality, earg = .einequality)
w * cbind(dl.dscale * dscale.deta,
dl.dinequality * dinequality.deta)
- }), list( .lscale=lscale, .linequality=linequality,
- .escale=escale, .einequality=einequality ))),
- weight=eval(substitute(expression({
+ }), list( .lscale = lscale, .linequality=linequality,
+ .escale = escale, .einequality=einequality ))),
+ weight = eval(substitute(expression({
d2scale.deta2 = 1 / ((inequality*Scale)^2 * 3)
d2inequality.deta2 = (1 + 2* trigamma(1)) / (inequality^2 * 3)
wz = matrix(0, n, M) # It is diagonal
wz[,iam(1,1,M)] = dscale.deta^2 * d2scale.deta2
wz[,iam(2,2,M)] = dinequality.deta^2 * d2inequality.deta2
w * wz
- }), list( .lscale=lscale, .linequality=linequality,
- .escale=escale, .einequality=einequality ))))
+ }), list( .lscale = lscale, .linequality=linequality,
+ .escale = escale, .einequality=einequality ))))
}
@@ -8069,10 +8097,10 @@ rparetoI = function(n, scale=1, shape=1)
paretoII = function(location=0,
- lscale="loge",
- lshape="loge",
- escale=list(), eshape=list(),
- iscale=NULL, ishape=NULL) {
+ lscale = "loge",
+ lshape = "loge",
+ escale = list(), eshape = list(),
+ iscale = NULL, ishape = NULL) {
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if (mode(lshape) != "character" && mode(lshape) != "name")
@@ -8091,15 +8119,15 @@ rparetoI = function(n, scale=1, shape=1)
")/scale]^(-shape),",
"\n", " y > ",
location, ", scale > 0, shape > 0,\n",
- "Links: ", namesof("scale", lscale, earg=escale ), ", ",
- namesof("shape", lshape, earg=eshape ), "\n",
+ "Links: ", namesof("scale", lscale, earg = escale ), ", ",
+ namesof("shape", lshape, earg = eshape ), "\n",
"Mean: location + scale * NA"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("the response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("scale", .lscale, earg=.escale, tag=FALSE),
- namesof("shape", .lshape, earg=.eshape, tag=FALSE))
+ c(namesof("scale", .lscale, earg = .escale, tag = FALSE),
+ namesof("shape", .lshape, earg = .eshape, tag = FALSE))
extra$location = location = .location
if (any(y <= location))
stop("the response must have values > than the 'location' argument")
@@ -8110,60 +8138,60 @@ rparetoI = function(n, scale=1, shape=1)
probs = (1:4)/5
scale.init.0 = 1
ytemp = quantile(x=log(y-location+scale.init.0), probs=probs)
- fittemp = lsfit(x=log1p(-probs), y=ytemp, int=TRUE)
+ fittemp = lsfit(x=log1p(-probs), y=ytemp, int = TRUE)
if (!length(shape.init))
shape.init = max(-1/fittemp$coef["X"], 0.01)
if (!length(scale.init))
scale.init = exp(fittemp$coef["Intercept"])
}
etastart=cbind(
- theta2eta(rep(scale.init, len=n), .lscale, earg= .escale),
- theta2eta(rep(shape.init, len=n), .lshape, earg= .eshape))
+ theta2eta(rep(scale.init, len = n), .lscale, earg = .escale),
+ theta2eta(rep(shape.init, len = n), .lshape, earg = .eshape))
}
- }), list( .location=location, .lscale=lscale,
- .escale=escale, .eshape=eshape,
- .lshape=lshape, .iscale=iscale, .ishape=ishape ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+ }), list( .location = location, .lscale = lscale,
+ .escale = escale, .eshape = eshape,
+ .lshape = lshape, .iscale = iscale, .ishape = ishape ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
location = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg= .escale)
- shape = eta2theta(eta[,2], .lshape, earg= .eshape)
+ Scale = eta2theta(eta[,1], .lscale, earg = .escale)
+ shape = eta2theta(eta[,2], .lshape, earg = .eshape)
location + Scale * NA
- }, list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape ))),
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
last = eval(substitute(expression({
- misc$link=c("scale"= .lscale, "shape"= .lshape)
- misc$earg = list(scale = .escale, shape= .eshape)
+ misc$link = c("scale" = .lscale, "shape" = .lshape)
+ misc$earg = list("scale" = .escale, "shape" = .eshape)
misc$location = extra$location # Use this for prediction
- }), list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
location = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg= .escale)
- shape = eta2theta(eta[,2], .lshape, earg= .eshape)
+ Scale = eta2theta(eta[,1], .lscale, earg = .escale)
+ shape = eta2theta(eta[,2], .lshape, earg = .eshape)
zedd = (y - location) / Scale
if (residuals) stop("loglikelihood residuals not implemented yet") else {
sum(w * dparetoII(x=y, location=location, scale=Scale,
- shape=shape, log=TRUE))
+ shape=shape, log = TRUE))
}
- }, list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape ))),
- vfamily=c("paretoII"),
- deriv=eval(substitute(expression({
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
+ vfamily = c("paretoII"),
+ deriv = eval(substitute(expression({
location = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg= .escale)
- shape = eta2theta(eta[,2], .lshape, earg= .eshape)
+ Scale = eta2theta(eta[,1], .lscale, earg = .escale)
+ shape = eta2theta(eta[,2], .lshape, earg = .eshape)
zedd = (y - location) / Scale
temp100 = 1 + zedd
dl.dscale = (shape - (1+shape) / temp100) / (1 * Scale)
dl.dshape = -log(temp100) + 1/shape
- dscale.deta = dtheta.deta(Scale, .lscale, earg= .escale)
- dshape.deta = dtheta.deta(shape, .lshape, earg= .eshape)
+ dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
+ dshape.deta = dtheta.deta(shape, .lshape, earg = .eshape)
w * cbind(dl.dscale * dscale.deta,
dl.dshape * dshape.deta)
- }), list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape ))),
- weight=eval(substitute(expression({
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
+ weight = eval(substitute(expression({
d2scale.deta2 = shape / (Scale^2 * (shape+2))
d2shape.deta2 = 1 / shape^2
d2ss.deta2 = -1 / (Scale * (shape+1))
@@ -8172,8 +8200,8 @@ rparetoI = function(n, scale=1, shape=1)
wz[,iam(2,2,M)] = dshape.deta^2 * d2shape.deta2
wz[,iam(1,2,M)] = dscale.deta * dshape.deta * d2ss.deta2
w * wz
- }), list( .lscale=lscale, .lshape=lshape,
- .escale=escale, .eshape=eshape ))))
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))))
}
@@ -8186,9 +8214,9 @@ dpareto = function(x, location, shape, log = FALSE) {
rm(log)
L = max(length(x), length(location), length(shape))
- x = rep(x, len=L); location = rep(location, len=L); shape= rep(shape, len=L)
+ x = rep(x, len = L); location = rep(location, len = L); shape= rep(shape, len = L)
- logdensity = rep(log(0), len=L)
+ logdensity = rep(log(0), len = L)
xok = (x > location)
logdensity[xok] = log(shape[xok]) + shape[xok] * log(location[xok]) -
(shape[xok]+1) * log(x[xok])
@@ -8199,7 +8227,7 @@ ppareto = function(q, location, shape) {
if (any(location <= 0)) stop("argument 'location' must be positive")
if (any(shape <= 0)) stop("argument 'shape' must be positive")
L = max(length(q), length(location), length(shape))
- q = rep(q, len=L); location = rep(location, len=L); shape= rep(shape, len=L)
+ q = rep(q, len = L); location = rep(location, len = L); shape= rep(shape, len = L)
ifelse(q > location, 1 - (location/q)^shape, 0)
}
@@ -8211,7 +8239,7 @@ qpareto = function(p, location, shape) {
}
rpareto = function(n, location, shape) {
- if (!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
stop("bad input for argument 'n'")
if (any(location <= 0)) stop("argument 'location' must be positive")
if (any(shape <= 0)) stop("argument 'shape' must be positive")
@@ -8220,7 +8248,7 @@ rpareto = function(n, location, shape) {
- pareto1 = function(lshape="loge", earg=list(), location=NULL) {
+ pareto1 = function(lshape = "loge", earg = list(), location = NULL) {
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
if (is.Numeric(location) && location <= 0)
@@ -8230,12 +8258,12 @@ rpareto = function(n, location, shape) {
new("vglmff",
blurb = c("Pareto distribution f(y) = shape * location^shape / y^(shape+1),",
" 0<location<y, shape>0\n",
- "Link: ", namesof("shape", lshape, earg=earg), "\n", "\n",
+ "Link: ", namesof("shape", lshape, earg = earg), "\n", "\n",
"Mean: location*shape/(shape-1) for shape>1"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("shape", .lshape, earg=.earg, tag=FALSE)
+ predictors.names = namesof("shape", .lshape, earg = .earg, tag = FALSE)
locationhat = if (!length( .location)) {
locationEstimated = TRUE
min(y) # - .smallno
@@ -8249,67 +8277,76 @@ rpareto = function(n, location, shape) {
extra$locationEstimated = locationEstimated
if (!length(etastart)) {
k.init = (y + 1/8) / (y - locationhat + 1/8)
- etastart = theta2eta(k.init, .lshape, earg= .earg)
+ etastart = theta2eta(k.init, .lshape, earg = .earg)
}
- }), list( .lshape=lshape, .earg=earg,
- .location=location ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- k = eta2theta(eta, .lshape, earg= .earg)
+ }), list( .lshape = lshape, .earg = earg,
+ .location = location ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ k = eta2theta(eta, .lshape, earg = .earg)
location = extra$location
ifelse(k>1, k * location / (k-1), NA)
- }, list( .lshape=lshape, .earg=earg ))),
+ }, list( .lshape = lshape, .earg = earg ))),
last = eval(substitute(expression({
misc$link = c(k= .lshape)
misc$earg = list(k = .earg)
misc$location = extra$location # Use this for prediction
- }), list( .lshape=lshape, .earg=earg ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- k = eta2theta(eta, .lshape, earg= .earg)
+ }), list( .lshape = lshape, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ k = eta2theta(eta, .lshape, earg = .earg)
location = extra$location
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
sum(w * (log(k) + k * log(location) - (k+1) * log(y )))
}
- }, list( .lshape=lshape, .earg=earg ))),
- vfamily=c("pareto1"),
- deriv=eval(substitute(expression({
+ }, list( .lshape = lshape, .earg = earg ))),
+ vfamily = c("pareto1"),
+ deriv = eval(substitute(expression({
location = extra$location
- k = eta2theta(eta, .lshape, earg= .earg)
+ k = eta2theta(eta, .lshape, earg = .earg)
dl.dk = 1/k + log(location/y)
- dk.deta = dtheta.deta(k, .lshape, earg= .earg)
+ dk.deta = dtheta.deta(k, .lshape, earg = .earg)
w * dl.dk * dk.deta
- }), list( .lshape=lshape, .earg=earg ))),
- weight=eval(substitute(expression({
+ }), list( .lshape = lshape, .earg = earg ))),
+ weight = eval(substitute(expression({
ed2l.dk2 = 1 / k^2
wz = w * dk.deta^2 * ed2l.dk2
wz
- }), list( .lshape=lshape, .earg=earg ))))
+ }), list( .lshape = lshape, .earg = earg ))))
}
dtpareto = function(x, lower, upper, shape) {
- if (!is.Numeric(x)) stop("bad input for argument 'x'")
- if (!is.Numeric(lower, pos=TRUE)) stop("argument 'lower' must be positive")
- if (!is.Numeric(upper, pos=TRUE)) stop("argument 'upper' must be positive")
- if (!is.Numeric(shape, pos=TRUE)) stop("argument 'shape' must be positive")
+ if (!is.Numeric(x))
+ stop("bad input for argument 'x'")
+ if (!is.Numeric(lower, pos = TRUE))
+ stop("argument 'lower' must be positive")
+ if (!is.Numeric(upper, pos = TRUE))
+ stop("argument 'upper' must be positive")
+ if (!is.Numeric(shape, pos = TRUE))
+ stop("argument 'shape' must be positive")
L = max(length(x), length(lower), length(upper), length(shape))
- x = rep(x, len=L); lower = rep(lower, len=L); upper = rep(upper, len=L);
- shape= rep(shape, len=L)
+ x = rep(x, len = L); lower = rep(lower, len = L); upper = rep(upper, len = L);
+ shape= rep(shape, len = L)
ifelse(x > lower & x < upper,
shape * lower^shape / (x^(shape+1) * (1-(lower/upper)^shape)), 0)
}
ptpareto = function(q, lower, upper, shape) {
- if (!is.Numeric(q)) stop("bad input for argument 'q'")
- if (!is.Numeric(lower, pos=TRUE)) stop("argument 'lower' must be positive")
- if (!is.Numeric(upper, pos=TRUE)) stop("argument 'upper' must be positive")
- if (!is.Numeric(shape, pos=TRUE)) stop("argument 'shape' must be positive")
+ if (!is.Numeric(q))
+ stop("bad input for argument 'q'")
+ if (!is.Numeric(lower, pos = TRUE))
+ stop("argument 'lower' must be positive")
+ if (!is.Numeric(upper, pos = TRUE))
+ stop("argument 'upper' must be positive")
+ if (!is.Numeric(shape, pos = TRUE))
+ stop("argument 'shape' must be positive")
L = max(length(q), length(lower), length(upper), length(shape))
- q = rep(q, len=L); lower = rep(lower, len=L); upper = rep(upper, len=L);
- shape= rep(shape, len=L)
+ q = rep(q, len = L); lower = rep(lower, len = L);
+ upper = rep(upper, len = L); shape= rep(shape, len = L)
ans = q * 0
ans[q > lower & q < upper] = (1-(lower/q)^shape) / (1-(lower/upper)^shape)
ans[q >= upper] = 1
@@ -8317,40 +8354,48 @@ ptpareto = function(q, lower, upper, shape) {
}
qtpareto = function(p, lower, upper, shape) {
- if (!is.Numeric(p, posit=TRUE)) stop("bad input for argument 'p'")
- if (!is.Numeric(lower, pos=TRUE)) stop("argument 'lower' must be positive")
- if (!is.Numeric(upper, pos=TRUE)) stop("argument 'upper' must be positive")
- if (!is.Numeric(shape, pos=TRUE)) stop("argument 'shape' must be positive")
- if (max(p) >= 1) stop("argument 'p' must be between 0 and 1")
+ if (!is.Numeric(p, posit = TRUE))
+ stop("bad input for argument 'p'")
+ if (!is.Numeric(lower, pos = TRUE))
+ stop("argument 'lower' must be positive")
+ if (!is.Numeric(upper, pos = TRUE))
+ stop("argument 'upper' must be positive")
+ if (!is.Numeric(shape, pos = TRUE))
+ stop("argument 'shape' must be positive")
+ if (max(p) >= 1)
+ stop("argument 'p' must be between 0 and 1")
lower / (1 - p*(1-(lower/upper)^shape))^(1/shape)
}
rtpareto = function(n, lower, upper, shape) {
- if (!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
stop("bad input for argument 'n'")
- if (!is.Numeric(lower, pos=TRUE)) stop("argument 'lower' must be positive")
- if (!is.Numeric(upper, pos=TRUE)) stop("argument 'upper' must be positive")
- if (!is.Numeric(shape, pos=TRUE)) stop("argument 'shape' must be positive")
+ if (!is.Numeric(lower, pos = TRUE))
+ stop("argument 'lower' must be positive")
+ if (!is.Numeric(upper, pos = TRUE))
+ stop("argument 'upper' must be positive")
+ if (!is.Numeric(shape, pos = TRUE))
+ stop("argument 'shape' must be positive")
lower / (1 - runif(n)*(1-(lower/upper)^shape))^(1/shape)
}
- tpareto1 = function(lower, upper, lshape="loge", earg=list(), ishape=NULL,
- method.init=1) {
+ tpareto1 = function(lower, upper, lshape = "loge", earg = list(), ishape = NULL,
+ method.init = 1) {
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
- if (!is.Numeric(lower, posit=TRUE, allow=1))
+ if (!is.Numeric(lower, posit = TRUE, allow = 1))
stop("bad input for argument 'lower'")
- if (!is.Numeric(upper, posit=TRUE, allow=1))
+ if (!is.Numeric(upper, posit = TRUE, allow = 1))
stop("bad input for argument 'upper'")
if (lower >= upper)
stop("lower < upper is required")
- if (length(ishape) && !is.Numeric(ishape, posit=TRUE))
+ if (length(ishape) && !is.Numeric(ishape, posit = TRUE))
stop("bad input for argument 'ishape'")
if (!is.list(earg)) earg = list()
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
method.init > 2)
stop("'method.init' must be 1 or 2")
@@ -8358,13 +8403,13 @@ rtpareto = function(n, lower, upper, shape) {
blurb = c("Truncated Pareto distribution f(y) = shape * lower^shape /",
"(y^(shape+1) * (1-(lower/upper)^shape)),",
" 0 < lower < y < upper < Inf, shape>0\n",
- "Link: ", namesof("shape", lshape, earg=earg), "\n", "\n",
+ "Link: ", namesof("shape", lshape, earg = earg), "\n", "\n",
"Mean: shape*lower^shape*(upper^(1-shape)-lower^(1-shape)) /",
" ((1-shape) * (1-(lower/upper)^shape))"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("shape", .lshape, earg=.earg, tag=FALSE)
+ predictors.names = namesof("shape", .lshape, earg = .earg, tag = FALSE)
if (any(y <= .lower))
stop("the value of argument 'lower' is too high ",
"(requires '0 < lower < min(y)')")
@@ -8386,50 +8431,50 @@ rtpareto = function(n, lower, upper, shape) {
shape.grid = 2^((-4):4)
try.this = getMaxMin(shape.grid, objfun=tpareto1.Loglikfun,
y=y, x=x, w=w)
- try.this = rep(try.this, len=n)
+ try.this = rep(try.this, len = n)
try.this
}
- etastart = theta2eta(shape.init, .lshape, earg= .earg)
+ etastart = theta2eta(shape.init, .lshape, earg = .earg)
}
- }), list( .ishape=ishape, .earg=earg, .lshape=lshape,
- .method.init=method.init,
+ }), list( .ishape = ishape, .earg = earg, .lshape = lshape,
+ .method.init = method.init,
.lower=lower, .upper=upper ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- shape = eta2theta(eta, .lshape, earg= .earg)
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ shape = eta2theta(eta, .lshape, earg = .earg)
myratio = .lower / .upper
constprop = shape * .lower^shape / (1 - myratio^shape)
constprop * ( .upper^(1-shape) - .lower^(1-shape)) / (1-shape)
- }, list( .lshape=lshape, .earg=earg, .lower=lower, .upper=upper ))),
+ }, list( .lshape = lshape, .earg = earg, .lower=lower, .upper=upper ))),
last = eval(substitute(expression({
misc$link = c(shape= .lshape)
misc$earg = list(shape = .earg)
misc$lower = extra$lower
misc$upper = extra$upper
misc$expected = TRUE
- }), list( .lshape=lshape, .earg=earg, .lower=lower, .upper=upper ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- shape = eta2theta(eta, .lshape, earg= .earg)
+ }), list( .lshape = lshape, .earg = earg, .lower=lower, .upper=upper ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ shape = eta2theta(eta, .lshape, earg = .earg)
myratio = .lower / .upper
if (residuals) stop("loglikelihood residuals not implemented yet") else
sum(w * (log(shape) + shape * log( .lower) - (shape+1) * log(y) -
log1p(-myratio^shape)))
- }, list( .lshape=lshape, .earg=earg, .lower=lower, .upper=upper ))),
- vfamily=c("tpareto1"),
- deriv=eval(substitute(expression({
- shape = eta2theta(eta, .lshape, earg= .earg)
+ }, list( .lshape = lshape, .earg = earg, .lower=lower, .upper=upper ))),
+ vfamily = c("tpareto1"),
+ deriv = eval(substitute(expression({
+ shape = eta2theta(eta, .lshape, earg = .earg)
myratio = .lower / .upper
myratio2 = myratio^shape
tmp330 = myratio2 * log(myratio) / (1 - myratio2)
dl.dshape = 1/shape + log( .lower) - log(y) + tmp330
- dshape.deta = dtheta.deta(shape, .lshape, earg= .earg)
+ dshape.deta = dtheta.deta(shape, .lshape, earg = .earg)
w * dl.dshape * dshape.deta
- }), list( .lshape=lshape, .earg=earg, .lower=lower, .upper=upper ))),
- weight=eval(substitute(expression({
+ }), list( .lshape = lshape, .earg = earg, .lower=lower, .upper=upper ))),
+ weight = eval(substitute(expression({
ed2l.dshape2 = 1 / shape^2 - tmp330^2 / myratio2
wz = w * dshape.deta^2 * ed2l.dshape2
wz
- }), list( .lshape=lshape, .earg=earg, .lower=lower, .upper=upper ))))
+ }), list( .lshape = lshape, .earg = earg, .lower=lower, .upper=upper ))))
}
@@ -8439,11 +8484,11 @@ erf = function(x)
2 * pnorm(x * sqrt(2)) - 1
erfc = function(x)
- 2 * pnorm(x * sqrt(2), lower=FALSE)
+ 2 * pnorm(x * sqrt(2), lower = FALSE)
- wald <- function(link.lambda="loge", earg=list(), init.lambda=NULL)
+ wald <- function(link.lambda = "loge", earg = list(), init.lambda = NULL)
{
if (mode(link.lambda) != "character" && mode(link.lambda) != "name")
link.lambda = as.character(substitute(link.lambda))
@@ -8454,134 +8499,136 @@ erfc = function(x)
"f(y) = sqrt(lambda/(2*pi*y^3)) * exp(-lambda*(y-1)^2/(2*y)), y&lambda>0",
"\n",
"Link: ",
- namesof("lambda", link.lambda, earg=earg), "\n",
+ namesof("lambda", link.lambda, earg = earg), "\n",
"Mean: ", "1\n",
"Variance: 1 / lambda"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
if (any(y <= 0)) stop("Require the response to have positive values")
predictors.names =
- namesof("lambda", .link.lambda, earg=.earg, short=TRUE)
+ namesof("lambda", .link.lambda, earg = .earg, short = TRUE)
if (!length(etastart)) {
initlambda = if (length( .init.lambda)) .init.lambda else
1 / (0.01 + (y-1)^2)
- initlambda = rep(initlambda, len=n)
- etastart = cbind(theta2eta(initlambda, link=.link.lambda, earg= .earg))
+ initlambda = rep(initlambda, len = n)
+ etastart = cbind(theta2eta(initlambda, link=.link.lambda, earg = .earg))
}
- }), list( .link.lambda=link.lambda, .earg=earg,
+ }), list( .link.lambda=link.lambda, .earg = earg,
.init.lambda=init.lambda ))),
- inverse=function(eta, extra=NULL) {
+ inverse=function(eta, extra = NULL) {
0*eta + 1
},
last = eval(substitute(expression({
misc$link = c(lambda = .link.lambda )
misc$earg = list(lambda = .earg )
- }), list( .link.lambda=link.lambda, .earg=earg ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- lambda = eta2theta(eta, link=.link.lambda, earg= .earg)
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ }), list( .link.lambda=link.lambda, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ lambda = eta2theta(eta, link=.link.lambda, earg = .earg)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else
sum(w * (0.5 * log(lambda/(2*pi*y^3)) - lambda * (y-1)^2 / (2*y)))
- }, list( .link.lambda=link.lambda, .earg=earg ))),
- vfamily="wald",
- deriv=eval(substitute(expression({
- lambda = eta2theta(eta, link=.link.lambda, earg= .earg)
+ }, list( .link.lambda=link.lambda, .earg = earg ))),
+ vfamily = "wald",
+ deriv = eval(substitute(expression({
+ lambda = eta2theta(eta, link=.link.lambda, earg = .earg)
dl.dlambda = 0.5 / lambda + 1 - 0.5 * (y + 1/y)
- dlambda.deta = dtheta.deta(theta=lambda, link=.link.lambda, earg= .earg)
+ dlambda.deta = dtheta.deta(theta=lambda, link=.link.lambda, earg = .earg)
w * cbind(dl.dlambda * dlambda.deta)
- }), list( .link.lambda=link.lambda, .earg=earg ))),
- weight=eval(substitute(expression({
+ }), list( .link.lambda=link.lambda, .earg = earg ))),
+ weight = eval(substitute(expression({
d2l.dlambda2 = 0.5 / (lambda^2)
w * cbind(dlambda.deta^2 * d2l.dlambda2)
- }), list( .link.lambda=link.lambda, .earg=earg ))))
+ }), list( .link.lambda=link.lambda, .earg = earg ))))
}
- expexp = function(lshape="loge", lscale="loge",
- eshape=list(), escale=list(),
- ishape=1.1, iscale=NULL, # ishape cannot be 1
+ expexp = function(lshape = "loge", lscale = "loge",
+ eshape = list(), escale = list(),
+ ishape=1.1, iscale = NULL, # ishape cannot be 1
tolerance = 1.0e-6,
- zero=NULL) {
+ zero = NULL) {
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
- if (!is.Numeric(tolerance, posit=TRUE, allow=1) || tolerance>1.0e-2)
+ if (!is.Numeric(tolerance, posit = TRUE, allow = 1) || tolerance>1.0e-2)
stop("bad input for argument 'tolerance'")
- if (!is.Numeric(ishape, posit=TRUE))
+ if (!is.Numeric(ishape, posit = TRUE))
stop("bad input for argument 'ishape'")
- if (length(iscale) && !is.Numeric(iscale, posit=TRUE))
+ if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
stop("bad input for argument 'iscale'")
- ishape[ishape==1] = 1.1 # Fails in @deriv
+ ishape[ishape == 1] = 1.1 # Fails in @deriv
if (!is.list(escale)) escale = list()
if (!is.list(eshape)) eshape = list()
new("vglmff",
blurb = c("Exponentiated Exponential Distribution\n",
"Links: ",
- namesof("shape", lshape, earg=eshape), ", ",
- namesof("scale", lscale, earg=escale),"\n",
+ namesof("shape", lshape, earg = eshape), ", ",
+ namesof("scale", lscale, earg = escale),"\n",
"Mean: (digamma(shape+1)-digamma(1))/scale"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("shape", .lshape, earg=.eshape, short=TRUE),
- namesof("scale", .lscale, earg=.escale, short=TRUE))
+ c(namesof("shape", .lshape, earg = .eshape, short = TRUE),
+ namesof("scale", .lscale, earg = .escale, short = TRUE))
if (!length(etastart)) {
- shape.init = if (!is.Numeric( .ishape, posit=TRUE))
+ shape.init = if (!is.Numeric( .ishape, posit = TRUE))
stop("argument 'ishape' must be positive") else
- rep(.ishape, len=n)
- scale.init = if (length( .iscale)) rep(.iscale, len=n) else
+ rep( .ishape, len = n)
+ scale.init = if (length( .iscale)) rep( .iscale, len = n) else
(digamma(shape.init+1) - digamma(1)) / (y+1/8)
- scale.init = rep(weighted.mean(scale.init, w=w), len=n)
- etastart = cbind(theta2eta(shape.init, .lshape, earg= .eshape),
- theta2eta(scale.init, .lscale, earg= .escale))
- }
- }), list( .lshape=lshape, .lscale=lscale, .iscale=iscale, .ishape=ishape,
- .eshape=eshape, .escale=escale ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- shape = eta2theta(eta[,1], .lshape, earg= .eshape)
- scale = eta2theta(eta[,2], .lscale, earg= .escale)
+ scale.init = rep(weighted.mean(scale.init, w=w), len = n)
+ etastart = cbind(theta2eta(shape.init, .lshape, earg = .eshape),
+ theta2eta(scale.init, .lscale, earg = .escale))
+ }
+ }), list( .lshape = lshape, .lscale = lscale, .iscale = iscale, .ishape = ishape,
+ .eshape = eshape, .escale = escale ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ shape = eta2theta(eta[,1], .lshape, earg = .eshape)
+ scale = eta2theta(eta[,2], .lscale, earg = .escale)
(digamma(shape+1)-digamma(1)) / scale
- }, list( .lshape=lshape, .lscale=lscale,
- .eshape=eshape, .escale=escale ))),
+ }, list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale ))),
last = eval(substitute(expression({
- misc$link = c("shape"= .lshape, "scale"= .lscale)
- misc$earg = list(shape= .eshape, scale= .escale)
+ misc$link = c("shape" = .lshape, "scale" = .lscale)
+ misc$earg = list("shape" = .eshape, "scale" = .escale)
misc$expected = TRUE
- }), list( .lshape=lshape, .lscale=lscale,
- .eshape=eshape, .escale=escale ))),
+ }), list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale ))),
loglikelihood= eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- shape = eta2theta(eta[,1], .lshape, earg= .eshape)
- scale = eta2theta(eta[,2], .lscale, earg= .escale)
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ shape = eta2theta(eta[,1], .lshape, earg = .eshape)
+ scale = eta2theta(eta[,2], .lscale, earg = .escale)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else
sum(w * (log(shape) + log(scale) +
(shape-1)*log1p(-exp(-scale*y)) - scale*y))
- }, list( .lscale=lscale, .lshape=lshape,
- .eshape=eshape, .escale=escale ))),
- vfamily=c("expexp"),
- deriv=eval(substitute(expression({
- shape = eta2theta(eta[,1], .lshape, earg= .eshape)
- scale = eta2theta(eta[,2], .lscale, earg= .escale)
+ }, list( .lscale = lscale, .lshape = lshape,
+ .eshape = eshape, .escale = escale ))),
+ vfamily = c("expexp"),
+ deriv = eval(substitute(expression({
+ shape = eta2theta(eta[,1], .lshape, earg = .eshape)
+ scale = eta2theta(eta[,2], .lscale, earg = .escale)
dl.dscale = 1/scale + (shape-1)*y*exp(-scale*y) / (-expm1(-scale*y)) - y
dl.dshape = 1/shape + log1p(-exp(-scale*y))
- dscale.deta = dtheta.deta(scale, .lscale, earg= .escale)
- dshape.deta = dtheta.deta(shape, .lshape, earg= .eshape)
+ dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
+ dshape.deta = dtheta.deta(shape, .lshape, earg = .eshape)
w * cbind(dl.dshape * dshape.deta, dl.dscale * dscale.deta)
- }), list( .lshape=lshape, .lscale=lscale,
- .eshape=eshape, .escale=escale ))),
- weight=eval(substitute(expression({
+ }), list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale ))),
+ weight = eval(substitute(expression({
d11 = 1 / shape^2 # True for all shape
- d22 = d12 = rep(as.numeric(NA), len=n)
+ d22 = d12 = rep(as.numeric(NA), len = n)
index2 = abs(shape - 2) > .tolerance # index2 = shape != 1
largeno = 10000
if (any(index2)) {
@@ -8589,7 +8636,7 @@ erfc = function(x)
Shape[abs(Shape-1) < .tolerance] = 1.001 # digamma(0) is undefined
Scale = scale[index2]
tmp200 = trigamma(1)-trigamma(Shape-1) +
- (digamma(Shape-1)-digamma(1))^2 # Fails when Shape==1
+ (digamma(Shape-1)-digamma(1))^2 # Fails when Shape == 1
tmp300 = trigamma(1)-digamma(Shape)+(digamma(Shape)-digamma(1))^2
d22[index2] = (1 + Shape*(Shape-1)*tmp200/(Shape-2)) / Scale^2 +
Shape*tmp300 / Scale^2
@@ -8622,27 +8669,27 @@ erfc = function(x)
- expexp1 = function(lscale="loge",
- escale=list(),
- iscale=NULL,
+ expexp1 = function(lscale = "loge",
+ escale = list(),
+ iscale = NULL,
ishape=1) {
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if (!is.list(escale)) escale = list()
- if (length(iscale) && !is.Numeric(iscale, posit=TRUE))
+ if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
stop("bad input for argument 'iscale'")
new("vglmff",
blurb = c("Exponentiated Exponential Distribution",
" (profile likelihood estimation)\n",
"Links: ",
- namesof("scale", lscale, earg=escale), "\n",
+ namesof("scale", lscale, earg = escale), "\n",
"Mean: (digamma(shape+1)-digamma(1))/scale"),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("scale", .lscale, earg=.escale, short=TRUE)
- if (length(w) != n || !is.Numeric(w, integer=TRUE, posit=TRUE))
+ predictors.names = namesof("scale", .lscale, earg = .escale, short = TRUE)
+ if (length(w) != n || !is.Numeric(w, integer = TRUE, posit = TRUE))
stop("weights must be a vector of positive integers")
if (!intercept.only)
stop("this family function only works for an intercept-only, i.e., y ~ 1")
@@ -8650,54 +8697,55 @@ erfc = function(x)
extra$sumw = sum(w)
extra$w = w
if (!length(etastart)) {
- shape.init = if (!is.Numeric( .ishape, posit=TRUE))
+ shape.init = if (!is.Numeric( .ishape, posit = TRUE))
stop("argument 'ishape' must be positive") else
- rep(.ishape, len=n)
- scaleinit = if (length( .iscale)) rep(.iscale, len=n) else
+ rep( .ishape, len = n)
+ scaleinit = if (length( .iscale)) rep( .iscale, len = n) else
(digamma(shape.init+1) - digamma(1)) / (y+1/8)
- etastart = cbind(theta2eta(scaleinit, .lscale, earg= .escale))
+ etastart = cbind(theta2eta(scaleinit, .lscale, earg = .escale))
}
- }), list( .lscale=lscale, .iscale=iscale, .ishape=ishape,
- .escale=escale ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- scale = eta2theta(eta, .lscale, earg= .escale)
+ }), list( .lscale = lscale, .iscale = iscale, .ishape = ishape,
+ .escale = escale ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ scale = eta2theta(eta, .lscale, earg = .escale)
temp7 = -expm1(-scale*extra$yvector)
shape = -extra$sumw / sum(extra$w*log(temp7)) # \gamma(\theta)
(digamma(shape+1)-digamma(1)) / scale
- }, list( .lscale=lscale,
- .escale=escale ))),
+ }, list( .lscale = lscale,
+ .escale = escale ))),
last = eval(substitute(expression({
- misc$link = c("scale"= .lscale)
- misc$earg = list(scale= .escale)
+ misc$link = c("scale" = .lscale)
+ misc$earg = list("scale" = .escale)
temp7 = -expm1(-scale*y)
shape = -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
misc$shape = shape # Store the ML estimate here
misc$pooled.weight = pooled.weight
- }), list( .lscale=lscale, .escale=escale ))),
+ }), list( .lscale = lscale, .escale = escale ))),
loglikelihood= eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- scale = eta2theta(eta, .lscale, earg= .escale)
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ scale = eta2theta(eta, .lscale, earg = .escale)
temp7 = -expm1(-scale*y)
shape = -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
- if (residuals) stop("loglikelihood residuals not implemented yet") else
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else
sum(w * (log(shape) + log(scale) +
(shape-1)*log1p(-exp(-scale*y)) - scale*y))
- }, list( .lscale=lscale, .escale=escale ))),
- vfamily=c("expexp1"),
- deriv=eval(substitute(expression({
- scale = eta2theta(eta, .lscale, earg= .escale)
+ }, list( .lscale = lscale, .escale = escale ))),
+ vfamily = c("expexp1"),
+ deriv = eval(substitute(expression({
+ scale = eta2theta(eta, .lscale, earg = .escale)
temp6 = exp(-scale*y)
temp7 = 1-temp6
shape = -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
d1 = 1/scale + (shape-1)*y*temp6/temp7 - y
- w * cbind(d1 * dtheta.deta(scale, .lscale, earg= .escale))
- }), list( .lscale=lscale, .escale=escale ))),
- weight=eval(substitute(expression({
+ w * cbind(d1 * dtheta.deta(scale, .lscale, earg = .escale))
+ }), list( .lscale = lscale, .escale = escale ))),
+ weight = eval(substitute(expression({
d11 = 1/scale^2 + y*(temp6/temp7^2) * ((shape-1) *
(y*temp7+temp6) - y*temp6 / (log(temp7))^2)
wz = matrix(0, n, dimm(M))
- wz[,iam(1,1,M)] = dtheta.deta(scale, .lscale, earg= .escale)^2 * d11 -
- d2theta.deta2(scale, .lscale, earg= .escale) * d1
+ wz[,iam(1,1,M)] = dtheta.deta(scale, .lscale, earg = .escale)^2 * d11 -
+ d2theta.deta2(scale, .lscale, earg = .escale) * d1
if (FALSE && intercept.only) {
sumw = sum(w)
@@ -8708,28 +8756,28 @@ erfc = function(x)
} else
pooled.weight = FALSE
w * wz
- }), list( .lscale=lscale, .escale=escale ))))
+ }), list( .lscale = lscale, .escale = escale ))))
}
-betaffqn.control <- function(save.weight=TRUE, ...)
+betaffqn.control <- function(save.weight = TRUE, ...)
{
- list(save.weight=save.weight)
+ list(save.weight = save.weight)
}
if (FALSE)
- betaffqn = function(link="loge", earg=list(),
- i1=NULL, i2=NULL, trim=0.05, A=0, B=1)
+ betaffqn = function(link = "loge", earg = list(),
+ i1 = NULL, i2 = NULL, trim=0.05, A=0, B=1)
{
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
- if (!is.Numeric(A, allow=1) || !is.Numeric(B, allow=1) || A >= B)
+ if (!is.Numeric(A, allow = 1) || !is.Numeric(B, allow = 1) || A >= B)
stop("A must be < B, and both must be of length one")
- stdbeta = (A==0 && B==1) # stdbeta==T iff standard beta distribution
+ stdbeta = (A == 0 && B == 1) # stdbeta==T iff standard beta distribution
if (!is.list(earg)) earg = list()
new("vglmff",
@@ -8738,67 +8786,68 @@ betaffqn.control <- function(save.weight=TRUE, ...)
"y^(shape1-1) * (1-y)^(shape2-1), 0<=y<=1, shape1>0, shape2>0\n\n"
else
paste("(y-",A,")^(shape1-1) * (",B,
- "-y)^(shape2-1), ",A,"<=y<=",B," shape1>0, shape2>0\n\n", sep=""),
+ "-y)^(shape2-1), ",A,"<=y< = ",B," shape1>0, shape2>0\n\n", sep = ""),
"Links: ",
- namesof("shape1", link, earg=earg), ", ",
- namesof("shape2", link, earg=earg)),
- initialize=eval(substitute(expression({
+ namesof("shape1", link, earg = earg), ", ",
+ namesof("shape2", link, earg = earg)),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
if (min(y) <= .A || max(y) >= .B)
stop("data not within (A, B)")
- predictors.names = c(namesof("shape1", .link, earg=.earg, short=TRUE),
- namesof("shape2", .link, earg=.earg, short=TRUE))
- if (is.numeric(.i1) && is.numeric(.i2)) {
- vec = c(.i1, .i2)
- vec = c(theta2eta(vec[1], .link, earg= .earg),
- theta2eta(vec[2], .link, earg= .earg))
+ predictors.names = c(namesof("shape1", .link, earg = .earg, short = TRUE),
+ namesof("shape2", .link, earg = .earg, short = TRUE))
+ if (is.numeric( .i1) && is.numeric( .i2)) {
+ vec = c( .i1, .i2)
+ vec = c(theta2eta(vec[1], .link, earg = .earg),
+ theta2eta(vec[2], .link, earg = .earg))
etastart = matrix(vec, n, 2, byrow= TRUE)
}
# For QN update below
- if (length(w) != n || !is.Numeric(w, posit=TRUE))
+ if (length(w) != n || !is.Numeric(w, posit = TRUE))
stop("weights must be a vector of positive weights")
if (!length(etastart)) {
mu1d = mean(y, trim=.trim)
- uu = (mu1d-.A) / (.B - .A)
- DD = (.B - .A)^2
+ uu = (mu1d-.A) / ( .B - .A)
+ DD = ( .B - .A)^2
pinit = uu^2 * (1-uu)*DD/var(y) - uu # But var(y) is not robust
qinit = pinit * (1-uu) / uu
- etastart = matrix(theta2eta(c(pinit,qinit), .link, earg= .earg),
- n,2,byrow=TRUE)
- }
- }), list( .link=link, .earg=earg, .i1=i1, .i2=i2, .trim=trim, .A=A, .B=B ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- shapes = eta2theta(eta, .link, earg= .earg)
- .A + (.B-.A) * shapes[,1] / (shapes[,1] + shapes[,2])
- }, list( .link=link, .earg=earg, .A=A, .B=B ))),
+ etastart = matrix(theta2eta(c(pinit,qinit), .link, earg = .earg),
+ n,2,byrow = TRUE)
+ }
+ }), list( .link = link, .earg = earg, .i1=i1, .i2=i2, .trim=trim, .A = A, .B = B ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ shapes = eta2theta(eta, .link, earg = .earg)
+ .A + ( .B-.A) * shapes[,1] / (shapes[,1] + shapes[,2])
+ }, list( .link = link, .earg = earg, .A = A, .B = B ))),
last = eval(substitute(expression({
misc$link = c(shape1 = .link, shape2 = .link)
misc$earg = list(shape1 = .earg, shape2 = .earg)
- misc$limits = c(.A, .B)
+ misc$limits = c( .A, .B)
misc$expected = FALSE
misc$BFGS = TRUE
- }), list( .link=link, .earg=earg, .A=A, .B=B ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals= FALSE, eta, extra=NULL){
- shapes = eta2theta(eta, .link, earg= .earg)
+ }), list( .link = link, .earg = earg, .A = A, .B = B ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL){
+ shapes = eta2theta(eta, .link, earg = .earg)
temp = lbeta(shapes[,1], shapes[,2])
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
- sum(w * ((shapes[,1]-1)*log(y-.A) + (shapes[,2]-1)*log(.B-y) - temp -
- (shapes[,1]+shapes[,2]-1)*log(.B-.A )))
+ sum(w * ((shapes[,1]-1)*log(y-.A) + (shapes[,2]-1)*log( .B-y) - temp -
+ (shapes[,1]+shapes[,2]-1)*log( .B-.A )))
}
- }, list( .link=link, .earg=earg, .A=A, .B=B ))),
- vfamily="betaffqn",
- deriv=eval(substitute(expression({
- shapes = eta2theta(eta, .link, earg= .earg)
- dshapes.deta = dtheta.deta(shapes, .link, earg= .earg)
- dl.dshapes = cbind(log(y-.A), log(.B-y)) - digamma(shapes) +
- digamma(shapes[,1] + shapes[,2]) - log(.B - .A)
+ }, list( .link = link, .earg = earg, .A = A, .B = B ))),
+ vfamily = "betaffqn",
+ deriv = eval(substitute(expression({
+ shapes = eta2theta(eta, .link, earg = .earg)
+ dshapes.deta = dtheta.deta(shapes, .link, earg = .earg)
+ dl.dshapes = cbind(log(y-.A), log( .B-y)) - digamma(shapes) +
+ digamma(shapes[,1] + shapes[,2]) - log( .B - .A)
if (iter == 1) {
etanew = eta
} else {
@@ -8808,8 +8857,8 @@ betaffqn.control <- function(save.weight=TRUE, ...)
}
derivnew = w * dl.dshapes * dshapes.deta
derivnew
- }), list( .link=link, .earg=earg, .A=A, .B=B ))),
- weight=expression({
+ }), list( .link = link, .earg = earg, .A = A, .B = B ))),
+ weight = expression({
if (iter == 1) {
wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
} else {
@@ -8826,21 +8875,21 @@ betaffqn.control <- function(save.weight=TRUE, ...)
- logistic2 = function(llocation="identity",
- lscale="loge",
- elocation=list(),
- escale=list(),
- ilocation=NULL, iscale=NULL,
- method.init=1, zero=NULL) {
+ logistic2 = function(llocation = "identity",
+ lscale = "loge",
+ elocation = list(),
+ escale = list(),
+ ilocation = NULL, iscale = NULL,
+ method.init = 1, zero = NULL) {
if (mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
method.init > 2) stop("argument 'method.init' must be 1 or 2")
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
stop("bad input for argument 'zero'")
- if (length(iscale) && !is.Numeric(iscale, posit=TRUE))
+ if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
stop("bad input for argument 'iscale'")
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
@@ -8848,20 +8897,20 @@ betaffqn.control <- function(save.weight=TRUE, ...)
new("vglmff",
blurb = c("Two-parameter logistic distribution\n\n",
"Links: ",
- namesof("location", llocation, earg=elocation), ", ",
- namesof("scale", lscale, earg=escale),
+ namesof("location", llocation, earg = elocation), ", ",
+ namesof("scale", lscale, earg = escale),
"\n", "\n",
"Mean: location", "\n",
"Variance: (pi*scale)^2 / 3"),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("location", .llocation, earg=.elocation, tag=FALSE),
- namesof("scale", .lscale, earg=.escale, tag=FALSE))
+ c(namesof("location", .llocation, earg = .elocation, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale, tag = FALSE))
if (!length(etastart)) {
if ( .method.init == 1) {
location.init = y
@@ -8870,1972 +8919,67 @@ betaffqn.control <- function(save.weight=TRUE, ...)
location.init = median(rep(y, w))
scale.init = sqrt(3) * sum(w*(y-location.init)^2) / (sum(w)*pi)
}
- location.init = if (length(.ilocation)) rep(.ilocation, len=n) else
- rep(location.init, len=n)
- if (.llocation == "loge") location.init = abs(location.init) + 0.001
- scale.init = if (length(.iscale)) rep(.iscale, len=n) else
- rep(1, len=n)
+ location.init = if (length( .ilocation)) rep( .ilocation, len = n) else
+ rep(location.init, len = n)
+ if ( .llocation == "loge") location.init = abs(location.init) + 0.001
+ scale.init = if (length( .iscale)) rep( .iscale, len = n) else
+ rep(1, len = n)
etastart = cbind(
- theta2eta(location.init, .llocation, earg= .elocation),
- theta2eta(scale.init, .lscale, earg= .escale))
- }
- }), list( .method.init=method.init, .ilocation=ilocation,
- .elocation=elocation, .escale=escale,
- .llocation=llocation, .iscale=iscale, .lscale=lscale ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- eta2theta(eta[,1], .llocation, earg= .elocation)
- }, list( .llocation=llocation,
- .elocation=elocation, .escale=escale ))),
+ theta2eta(location.init, .llocation, earg = .elocation),
+ theta2eta(scale.init, .lscale, earg = .escale))
+ }
+ }), list( .method.init = method.init,
+ .elocation = elocation, .escale = escale,
+ .llocation = llocation, .lscale = lscale,
+ .ilocation = ilocation, .iscale = iscale ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[,1], .llocation, earg = .elocation)
+ }, list( .llocation = llocation,
+ .elocation = elocation, .escale = escale ))),
last = eval(substitute(expression({
- misc$link = c(location=.llocation, scale= .lscale)
- misc$earg = list(location= .elocation, scale= .escale)
- }), list( .llocation=llocation, .lscale=lscale,
- .elocation=elocation, .escale=escale ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- location = eta2theta(eta[,1], .llocation, earg= .elocation)
- Scale = eta2theta(eta[,2], .lscale, earg= .escale)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
+ misc$link = c(location = .llocation, scale = .lscale)
+ misc$earg = list(location = .elocation, scale = .escale)
+ }), list( .llocation = llocation, .lscale = lscale,
+ .elocation = elocation, .escale = escale ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ location = eta2theta(eta[,1], .llocation, earg = .elocation)
+ Scale = eta2theta(eta[,2], .lscale, earg = .escale)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
sum(w * dlogis(x=y, location = location,
scale = Scale, log = TRUE))
}
- }, list( .llocation=llocation, .lscale=lscale,
- .elocation=elocation, .escale=escale ))),
- vfamily=c("logistic2"),
- deriv=eval(substitute(expression({
- location = eta2theta(eta[,1], .llocation, earg= .elocation)
- Scale = eta2theta(eta[,2], .lscale, earg= .escale)
+ }, list( .llocation = llocation, .lscale = lscale,
+ .elocation = elocation, .escale = escale ))),
+ vfamily = c("logistic2"),
+ deriv = eval(substitute(expression({
+ location = eta2theta(eta[,1], .llocation, earg = .elocation)
+ Scale = eta2theta(eta[,2], .lscale, earg = .escale)
zedd = (y-location) / Scale
ezedd = exp(-zedd)
dl.dlocation = (1-ezedd) / ((1 + ezedd) * Scale)
- dlocation.deta = dtheta.deta(location, .llocation, earg= .elocation)
+ dlocation.deta = dtheta.deta(location, .llocation, earg = .elocation)
dl.dscale = zedd * (1-ezedd) / ((1 + ezedd) * Scale) - 1/Scale
- dscale.deta = dtheta.deta(Scale, .lscale, earg= .escale)
+ dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
w * cbind(dl.dlocation * dlocation.deta,
dl.dscale * dscale.deta)
- }), list( .llocation=llocation, .lscale=lscale,
- .elocation=elocation, .escale=escale ))),
- weight=eval(substitute(expression({
+ }), list( .llocation = llocation, .lscale = lscale,
+ .elocation = elocation, .escale = escale ))),
+ weight = eval(substitute(expression({
d2l.location2 = 1 / (3*Scale^2)
d2l.dscale2 = (3 + pi^2) / (9*Scale^2)
wz = matrix(as.numeric(NA), nrow=n, ncol=M) # diagonal
wz[,iam(1,1,M)] = d2l.location2 * dlocation.deta^2
wz[,iam(2,2,M)] = d2l.dscale2 * dscale.deta^2
w * wz
- }), list( .llocation=llocation, .lscale=lscale,
- .elocation=elocation, .escale=escale ))))
-}
-
-
-
-
-
-
- alaplace2 = function(tau = NULL,
- llocation="identity", lscale="loge",
- elocation=list(), escale=list(),
- ilocation=NULL, iscale=NULL,
- kappa = sqrt(tau/(1-tau)),
- shrinkage.init=0.95, parallelLocation=FALSE, digt=4,
- sameScale=TRUE,
- dfmu.init = 3,
- method.init=1, zero="(1 + M/2):M") {
-
- if (!is.Numeric(kappa, posit=TRUE))
- stop("bad input for argument 'kappa'")
- if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
- stop("arguments 'kappa' and 'tau' do not match")
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 4) stop("argument 'method.init' must be 1, 2 or ... 4")
- if (length(iscale) && !is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument 'iscale'")
- if (!is.list(elocation)) elocation = list()
- if (!is.list(escale)) escale = list()
- if (!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
- shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
- if (length(zero) &&
- !(is.Numeric(zero, integer=TRUE, posit=TRUE) || is.character(zero )))
- stop("bad input for argument 'zero'")
- if (!is.logical(sameScale) || length(sameScale) != 1)
- stop("bad input for argument 'sameScale'")
- if (!is.logical(parallelLocation) || length(parallelLocation) != 1)
- stop("bad input for argument 'parallelLocation'")
- fittedMean = FALSE
- if (!is.logical(fittedMean) || length(fittedMean) != 1)
- stop("bad input for argument 'fittedMean'")
-
- new("vglmff",
- blurb = c("Two-parameter asymmetric Laplace distribution\n\n",
- "Links: ",
- namesof("location", llocation, earg=elocation), ", ",
- namesof("scale", lscale, earg=escale),
- "\n", "\n",
- "Mean: location + scale * (1/kappa - kappa) / sqrt(2)", "\n",
- "Quantiles: location", "\n",
- "Variance: scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
- constraints=eval(substitute(expression({
- .ZERO = .zero
- if (is.character(.ZERO)) .ZERO = eval(parse(text = .ZERO))
- .PARALLEL = .parallelLocation
- parHmat = if (is.logical(.PARALLEL) && .PARALLEL)
- matrix(1, M/2, 1) else diag(M/2)
- scaleHmat = if (is.logical(.sameScale) && .sameScale)
- matrix(1, M/2, 1) else diag(M/2)
- mycmatrix = cbind(rbind(parHmat, 0*parHmat),
- rbind(0*scaleHmat, scaleHmat))
- constraints=cm.vgam(mycmatrix, x, .PARALLEL, constraints, int=FALSE)
- constraints = cm.zero.vgam(constraints, x, .ZERO, M)
-
- if (.PARALLEL && names(constraints)[1] == "(Intercept)") {
- parHmat = diag(M/2)
- mycmatrix = cbind(rbind(parHmat, 0*parHmat),
- rbind(0*scaleHmat, scaleHmat))
- constraints[["(Intercept)"]] = mycmatrix
- }
- if (is.logical(.sameScale) && .sameScale &&
- names(constraints)[1] == "(Intercept)") {
- temp3 = constraints[["(Intercept)"]]
- temp3 = cbind(temp3[,1:(M/2)], rbind(0*scaleHmat, scaleHmat))
- constraints[["(Intercept)"]] = temp3
- }
- }), list( .sameScale=sameScale, .parallelLocation=parallelLocation,
- .zero=zero ))),
- initialize=eval(substitute(expression({
- extra$kappa = .kappa
- extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
- if (ncol(y <- cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- extra$M = M = 2 * length(extra$kappa)
- extra$n = n
- extra$y.names = y.names = paste("tau=", round(extra$tau, dig=.digt),
- sep="")
- extra$individual = FALSE
- predictors.names =
- c(namesof(paste("quantile(", y.names, ")", sep=""),
- .llocation, earg=.elocation, tag=FALSE),
- namesof(if (M == 2) "scale" else
- paste("scale", 1:(M/2), sep=""),
- .lscale, earg=.escale, tag=FALSE))
-
- if (!length(etastart)) {
- if ( .method.init == 1) {
- location.init = weighted.mean(y, w)
- scale.init = sqrt(var(y) / 2)
- } else if ( .method.init == 2) {
- location.init = median(y)
- scale.init = sqrt(sum(w*abs(y-median(y))) / (sum(w) *2))
- } else if ( .method.init == 3) {
- fit500 = vsmooth.spline(x=x[,min(ncol(x),2)], y=y, w=w,
- df= .dfmu.init)
- location.init = c(predict(fit500, x=x[,min(ncol(x),2)])$y)
- scale.init = sqrt(sum(w*abs(y-median(y))) / (sum(w) *2))
- } else {
- use.this = weighted.mean(y, w)
- location.init = (1- .sinit)*y + .sinit * use.this
- scale.init = sqrt(sum(w*abs(y-median(y ))) / (sum(w) *2))
- }
- location.init = if (length(.ilocation)) rep(.ilocation, len=n) else
- rep(location.init, len=n)
- location.init = matrix(location.init, n, M/2)
- scale.init = if (length(.iscale)) rep(.iscale, len=n) else
- rep(scale.init, len=n)
- scale.init = matrix(scale.init, n, M/2)
- etastart =
- cbind(theta2eta(location.init, .llocation, earg= .elocation),
- theta2eta(scale.init, .lscale, earg= .escale))
- }
- }), list( .method.init=method.init,
- .dfmu.init=dfmu.init,
- .sinit=shrinkage.init, .digt=digt,
- .elocation=elocation, .escale=escale,
- .llocation=llocation, .lscale=lscale, .kappa=kappa,
- .ilocation=ilocation, .iscale=iscale ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- if ( .fittedMean) {
- kappamat = matrix(extra$kappa, extra$n, extra$M/2, byrow=TRUE)
- location = eta2theta(eta[,1:(extra$M/2),drop=FALSE],
- .llocation, earg= .elocation)
- Scale = eta2theta(eta[,(1+extra$M/2):extra$M], .lscale, earg= .escale)
- location + Scale * (1/kappamat - kappamat)
- } else {
- location = eta2theta(eta[,1:(extra$M/2),drop=FALSE],
- .llocation, earg= .elocation)
- dimnames(location) = list(dimnames(eta)[[1]], extra$y.names)
- location
- }
- }, list( .elocation=elocation, .llocation=llocation,
- .fittedMean=fittedMean, .escale=escale, .lscale=lscale,
- .kappa=kappa ))),
- last = eval(substitute(expression({
- misc$link = c(location= .llocation, scale= .lscale)
- misc$earg = list(location= .elocation, scale= .escale)
- misc$expected = TRUE
- extra$kappa = misc$kappa = .kappa
- extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
- misc$true.mu = .fittedMean # @fitted is not a true mu?
- extra$percentile = numeric(length(misc$kappa))
- location = as.matrix(location)
- for(ii in 1:length(misc$kappa))
- extra$percentile[ii] = 100 * weighted.mean(y <= location[,ii], w)
- }), list( .elocation=elocation, .llocation=llocation,
- .escale=escale, .lscale=lscale,
- .fittedMean=fittedMean,
- .kappa=kappa ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- ymat = matrix(y, extra$n, extra$M/2)
- kappamat = matrix(extra$kappa, extra$n, extra$M/2, byrow=TRUE)
- location = eta2theta(eta[,1:(extra$M/2),drop=FALSE],
- .llocation, earg= .elocation)
- Scale = eta2theta(eta[,(1+extra$M/2):extra$M], .lscale, earg= .escale)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dalap(x=c(ymat), location=c(location),
- scale=c(Scale), kappa=c(kappamat), log=TRUE))
- }
- }, list( .elocation=elocation, .llocation=llocation,
- .escale=escale, .lscale=lscale,
- .kappa=kappa ))),
- vfamily=c("alaplace2"),
- deriv=eval(substitute(expression({
- ymat = matrix(y, n, M/2)
- Scale = eta2theta(eta[,(1+extra$M/2):extra$M], .lscale, earg= .escale)
- location = eta2theta(eta[,1:(extra$M/2),drop=FALSE],
- .llocation, earg= .elocation)
- kappamat = matrix(extra$kappa, n, M/2, byrow=TRUE)
- zedd = abs(ymat-location) / Scale
- dl.dlocation = sqrt(2) * ifelse(ymat >= location, kappamat, 1/kappamat) *
- sign(ymat-location) / Scale
- dl.dscale = sqrt(2) * ifelse(ymat >= location, kappamat, 1/kappamat) *
- zedd / Scale - 1 / Scale
- dlocation.deta = dtheta.deta(location, .llocation, earg= .elocation)
- dscale.deta = dtheta.deta(Scale, .lscale, earg= .escale)
- w * cbind(dl.dlocation * dlocation.deta,
- dl.dscale * dscale.deta)
- }), list( .escale=escale, .lscale=lscale,
- .elocation=elocation, .llocation=llocation,
- .kappa=kappa ))),
- weight=eval(substitute(expression({
- d2l.dlocation2 = 2 / Scale^2
- d2l.dscale2 = 1 / Scale^2
- wz = cbind(d2l.dlocation2 * dlocation.deta^2,
- d2l.dscale2 * dscale.deta^2)
- w * wz
- }), list( .escale=escale, .lscale=lscale,
- .elocation=elocation, .llocation=llocation ))))
-}
-
-
-
-
-
-alaplace1.control <- function(maxit=300, ...)
-{
- list(maxit=maxit)
+ }), list( .llocation = llocation, .lscale = lscale,
+ .elocation = elocation, .escale = escale ))))
}
- alaplace1 = function(tau = NULL,
- llocation="identity",
- elocation=list(),
- ilocation=NULL,
- kappa = sqrt(tau/(1-tau)),
- Scale.arg=1,
- shrinkage.init=0.95, parallelLocation=FALSE, digt=4,
- dfmu.init = 3,
- method.init=1, zero=NULL) {
-
-
- if (!is.Numeric(kappa, posit=TRUE))
- stop("bad input for argument 'kappa'")
- if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
- stop("arguments 'kappa' and 'tau' do not match")
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 4) stop("argument 'method.init' must be 1, 2 or ... 4")
- if (!is.list(elocation)) elocation = list()
- if (!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
- shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
- if (length(zero) &&
- !(is.Numeric(zero, integer=TRUE, posit=TRUE) || is.character(zero )))
- stop("bad input for argument 'zero'")
- if (!is.Numeric(Scale.arg, posit=TRUE))
- stop("bad input for argument 'Scale.arg'")
- if (!is.logical(parallelLocation) || length(parallelLocation) != 1)
- stop("bad input for argument 'parallelLocation'")
- fittedMean = FALSE
- if (!is.logical(fittedMean) || length(fittedMean) != 1)
- stop("bad input for argument 'fittedMean'")
-
- new("vglmff",
- blurb = c("One-parameter asymmetric Laplace distribution\n\n",
- "Links: ",
- namesof("location", llocation, earg=elocation),
- "\n", "\n",
- "Mean: location + scale * (1/kappa - kappa) / sqrt(2)", "\n",
- "Quantiles: location", "\n",
- "Variance: scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
- constraints=eval(substitute(expression({
- constraints = cm.vgam(matrix(1,M,1), x, .parallelLocation,
- constraints, intercept=FALSE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .parallelLocation=parallelLocation,
- .Scale.arg=Scale.arg, .zero=zero ))),
- initialize=eval(substitute(expression({
- extra$M = M = max(length( .Scale.arg ), length( .kappa )) # Recycle
- extra$Scale = rep( .Scale.arg, length=M)
- extra$kappa = rep( .kappa, length=M)
- extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
- if (ncol(y <- cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- extra$n = n
- extra$y.names = y.names = paste("tau=", round(extra$tau, dig=.digt),
- sep="")
- extra$individual = FALSE
- predictors.names = namesof(paste("quantile(", y.names, ")", sep=""),
- .llocation, earg=.elocation, tag=FALSE)
-
- if (!length(etastart)) {
- if ( .method.init == 1) {
- location.init = weighted.mean(y, w)
- } else if ( .method.init == 2) {
- location.init = median(y)
- } else if ( .method.init == 3) {
- fit500 = vsmooth.spline(x=x[,min(ncol(x),2)], y=y, w=w,
- df= .dfmu.init)
- location.init = c(predict(fit500, x=x[,min(ncol(x),2)])$y)
- } else {
- use.this = weighted.mean(y, w)
- location.init = (1- .sinit)*y + .sinit * use.this
- }
- location.init = if (length(.ilocation)) rep(.ilocation, len=n) else
- rep(location.init, len=n)
- location.init = matrix(location.init, n, M)
- if ( .llocation == "loge") location.init = abs(location.init)
- etastart =
- cbind(theta2eta(location.init, .llocation, earg= .elocation))
- }
- }), list( .method.init=method.init,
- .dfmu.init=dfmu.init,
- .sinit=shrinkage.init, .digt=digt,
- .elocation=elocation, .Scale.arg=Scale.arg,
- .llocation=llocation, .kappa=kappa,
- .ilocation=ilocation ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- if ( .fittedMean) {
- kappamat = matrix(extra$kappa, extra$n, extra$M, byrow=TRUE)
- location = eta2theta(eta, .llocation, earg= .elocation)
- Scale = matrix(extra$Scale, extra$n, extra$M, byrow=TRUE)
- location + Scale * (1/kappamat - kappamat)
- } else {
- location = eta2theta(eta, .llocation, earg= .elocation)
- if (length(location) > extra$n)
- dimnames(location) = list(dimnames(eta)[[1]], extra$y.names)
- location
- }
- }, list( .elocation=elocation, .llocation=llocation,
- .fittedMean=fittedMean, .Scale.arg=Scale.arg,
- .kappa=kappa ))),
- last = eval(substitute(expression({
- misc$link = c(location= .llocation)
- misc$earg = list(location= .elocation)
- misc$expected = TRUE
- extra$kappa = misc$kappa = .kappa
- extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
- extra$Scale.arg = .Scale.arg
- misc$true.mu = .fittedMean # @fitted is not a true mu?
- extra$percentile = numeric(length(misc$kappa))
- location = as.matrix(location)
- for(ii in 1:length(misc$kappa))
- extra$percentile[ii] = 100 * weighted.mean(y <= location[,ii], w)
- }), list( .elocation=elocation, .llocation=llocation,
- .Scale.arg=Scale.arg, .fittedMean=fittedMean,
- .kappa=kappa ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- ymat = matrix(y, extra$n, extra$M)
- kappamat = matrix(extra$kappa, extra$n, extra$M, byrow=TRUE)
- location = eta2theta(eta, .llocation, earg= .elocation)
- Scale = matrix(extra$Scale, extra$n, extra$M, byrow=TRUE)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dalap(x=c(ymat), location=c(location),
- scale=c(Scale), kappa=c(kappamat), log=TRUE))
- }
- }, list( .elocation=elocation, .llocation=llocation,
- .Scale.arg=Scale.arg, .kappa=kappa ))),
- vfamily=c("alaplace1"),
- deriv=eval(substitute(expression({
- ymat = matrix(y, n, M)
- Scale = matrix(extra$Scale, extra$n, extra$M, byrow=TRUE)
- location = eta2theta(eta, .llocation, earg= .elocation)
- kappamat = matrix(extra$kappa, n, M, byrow=TRUE)
- zedd = abs(ymat-location) / Scale
- dl.dlocation = ifelse(ymat >= location, kappamat, 1/kappamat) *
- sqrt(2) * sign(ymat-location) / Scale
- dlocation.deta = dtheta.deta(location, .llocation, earg= .elocation)
- w * cbind(dl.dlocation * dlocation.deta)
- }), list( .Scale.arg=Scale.arg, .elocation=elocation,
- .llocation=llocation, .kappa=kappa ))),
- weight=eval(substitute(expression({
- d2l.dlocation2 = 2 / Scale^2
- wz = cbind(d2l.dlocation2 * dlocation.deta^2)
- w * wz
- }), list( .Scale.arg=Scale.arg,
- .elocation=elocation, .llocation=llocation ))))
-}
-
-
-
-
-
-
- alaplace3 = function(llocation="identity", lscale="loge", lkappa="loge",
- elocation=list(), escale=list(), ekappa=list(),
- ilocation=NULL, iscale=NULL, ikappa=1.0,
- method.init=1, zero=2:3) {
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (mode(lkappa) != "character" && mode(lkappa) != "name")
- lkappa = as.character(substitute(lkappa))
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 2) stop("argument 'method.init' must be 1 or 2")
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument 'zero'")
- if (length(iscale) && !is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument 'iscale'")
- if (!is.list(elocation)) elocation = list()
- if (!is.list(escale)) escale = list()
- if (!is.list(ekappa)) ekappa = list()
-
- new("vglmff",
- blurb = c("Three-parameter asymmetric Laplace distribution\n\n",
- "Links: ",
- namesof("location", llocation, earg=elocation), ", ",
- namesof("scale", lscale, earg=escale), ", ",
- namesof("kappa", lkappa, earg=ekappa),
- "\n", "\n",
- "Mean: location + scale * (1/kappa - kappa) / sqrt(2)",
- "\n",
- "Variance: Scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
- constraints=eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("location", .llocation, earg=.elocation, tag=FALSE),
- namesof("scale", .lscale, earg=.escale, tag=FALSE),
- namesof("kappa", .lkappa, earg=.ekappa, tag=FALSE))
- if (!length(etastart)) {
- kappa.init = if (length( .ikappa)) rep( .ikappa, len=n) else
- rep( 1.0, len=n)
- if ( .method.init == 1) {
- location.init = median(y)
- scale.init = sqrt(var(y) / 2)
- } else {
- location.init = y
- scale.init = sqrt(sum(w*abs(y-median(y ))) / (sum(w) *2))
- }
- location.init = if (length(.ilocation)) rep(.ilocation, len=n) else
- rep(location.init, len=n)
- scale.init = if (length(.iscale)) rep(.iscale, len=n) else
- rep(scale.init, len=n)
- etastart =
- cbind(theta2eta(location.init, .llocation, earg= .elocation),
- theta2eta(scale.init, .lscale, earg= .escale),
- theta2eta(kappa.init, .lkappa, earg= .ekappa))
- }
- }), list( .method.init=method.init,
- .elocation=elocation, .escale=escale, .ekappa=ekappa,
- .llocation=llocation, .lscale=lscale, .lkappa=lkappa,
- .ilocation=ilocation, .iscale=iscale, .ikappa=ikappa ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- location = eta2theta(eta[,1], .llocation, earg= .elocation)
- Scale = eta2theta(eta[,2], .lscale, earg= .escale)
- kappa = eta2theta(eta[,3], .lkappa, earg= .ekappa)
- location + Scale * (1/kappa - kappa) / sqrt(2)
- }, list( .elocation=elocation, .llocation=llocation,
- .escale=escale, .lscale=lscale,
- .ekappa=ekappa, .lkappa=lkappa ))),
- last = eval(substitute(expression({
- misc$link = c(location= .llocation, scale= .lscale, kappa= .lkappa)
- misc$earg = list(location= .elocation, scale= .escale, kappa= .ekappa)
- misc$expected = TRUE
- }), list( .elocation=elocation, .llocation=llocation,
- .escale=escale, .lscale=lscale,
- .ekappa=ekappa, .lkappa=lkappa ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- location = eta2theta(eta[,1], .llocation, earg= .elocation)
- Scale = eta2theta(eta[,2], .lscale, earg= .escale)
- kappamat = eta2theta(eta[,3], .lkappa, earg= .ekappa)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dalap(x=y, location=location,
- scale=Scale, kappa=kappamat, log=TRUE))
- }
- }, list( .elocation=elocation, .llocation=llocation,
- .escale=escale, .lscale=lscale,
- .ekappa=ekappa, .lkappa=lkappa ))),
- vfamily=c("alaplace3"),
- deriv=eval(substitute(expression({
- location = eta2theta(eta[,1], .llocation, earg= .elocation)
- Scale = eta2theta(eta[,2], .lscale, earg= .escale)
- kappa = eta2theta(eta[,3], .lkappa, earg= .ekappa)
- zedd = abs(y-location) / Scale
- dl.dlocation = sqrt(2) * ifelse(y >= location, kappa, 1/kappa) *
- sign(y-location) / Scale
- dl.dscale = sqrt(2) * ifelse(y >= location, kappa, 1/kappa) *
- zedd / Scale - 1 / Scale
- dl.dkappa = 1 / kappa - 2 * kappa / (1+kappa^2) -
- (sqrt(2) / Scale) *
- ifelse(y > location, 1, -1/kappa^2) * abs(y-location)
- dlocation.deta = dtheta.deta(location, .llocation, earg= .elocation)
- dscale.deta = dtheta.deta(Scale, .lscale, earg= .escale)
- dkappa.deta = dtheta.deta(kappa, .lkappa, earg= .ekappa)
- w * cbind(dl.dlocation * dlocation.deta,
- dl.dscale * dscale.deta,
- dl.dkappa * dkappa.deta)
- }), list( .escale=escale, .lscale=lscale,
- .elocation=elocation, .llocation=llocation,
- .ekappa=ekappa, .lkappa=lkappa ))),
- weight=eval(substitute(expression({
- d2l.dlocation2 = 2 / Scale^2
- d2l.dscale2 = 1 / Scale^2
- d2l.dkappa2 = 1 / kappa^2 + 4 / (1+kappa^2)^2
- d2l.dkappadloc = -sqrt(8) / ((1+kappa^2) * Scale)
- d2l.dkappadscale = -(1-kappa^2) / ((1+kappa^2) * kappa * Scale)
- wz = matrix(0, nrow=n, dimm(M))
- wz[,iam(1,1,M)] = d2l.dlocation2 * dlocation.deta^2
- wz[,iam(2,2,M)] = d2l.dscale2 * dscale.deta^2
- wz[,iam(3,3,M)] = d2l.dkappa2 * dkappa.deta^2
- wz[,iam(1,3,M)] = d2l.dkappadloc * dkappa.deta * dlocation.deta
- wz[,iam(2,3,M)] = d2l.dkappadscale * dkappa.deta * dscale.deta
- w * wz
- }), list( .escale=escale, .lscale=lscale,
- .elocation=elocation, .llocation=llocation ))))
-}
-
-
-
-dlaplace = function(x, location=0, scale=1, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
-
- logdensity = (-abs(x-location)/scale) - log(2*scale)
- if (log.arg) logdensity else exp(logdensity)
-}
-
-plaplace = function(q, location=0, scale=1) {
- if (!is.Numeric(scale, posit=TRUE))
- stop("argument 'scale' must be positive")
- zedd = (q-location) / scale
- L = max(length(q), length(location), length(scale))
- q = rep(q, len=L); location = rep(location, len=L); scale= rep(scale, len=L)
- ifelse(q < location, 0.5*exp(zedd), 1-0.5*exp(-zedd))
-}
-
-qlaplace = function(p, location=0, scale=1) {
- if (!is.Numeric(scale, posit=TRUE))
- stop("argument 'scale' must be positive")
- L = max(length(p), length(location), length(scale))
- p = rep(p, len=L); location = rep(location, len=L); scale= rep(scale, len=L)
- location - sign(p-0.5) * scale * log(2*ifelse(p < 0.5, p, 1-p))
-}
-
-rlaplace = function(n, location=0, scale=1) {
- if (!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
- stop("bad input for argument 'n'")
- if (!is.Numeric(scale, posit=TRUE)) stop("'scale' must be positive")
- location = rep(location, len=n); scale= rep(scale, len=n)
- r = runif(n)
- location - sign(r-0.5) * scale * log(2*ifelse(r < 0.5, r, 1-r))
-}
-
-
- laplace = function(llocation="identity", lscale="loge",
- elocation=list(), escale=list(),
- ilocation=NULL, iscale=NULL,
- method.init=1, zero=2) {
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 3) stop("argument 'method.init' must be 1 or 2 or 3")
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(elocation)) elocation = list()
- if (!is.list(escale)) escale = list()
- if (length(iscale) && !is.Numeric(iscale, posit=TRUE))
- stop("bad input for argument 'iscale'")
-
- new("vglmff",
- blurb = c("Two-parameter Laplace distribution\n\n",
- "Links: ",
- namesof("location", llocation, earg=elocation), ", ",
- namesof("scale", lscale, earg=escale),
- "\n", "\n",
- "Mean: location", "\n",
- "Variance: 2*scale^2"),
- constraints=eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("location", .llocation, earg=.elocation, tag=FALSE),
- namesof("scale", .lscale, earg=.escale, tag=FALSE))
- if (!length(etastart)) {
- if ( .method.init == 1) {
- location.init = median(y)
- scale.init = sqrt(var(y) / 2)
- } else if ( .method.init == 2) {
- location.init = weighted.mean(y, w)
- scale.init = sqrt(var(y) / 2)
- } else {
- location.init = median(y)
- scale.init = sqrt(sum(w*abs(y-median(y ))) / (sum(w) *2))
- }
- location.init = if (length(.ilocation)) rep(.ilocation, len=n) else
- rep(location.init, len=n)
- scale.init = if (length(.iscale)) rep(.iscale, len=n) else
- rep(scale.init, len=n)
- etastart =
- cbind(theta2eta(location.init, .llocation, earg= .elocation),
- theta2eta(scale.init, .lscale, earg= .escale))
- }
- }), list( .method.init=method.init,
- .elocation=elocation, .escale=escale,
- .llocation=llocation, .lscale=lscale,
- .ilocation=ilocation, .iscale=iscale ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- eta2theta(eta[,1], .llocation, earg= .elocation)
- }, list( .elocation=elocation, .llocation=llocation ))),
- last = eval(substitute(expression({
- misc$link = c(location= .llocation, scale= .lscale)
- misc$earg = list(location= .elocation, scale= .escale)
- misc$expected = TRUE
- misc$RegCondOK = FALSE # Save this for later
- }), list( .escale=escale, .lscale=lscale,
- .elocation=elocation, .llocation=llocation ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- location = eta2theta(eta[,1], .llocation, earg= .elocation)
- Scale = eta2theta(eta[,2], .lscale, earg= .escale)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dlaplace(x=y, location=location, scale=Scale, log=TRUE))
- }
- }, list( .escale=escale, .lscale=lscale,
- .elocation=elocation, .llocation=llocation ))),
- vfamily=c("laplace"),
- deriv=eval(substitute(expression({
- location = eta2theta(eta[,1], .llocation, earg= .elocation)
- Scale = eta2theta(eta[,2], .lscale, earg= .escale)
- zedd = abs(y-location) / Scale
- dl.dlocation = sign(y-location) / Scale
- dl.dscale = zedd / Scale - 1/Scale
- dlocation.deta = dtheta.deta(location, .llocation, earg= .elocation)
- dscale.deta = dtheta.deta(Scale, .lscale, earg= .escale)
- w * cbind(dl.dlocation * dlocation.deta, dl.dscale * dscale.deta)
- }), list( .escale=escale, .lscale=lscale,
- .elocation=elocation, .llocation=llocation ))),
- weight=eval(substitute(expression({
- d2l.dlocation2 = d2l.dscale2 = 1 / Scale^2
- wz = matrix(0, nrow=n, ncol=M) # diagonal
- wz[,iam(1,1,M)] = d2l.dlocation2 * dlocation.deta^2
- wz[,iam(2,2,M)] = d2l.dscale2 * dscale.deta^2
- w * wz
- }), list( .escale=escale, .lscale=lscale,
- .elocation=elocation, .llocation=llocation ))))
-}
-
-
-
-fff.control <- function(save.weight=TRUE, ...)
-{
- list(save.weight=save.weight)
-}
-
- fff = function(link="loge", earg=list(),
- idf1=NULL, idf2=NULL, nsimEIM=100, # ncp=0,
- method.init=1, zero=NULL) {
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
- method.init > 2) stop("argument 'method.init' must be 1 or 2")
- if (length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(earg)) earg = list()
- if (!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 10)
- stop("'nsimEIM' should be an integer greater than 10")
- ncp = 0
- if (any(ncp != 0)) warning("not sure about ncp != 0 wrt dl/dtheta")
-
- new("vglmff",
- blurb = c("F-distribution\n\n",
- "Links: ",
- namesof("df1", link, earg=earg), ", ",
- namesof("df2", link, earg=earg),
- "\n", "\n",
- "Mean: df2/(df2-2) provided df2>2 and ncp=0", "\n",
- "Variance: ",
- "2*df2^2*(df1+df2-2)/(df1*(df2-2)^2*(df2-4)) ",
- "provided df2>4 and ncp=0"),
- constraints=eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names = c(namesof("df1", .link, earg=.earg, tag=FALSE),
- namesof("df2", .link, earg=.earg, tag=FALSE))
- if (!length(etastart)) {
- if ( .method.init == 1) {
- df2.init = b = 2*mean(y) / (mean(y)-1)
- df1.init = 2*b^2*(b-2)/(var(y)*(b-2)^2 * (b-4) - 2*b^2)
- if (df2.init < 4) df2.init = 5
- if (df1.init < 2) df1.init = 3
- } else {
- df2.init = b = 2*median(y) / (median(y)-1)
- summy = summary(y)
- var.est = summy[5] - summy[2]
- df1.init = 2*b^2*(b-2)/(var.est*(b-2)^2 * (b-4) - 2*b^2)
- }
- df1.init = if (length(.idf1)) rep(.idf1, len=n) else
- rep(df1.init, len=n)
- df2.init = if (length(.idf2)) rep(.idf2, len=n) else rep(1, len=n)
- etastart = cbind(theta2eta(df1.init, .link, earg= .earg),
- theta2eta(df2.init, .link, earg= .earg))
- }
- }), list( .method.init=method.init, .idf1=idf1, .earg=earg,
- .idf2=idf2, .link=link ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- df2 = eta2theta(eta[,2], .link, earg= .earg)
- ans = df2 * NA
- ans[df2>2] = df2[df2>2] / (df2[df2>2]-2)
- ans
- }, list( .link=link, .earg=earg ))),
- last = eval(substitute(expression({
- misc$link = c(df1= .link, df2= .link)
- misc$earg = list(df1= .earg, df2= .earg)
- misc$nsimEIM = .nsimEIM
- misc$ncp = .ncp
- }), list( .link=link, .earg=earg,
- .ncp=ncp,
- .nsimEIM=nsimEIM ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
- df1 = eta2theta(eta[,1], .link, earg= .earg)
- df2 = eta2theta(eta[,2], .link, earg= .earg)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * df(x=y, df1=df1, df2=df2, ncp= .ncp, log=TRUE))
- }
- }, list( .link=link, .earg=earg, .ncp=ncp ))),
- vfamily=c("fff"),
- deriv=eval(substitute(expression({
- df1 = eta2theta(eta[,1], .link, earg= .earg)
- df2 = eta2theta(eta[,2], .link, earg= .earg)
- dl.ddf1 = 0.5*digamma(0.5*(df1+df2)) + 0.5 + 0.5*log(df1/df2) +
- 0.5*log(y) - 0.5*digamma(0.5*df1) -
- 0.5*(df1+df2)*(y/df2) / (1 + df1*y/df2) -
- 0.5*log1p(df1*y/df2)
- dl.ddf2 = 0.5*digamma(0.5*(df1+df2)) - 0.5*df1/df2 -
- 0.5*digamma(0.5*df2) -
- 0.5*(df1+df2) * (-df1*y/df2^2) / (1 + df1*y/df2) -
- 0.5*log1p(df1*y/df2)
- ddf1.deta = dtheta.deta(df1, .link, earg= .earg)
- ddf2.deta = dtheta.deta(df2, .link, earg= .earg)
- dthetas.detas = cbind(ddf1.deta, ddf2.deta)
- w * dthetas.detas * cbind(dl.ddf1, dl.ddf2)
- }), list( .link=link, .earg=earg ))),
- weight=eval(substitute(expression({
- run.varcov = 0
- ind1 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
- for(ii in 1:( .nsimEIM )) {
- ysim = rf(n=n, df1=df1, df2=df2)
- dl.ddf1 = 0.5*digamma(0.5*(df1+df2)) + 0.5 + 0.5*log(df1/df2) +
- 0.5*log(ysim) - 0.5*digamma(0.5*df1) -
- 0.5*(df1+df2)*(ysim/df2) / (1 + df1*ysim/df2) -
- 0.5*log1p(df1*ysim/df2)
- dl.ddf2 = 0.5*digamma(0.5*(df1+df2)) - 0.5*df1/df2 -
- 0.5*digamma(0.5*df2) -
- 0.5*(df1+df2) * (-df1*ysim/df2^2)/(1 + df1*ysim/df2) -
- 0.5*log1p(df1*ysim/df2)
- rm(ysim)
- temp3 = cbind(dl.ddf1, dl.ddf2)
- run.varcov = ((ii-1) * run.varcov +
- temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
- }
- wz = if (intercept.only)
- matrix(colMeans(run.varcov),
- n, ncol(run.varcov), byrow=TRUE) else run.varcov
-
- wz = w * wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
- wz
- }), list( .link=link, .earg=earg, .nsimEIM=nsimEIM,
- .ncp = ncp ))))
-}
-
-
-
-
- hyperg = function(N=NULL, D=NULL,
- lprob="logit", earg=list(),
- iprob=NULL) {
- if (mode(lprob) != "character" && mode(lprob) != "name")
- lprob = as.character(substitute(lprob))
- inputN = is.Numeric(N, positive=TRUE)
- inputD = is.Numeric(D, positive=TRUE)
- if (inputD && inputN)
- stop("only one of 'N' and 'D' is to be inputted")
- if (!inputD && !inputN)
- stop("one of 'N' and 'D' needs to be inputted")
- if (!is.list(earg)) earg = list()
-
- new("vglmff",
- blurb = c("Hypergeometric distribution\n\n",
- "Link: ",
- namesof("prob", lprob, earg=earg), "\n",
- "Mean: D/N\n"),
- initialize=eval(substitute(expression({
- NCOL = function (x)
- if (is.array(x) && length(dim(x)) > 1 ||
- is.data.frame(x)) ncol(x) else as.integer(1)
- if (NCOL(y) == 1) {
- if (is.factor(y)) y = y != levels(y)[1]
- nn = rep(1, len=n)
- if (!all(y >= 0 & y <= 1))
- stop("response values must be in [0, 1]")
- mustart = (0.5 + w * y) / (1 + w)
- no.successes = w * y
- if (any(abs(no.successes - round(no.successes)) > 0.001))
- stop("Number of successes must be integer-valued")
- } else if (NCOL(y) == 2) {
- if (any(abs(y - round(y)) > 0.001))
- stop("Count data must be integer-valued")
- nn = y[,1] + y[,2]
- y = ifelse(nn > 0, y[,1]/nn, 0)
- w = w * nn
- mustart = (0.5 + nn * y) / (1 + nn)
- mustart[mustart >= 1] = 0.95
- } else
- stop("Response not of the right form")
-
- predictors.names = namesof("prob", .lprob, earg=.earg, tag=FALSE)
- extra$Nvector = .N
- extra$Dvector = .D
- extra$Nunknown = length(extra$Nvector) == 0
- if (!length(etastart)) {
- init.prob = if (length( .iprob)) rep( .iprob, len=n) else mustart
- etastart = matrix(init.prob, n, ncol(cbind(y )))
-
- }
- }), list( .lprob=lprob, .earg=earg, .N=N, .D=D, .iprob=iprob ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- eta2theta(eta, .lprob, earg= .earg)
- }, list( .lprob=lprob, .earg=earg ))),
- last = eval(substitute(expression({
- misc$link = c("prob"= .lprob)
- misc$earg = list("prob"= .earg)
- misc$Dvector = .D
- misc$Nvector = .N
- }), list( .N=N, .D=D, .lprob=lprob, .earg=earg ))),
- link=eval(substitute(function(mu, extra=NULL) {
- theta2eta(mu, .lprob, earg= .earg)
- }, list( .lprob=lprob, .earg=earg ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- N = extra$Nvector
- Dvec = extra$Dvector
- prob = mu
- yvec = w * y
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- if (extra$Nunknown) {
- tmp12 = Dvec * (1-prob) / prob
- sum(lgamma(1+tmp12) + lgamma(1+Dvec/prob-w) -
- lgamma(1+tmp12-w+yvec) - lgamma(1+Dvec/prob))
- } else {
-
-
- sum(lgamma(1+N*prob) + lgamma(1+N*(1-prob)) -
- lgamma(1+N*prob-yvec) - lgamma(1+N*(1-prob) -w + yvec))
- }
- }
- }, list( .lprob=lprob, .earg=earg ))),
- vfamily=c("hyperg"),
- deriv=eval(substitute(expression({
- prob = mu # equivalently, eta2theta(eta, .lprob, earg= .earg)
- dprob.deta = dtheta.deta(prob, .lprob, earg= .earg)
- Dvec = extra$Dvector
- Nvec = extra$Nvector
- yvec = w * y
- if (extra$Nunknown) {
- tmp72 = -Dvec / prob^2
- tmp12 = Dvec * (1-prob) / prob
- dl.dprob = tmp72 * (digamma(1 + tmp12) + digamma(1 + Dvec/prob -w) -
- digamma(1 + tmp12-w+yvec) - digamma(1 + Dvec/prob))
- } else {
- dl.dprob = Nvec * (digamma(1+Nvec*prob) - digamma(1+Nvec*(1-prob)) -
- digamma(1+Nvec*prob-yvec) + digamma(1+Nvec*(1-prob)-w+yvec))
- }
- w * dl.dprob * dprob.deta
- }), list( .lprob=lprob, .earg=earg ))),
- weight=eval(substitute(expression({
- if (extra$Nunknown) {
- tmp722 = tmp72^2
- tmp13 = 2*Dvec / prob^3
- d2l.dprob2 = tmp722 * (trigamma(1 + tmp12) +
- trigamma(1 + Dvec/prob - w) -
- trigamma(1 + tmp12 - w + yvec) -
- trigamma(1 + Dvec/prob)) +
- tmp13 * (digamma(1 + tmp12) +
- digamma(1 + Dvec/prob - w) -
- digamma(1 + tmp12 - w + yvec) -
- digamma(1 + Dvec/prob))
- } else {
- d2l.dprob2 = Nvec^2 * (trigamma(1+Nvec*prob) +
- trigamma(1+Nvec*(1-prob)) -
- trigamma(1+Nvec*prob-yvec) -
- trigamma(1+Nvec*(1-prob)-w+yvec))
- }
- d2prob.deta2 = d2theta.deta2(prob, .lprob, earg= .earg)
-
- wz = -(dprob.deta^2) * d2l.dprob2
- wz = w * wz
- wz[wz < .Machine$double.eps] = .Machine$double.eps
- wz
- }), list( .lprob=lprob, .earg=earg ))))
-}
-
-
-
-dbenini = function(x, shape, y0, log=FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
-
- N = max(length(x), length(shape), length(y0))
- x = rep(x, len=N); shape = rep(shape, len=N); y0 = rep(y0, len=N);
-
- logdensity = rep(log(0), len=N)
- xok = (x > y0)
- tempxok = log(x[xok]/y0[xok])
- logdensity[xok] = log(2*shape[xok]) - shape[xok] * tempxok^2 +
- log(tempxok) - log(x[xok])
- if (log.arg) logdensity else exp(logdensity)
-}
-
-pbenini = function(q, shape, y0) {
- if (!is.Numeric(q)) stop("bad input for argument 'q'")
- if (!is.Numeric(shape, posit=TRUE)) stop("bad input for argument 'shape'")
- if (!is.Numeric(y0, posit=TRUE)) stop("bad input for argument 'y0'")
- N = max(length(q), length(shape), length(y0))
- q = rep(q, len=N); shape = rep(shape, len=N); y0 = rep(y0, len=N);
- ans = y0 * 0
- ok = q > y0
- ans[ok] = -expm1(-shape[ok] * (log(q[ok]/y0[ok]))^2)
- ans
-}
-
-qbenini = function(p, shape, y0) {
- if (!is.Numeric(p, posit=TRUE) || any(p >= 1))
- stop("bad input for argument 'p'")
- if (!is.Numeric(shape, posit=TRUE)) stop("bad input for argument 'shape'")
- if (!is.Numeric(y0, posit=TRUE)) stop("bad input for argument 'y0'")
- y0 * exp(sqrt(-log1p(-p) / shape))
-}
-
-rbenini = function(n, shape, y0) {
- if (!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
- stop("bad input for argument 'n'")
- if (!is.Numeric(shape, posit=TRUE)) stop("bad input for argument 'shape'")
- if (!is.Numeric(y0, posit=TRUE)) stop("bad input for argument 'y0'")
- y0 * exp(sqrt(-log(runif(n)) / shape))
-}
-
benini = function(y0=stop("argument 'y0' must be specified"),
                  lshape="loge", earg=list(),
                  ishape=NULL, method.init=1) {
    # VGAM family function for the 1-parameter Benini distribution with
    # known scale parameter y0 (all responses must exceed y0).
    # 'lshape' is the link for the shape parameter, 'ishape' an optional
    # initial value, and 'method.init' (1 = mean-based, 2 = median-based)
    # selects the self-starting rule.
    if (mode(lshape) != "character" && mode(lshape) != "name")
        lshape = as.character(substitute(lshape))
    if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
       method.init > 2) stop("argument 'method.init' must be 1 or 2")
    if (!is.Numeric(y0, allow=1, posit=TRUE))
        stop("bad input for argument 'y0'")
    if (!is.list(earg)) earg = list()

    new("vglmff",
    blurb = c("1-parameter Benini distribution\n\n",
            "Link: ",
            namesof("shape", lshape, earg=earg),
            "\n", "\n"),
    initialize=eval(substitute(expression({
        # Univariate response only; shape is initialized by matching sample
        # quantiles against the closed-form Benini quantile function.
        if (ncol(cbind(y)) != 1)
            stop("response must be a vector or a one-column matrix")
        predictors.names = c(namesof("shape", .lshape, earg=.earg, tag=FALSE))
        extra$y0 = .y0
        if (min(y) <= extra$y0) stop("argument 'y0' is too large")
        if (!length(etastart)) {
            probs = (1:3) / 4
            qofy= quantile(rep(y, times=w), probs=probs) # fails if w != integer
            if ( .method.init == 1) {
                shape.init = mean(-log1p(-probs) / (log(qofy))^2)
            } else {
                shape.init = median(-log1p(-probs) / (log(qofy))^2)
            }
            shape.init = if (length(.ishape)) rep(.ishape, len=n) else
                rep(shape.init, len=n)
            etastart = cbind(theta2eta(shape.init, .lshape, earg= .earg))
        }
    }), list( .method.init=method.init, .ishape=ishape, .lshape=lshape, .earg=earg,
              .y0=y0 ))),
    inverse=eval(substitute(function(eta, extra=NULL) {
        # Fitted value: the distribution mean, expressed via incomplete
        # gamma functions (pgamma).
        shape = eta2theta(eta, .lshape, earg= .earg)
        temp = 1/(4*shape)
        extra$y0 * exp(temp) *
        ((sqrt(pi) * (1 - pgamma(temp, 0.5 ))) / (2*sqrt(shape)) +
         1 - pgamma(temp, 1))
    }, list( .lshape=lshape, .earg=earg ))),
    last = eval(substitute(expression({
        misc$link = c(shape= .lshape)
        misc$earg = list(shape= .earg )
    }), list( .lshape=lshape, .earg=earg ))),
    loglikelihood=eval(substitute(
        function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
        shape = eta2theta(eta, .lshape, earg= .earg)
        y0 = extra$y0
        if (residuals) stop("loglikelihood residuals not implemented yet") else {
            sum(w * dbenini(x=y, shape=shape, y0=y0, log=TRUE))
        }
    }, list( .lshape=lshape, .earg=earg ))),
    vfamily=c("benini"),
    deriv=eval(substitute(expression({
        # Score: dl/dshape = 1/shape - (log(y/y0))^2, chained with
        # dshape/deta for the chosen link.
        shape = eta2theta(eta, .lshape, earg= .earg)
        y0 = extra$y0
        dl.dshape = 1/shape - (log(y/y0))^2
        dshape.deta = dtheta.deta(shape, .lshape, earg= .earg)
        w * dl.dshape * dshape.deta
    }), list( .lshape=lshape, .earg=earg ))),
    weight=eval(substitute(expression({
        # Expected information: E[-d2l/dshape^2] = 1/shape^2.
        d2l.dshape2 = 1 / shape^2
        wz = d2l.dshape2 * dshape.deta^2
        w * wz
    }), list( .lshape=lshape, .earg=earg ))))
}
-
-
-
-
dpolono = function(x, meanlog=0, sdlog=1, bigx=Inf, ...) {
    # Density of the Poisson-lognormal distribution, computed by numerical
    # integration over the mixing distribution; for x >= bigx an asymptotic
    # approximation is used instead (faster, and avoids integration failure
    # at large counts).  Extra arguments in '...' are passed to integrate().
    if (!is.Numeric(x)) stop("bad input for argument 'x'")
    if (!is.Numeric(meanlog)) stop("bad input for argument 'meanlog'")
    if (!is.Numeric(sdlog, posit=TRUE)) stop("bad input for argument 'sdlog'")
    if (length(bigx) != 1)
        stop("bad input for argument 'bigx'")
    if (bigx < 10)
        warning("argument 'bigx' is probably too small")
    N = max(length(x), length(meanlog), length(sdlog))
    x = rep(x, len=N); meanlog = rep(meanlog, len=N); sdlog = rep(sdlog, len=N)
    # Non-integer or negative x values keep density 0.
    ans = x * 0
    integrand = function(t, x, meanlog, sdlog)
        exp(t*x - exp(t) - 0.5*((t-meanlog)/sdlog)^2)
    for(ii in 1:N) {
        if (x[ii] == round(x[ii]) && x[ii] >= 0) {
            if (x[ii] >= bigx) {
                # Asymptotic (large-x) approximation to the density.
                zedd = (log(x[ii])-meanlog[ii]) / sdlog[ii]
                temp = 1 + (zedd^2 + log(x[ii]) - meanlog[ii] -
                       1) / (2*x[ii]*(sdlog[ii])^2)
                ans[ii] = temp * exp(-0.5*zedd^2)/(sqrt(2*pi)*sdlog[ii] * x[ii])
            } else {
                temp = integrate(f=integrand, lower=-Inf, upper=Inf, x=x[ii],
                                 meanlog=meanlog[ii], sdlog=sdlog[ii], ...)
                if (temp$message == "OK") {
                    ans[ii] = temp$value / (sqrt(2*pi) * sdlog[ii] *
                              exp(lgamma(x[ii]+1)))
                } else {
                    # Integration failure for this observation: warn, NA out.
                    warning("could not integrate (numerically) observation ",ii)
                    ans[ii] = NA
                }
            }
        }
    }
    ans
}
-
-
rpolono = function(n, meanlog=0, sdlog=1) {
    # Poisson-lognormal random counts: draw the Poisson intensity from a
    # lognormal, then the count.  The two RNG calls are made in the same
    # order as before, so results under a fixed seed are unchanged.
    intensity = rlnorm(n=n, meanlog=meanlog, sdlog=sdlog)
    rpois(n=n, lambda=intensity)
}
-
-
-
-
-
-
-
-
-
-
-
dtriangle = function(x, theta, lower=0, upper=1, log = FALSE) {
    # Density of the triangular distribution on (lower, upper) with
    # mode theta; returns log-density when log = TRUE.
    if (!is.logical(log.arg <- log))
        stop("bad input for argument 'log'")
    rm(log)

    nn = max(length(x), length(theta), length(lower), length(upper))
    x = rep(x, len=nn)
    theta = rep(theta, len=nn)
    lower = rep(lower, len=nn)
    upper = rep(upper, len=nn)

    # Work on the log scale; points outside the support keep log(0) = -Inf.
    logdens = rep(log(0), len=nn)
    left.scale = ((upper-lower)*(theta-lower))
    right.scale = ((upper-lower)*(upper-theta))
    on.left = (lower < x) & (x <= theta)
    on.right = (theta <= x) & (x < upper)
    logdens[on.left] =
        log(2 * (x[on.left]-lower[on.left]) / left.scale[on.left])
    logdens[on.right] =
        log(2 * (upper[on.right]-x[on.right]) / right.scale[on.right])
    # Invalid parameter combinations propagate as NaN.
    logdens[lower >= upper] = NaN
    logdens[lower > theta] = NaN
    logdens[upper < theta] = NaN
    if (log.arg) logdens else exp(logdens)
}
-
-
rtriangle = function(n, theta, lower=0, upper=1) {
    # Random variates from the triangular distribution: choose the left or
    # right half of the triangle with probability F(theta), then apply the
    # appropriate sqrt-of-uniform transform.
    if (!is.Numeric(n, integ=TRUE,allow=1)) stop("bad input for argument 'n'")
    if (!is.Numeric(theta)) stop("bad input for argument 'theta'")
    if (!is.Numeric(lower)) stop("bad input for argument 'lower'")
    if (!is.Numeric(upper)) stop("bad input for argument 'upper'")
    if (!all(lower < theta & theta < upper))
        stop("lower < theta < upper values are required")
    lower = rep(lower, len=n)
    upper = rep(upper, len=n)
    theta = rep(theta, len=n)
    # Three runif() draws in the same order as before, so results under a
    # fixed seed are unchanged.
    root1 = sqrt(runif(n))
    root2 = sqrt(runif(n))
    use.left = runif(n) < (theta-lower)/(upper-lower)
    ifelse(use.left,
           lower + (theta-lower)*root1,
           upper - (upper-theta)*root2)
}
-
-
qtriangle = function(p, theta, lower=0, upper=1) {
    # Quantile function of the triangular distribution on (lower, upper)
    # with mode theta.  Inverts the piecewise-quadratic CDF in closed form.
    #
    # Fix: accept the boundary probability p = 0.  The old check,
    # is.Numeric(p, posit=TRUE), rejected p = 0 even though the formula
    # below handles it correctly (returning 'lower'); the explicit range
    # check on [0, 1] below still rejects genuinely bad input.
    if (!is.Numeric(p)) stop("bad input for argument 'p'")
    if (!is.Numeric(theta)) stop("bad input for argument 'theta'")
    if (!is.Numeric(lower)) stop("bad input for argument 'lower'")
    if (!is.Numeric(upper)) stop("bad input for argument 'upper'")
    if (!all(lower < theta & theta < upper))
        stop("lower < theta < upper values are required")

    N = max(length(p), length(theta), length(lower), length(upper))
    p = rep(p, len=N); lower = rep(lower, len=N); upper = rep(upper, len=N);
    theta = rep(theta, len=N)

    bad = (p < 0) | (p > 1)
    if (any(bad))
        stop("bad input for 'p'")

    # Left branch: p <= F(theta) = (theta-lower)/(upper-lower).
    Neg = (p <= (theta - lower)/(upper - lower))
    ans = as.numeric(NA) * p
    temp1 = p * (upper-lower) * (theta-lower)
    ans[ Neg] = lower[ Neg] + sqrt(temp1[ Neg])

    # Right branch: solve the quadratic for the falling side of the CDF and
    # keep the root that lies in [0, 1].
    Pos = (p >= (theta - lower)/(upper - lower))
    if (any(Pos)) {
        pstar = (p - (theta-lower)/(upper-lower)) / (1 -
                (theta-lower)/(upper-lower))
        qstar = cbind(1 - sqrt(1-pstar), 1 + sqrt(1-pstar))
        qstar = qstar[Pos,,drop=FALSE]
        qstar = ifelse(qstar[,1] >= 0 & qstar[,1] <= 1, qstar[,1], qstar[,2])
        ans[Pos] = theta[Pos] + qstar * (upper-theta)[Pos]
    }
    ans
}
-
-
ptriangle = function(q, theta, lower=0, upper=1) {
    # CDF of the triangular distribution on (lower, upper) with mode theta.
    if (!is.Numeric(q)) stop("bad input for argument 'q'")
    if (!is.Numeric(theta)) stop("bad input for argument 'theta'")
    if (!is.Numeric(lower)) stop("bad input for argument 'lower'")
    if (!is.Numeric(upper)) stop("bad input for argument 'upper'")
    if (!all(lower < theta & theta < upper))
        stop("lower < theta < upper values are required")

    nn = max(length(q), length(theta), length(lower), length(upper))
    q = rep(q, len=nn)
    theta = rep(theta, len=nn)
    lower = rep(lower, len=nn)
    upper = rep(upper, len=nn)

    # Anything at or below 'lower' has probability 0.
    ans = q * 0

    # Rising (quadratic) part of the CDF: lower <= q <= theta.
    left.frac = (q - lower)^2 / ((upper-lower) * (theta-lower))
    is.left = (lower <= q & q <= theta)
    ans[is.left] = left.frac[is.left]

    # Falling part: theta <= q <= upper.
    is.right = (theta <= q & q <= upper)
    sfrac = (q - theta) / (upper-theta)
    ans[is.right] = ((theta-lower)/(upper-lower))[is.right] +
        (sfrac * (2-sfrac) * (upper-theta) / (upper - lower))[is.right]
    ans[q >= upper] = 1
    ans
}
-
-
-
triangle = function(lower=0, upper=1,
                    link="elogit", earg = if (link == "elogit")
                    list(min = lower, max = upper) else list(), itheta=NULL)
{
    # VGAM family function for the triangular distribution: estimates the
    # mode 'theta' on a known support (lower, upper).  The default 'elogit'
    # link maps theta from (lower, upper) onto the whole real line.
    # 'itheta' is an optional initial value for the mode.
    if (!is.Numeric(lower)) stop("bad input for argument 'lower'")
    if (!is.Numeric(upper)) stop("bad input for argument 'upper'")
    if (!all(lower < upper))
        stop("lower < upper values are required")
    if (length(itheta) && !is.Numeric(itheta))
        stop("bad input for 'itheta'")

    if (mode(link) != "character" && mode(link) != "name")
        link = as.character(substitute(link))
    if (!is.list(earg)) earg = list()

    new("vglmff",
    blurb = c(
            "Triangle distribution\n\n",
            "Link: ",
            namesof("theta", link, earg=earg)),
    initialize=eval(substitute(expression({
        y = as.numeric(y)
        if (ncol(cbind(y)) != 1)
            stop("response must be a vector or a one-column matrix")
        extra$lower = rep( .lower, len=n)
        extra$upper = rep( .upper, len=n)

        # All responses must lie strictly inside the support.
        if (any(y <= extra$lower | y >= extra$upper))
            stop("some y values in [lower,upper] detected")
        predictors.names = namesof("theta", .link, earg= .earg, tag=FALSE)
        if (!length(etastart)) {
            # Default initial mode: the weighted sample mean.
            Theta.init = if (length( .itheta)) .itheta else {
                weighted.mean(y, w)
            }
            Theta.init = rep(Theta.init, length=n)
            etastart = theta2eta(Theta.init, .link, earg= .earg )
        }
    }), list( .link=link, .earg=earg, .itheta=itheta,
              .upper=upper, .lower=lower ))),
    inverse=eval(substitute(function(eta, extra=NULL) {
        # Fitted value: the mean of the triangular distribution as a
        # function of the mode theta.
        Theta = eta2theta(eta, .link, earg= .earg )
        lower = extra$lower
        upper = extra$upper
        mu = ((Theta^3 / 3 - lower * Theta^2 / 2 +
              lower^3 / 6) / (Theta - lower) +
             ((Theta^3 / 3 - upper * Theta^2 / 2 +
              upper^3 / 6) / (upper - Theta))) * 2 / (upper-lower)
        mu
    }, list( .link=link, .earg=earg ))),
    last = eval(substitute(expression({
        misc$link = c(theta = .link)
        misc$earg = list(theta = .earg)
        misc$expected = TRUE
    }), list( .link=link, .earg=earg ))),
    loglikelihood=eval(substitute(
        function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
        Theta = eta2theta(eta, .link, earg= .earg )
        lower = extra$lower
        upper = extra$upper
        if (residuals) stop("loglikelihood residuals not implemented yet") else {
            sum(w * dtriangle(x=y, theta=Theta, lower=lower,
                              upper=upper, log=TRUE))
        }
    }, list( .link=link, .earg=earg ))),
    vfamily=c("triangle"),
    deriv=eval(substitute(expression({
        # Score: dl/dtheta is -1/(theta-lower) for observations left of the
        # mode and 1/(upper-theta) right of it; y == theta contributes zero.
        Theta = eta2theta(eta, .link, earg= .earg )
        dTheta.deta = dtheta.deta(Theta, .link, earg= .earg )
        pos = y > Theta
        neg = y < Theta
        lower = extra$lower
        upper = extra$upper
        dl.dTheta = 0 * y
        dl.dTheta[neg] = -1 / (Theta[neg]-lower[neg])
        dl.dTheta[pos] = 1 / (upper[pos]-Theta[pos])
        dl.dTheta * dTheta.deta
    }), list( .link=link, .earg=earg ))),
    weight=eval(substitute(expression({
        # Expected information for theta.
        d2l.dTheta2 = 1 / ((Theta-lower)*(upper-Theta))
        wz = dTheta.deta^2 * d2l.dTheta2
        w * wz
    }), list( .link=link, .earg=earg ))))
}
-
-
-
-
-
-
-
adjust0.loglaplace1 = function(ymat, y, w, rep0) {
    # Replace nonpositive entries of ymat by a small positive value so that
    # a log-type link can be applied: half the smallest positive response,
    # capped at rep0.  ('w' is unused; kept for a call signature shared
    # with related adjusters.)
    smallest.pos = range(y[y > 0])[1]
    ymat[ymat <= 0] = min(smallest.pos / 2, rep0)
    ymat
}
-
-
loglaplace1.control <- function(maxit=300, ...)
{
    # Control parameters for loglaplace1() fits: caps the number of IRLS
    # iterations.  Extra arguments in '...' are accepted but ignored.
    ans <- list(maxit = maxit)
    ans
}
-
loglaplace1 = function(tau = NULL,
                       llocation="loge",
                       elocation=list(),
                       ilocation=NULL,
                       kappa = sqrt(tau/(1-tau)),
                       Scale.arg=1,
                       shrinkage.init=0.95, parallelLocation=FALSE, digt=4,
                       dfmu.init = 3,
                       rep0 = 0.5, # 0.0001,
                       minquantile = 0, maxquantile = Inf,
                       method.init=1, zero=NULL) {
    # VGAM family function for one-parameter log-Laplace quantile regression
    # of a positive response.  Quantiles are specified either by 'tau' or
    # equivalently by 'kappa' = sqrt(tau/(1-tau)); the asymmetric-Laplace
    # scale is fixed at 'Scale.arg'.  'rep0' replaces zero responses so the
    # log-type link can be applied; fitted quantiles are clamped to
    # [minquantile, maxquantile].

    if (length(minquantile) != 1)
        stop("bad input for argument 'minquantile'")
    if (length(maxquantile) != 1)
        stop("bad input for argument 'maxquantile'")
    if (!is.Numeric(rep0, posit=TRUE, allow=1) || rep0 > 1)
        stop("bad input for argument 'rep0'")
    if (!is.Numeric(kappa, posit=TRUE))
        stop("bad input for argument 'kappa'")
    if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
        stop("arguments 'kappa' and 'tau' do not match")
    if (mode(llocation) != "character" && mode(llocation) != "name")
        llocation = as.character(substitute(llocation))
    if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
       method.init > 4) stop("argument 'method.init' must be 1, 2 or ... 4")
    if (!is.list(elocation)) elocation = list()
    if (!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
       shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
    if (length(zero) &&
       !(is.Numeric(zero, integer=TRUE, posit=TRUE) || is.character(zero )))
        stop("bad input for argument 'zero'")
    if (!is.Numeric(Scale.arg, posit=TRUE))
        stop("bad input for argument 'Scale.arg'")
    if (!is.logical(parallelLocation) || length(parallelLocation) != 1)
        stop("bad input for argument 'parallelLocation'")
    # fittedMean is hard-wired off; the fitted values are quantiles,
    # not the distribution mean.
    fittedMean = FALSE
    if (!is.logical(fittedMean) || length(fittedMean) != 1)
        stop("bad input for argument 'fittedMean'")

    # Build a human-readable "inverse link" string for the blurb by editing
    # the last character of the namesof() string.
    mystring0 = namesof("location", llocation, earg=elocation)
    mychars = substring(mystring0, fi=1:nchar(mystring0), la=1:nchar(mystring0))
    mychars[nchar(mystring0)] = ", inverse=TRUE)"
    mystring1 = paste(mychars, collapse="")

    new("vglmff",
    blurb = c("One-parameter ",
            if (llocation=="loge") "log-Laplace" else c(llocation, "-Laplace"),
            " distribution\n\n",
            "Links: ", mystring0, "\n", "\n",
            "Quantiles: ", mystring1),
    constraints=eval(substitute(expression({
        constraints = cm.vgam(matrix(1,M,1), x, .parallelLocation,
                              constraints, intercept=FALSE)
        constraints = cm.zero.vgam(constraints, x, .zero, M)
    }), list( .parallelLocation=parallelLocation,
              .Scale.arg=Scale.arg, .zero=zero ))),
    initialize=eval(substitute(expression({
        # One linear predictor per requested quantile; Scale.arg and kappa
        # are recycled to a common length M.
        extra$M = M = max(length( .Scale.arg ), length( .kappa )) # Recycle
        extra$Scale = rep( .Scale.arg, length=M)
        extra$kappa = rep( .kappa, length=M)
        extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
        if (ncol(y <- cbind(y)) != 1)
            stop("response must be a vector or a one-column matrix")
        extra$n = n
        extra$y.names = y.names = paste("tau=", round(extra$tau, dig=.digt),
                                        sep="")
        extra$individual = FALSE
        predictors.names = namesof(paste("quantile(", y.names, ")", sep=""),
                                   .llocation, earg=.elocation, tag=FALSE)


        # Disabled sanity checks on the proportion of zero responses;
        # kept (behind if (FALSE)) for reference.
        if (FALSE) {
        if (min(y) < 0) stop("negative response values detected")
        if ((prop.0. <- weighted.mean(1*(y == 0), w)) >= min(extra$tau))
            stop("sample proportion of 0s == ", round(prop.0., dig=4),
                 " > minimum 'tau' value. Choose larger values for 'tau'.")
        if (.rep0 == 0.5 &&
           (ave.tau <- (weighted.mean(1*(y <= 0), w) +
                        weighted.mean(1*(y <= 1), w))/2) >= min(extra$tau))
             warning("the minimum 'tau' value should be greater than ",
                     round(ave.tau, dig=4))
        }

        if (!length(etastart)) {
            # Four initialization rules: sample quantiles, weighted mean,
            # median, or a vector spline smooth of y on the first covariate.
            if ( .method.init == 1) {
                location.init = quantile(rep(y, w), probs= extra$tau) + 1/16
            } else if ( .method.init == 2) {
                location.init = weighted.mean(y, w)
            } else if ( .method.init == 3) {
                location.init = median(y)
            } else if ( .method.init == 4) {
                fit500 = vsmooth.spline(x=x[,min(ncol(x),2)], y=y, w=w,
                                        df= .dfmu.init)
                location.init = c(predict(fit500, x=x[,min(ncol(x),2)])$y)
            } else {
                use.this = weighted.mean(y, w)
                location.init = (1- .sinit)*y + .sinit * use.this
            }
            location.init = if (length(.ilocation)) rep(.ilocation, len=M) else
                rep(location.init, len=M)
            location.init = matrix(location.init, n, M, byrow=TRUE)
            if ( .llocation == "loge")
                location.init = abs(location.init)
            etastart =
                cbind(theta2eta(location.init, .llocation, earg= .elocation))
        }
    }), list( .method.init=method.init,
              .dfmu.init=dfmu.init, .rep0 = rep0,
              .sinit=shrinkage.init, .digt=digt,
              .elocation=elocation, .Scale.arg=Scale.arg,
              .llocation=llocation, .kappa=kappa,
              .ilocation=ilocation ))),
    inverse=eval(substitute(function(eta, extra=NULL) {
        # Fitted values are the estimated quantiles on the response scale,
        # clamped to [minquantile, maxquantile] before returning.
        location.y = eta2theta(eta, .llocation, earg= .elocation)
        if ( .fittedMean) {
            # Unreachable: fittedMean is hard-wired FALSE above.
            stop("Yet to do: handle 'fittedMean = TRUE'")
            kappamat = matrix(extra$kappa, extra$n, extra$M, byrow=TRUE)
            Scale = matrix(extra$Scale, extra$n, extra$M, byrow=TRUE)
            location.y + Scale * (1/kappamat - kappamat)
        } else {
            if (length(location.y) > extra$n)
                dimnames(location.y) = list(dimnames(eta)[[1]], extra$y.names)
            location.y
        }
        location.y[location.y < .minquantile] = .minquantile
        location.y[location.y > .maxquantile] = .maxquantile
        location.y
    }, list( .elocation=elocation, .llocation=llocation,
             .minquantile = minquantile, .maxquantile = maxquantile,
             .fittedMean=fittedMean, .Scale.arg=Scale.arg,
             .kappa=kappa ))),
    last = eval(substitute(expression({
        misc$link = c(location= .llocation)
        misc$earg = list(location= .elocation)
        misc$expected = TRUE
        extra$kappa = misc$kappa = .kappa
        extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
        extra$Scale.arg = .Scale.arg
        misc$true.mu = .fittedMean # @fitted is not a true mu?
        misc$rep0 = .rep0
        misc$minquantile = .minquantile
        misc$maxquantile = .maxquantile
        # Record the achieved sample percentile for each requested quantile.
        extra$percentile = numeric(length(misc$kappa))
        location.y = as.matrix(location.y)
        for(ii in 1:length(misc$kappa))
            extra$percentile[ii] = 100 * weighted.mean(y <= location.y[,ii], w)
    }), list( .elocation=elocation, .llocation=llocation,
              .Scale.arg=Scale.arg, .fittedMean=fittedMean,
              .minquantile = minquantile, .maxquantile = maxquantile,
              .rep0 = rep0, .kappa=kappa ))),
    loglikelihood=eval(substitute(
        function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
        kappamat = matrix(extra$kappa, extra$n, extra$M, byrow=TRUE)
        Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow=TRUE)
        ymat = matrix(y, extra$n, extra$M)


        # The likelihood is evaluated on the link scale: zeros are nudged
        # positive, then the link is applied to the response.
        if ( .llocation == "loge")
            ymat = adjust0.loglaplace1(ymat=ymat, y=y, w=w, rep0= .rep0)
        w.mat = theta2eta(ymat, .llocation, earg= .elocation) # e.g., logoff()
        if (residuals) stop("loglikelihood residuals not implemented yet") else {
            ALDans =
                sum(w * dalap(x=c(w.mat), location=c(eta),
                              scale=c(Scale.w), kappa=c(kappamat), log=TRUE))
            ALDans
        }
    }, list( .elocation=elocation, .llocation=llocation,
             .rep0 = rep0,
             .Scale.arg=Scale.arg, .kappa=kappa ))),
    vfamily=c("loglaplace1"),
    deriv=eval(substitute(expression({
        # Asymmetric-Laplace score for the location, computed on the link
        # scale; the "identity" in dtheta.deta is deliberate because eta
        # itself is the location on the link scale.
        ymat = matrix(y, n, M)
        Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow=TRUE)
        location.w = eta
        location.y = eta2theta(location.w, .llocation, earg= .elocation)
        kappamat = matrix(extra$kappa, n, M, byrow=TRUE)

        ymat = adjust0.loglaplace1(ymat=ymat, y=y, w=w, rep0= .rep0)
        w.mat = theta2eta(ymat, .llocation, earg= .elocation) # e.g., logit()
        zedd = abs(w.mat-location.w) / Scale.w
        dl.dlocation = ifelse(w.mat >= location.w, kappamat, 1/kappamat) *
                       sqrt(2) * sign(w.mat-location.w) / Scale.w
        dlocation.deta = dtheta.deta(location.w, "identity", earg= .elocation)
        w * cbind(dl.dlocation * dlocation.deta)
    }), list( .Scale.arg=Scale.arg, .elocation=elocation,
              .rep0 = rep0,
              .llocation=llocation, .kappa=kappa ))),
    weight=eval(substitute(expression({
        # Expected information for the asymmetric-Laplace location: 2/scale^2.
        d2l.dlocation2 = 2 / Scale.w^2
        wz = cbind(d2l.dlocation2 * dlocation.deta^2)
        w * wz
    }), list( .Scale.arg=Scale.arg,
              .elocation=elocation, .llocation=llocation ))))
}
-
-
-
-
-
loglaplace2.control <- function(save.weight=TRUE, ...)
{
    # Control parameters for loglaplace2() fits: whether the (simulated)
    # working weights are saved on the fitted object.  Extra arguments in
    # '...' are accepted but ignored.
    ans <- list(save.weight = save.weight)
    ans
}
-
loglaplace2 = function(tau = NULL,
                       llocation="loge", lscale="loge",
                       elocation=list(), escale=list(),
                       ilocation=NULL, iscale=NULL,
                       kappa = sqrt(tau/(1-tau)),
                       shrinkage.init=0.95, parallelLocation=FALSE, digt=4,
                       sameScale=TRUE,
                       dfmu.init = 3,
                       rep0 = 0.5, nsimEIM=NULL,
                       method.init=1, zero="(1 + M/2):M") {
    # VGAM family function for two-parameter log-Laplace quantile regression
    # (location and scale both estimated).  Each requested quantile gets a
    # location predictor plus a scale predictor; 'sameScale' constrains the
    # scale to be common across quantiles.  The simpler loglaplace1() is
    # preferred, hence the warning.
    warning("it is best to use loglaplace1()")

    if (length(nsimEIM) &&
       (!is.Numeric(nsimEIM, allow=1, integ=TRUE) || nsimEIM <= 10))
        stop("'nsimEIM' should be an integer greater than 10")
    if (!is.Numeric(rep0, posit=TRUE, allow=1) || rep0 > 1)
        stop("bad input for argument 'rep0'")
    if (!is.Numeric(kappa, posit=TRUE))
        stop("bad input for argument 'kappa'")
    if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
        stop("arguments 'kappa' and 'tau' do not match")
    if (mode(llocation) != "character" && mode(llocation) != "name")
        llocation = as.character(substitute(llocation))
    if (mode(lscale) != "character" && mode(lscale) != "name")
        lscale = as.character(substitute(lscale))
    if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
       method.init > 4) stop("argument 'method.init' must be 1, 2 or ... 4")
    if (length(iscale) && !is.Numeric(iscale, posit=TRUE))
        stop("bad input for argument 'iscale'")
    if (!is.list(elocation)) elocation = list()
    if (!is.list(escale)) escale = list()
    if (!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
       shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
    if (length(zero) &&
       !(is.Numeric(zero, integer=TRUE, posit=TRUE) || is.character(zero )))
        stop("bad input for argument 'zero'")
    if (!is.logical(sameScale) || length(sameScale) != 1)
        stop("bad input for argument 'sameScale'")
    if (!is.logical(parallelLocation) || length(parallelLocation) != 1)
        stop("bad input for argument 'parallelLocation'")
    # fittedMean is hard-wired off; fitted values are quantiles.
    fittedMean = FALSE
    if (!is.logical(fittedMean) || length(fittedMean) != 1)
        stop("bad input for argument 'fittedMean'")

    if (llocation != "loge")
        stop("argument 'llocation' must be \"loge\"")

    new("vglmff",
    blurb = c("Two-parameter log-Laplace distribution\n\n",
            "Links: ",
            namesof("location", llocation, earg=elocation), ", ",
            namesof("scale", lscale, earg=escale),
            "\n", "\n",
            "Mean: zz location + scale * (1/kappa - kappa) / sqrt(2)", "\n",
            "Quantiles: location", "\n",
            "Variance: zz scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
    constraints=eval(substitute(expression({
        # Build constraint matrices: the first M/2 predictors (locations)
        # may be parallel; the last M/2 (scales) may be tied to one column.
        .ZERO = .zero
        if (is.character(.ZERO)) .ZERO = eval(parse(text = .ZERO))
        .PARALLEL = .parallelLocation
        parHmat = if (is.logical(.PARALLEL) && .PARALLEL)
                  matrix(1, M/2, 1) else diag(M/2)
        scaleHmat = if (is.logical(.sameScale) && .sameScale)
                    matrix(1, M/2, 1) else diag(M/2)
        mycmatrix = cbind(rbind(parHmat, 0*parHmat),
                          rbind(0*scaleHmat, scaleHmat))
        constraints=cm.vgam(mycmatrix, x, .PARALLEL, constraints, int=FALSE)
        constraints = cm.zero.vgam(constraints, x, .ZERO, M)

        # The intercept column is exempt from the parallelism constraint.
        if (.PARALLEL && names(constraints)[1] == "(Intercept)") {
            parHmat = diag(M/2)
            mycmatrix = cbind(rbind(parHmat, 0*parHmat),
                              rbind(0*scaleHmat, scaleHmat))
            constraints[["(Intercept)"]] = mycmatrix
        }
        if (is.logical(.sameScale) && .sameScale &&
           names(constraints)[1] == "(Intercept)") {
            temp3 = constraints[["(Intercept)"]]
            temp3 = cbind(temp3[,1:(M/2)], rbind(0*scaleHmat, scaleHmat))
            constraints[["(Intercept)"]] = temp3
        }
    }), list( .sameScale=sameScale, .parallelLocation=parallelLocation,
              .zero=zero ))),
    initialize=eval(substitute(expression({
        # M = 2 * (number of quantiles): locations first, then scales.
        extra$kappa = .kappa
        extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
        if (ncol(y <- cbind(y)) != 1)
            stop("response must be a vector or a one-column matrix")
        extra$M = M = 2 * length(extra$kappa)
        extra$n = n
        extra$y.names = y.names = paste("tau=", round(extra$tau, dig=.digt),
                                        sep="")
        extra$individual = FALSE
        predictors.names =
            c(namesof(paste("quantile(", y.names, ")", sep=""),
                      .llocation, earg=.elocation, tag=FALSE),
              namesof(if (M == 2) "scale" else paste("scale", 1:(M/2), sep=""),
                      .lscale, earg=.escale, tag=FALSE))
        if (weighted.mean(1*(y < 0.001), w) >= min(extra$tau))
            stop("sample proportion of 0s > minimum 'tau' value. ",
                 "Choose larger values for 'tau'.")

        if (!length(etastart)) {
            # Initialization rules: mean/variance, median/MAD-like,
            # vector-spline smooth, or shrinkage toward the weighted mean.
            if ( .method.init == 1) {
                location.init.y = weighted.mean(y, w)
                scale.init = sqrt(var(y) / 2)
            } else if ( .method.init == 2) {
                location.init.y = median(y)
                scale.init = sqrt(sum(w*abs(y-median(y))) / (sum(w) *2))
            } else if ( .method.init == 3) {
                fit500 = vsmooth.spline(x=x[,min(ncol(x),2)], y=y, w=w,
                                        df= .dfmu.init)
                location.init.y = c(predict(fit500, x=x[,min(ncol(x),2)])$y)
                scale.init = sqrt(sum(w*abs(y-median(y))) / (sum(w) *2))
            } else {
                use.this = weighted.mean(y, w)
                location.init.y = (1- .sinit)*y + .sinit * use.this
                scale.init = sqrt(sum(w*abs(y-median(y ))) / (sum(w) *2))
            }
            location.init.y = if (length(.ilocation)) rep(.ilocation, len=n) else
                rep(location.init.y, len=n)
            location.init.y = matrix(location.init.y, n, M/2)
            scale.init = if (length(.iscale)) rep(.iscale, len=n) else
                rep(scale.init, len=n)
            scale.init = matrix(scale.init, n, M/2)
            etastart =
                cbind(theta2eta(location.init.y, .llocation, earg= .elocation),
                      theta2eta(scale.init, .lscale, earg= .escale))
        }
    }), list( .method.init=method.init,
              .dfmu.init=dfmu.init,
              .sinit=shrinkage.init, .digt=digt,
              .elocation=elocation, .escale=escale,
              .llocation=llocation, .lscale=lscale, .kappa=kappa,
              .ilocation=ilocation, .iscale=iscale ))),
    inverse=eval(substitute(function(eta, extra=NULL) {
        # Fitted values: the estimated quantiles (first M/2 columns of eta,
        # back-transformed).
        location.y = eta2theta(eta[,1:(extra$M/2),drop=FALSE],
                               .llocation, earg= .elocation)
        if ( .fittedMean) {
            # Unreachable: fittedMean is hard-wired FALSE above.
            kappamat = matrix(extra$kappa, extra$n, extra$M/2, byrow=TRUE)
            Scale.y = eta2theta(eta[,(1+extra$M/2):extra$M], .lscale, earg= .escale)
            location.y + Scale.y * (1/kappamat - kappamat)
        } else {
            dimnames(location.y) = list(dimnames(eta)[[1]], extra$y.names)
            location.y
        }
    }, list( .elocation=elocation, .llocation=llocation,
             .fittedMean=fittedMean, .escale=escale, .lscale=lscale,
             .kappa=kappa ))),
    last = eval(substitute(expression({
        misc$link = c(location= .llocation, scale= .lscale)
        misc$earg = list(location= .elocation, scale= .escale)
        misc$expected = TRUE
        extra$kappa = misc$kappa = .kappa
        extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
        misc$true.mu = .fittedMean # @fitted is not a true mu?
        misc$nsimEIM = .nsimEIM
        misc$rep0 = .rep0
        # Record achieved sample percentiles.
        # NOTE(review): 'location' below appears unused -- the loop reads
        # 'location.y' (left over from the deriv slot's frame); confirm
        # whether 'location.y = as.matrix(location.y)' was intended.
        extra$percentile = numeric(length(misc$kappa))
        location = as.matrix(location.y)
        for(ii in 1:length(misc$kappa))
            extra$percentile[ii] = 100 * weighted.mean(y <= location.y[,ii], w)
    }), list( .elocation=elocation, .llocation=llocation,
              .escale=escale, .lscale=lscale,
              .fittedMean=fittedMean,
              .nsimEIM=nsimEIM, .rep0 = rep0,
              .kappa=kappa ))),
    loglikelihood=eval(substitute(
        function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
        # Log-likelihood under the log-Laplace density; zero responses are
        # nudged positive first.
        kappamat = matrix(extra$kappa, extra$n, extra$M/2, byrow=TRUE)
        Scale.w = eta2theta(eta[,(1+extra$M/2):extra$M], .lscale, earg= .escale)
        ymat = matrix(y, extra$n, extra$M/2)
        ymat[ymat <= 0] = min(min(y[y > 0]), .rep0) # Adjust for 0s
        ell.mat = matrix(c(dloglaplace(x=c(ymat),
                                       location.ald=c(eta[,1:(extra$M/2)]),
                                       scale.ald=c(Scale.w),
                                       kappa=c(kappamat), log=TRUE)),
                         extra$n, extra$M/2)
        if (residuals) stop("loglikelihood residuals not implemented yet") else
            sum(w * ell.mat)
    }, list( .elocation=elocation, .llocation=llocation,
             .escale=escale, .lscale=lscale,
             .rep0 = rep0, .kappa=kappa ))),
    vfamily=c("loglaplace2"),
    deriv=eval(substitute(expression({
        # Asymmetric-Laplace scores for location and scale, computed on the
        # link (log) scale of the response.
        ymat = matrix(y, n, M/2)
        Scale.w = eta2theta(eta[,(1+extra$M/2):extra$M], .lscale, earg= .escale)
        location.w = eta[,1:(extra$M/2),drop=FALSE]
        location.y = eta2theta(location.w, .llocation, earg= .elocation)
        kappamat = matrix(extra$kappa, n, M/2, byrow=TRUE)
        w.mat = ymat
        w.mat[w.mat <= 0] = min(min(w.mat[w.mat > 0]), .rep0) # Adjust for 0s
        w.mat= theta2eta(w.mat, .llocation, earg= .elocation) # w.mat=log(w.mat)
        zedd = abs(w.mat-location.w) / Scale.w
        dl.dlocation = sqrt(2) *
                       ifelse(w.mat >= location.w, kappamat, 1/kappamat) *
                       sign(w.mat-location.w) / Scale.w
        dl.dscale =  sqrt(2) *
                     ifelse(w.mat >= location.w, kappamat, 1/kappamat) *
                     zedd / Scale.w - 1 / Scale.w
        dlocation.deta = dtheta.deta(location.w, .llocation, earg= .elocation)
        dscale.deta = dtheta.deta(Scale.w, .lscale, earg= .escale)
        w * cbind(dl.dlocation * dlocation.deta,
                  dl.dscale * dscale.deta)
    }), list( .escale=escale, .lscale=lscale,
              .elocation=elocation, .llocation=llocation,
              .rep0 = rep0, .kappa=kappa ))),
    weight=eval(substitute(expression({
        # Working weights: simulated EIM (Fisher scoring by simulation) when
        # nsimEIM is given, otherwise a diagonal analytic approximation.
        run.varcov = 0
        ind1 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
        dthetas.detas = cbind(dlocation.deta, dscale.deta)
        if (length( .nsimEIM )) {
            for(ii in 1:( .nsimEIM )) {
                wsim = matrix(rloglap(n*M/2, loc=c(location.w),
                                      sca=c(Scale.w),
                                      kappa=c(kappamat)), n, M/2)
                zedd = abs(wsim-location.w) / Scale.w
                dl.dlocation = sqrt(2) *
                               ifelse(wsim >= location.w, kappamat, 1/kappamat) *
                               sign(wsim-location.w) / Scale.w
                dl.dscale =  sqrt(2) *
                             ifelse(wsim >= location.w, kappamat, 1/kappamat) *
                             zedd / Scale.w - 1 / Scale.w

                rm(wsim)
                temp3 = cbind(dl.dlocation, dl.dscale) # n x M matrix
                # Running average of the outer-product estimate of the EIM.
                run.varcov = ((ii-1) * run.varcov +
                              temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
            }
            wz = if (intercept.only)
                matrix(colMeans(run.varcov),
                       n, ncol(run.varcov), byrow=TRUE) else run.varcov

            wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
            wz = w * matrix(wz, n, dimm(M))
            wz
        } else {
            d2l.dlocation2 = 2 / (Scale.w * location.w)^2
            d2l.dscale2 = 1 / Scale.w^2
            wz = cbind(d2l.dlocation2 * dlocation.deta^2,
                       d2l.dscale2 * dscale.deta^2)
            w * wz
        }
    }), list( .escale=escale, .lscale=lscale,
              .nsimEIM=nsimEIM,
              .elocation=elocation, .llocation=llocation ))))
}
-
-
-
-
logitlaplace1.control <- function(maxit=300, ...)
{
    # Control parameters for logitlaplace1() fits: caps the number of IRLS
    # iterations.  Extra arguments in '...' are accepted but ignored.
    ans <- list(maxit = maxit)
    ans
}
-
-
adjust01.logitlaplace1 = function(ymat, y, w, rep01) {
    # Nudge boundary values of ymat into the open interval (0, 1) so that a
    # logit-type link can be applied: 0s move up toward the smallest interior
    # response, 1s move down toward the largest.
    rangey01 = range(y[(y > 0) & (y < 1)])
    # NOTE(review): min()/max() reduce their arguments to a single scalar
    # even though rep01 / w[...] can be a vector when several responses sit
    # on the boundary -- presumably intended, but confirm against callers.
    ymat[ymat <= 0] = min(rangey01[1] / 2, rep01 / w[y <= 0])
    ymat[ymat >= 1] = max((1 + rangey01[2]) / 2, 1 - rep01 / w[y >= 1])
    ymat
}
-
logitlaplace1 = function(tau = NULL,
                         llocation="logit",
                         elocation=list(),
                         ilocation=NULL,
                         kappa = sqrt(tau/(1-tau)),
                         Scale.arg=1,
                         shrinkage.init=0.95, parallelLocation=FALSE, digt=4,
                         dfmu.init = 3,
                         rep01 = 0.5,
                         method.init=1, zero=NULL) {
    # VGAM family function for one-parameter logit-Laplace quantile
    # regression of a response in [0, 1].  Quantiles are specified by 'tau'
    # or equivalently 'kappa' = sqrt(tau/(1-tau)); the asymmetric-Laplace
    # scale is fixed at 'Scale.arg'.  'rep01' controls how boundary
    # responses (0s and 1s) are nudged into (0, 1).

    if (!is.Numeric(rep01, posit=TRUE, allow=1) || rep01 > 0.5)
        stop("bad input for argument 'rep01'")
    if (!is.Numeric(kappa, posit=TRUE))
        stop("bad input for argument 'kappa'")
    if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
        stop("arguments 'kappa' and 'tau' do not match")
    if (mode(llocation) != "character" && mode(llocation) != "name")
        llocation = as.character(substitute(llocation))
    if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
       method.init > 4) stop("argument 'method.init' must be 1, 2 or ... 4")
    if (!is.list(elocation)) elocation = list()
    if (!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
       shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
    if (length(zero) &&
       !(is.Numeric(zero, integer=TRUE, posit=TRUE) || is.character(zero )))
        stop("bad input for argument 'zero'")
    if (!is.Numeric(Scale.arg, posit=TRUE))
        stop("bad input for argument 'Scale.arg'")
    if (!is.logical(parallelLocation) || length(parallelLocation) != 1)
        stop("bad input for argument 'parallelLocation'")
    # fittedMean is hard-wired off; fitted values are quantiles.
    fittedMean = FALSE
    if (!is.logical(fittedMean) || length(fittedMean) != 1)
        stop("bad input for argument 'fittedMean'")

    # Build a human-readable "inverse link" string for the blurb by editing
    # the last character of the namesof() string.
    mystring0 = namesof("location", llocation, earg=elocation)
    mychars = substring(mystring0, fi=1:nchar(mystring0), la=1:nchar(mystring0))
    mychars[nchar(mystring0)] = ", inverse=TRUE)"
    mystring1 = paste(mychars, collapse="")

    new("vglmff",
    blurb = c("One-parameter ", llocation, "-Laplace distribution\n\n",
            "Links: ", mystring0, "\n", "\n",
            "Quantiles: ", mystring1),
    constraints=eval(substitute(expression({
        constraints = cm.vgam(matrix(1,M,1), x, .parallelLocation,
                              constraints, intercept=FALSE)
        constraints = cm.zero.vgam(constraints, x, .zero, M)
    }), list( .parallelLocation=parallelLocation,
              .Scale.arg=Scale.arg, .zero=zero ))),
    initialize=eval(substitute(expression({
        # One linear predictor per requested quantile; Scale.arg and kappa
        # are recycled to a common length M.
        extra$M = M = max(length( .Scale.arg ), length( .kappa )) # Recycle
        extra$Scale = rep( .Scale.arg, length=M)
        extra$kappa = rep( .kappa, length=M)
        extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
        if (ncol(y <- cbind(y)) != 1)
            stop("response must be a vector or a one-column matrix")
        extra$n = n
        extra$y.names = y.names = paste("tau=", round(extra$tau, dig=.digt),
                                        sep="")
        extra$individual = FALSE
        predictors.names = namesof(paste("quantile(", y.names, ")", sep=""),
                                   .llocation, earg=.elocation, tag=FALSE)

        # The response must live in [0, 1], with the proportions of exact
        # 0s and 1s compatible with the requested quantile range.
        if (all(y == 0 | y == 1)) stop("response cannot be all 0s or 1s")
        if (min(y) < 0) stop("negative response values detected")
        if (max(y) > 1) stop("response values greater than 1 detected")
        if ((prop.0. <- weighted.mean(1*(y == 0), w)) >= min(extra$tau))
            stop("sample proportion of 0s == ", round(prop.0., dig=4),
                 " > minimum 'tau' value. Choose larger values for 'tau'.")
        if ((prop.1. <- weighted.mean(1*(y == 1), w)) >= max(extra$tau))
            stop("sample proportion of 1s == ", round(prop.1., dig=4),
                 " < maximum 'tau' value. Choose smaller values for 'tau'.")
        if (!length(etastart)) {
            # Initialization rules: sample quantiles, median (the weighted
            # mean computed first is overwritten), or shrinkage toward the
            # weighted mean.
            if ( .method.init == 1) {
                location.init = quantile(rep(y, w), probs= extra$tau)
            } else if ( .method.init == 2) {
                location.init = weighted.mean(y, w)
                location.init = median(rep(y, w))
            } else if ( .method.init == 3) {
                use.this = weighted.mean(y, w)
                location.init = (1- .sinit)*y + use.this * .sinit
            } else {
                stop("this option not implemented")
            }


            location.init = if (length(.ilocation)) rep(.ilocation, len=M) else
                rep(location.init, len=M)
            location.init = matrix(location.init, n, M, byrow=TRUE)
            location.init = abs(location.init)
            etastart =
                cbind(theta2eta(location.init, .llocation, earg= .elocation))
        }
    }), list( .method.init=method.init,
              .dfmu.init=dfmu.init,
              .sinit=shrinkage.init, .digt=digt,
              .elocation=elocation, .Scale.arg=Scale.arg,
              .llocation=llocation, .kappa=kappa,
              .ilocation=ilocation ))),
    inverse=eval(substitute(function(eta, extra=NULL) {
        # Fitted values are the estimated quantiles on the response scale.
        location.y = eta2theta(eta, .llocation, earg= .elocation)
        if ( .fittedMean) {
            # Unreachable: fittedMean is hard-wired FALSE above.
            stop("Yet to do: handle 'fittedMean = TRUE'")
            kappamat = matrix(extra$kappa, extra$n, extra$M, byrow=TRUE)
            Scale = matrix(extra$Scale, extra$n, extra$M, byrow=TRUE)
            location.y + Scale * (1/kappamat - kappamat)
        } else {
            if (length(location.y) > extra$n)
                dimnames(location.y) = list(dimnames(eta)[[1]], extra$y.names)
            location.y
        }
    }, list( .elocation=elocation, .llocation=llocation,
             .fittedMean=fittedMean, .Scale.arg=Scale.arg,
             .kappa=kappa ))),
    last = eval(substitute(expression({
        misc$link = c(location= .llocation)
        misc$earg = list(location= .elocation)
        misc$expected = TRUE
        extra$kappa = misc$kappa = .kappa
        extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
        extra$Scale.arg = .Scale.arg
        misc$true.mu = .fittedMean # @fitted is not a true mu?
        misc$rep01 = .rep01

        # Record the achieved sample percentile for each requested quantile.
        extra$percentile = numeric(length(misc$kappa))
        location.y = eta2theta(eta, .llocation, earg= .elocation)
        location.y = as.matrix(location.y)
        for(ii in 1:length(misc$kappa))
            extra$percentile[ii] = 100 * weighted.mean(y <= location.y[,ii], w)

    }), list( .elocation=elocation, .llocation=llocation,
              .Scale.arg=Scale.arg, .fittedMean=fittedMean,
              .rep01 = rep01,
              .kappa=kappa ))),
    loglikelihood=eval(substitute(
        function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
        # Asymmetric-Laplace log-likelihood on the link (logit) scale;
        # boundary responses are nudged into (0, 1) first.
        kappamat = matrix(extra$kappa, extra$n, extra$M, byrow=TRUE)
        Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow=TRUE)
        ymat = matrix(y, extra$n, extra$M)
        ymat = adjust01.logitlaplace1(ymat=ymat, y=y, w=w, rep01= .rep01)
        w.mat = theta2eta(ymat, .llocation, earg= .elocation) # e.g., logit()
        if (residuals) stop("loglikelihood residuals not implemented yet") else {
            ALDans =
                sum(w * dalap(x=c(w.mat), location=c(eta),
                              scale=c(Scale.w), kappa=c(kappamat), log=TRUE))
            ALDans
        }
    }, list( .elocation=elocation, .llocation=llocation,
             .rep01 = rep01,
             .Scale.arg=Scale.arg, .kappa=kappa ))),
    vfamily=c("logitlaplace1"),
    deriv=eval(substitute(expression({
        # Asymmetric-Laplace score for the location on the link scale; the
        # "identity" in dtheta.deta is deliberate because eta itself is the
        # location on the link scale.
        ymat = matrix(y, n, M)
        Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow=TRUE)
        location.w = eta
        kappamat = matrix(extra$kappa, n, M, byrow=TRUE)
        ymat = adjust01.logitlaplace1(ymat=ymat, y=y, w=w, rep01= .rep01)
        w.mat = theta2eta(ymat, .llocation, earg= .elocation) # e.g., logit()
        zedd = abs(w.mat-location.w) / Scale.w
        dl.dlocation = ifelse(w.mat >= location.w, kappamat, 1/kappamat) *
                       sqrt(2) * sign(w.mat-location.w) / Scale.w
        dlocation.deta = dtheta.deta(location.w, "identity", earg= .elocation)
        w * cbind(dl.dlocation * dlocation.deta)
    }), list( .Scale.arg=Scale.arg, .elocation=elocation,
              .rep01 = rep01,
              .llocation=llocation, .kappa=kappa ))),
    weight=eval(substitute(expression({
        # Expected information for the asymmetric-Laplace location: 2/scale^2.
        d2l.dlocation2 = 2 / Scale.w^2
        wz = cbind(d2l.dlocation2 * dlocation.deta^2)
        w * wz
    }), list( .Scale.arg=Scale.arg,
              .elocation=elocation, .llocation=llocation ))))
}
diff --git a/R/family.vglm.R b/R/family.vglm.R
index 9d2627f..4f6fccc 100644
--- a/R/family.vglm.R
+++ b/R/family.vglm.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/family.zeroinf.R b/R/family.zeroinf.R
index 7cd3f6b..aa303ff 100644
--- a/R/family.zeroinf.R
+++ b/R/family.zeroinf.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -7,7 +8,7 @@
-dzanegbin = function(x, p0, size, prob=NULL, munb=NULL, log=FALSE) {
+dzanegbin = function(x, p0, size, prob = NULL, munb = NULL, log = FALSE) {
if (length(munb)) {
if (length(prob))
stop("'prob' and 'munb' both specified")
@@ -17,24 +18,24 @@ dzanegbin = function(x, p0, size, prob=NULL, munb=NULL, log=FALSE) {
rm(log)
LLL = max(length(x), length(p0), length(prob), length(size))
- if (length(x) != LLL) x = rep(x, len=LLL)
- if (length(p0) != LLL) p0 = rep(p0, len=LLL);
- if (length(prob) != LLL) prob = rep(prob, len=LLL)
- if (length(size) != LLL) size = rep(size, len=LLL);
- ans = rep(0.0, len=LLL)
+ if (length(x) != LLL) x = rep(x, len = LLL)
+ if (length(p0) != LLL) p0 = rep(p0, len = LLL);
+ if (length(prob) != LLL) prob = rep(prob, len = LLL)
+ if (length(size) != LLL) size = rep(size, len = LLL);
+ ans = rep(0.0, len = LLL)
if (!is.Numeric(p0) || any(p0 < 0) || any(p0 > 1))
stop("'p0' must be in [0,1]")
- if (!is.Numeric(prob, posit=TRUE))
+ if (!is.Numeric(prob, posit = TRUE))
stop("'prob' must be in [0,Inf)")
- if (!is.Numeric(size, posit=TRUE))
+ if (!is.Numeric(size, posit = TRUE))
stop("'size' must be in [0,Inf)")
- index0 = x==0
+ index0 = x == 0
if (log.arg) {
ans[ index0] = log(p0[index0])
ans[!index0] = log1p(-p0[!index0]) +
dposnegbin(x[!index0], prob=prob[!index0],
- size=size[!index0], log=TRUE)
+ size=size[!index0], log = TRUE)
} else {
ans[ index0] = p0[index0]
ans[!index0] = (1-p0[!index0]) * dposnegbin(x[!index0],
@@ -45,18 +46,18 @@ dzanegbin = function(x, p0, size, prob=NULL, munb=NULL, log=FALSE) {
-pzanegbin = function(q, p0, size, prob=NULL, munb=NULL) {
+pzanegbin = function(q, p0, size, prob = NULL, munb = NULL) {
if (length(munb)) {
if (length(prob))
stop("'prob' and 'munb' both specified")
prob <- size/(size + munb)
}
LLL = max(length(q), length(p0), length(prob), length(size))
- if (length(q) != LLL) q = rep(q, len=LLL);
- if (length(p0) != LLL) p0 = rep(p0, len=LLL);
- if (length(prob) != LLL) prob = rep(prob, len=LLL);
- if (length(size) != LLL) size = rep(size, len=LLL);
- ans = rep(0.0, len=LLL)
+ if (length(q) != LLL) q = rep(q, len = LLL);
+ if (length(p0) != LLL) p0 = rep(p0, len = LLL);
+ if (length(prob) != LLL) prob = rep(prob, len = LLL);
+ if (length(size) != LLL) size = rep(size, len = LLL);
+ ans = rep(0.0, len = LLL)
if (!is.Numeric(p0) || any(p0 < 0) || any(p0 > 1))
stop("'p0' must be in [0,1]")
@@ -67,18 +68,18 @@ pzanegbin = function(q, p0, size, prob=NULL, munb=NULL) {
ans
}
-qzanegbin = function(p, p0, size, prob=NULL, munb=NULL) {
+qzanegbin = function(p, p0, size, prob = NULL, munb = NULL) {
if (length(munb)) {
if (length(prob))
stop("'prob' and 'munb' both specified")
prob <- size/(size + munb)
}
LLL = max(length(p), length(p0), length(prob), length(size))
- if (length(p) != LLL) p = rep(p, len=LLL);
- if (length(p0) != LLL) p0 = rep(p0, len=LLL);
- if (length(prob) != LLL) prob = rep(prob, len=LLL);
- if (length(size) != LLL) size = rep(size, len=LLL);
- ans = rep(0.0, len=LLL)
+ if (length(p) != LLL) p = rep(p, len = LLL);
+ if (length(p0) != LLL) p0 = rep(p0, len = LLL);
+ if (length(prob) != LLL) prob = rep(prob, len = LLL);
+ if (length(size) != LLL) size = rep(size, len = LLL);
+ ans = rep(0.0, len = LLL)
if (!is.Numeric(p0) || any(p0 < 0) || any(p0 > 1))
stop("'p0' must be between 0 and 1 inclusive")
@@ -89,9 +90,9 @@ qzanegbin = function(p, p0, size, prob=NULL, munb=NULL) {
ans
}
-rzanegbin = function(n, p0, size, prob=NULL, munb=NULL) {
+rzanegbin = function(n, p0, size, prob = NULL, munb = NULL) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
stop("bad input for argument 'n'") else n
if (length(munb)) {
@@ -110,23 +111,23 @@ rzanegbin = function(n, p0, size, prob=NULL, munb=NULL) {
-dzapois = function(x, lambda, p0=0, log=FALSE) {
+dzapois = function(x, lambda, p0=0, log = FALSE) {
if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
rm(log)
LLL = max(length(x), length(lambda), length(p0))
- if (length(x) != LLL) x = rep(x, len=LLL);
- if (length(lambda) != LLL) lambda = rep(lambda, len=LLL);
- if (length(p0) != LLL) p0 = rep(p0, len=LLL);
- ans = rep(0.0, len=LLL)
+ if (length(x) != LLL) x = rep(x, len = LLL);
+ if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
+ if (length(p0) != LLL) p0 = rep(p0, len = LLL);
+ ans = rep(0.0, len = LLL)
if (!is.Numeric(p0) || any(p0 < 0) || any(p0 > 1))
stop("argument 'p0' must be in [0,1]")
- index0 = (x==0)
+ index0 = (x == 0)
if (log.arg) {
ans[ index0] = log(p0[index0])
ans[!index0] = log1p(-p0[!index0]) +
- dpospois(x[!index0], lambda[!index0], log=TRUE)
+ dpospois(x[!index0], lambda[!index0], log = TRUE)
} else {
ans[ index0] = p0[index0]
ans[!index0] = (1-p0[!index0]) * dpospois(x[!index0], lambda[!index0])
@@ -138,10 +139,10 @@ dzapois = function(x, lambda, p0=0, log=FALSE) {
pzapois = function(q, lambda, p0=0) {
LLL = max(length(q), length(lambda), length(p0))
- if (length(q) != LLL) q = rep(q, len=LLL);
- if (length(lambda) != LLL) lambda = rep(lambda, len=LLL);
- if (length(p0) != LLL) p0 = rep(p0, len=LLL);
- ans = rep(0.0, len=LLL)
+ if (length(q) != LLL) q = rep(q, len = LLL);
+ if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
+ if (length(p0) != LLL) p0 = rep(p0, len = LLL);
+ ans = rep(0.0, len = LLL)
if (!is.Numeric(p0) || any(p0 < 0) || any(p0 > 1))
stop("argument 'p0' must be in [0,1]")
@@ -154,9 +155,9 @@ pzapois = function(q, lambda, p0=0) {
qzapois = function(p, lambda, p0=0) {
LLL = max(length(p), length(lambda), length(p0))
- if (length(p) != LLL) p = rep(p, len=LLL);
- if (length(lambda) != LLL) lambda = rep(lambda, len=LLL);
- if (length(p0) != LLL) p0 = rep(p0, len=LLL);
+ if (length(p) != LLL) p = rep(p, len = LLL);
+ if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
+ if (length(p0) != LLL) p0 = rep(p0, len = LLL);
if (!is.Numeric(p0) || any(p0 < 0) || any(p0 > 1))
stop("argument 'p0' must be between 0 and 1 inclusive")
@@ -169,7 +170,7 @@ qzapois = function(p, lambda, p0=0) {
rzapois = function(n, lambda, p0=0) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
stop("bad input for argument 'n'") else n
ans = rpospois(use.n, lambda)
@@ -183,24 +184,24 @@ rzapois = function(n, lambda, p0=0) {
-dzipois = function(x, lambda, phi=0, log=FALSE) {
+dzipois = function(x, lambda, phi = 0, log = FALSE) {
if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
rm(log)
LLL = max(length(x), length(lambda), length(phi))
- if (length(x) != LLL) x = rep(x, len=LLL);
- if (length(lambda) != LLL) lambda = rep(lambda, len=LLL);
- if (length(phi) != LLL) phi = rep(phi, len=LLL);
- ans = rep(0.0, len=LLL)
+ if (length(x) != LLL) x = rep(x, len = LLL);
+ if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
+ if (length(phi) != LLL) phi = rep(phi, len = LLL);
+ ans = rep(0.0, len = LLL)
if (!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
stop("'phi' must be between 0 and 1 inclusive")
- index0 = (x==0)
+ index0 = (x == 0)
if (log.arg) {
ans[ index0] = log(phi[ index0] + (1-phi[ index0]) *
dpois(x[ index0], lambda[ index0]))
ans[!index0] = log1p(-phi[!index0]) +
- dpois(x[!index0], lambda[!index0], log=TRUE)
+ dpois(x[!index0], lambda[!index0], log = TRUE)
} else {
ans[ index0] = phi[ index0] + (1-phi[ index0]) *
dpois(x[ index0], lambda[ index0])
@@ -209,11 +210,11 @@ dzipois = function(x, lambda, phi=0, log=FALSE) {
ans
}
-pzipois = function(q, lambda, phi=0) {
+pzipois = function(q, lambda, phi = 0) {
ans = ppois(q, lambda)
LLL = max(length(phi), length(ans))
- if (length(phi) != LLL) phi = rep(phi, len=LLL);
- if (length(ans) != LLL) ans = rep(ans, len=LLL);
+ if (length(phi) != LLL) phi = rep(phi, len = LLL);
+ if (length(ans) != LLL) ans = rep(ans, len = LLL);
ans = ifelse(q<0, 0, phi + (1-phi) * ans)
ans[phi < 0] = NaN
@@ -221,11 +222,11 @@ pzipois = function(q, lambda, phi=0) {
ans
}
-qzipois = function(p, lambda, phi=0) {
+qzipois = function(p, lambda, phi = 0) {
LLL = max(length(p), length(lambda), length(phi))
- ans = p = rep(p, len=LLL)
- lambda = rep(lambda, len=LLL)
- phi = rep(phi, len=LLL)
+ ans = p = rep(p, len = LLL)
+ lambda = rep(lambda, len = LLL)
+ phi = rep(phi, len = LLL)
ans[p<=phi] = 0
ans[p>phi] = qpois((p[p>phi]-phi[p>phi])/(1-phi[p>phi]), lam=lambda[p>phi])
ans[phi < 0] = NaN
@@ -233,9 +234,9 @@ qzipois = function(p, lambda, phi=0) {
ans
}
-rzipois = function(n, lambda, phi=0) {
+rzipois = function(n, lambda, phi = 0) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
stop("bad input for argument 'n'") else n
ans = rpois(use.n, lambda)
@@ -247,17 +248,17 @@ rzipois = function(n, lambda, phi=0) {
}
- yip88 = function(link.lambda="loge", n.arg=NULL)
+ yip88 = function(link.lambda = "loge", n.arg = NULL)
{
if (mode(link.lambda) != "character" && mode(link.lambda) != "name")
link.lambda = as.character(substitute(link.lambda))
new("vglmff",
- blurb=c("Zero-inflated Poisson (based on Yip (1988))\n\n",
- "Link: ", namesof("lambda", link.lambda), "\n",
- "Variance: (1-phi)*lambda"),
+ blurb = c("Zero-inflated Poisson (based on Yip (1988))\n\n",
+ "Link: ", namesof("lambda", link.lambda), "\n",
+ "Variance: (1-phi)*lambda"),
first=eval(substitute(expression({
- zero <- y==0
+ zero <- y == 0
if (any(zero)) {
if (length(extra)) extra$sumw = sum(w) else
extra = list(sumw=sum(w))
@@ -267,7 +268,7 @@ rzipois = function(n, lambda, phi=0) {
warning("trimming out the zero observations")
axa.save = attr(x, "assign")
- x = x[!zero,,drop=FALSE]
+ x = x[!zero,, drop = FALSE]
attr(x, "assign") = axa.save # Don't lose these!!
w = w[!zero]
y = y[!zero]
@@ -275,13 +276,13 @@ rzipois = function(n, lambda, phi=0) {
if (!is.numeric(.n.arg))
stop("n.arg must be supplied")
- }), list( .n.arg=n.arg ))),
- initialize=eval(substitute(expression({
+ }), list( .n.arg = n.arg ))),
+ initialize = eval(substitute(expression({
narg = if (is.numeric(.n.arg)) .n.arg else extra$sumw
if (sum(w) > narg)
stop("sum(w) > narg")
- predictors.names = namesof("lambda", .link.lambda, tag=FALSE)
+ predictors.names = namesof("lambda", .link.lambda, tag = FALSE)
if (!length(etastart)) {
lambda.init = rep(median(y), length=length(y))
etastart = theta2eta(lambda.init, .link.lambda)
@@ -291,54 +292,55 @@ rzipois = function(n, lambda, phi=0) {
extra$narg = narg # For @inverse
} else
extra = list(sumw=sum(w), narg = narg)
- }), list( .link.lambda=link.lambda, .n.arg=n.arg ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+ }), list( .link.lambda = link.lambda, .n.arg = n.arg ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
lambda = eta2theta(eta, .link.lambda)
temp5 = exp(-lambda)
phi = (1 - temp5 - extra$sumw/extra$narg) / (1 - temp5)
if (any(phi <= 0))
stop("non-positive value(s) of phi")
(1-phi) * lambda
- }, list( .link.lambda=link.lambda ))),
- last=eval(substitute(expression({
+ }, list( .link.lambda = link.lambda ))),
+ last = eval(substitute(expression({
misc$link = c(lambda = .link.lambda)
- if (ncol(x)==1 && dimnames(x)[[2]]=="(Intercept)") {
+ if (ncol(x) == 1 && dimnames(x)[[2]]=="(Intercept)") {
suma = extra$sumw
phi = (1 - temp5[1] - suma/narg) / (1 - temp5[1])
phi = if (phi < 0 || phi>1) NA else phi # phi is a probability
misc$phi = phi
}
- }), list( .link.lambda=link.lambda ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals=FALSE, eta, extra=NULL) {
+ }), list( .link.lambda = link.lambda ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
lambda = eta2theta(eta, .link.lambda)
temp5 = exp(-lambda)
phi = (1 - temp5 - extra$sumw/extra$narg) / (1 - temp5)
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dzipois(x=y, phi=phi, lambda=lambda, log=TRUE))
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * dzipois(x=y, phi = phi, lambda = lambda, log = TRUE))
}
- }, list( .link.lambda=link.lambda ))),
- vfamily=c("yip88"),
- deriv=eval(substitute(expression({
+ }, list( .link.lambda = link.lambda ))),
+ vfamily = c("yip88"),
+ deriv = eval(substitute(expression({
lambda = eta2theta(eta, .link.lambda)
temp5 = exp(-lambda)
dl.dlambda = -1 + y/lambda - temp5/(1-temp5)
dlambda.deta = dtheta.deta(lambda, .link.lambda)
w * dl.dlambda * dlambda.deta
- }), list( .link.lambda=link.lambda ))),
- weight=eval(substitute(expression({
+ }), list( .link.lambda = link.lambda ))),
+ weight = eval(substitute(expression({
d2lambda.deta2 = d2theta.deta2(lambda, .link.lambda)
d2l.dlambda2 = -y / lambda^2 + temp5 / (1-temp5)^2
-w * (d2l.dlambda2*dlambda.deta^2 + dl.dlambda*d2lambda.deta2)
- }), list( .link.lambda=link.lambda ))))
+ }), list( .link.lambda = link.lambda ))))
}
- zapoisson = function(lp0="logit", llambda="loge",
- ep0=list(), elambda=list(), zero=NULL) {
+ zapoisson = function(lp0 = "logit", llambda = "loge",
+ ep0 = list(), elambda = list(), zero = NULL) {
if (mode(lp0) != "character" && mode(lp0) != "name")
lp0 = as.character(substitute(lp0))
if (mode(llambda) != "character" && mode(llambda) != "name")
@@ -347,19 +349,19 @@ rzipois = function(n, lambda, phi=0) {
if (!is.list(elambda)) elambda = list()
new("vglmff",
- blurb=c("Zero-altered Poisson ",
- "(binomial and positive-Poisson conditional model)\n\n",
- "Links: ",
- namesof("p0", lp0, earg=ep0, tag=FALSE), ", ",
- namesof("lambda", llambda, earg= elambda, tag=FALSE),
- "\n"),
- constraints=eval(substitute(expression({
- temp752 = .zero
- if (length(temp752) && all(temp752 == -1))
- temp752 = 2*(1:ncol(y)) - 1
- constraints = cm.zero.vgam(constraints, x, temp752, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ blurb = c("Zero-altered Poisson ",
+ "(binomial and positive-Poisson conditional model)\n\n",
+ "Links: ",
+ namesof("p0", lp0, earg = ep0, tag = FALSE), ", ",
+ namesof("lambda", llambda, earg = elambda, tag = FALSE), "\n"),
+
+ constraints = eval(substitute(expression({
+
+ dotzero <- .zero
+ Musual <- 2
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
y = as.matrix(y)
if (any(y != round(y )))
stop("the response must be integer-valued")
@@ -370,14 +372,17 @@ rzipois = function(n, lambda, phi=0) {
extra$NOS = NOS = ncoly = ncol(y) # Number of species
extra$skip.these = skip.these = matrix(as.logical(y0), n, NOS)
- mynames1 = if (ncoly == 1) "p0" else paste("p0", 1:ncoly, sep = "")
+ mynames1 = if (ncoly == 1) "p0" else
+ paste("p0", 1:ncoly, sep = "")
mynames2 = if (ncoly == 1) "lambda" else
paste("lambda", 1:ncoly, sep = "")
predictors.names =
- c(namesof(mynames1, .lp0, earg = .ep0, tag = FALSE),
+ c(namesof(mynames1, .lp0, earg = .ep0, tag = FALSE),
namesof(mynames2, .llambda, earg = .elambda, tag = FALSE))
+ predictors.names = predictors.names[interleave.VGAM(2*NOS, M = 2)]
+
if (!length(etastart)) {
- etastart = cbind(theta2eta((0.5+w*y0)/(1+w), .lp0, earg= .ep0 ),
+ etastart = cbind(theta2eta((0.5+w*y0)/(1+w), .lp0, earg = .ep0 ),
matrix(1, n, NOS)) # 1 here is any old value
for(spp. in 1:NOS) {
sthese = skip.these[, spp.]
@@ -385,45 +390,65 @@ rzipois = function(n, lambda, phi=0) {
y[!sthese, spp.] / (-expm1(-y[!sthese, spp.])),
.llambda, earg = .elambda )
}
+ etastart = etastart[, interleave.VGAM(ncol(etastart), M = 2)]
}
}), list( .lp0 = lp0, .llambda = llambda,
.ep0 = ep0, .elambda = elambda ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+ inverse = eval(substitute(function(eta, extra = NULL) {
NOS = extra$NOS
- p0 = eta2theta(eta[, 1:NOS], .lp0, earg = .ep0)
- lambda = eta2theta(eta[, NOS+(1:NOS)], .llambda, earg= .elambda)
- (1-p0) * lambda / (-expm1(-lambda))
- }, list( .lp0=lp0, .llambda=llambda, .ep0= ep0, .elambda= elambda ))),
- last=eval(substitute(expression({
- misc$link = c(rep( .lp0, len = NOS), rep( .llambda, len = NOS))
- names(misc$link) = c(mynames1, mynames2)
+
+
+ p0 = cbind(eta2theta(eta[, 2*(1:NOS)-1, drop = FALSE],
+ .lp0, earg = .ep0))
+ lambda = cbind(eta2theta(eta[, 2*(1:NOS)-0, drop = FALSE],
+ .llambda, earg = .elambda ))
+
+ (1 - p0) * lambda / (-expm1(-lambda))
+ }, list( .lp0 = lp0, .llambda = llambda,
+ .ep0 = ep0, .elambda = elambda ))),
+ last = eval(substitute(expression({
+ temp.names = c(rep( .lp0, len = NOS),
+ rep( .llambda, len = NOS))
+ temp.names = temp.names[interleave.VGAM(2*NOS, M = 2)]
+ misc$link = temp.names
misc$earg = vector("list", 2 * NOS)
- names(misc$earg) = c(mynames1, mynames2)
+ names(misc$link) <-
+ names(misc$earg) <-
+ c(mynames1, mynames2)[interleave.VGAM(2*NOS, M = 2)]
for(ii in 1:NOS) {
- misc$earg[[ ii]] = .ep0
- misc$earg[[NOS + ii]] = .elambda
+ misc$earg[[2*ii-1]] = .ep0
+ misc$earg[[2*ii ]] = .elambda
}
}), list( .lp0 = lp0, .llambda = llambda,
.ep0 = ep0, .elambda = elambda ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals=FALSE, eta,extra=NULL) {
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
NOS = extra$NOS
- p0 = cbind(eta2theta(eta[, 1:NOS], .lp0, earg = .ep0))
- lambda = cbind(eta2theta(eta[, NOS+(1:NOS)], .llambda, earg = .elambda ))
+
+ p0 = cbind(eta2theta(eta[, 2*(1:NOS)-1, drop = FALSE],
+ .lp0, earg = .ep0))
+ lambda = cbind(eta2theta(eta[, 2*(1:NOS)-0, drop = FALSE],
+ .llambda, earg = .elambda ))
+
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
sum(w * dzapois(x = y, p0 = p0, lambda = lambda, log = TRUE))
}
}, list( .lp0 = lp0, .llambda = llambda,
.ep0 = ep0, .elambda = elambda ))),
- vfamily=c("zapoisson"),
- deriv=eval(substitute(expression({
+ vfamily = c("zapoisson"),
+ deriv = eval(substitute(expression({
NOS = extra$NOS
y0 = extra$y0
skip = extra$skip.these
- p0 = cbind(eta2theta(eta[, 1:NOS], .lp0, earg = .ep0))
- lambda = cbind(eta2theta(eta[, NOS+(1:NOS)], .llambda, earg = .elambda))
- dl.dlambda = y/lambda - 1 - 1 / expm1(lambda)
+
+ p0 = cbind(eta2theta(eta[, 2*(1:NOS)-1, drop = FALSE],
+ .lp0, earg = .ep0))
+ lambda = cbind(eta2theta(eta[, 2*(1:NOS)-0, drop = FALSE],
+ .llambda, earg = .elambda ))
+
+
+ dl.dlambda = y / lambda - 1 - 1 / expm1(lambda)
for(spp. in 1:NOS)
dl.dlambda[skip[, spp.], spp.] = 0
dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda)
@@ -433,9 +458,13 @@ rzipois = function(n, lambda, phi=0) {
} else
w * dtheta.deta(mup0, link = .lp0, earg = .ep0) *
(y0 / mup0 - 1) / (1 - mup0)
- cbind(temp3, w * dl.dlambda * dlambda.deta)
- }), list( .lp0 = lp0, .llambda = llambda, .ep0 = ep0, .elambda = elambda ))),
- weight=eval(substitute(expression({
+ ans <- cbind(temp3,
+ w * dl.dlambda * dlambda.deta)
+ ans = ans[, interleave.VGAM(ncol(ans), M = 2)]
+ ans
+ }), list( .lp0 = lp0, .llambda = llambda,
+ .ep0 = ep0, .elambda = elambda ))),
+ weight = eval(substitute(expression({
wz = matrix( 10 * .Machine$double.eps^(3/4), n, 2*NOS)
for(spp. in 1:NOS) {
sthese = skip[, spp.]
@@ -450,7 +479,7 @@ rzipois = function(n, lambda, phi=0) {
tmp200 = if ( .lp0 == "logit") {
cbind(w * tmp100)
} else {
- cbind(w * dtheta.deta(mup0, link= .lp0, earg= .ep0)^2 / tmp100)
+ cbind(w * dtheta.deta(mup0, link= .lp0, earg = .ep0)^2 / tmp100)
}
for(ii in 1:NOS) {
index200 = abs(tmp200[, ii]) < .Machine$double.eps
@@ -459,31 +488,37 @@ rzipois = function(n, lambda, phi=0) {
}
}
wz[, 1:NOS] = tmp200
+
+ wz = wz[, interleave.VGAM(ncol(wz), M = 2)]
+
wz
}), list( .lp0 = lp0, .ep0 = ep0 ))))
-}
+} # End of zapoisson
+
- zanegbinomial = function(lp0="logit", lmunb = "loge", lk = "loge",
- ep0=list(), emunb =list(), ek = list(),
+
+
+ zanegbinomial = function(lp0 = "logit", lmunb = "loge", lk = "loge",
+ ep0 = list(), emunb = list(), ek = list(),
ipnb0 = NULL, ik = NULL, zero = -3,
- cutoff = 0.995, method.init=1,
- shrinkage.init=0.95)
+ cutoff = 0.995, method.init = 1,
+ shrinkage.init = 0.95)
{
- if (!is.Numeric(cutoff, positiv=TRUE, allow=1) || cutoff<0.8 || cutoff>=1)
+ if (!is.Numeric(cutoff, positiv=TRUE, allow = 1) || cutoff<0.8 || cutoff>=1)
stop("range error in the argument 'cutoff'")
if (length(ipnb0) && (!is.Numeric(ipnb0, positiv=TRUE) ||
max(ipnb0) >= 1))
stop("If given, 'ipnb0' must contain values in (0,1) only")
if (length(ik) && !is.Numeric(ik, positiv=TRUE))
stop("If given, 'ik' must contain positive values only")
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
method.init > 2) stop("argument 'method.init' must be 1 or 2")
- if (!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
+ if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
if (mode(lmunb) != "character" && mode(lmunb) != "name")
@@ -497,20 +532,20 @@ rzipois = function(n, lambda, phi=0) {
if (!is.list(ek)) ek = list()
new("vglmff",
- blurb=c("Zero-altered negative binomial (binomial and\n",
- "positive-negative binomial conditional model)\n\n",
- "Links: ",
- namesof("p0", lp0, earg= ep0, tag=FALSE), ", ",
- namesof("munb", lmunb, earg= emunb, tag=FALSE), ", ",
- namesof("k", lk, earg= ek, tag=FALSE), "\n",
- "Mean: (1-p0) * munb / [1 - (k/(k+munb))^k]"),
- constraints=eval(substitute(expression({
- temp752 = .zero
- if (length(temp752) && all(temp752 == -3))
- temp752 = 3*(1:ncol(y))
- constraints = cm.zero.vgam(constraints, x, temp752, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ blurb = c("Zero-altered negative binomial (binomial and\n",
+ "positive-negative binomial conditional model)\n\n",
+ "Links: ",
+ namesof("p0", lp0, earg = ep0, tag = FALSE), ", ",
+ namesof("munb", lmunb, earg = emunb, tag = FALSE), ", ",
+ namesof("k", lk, earg = ek, tag = FALSE), "\n",
+ "Mean: (1-p0) * munb / [1 - (k/(k+munb))^k]"),
+ constraints = eval(substitute(expression({
+
+ dotzero <- .zero
+ Musual <- 3
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
y = as.matrix(y)
extra$NOS = NOS = ncoly = ncol(y) # Number of species
M = 3 * ncoly #
@@ -519,15 +554,15 @@ rzipois = function(n, lambda, phi=0) {
if (any(y < 0))
stop("the response must not have negative values")
- mynames1 = if (NOS==1) "p0" else paste("p0", 1:NOS, sep="")
- mynames2 = if (NOS==1) "munb" else paste("munb", 1:NOS, sep="")
- mynames3 = if (NOS==1) "k" else paste("k", 1:NOS, sep="")
+ mynames1 = if (NOS == 1) "p0" else paste("p0", 1:NOS, sep = "")
+ mynames2 = if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = "")
+ mynames3 = if (NOS == 1) "k" else paste("k", 1:NOS, sep = "")
predictors.names =
- c(namesof(mynames1, .lp0, earg= .ep0, tag= FALSE),
- namesof(mynames2, .lmunb, earg= .emunb, tag= FALSE),
- namesof(mynames3, .lk, earg= .ek, tag= FALSE))
- predictors.names = predictors.names[interleave.VGAM(3*NOS, M=3)]
- extra$y0 = y0 = ifelse(y==0, 1, 0)
+ c(namesof(mynames1, .lp0, earg = .ep0, tag = FALSE),
+ namesof(mynames2, .lmunb, earg = .emunb, tag = FALSE),
+ namesof(mynames3, .lk, earg = .ek, tag = FALSE))
+ predictors.names = predictors.names[interleave.VGAM(3*NOS, M = 3)]
+ extra$y0 = y0 = ifelse(y == 0, 1, 0)
extra$skip.these = skip.these = matrix(as.logical(y0), n, NOS)
if (!length(etastart)) {
@@ -564,7 +599,7 @@ rzipois = function(n, lambda, phi=0) {
} else {
posnegbinomial.Loglikfun = function(kmat, y, x, w, extraargs) {
munb = extraargs
- sum(w * dposnegbin(x=y, munb=munb, size=kmat, log=TRUE))
+ sum(w * dposnegbin(x=y, munb=munb, size=kmat, log = TRUE))
}
k.grid = 2^((-6):6)
kmat0 = matrix(0, nr=n, nc=NOS)
@@ -578,30 +613,33 @@ rzipois = function(n, lambda, phi=0) {
}
}
- etastart = cbind(theta2eta(pnb0, .lp0, earg= .ep0 ),
- theta2eta(mu.init, .lmunb, earg= .emunb),
- theta2eta(kmat0, .lk, earg= .ek ))
- etastart = etastart[,interleave.VGAM(ncol(etastart),M=3)]
+ etastart = cbind(theta2eta(pnb0, .lp0, earg = .ep0 ),
+ theta2eta(mu.init, .lmunb, earg = .emunb),
+ theta2eta(kmat0, .lk, earg = .ek ))
+ etastart = etastart[, interleave.VGAM(ncol(etastart), M = 3)]
}
- }), list( .lp0=lp0, .lmunb=lmunb, .lk=lk,
+ }), list( .lp0 = lp0, .lmunb = lmunb, .lk = lk,
+ .ep0 = ep0, .emunb = emunb, .ek = ek,
.ipnb0=ipnb0, .ik=ik,
- .ep0=ep0, .emunb=emunb, .ek=ek,
- .method.init=method.init, .sinit=shrinkage.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- NOS = extra$NOS
- p0 = eta2theta(eta[,3*(1:NOS)-2], .lp0, earg= .ep0 )
- munb = eta2theta(eta[,3*(1:NOS)-1,drop=FALSE], .lmunb, earg= .emunb )
- kmat = eta2theta(eta[,3*(1:NOS),drop=FALSE], .lk, earg= .ek )
- pnb0 = (kmat / (kmat + munb))^kmat # p(0) from negative binomial
+ .method.init = method.init, .sinit = shrinkage.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ NOS <- extra$NOS
+ p0 <- eta2theta(eta[,3*(1:NOS)-2],
+ .lp0, earg = .ep0 )
+ munb <- eta2theta(eta[,3*(1:NOS)-1, drop = FALSE],
+ .lmunb, earg = .emunb )
+ kmat <- eta2theta(eta[,3*(1:NOS), drop = FALSE],
+ .lk, earg = .ek )
+ pnb0 <- (kmat / (kmat + munb))^kmat # p(0) from negative binomial
(1 - p0) * munb / (1 - pnb0)
- }, list( .lp0=lp0, .lk=lk, .lmunb=lmunb,
- .ep0=ep0, .emunb=emunb, .ek=ek ))),
- last=eval(substitute(expression({
+ }, list( .lp0 = lp0, .lk = lk, .lmunb = lmunb,
+ .ep0 = ep0, .emunb = emunb, .ek = ek ))),
+ last = eval(substitute(expression({
misc$link = c(rep( .lp0, length=NOS),
rep( .lmunb, length=NOS),
rep( .lk, length=NOS))
temp.names = c(mynames1, mynames2, mynames3)
- temp.names = temp.names[interleave.VGAM(3*NOS, M=3)]
+ temp.names = temp.names[interleave.VGAM(3*NOS, M = 3)]
names(misc$link) = temp.names
misc$earg = vector("list", 3*NOS)
names(misc$earg) = temp.names
@@ -612,27 +650,28 @@ rzipois = function(n, lambda, phi=0) {
}
misc$cutoff = .cutoff
misc$method.init = .method.init
- }), list( .lp0=lp0, .lmunb=lmunb, .lk=lk, .cutoff=cutoff,
- .ep0=ep0, .emunb=emunb, .ek=ek,
- .method.init=method.init ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals=FALSE, eta,extra=NULL) {
+ }), list( .lp0 = lp0, .lmunb = lmunb, .lk = lk, .cutoff = cutoff,
+ .ep0 = ep0, .emunb = emunb, .ek = ek,
+ .method.init = method.init ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
NOS = extra$NOS
- p0 = eta2theta(eta[,3*(1:NOS)-2,drop=FALSE], .lp0, earg= .ep0 )
- munb = eta2theta(eta[,3*(1:NOS)-1,drop=FALSE], .lmunb, earg= .emunb )
- kmat = eta2theta(eta[,3*(1:NOS),drop=FALSE], .lk, earg= .ek )
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dzanegbin(x=y, p0=p0, munb=munb, size=kmat, log=TRUE))
+ p0 = eta2theta(eta[,3*(1:NOS)-2, drop = FALSE], .lp0, earg = .ep0 )
+ munb = eta2theta(eta[,3*(1:NOS)-1, drop = FALSE], .lmunb, earg = .emunb )
+ kmat = eta2theta(eta[,3*(1:NOS), drop = FALSE], .lk, earg = .ek )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * dzanegbin(x=y, p0=p0, munb=munb, size=kmat, log = TRUE))
}
- }, list( .lp0=lp0, .lmunb=lmunb, .lk=lk,
- .ep0=ep0, .emunb=emunb, .ek=ek ))),
- vfamily=c("zanegbinomial"),
- deriv=eval(substitute(expression({
+ }, list( .lp0 = lp0, .lmunb = lmunb, .lk = lk,
+ .ep0 = ep0, .emunb = emunb, .ek = ek ))),
+ vfamily = c("zanegbinomial"),
+ deriv = eval(substitute(expression({
NOS = extra$NOS
y0 = extra$y0
- p0 = eta2theta(eta[,3*(1:NOS)-2], .lp0, earg= .ep0 )
- munb = eta2theta(eta[,3*(1:NOS)-1,drop=FALSE], .lmunb, earg= .emunb )
- kmat = eta2theta(eta[,3*(1:NOS),drop=FALSE], .lk, earg= .ek )
+ p0 = eta2theta(eta[,3*(1:NOS)-2], .lp0, earg = .ep0 )
+ munb = eta2theta(eta[,3*(1:NOS)-1, drop = FALSE], .lmunb, earg = .emunb )
+ kmat = eta2theta(eta[,3*(1:NOS), drop = FALSE], .lk, earg = .ek )
skip = extra$skip.these
d3 = deriv3(~ -log(1 - (kmat. /(kmat. + munb. ))^kmat. ),
@@ -643,8 +682,8 @@ rzipois = function(n, lambda, phi=0) {
kmat. = kmat[,spp.]
munb. = munb[,spp.]
eval.d3 = eval(d3) # Evaluated for one species
- dl0.dthetas[,spp.,1] = attr(eval.d3, "gradient")[,1]
- dl0.dthetas[,spp.,2] = attr(eval.d3, "gradient")[,2]
+ dl0.dthetas[,spp.,1] = attr(eval.d3, "gradient")[, 1]
+ dl0.dthetas[,spp.,2] = attr(eval.d3, "gradient")[, 2]
d2l0.dthetas2[,spp.,1] = attr(eval.d3, "hessian")[,1,1]
d2l0.dthetas2[,spp.,2] = attr(eval.d3, "hessian")[,2,2]
d2l0.dthetas2[,spp.,3] = attr(eval.d3, "hessian")[,1,2]
@@ -655,23 +694,23 @@ rzipois = function(n, lambda, phi=0) {
for(spp. in 1:NOS)
dl.dk[skip[,spp.],spp.] = dl.dmunb[skip[,spp.],spp.] = 0
- dmunb.deta = dtheta.deta(munb, .lmunb, earg= .emunb )
- dk.deta = dtheta.deta(kmat, .lk, earg= .ek )
+ dmunb.deta = dtheta.deta(munb, .lmunb, earg = .emunb )
+ dk.deta = dtheta.deta(kmat, .lk, earg = .ek )
myderiv = w * cbind(dl.dmunb * dmunb.deta, dl.dk * dk.deta)
mup0 = p0
temp3 = if ( .lp0 == "logit") {
w * (y0 - mup0)
} else
- w * dtheta.deta(mup0, link= .lp0, earg= .ep0 ) *
+ w * dtheta.deta(mup0, link= .lp0, earg = .ep0 ) *
(y0/mup0 - 1) / (1-mup0)
ans = cbind(temp3, myderiv)
- ans = ans[,interleave.VGAM(ncol(ans), M=3)]
+ ans = ans[, interleave.VGAM(ncol(ans), M = 3)]
ans
- }), list( .lp0=lp0, .lmunb=lmunb, .lk=lk,
- .ep0=ep0, .emunb=emunb, .ek=ek ))),
- weight=eval(substitute(expression({
+ }), list( .lp0 = lp0, .lmunb = lmunb, .lk = lk,
+ .ep0 = ep0, .emunb = emunb, .ek = ek ))),
+ weight = eval(substitute(expression({
wz = matrix(0, n, 6*NOS-1) # wz is not 'diagonal'
pnb0 = (kmat / (kmat + munb))^kmat
ed2l.dmunb2 = (1/munb - (munb + kmat*(1-pnb0))/(munb +
@@ -700,7 +739,7 @@ rzipois = function(n, lambda, phi=0) {
tmp200 = if (.lp0 == "logit") {
cbind(w * tmp100)
} else {
- cbind(w * dtheta.deta(mup0, link= .lp0, earg= .ep0 )^2 / tmp100)
+ cbind(w * dtheta.deta(mup0, link= .lp0, earg = .ep0 )^2 / tmp100)
}
for(ii in 1:NOS) {
index200 = abs(tmp200[,ii]) < .Machine$double.eps
@@ -717,7 +756,7 @@ rzipois = function(n, lambda, phi=0) {
}
wz
- }), list( .lp0=lp0, .ep0=ep0, .cutoff=cutoff ))))
+ }), list( .lp0 = lp0, .ep0 = ep0, .cutoff = cutoff ))))
}
@@ -725,11 +764,11 @@ rzipois = function(n, lambda, phi=0) {
if (FALSE)
rposnegbin = function(n, munb, k) {
- if (!is.Numeric(k, posit=TRUE))
+ if (!is.Numeric(k, posit = TRUE))
stop("argument 'k' must be positive")
- if (!is.Numeric(munb, posit=TRUE))
+ if (!is.Numeric(munb, posit = TRUE))
stop("argument 'munb' must be positive")
- if (!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
stop("argument 'n' must be a positive integer")
ans = rnbinom(n=n, mu=munb, size=k)
munb = rep(munb, len=n)
@@ -746,13 +785,13 @@ rposnegbin = function(n, munb, k) {
if (FALSE)
-dposnegbin = function(x, munb, k, log=FALSE) {
- if (!is.Numeric(k, posit=TRUE))
+dposnegbin = function(x, munb, k, log = FALSE) {
+ if (!is.Numeric(k, posit = TRUE))
stop("argument 'k' must be positive")
- if (!is.Numeric(munb, posit=TRUE))
+ if (!is.Numeric(munb, posit = TRUE))
stop("argument 'munb' must be positive")
ans = dnbinom(x=x, mu=munb, size=k, log=log)
- ans0 = dnbinom(x=0, mu=munb, size=k, log=FALSE)
+ ans0 = dnbinom(x=0, mu=munb, size=k, log = FALSE)
ans = if (log) ans - log1p(-ans0) else ans/(1-ans0)
ans[x == 0] = if (log) -Inf else 0
ans
@@ -760,112 +799,149 @@ dposnegbin = function(x, munb, k, log=FALSE) {
- zipoisson = function(lphi="logit", llambda="loge",
- ephi=list(), elambda =list(),
- iphi=NULL, method.init=1,
- shrinkage.init=0.8, zero=NULL)
+
+
+
+ zipoisson = function(lphi = "logit", llambda = "loge",
+ ephi = list(), elambda = list(),
+ iphi = NULL, ilambda = NULL, method.init = 1,
+ shrinkage.init = 0.8, zero = NULL)
{
if (mode(lphi) != "character" && mode(lphi) != "name")
lphi = as.character(substitute(lphi))
if (mode(llambda) != "character" && mode(llambda) != "name")
llambda = as.character(substitute(llambda))
if (is.Numeric(iphi))
- if (!is.Numeric(iphi, posit=TRUE) || any(iphi >= 1))
+ if (!is.Numeric(iphi, posit = TRUE) || any(iphi >= 1))
stop("'iphi' values must be inside the interval (0,1)")
+ if (is.Numeric(ilambda))
+ if (!is.Numeric(ilambda, posit = TRUE))
+ stop("'ilambda' values must be positive")
if (!is.list(ephi)) ephi = list()
if (!is.list(elambda)) elambda = list()
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
method.init > 2) stop("argument 'method.init' must be 1 or 2")
- if (!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
+ if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
new("vglmff",
- blurb=c("Zero-inflated Poisson\n\n",
- "Links: ", namesof("phi", lphi, earg= ephi), ", ",
- namesof("lambda", llambda, earg= elambda), "\n",
- "Mean: (1-phi)*lambda"),
- constraints=eval(substitute(expression({
+ blurb = c("Zero-inflated Poisson\n\n",
+ "Links: ",
+ namesof("phi", lphi, earg = ephi), ", ",
+ namesof("lambda", llambda, earg = elambda), "\n",
+ "Mean: (1-phi)*lambda"),
+ constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero=zero ))),
- initialize=eval(substitute(expression({
- if (ncol(as.matrix(y)) != 1) stop("multivariate responses not allowed")
+ initialize = eval(substitute(expression({
+ if (ncol(as.matrix(y)) != 1)
+ stop("multivariate responses not allowed")
if (any(round(y) != y))
- stop("integer-valued responses only allowed for the zipoisson family")
+ stop("integer-valued responses only allowed for ",
+ "the 'zipoisson' family")
+
predictors.names = c(
- namesof("phi", .lphi, earg= .ephi, tag=FALSE),
- namesof("lambda", .llambda, earg= .ephi, tag=FALSE))
+ namesof("phi", .lphi, earg = .ephi, tag = FALSE),
+ namesof("lambda", .llambda, earg = .ephi, tag = FALSE))
+
if (!length(etastart)) {
- phi.init = if (length( .iphi)) .iphi else {
- sum(w[y == 0]) / sum(w)
+ phi.init = if (length( .iphi )) .iphi else {
+ 0.5 * sum(w[y == 0]) / sum(w)
}
phi.init[phi.init <= 0.02] = 0.02 # Last resort
phi.init[phi.init >= 0.98] = 0.98 # Last resort
- if ( .method.init == 2) {
- mymean = weighted.mean(y[y>0], w[y>0]) + 1/16
+
+
+ if ( length( .ilambda )) {
+ lambda.init = rep( .ilambda, len = n)
+ } else if ( length(mustart)) {
+ lambda.init = mustart / (1 - phi.init)
+ } else if ( .method.init == 2) {
+ mymean = weighted.mean(y[y > 0], w[y > 0]) + 1/16
lambda.init = (1 - .sinit) * (y + 1/8) + .sinit * mymean
} else {
use.this = median(y[y > 0]) + 1 / 16
lambda.init = (1 - .sinit) * (y + 1/8) + .sinit * use.this
}
- etastart = cbind(theta2eta(rep(phi.init, len=n), .lphi, .ephi ),
- theta2eta(lambda.init, .llambda, .ephi ))
+
+
+
+ zipois.Loglikfun = function(phival, y, x, w, extraargs) {
+ sum(w * dzipois(x=y, phi = phival,
+ lambda = extraargs$lambda,
+ log = TRUE))
+ }
+ phi.grid = seq(0.02, 0.98, len = 21)
+ init.phi = getMaxMin(phi.grid,
+ objfun=zipois.Loglikfun,
+ y=y, x=x, w=w,
+ extraargs= list(lambda = lambda.init))
+ phi.init = if (length( .iphi )) .iphi else init.phi
+ if (length(mustart)) {
+ lambda.init = lambda.init / (1 - phi.init)
+ }
+
+ etastart = cbind(theta2eta(rep(phi.init, len = n), .lphi, .ephi ),
+ theta2eta(lambda.init, .llambda, .elambda ))
+ mustart <- NULL # Since etastart has been computed.
}
}), list( .lphi = lphi, .llambda = llambda,
.ephi = ephi, .elambda = elambda,
- .iphi = iphi,
+ .iphi = iphi, .ilambda = ilambda,
.method.init = method.init, .sinit = shrinkage.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- phi = eta2theta(eta[,1], .lphi, earg= .ephi )
- lambda = eta2theta(eta[,2], .llambda, earg= .elambda )
- (1-phi) * lambda
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ phivec = eta2theta(eta[, 1], .lphi, earg = .ephi )
+ lambda = eta2theta(eta[, 2], .llambda, earg = .elambda )
+ (1 - phivec) * lambda
}, list( .lphi = lphi, .llambda = llambda,
.ephi = ephi, .elambda = elambda ))),
- last=eval(substitute(expression({
+ last = eval(substitute(expression({
misc$link <- c("phi" = .lphi, "lambda" = .llambda)
misc$earg <- list("phi" = .ephi, "lambda" = .elambda)
if (intercept.only) {
- phi = eta2theta(eta[1,1], .lphi, earg= .ephi )
- lambda = eta2theta(eta[1,2], .llambda, earg= .elambda )
- misc$prob0 = phi + (1-phi) * exp(-lambda) # P(Y=0)
+ phi = eta2theta(eta[1, 1], .lphi, earg = .ephi )
+ lambda = eta2theta(eta[1, 2], .llambda, earg = .elambda )
+ misc$prob0 = phi + (1 - phi) * exp(-lambda) # P(Y=0)
}
}), list( .lphi = lphi, .llambda = llambda,
.ephi = ephi, .elambda = elambda ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals=FALSE, eta, extra=NULL) {
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
smallno = 100 * .Machine$double.eps
- phi = eta2theta(eta[,1], .lphi, earg= .ephi )
+ phi = eta2theta(eta[, 1], .lphi, earg = .ephi )
phi = pmax(phi, smallno)
phi = pmin(phi, 1.0-smallno)
- lambda = eta2theta(eta[,2], .llambda, earg= .elambda )
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w * dzipois(x=y, phi=phi, lambda=lambda, log=TRUE))
+ lambda = eta2theta(eta[, 2], .llambda, earg = .elambda )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * dzipois(x=y, phi = phi, lambda = lambda, log = TRUE))
}
- }, list( .lphi=lphi, .llambda=llambda,
- .ephi=ephi, .elambda=elambda ))),
- vfamily=c("zipoisson"),
- deriv=eval(substitute(expression({
+ }, list( .lphi = lphi, .llambda = llambda,
+ .ephi = ephi, .elambda = elambda ))),
+ vfamily = c("zipoisson"),
+ deriv = eval(substitute(expression({
smallno = 100 * .Machine$double.eps
- phi = eta2theta(eta[,1], .lphi, earg= .ephi )
+ phi = eta2theta(eta[, 1], .lphi, earg = .ephi )
phi = pmax(phi, smallno)
phi = pmin(phi, 1.0-smallno)
- lambda = eta2theta(eta[,2], .llambda, earg= .elambda )
+ lambda = eta2theta(eta[, 2], .llambda, earg = .elambda )
tmp8 = phi + (1-phi)*exp(-lambda)
index0 = (y == 0)
dl.dphi = -expm1(-lambda) / tmp8
dl.dphi[!index0] = -1 / (1-phi[!index0])
dl.dlambda = -(1-phi) * exp(-lambda) / tmp8
dl.dlambda[!index0] = (y[!index0] - lambda[!index0]) / lambda[!index0]
- dphi.deta = dtheta.deta(phi, .lphi, earg= .ephi)
- dlambda.deta = dtheta.deta(lambda, .llambda, earg= .elambda )
+ dphi.deta = dtheta.deta(phi, .lphi, earg = .ephi)
+ dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda )
ans = w * cbind(dl.dphi * dphi.deta, dl.dlambda * dlambda.deta)
if (.llambda == "loge" && (any(lambda[!index0] < .Machine$double.eps))) {
ans[!index0,2] = w[!index0] * (y[!index0] - lambda[!index0])
}
ans
- }), list( .lphi=lphi, .llambda=llambda,
- .ephi=ephi, .elambda=elambda ))),
- weight=eval(substitute(expression({
- wz = matrix(as.numeric(NA), nrow=n, ncol=dimm(M))
+ }), list( .lphi = lphi, .llambda = llambda,
+ .ephi = ephi, .elambda = elambda ))),
+ weight = eval(substitute(expression({
+ wz = matrix(as.numeric(NA), nrow = n, ncol = dimm(M))
d2l.dphi2 = -expm1(-lambda) / ((1-phi)*tmp8)
d2l.dlambda2 = (1-phi)/lambda - phi*(1-phi)*exp(-lambda) / tmp8
d2l.dphilambda = -exp(-lambda) / tmp8
@@ -883,9 +959,9 @@ dposnegbin = function(x, munb, k, log=FALSE) {
- zibinomial = function(lphi="logit", lmu="logit",
- ephi=list(), emu=list(),
- iphi=NULL, zero=1, mv=FALSE)
+ zibinomial = function(lphi = "logit", lmu = "logit",
+ ephi = list(), emu = list(),
+ iphi = NULL, zero = 1, mv = FALSE)
{
if (as.logical(mv)) stop("argument 'mv' must be FALSE")
if (mode(lphi) != "character" && mode(lphi) != "name")
@@ -893,109 +969,134 @@ dposnegbin = function(x, munb, k, log=FALSE) {
if (mode(lmu) != "character" && mode(lmu) != "name")
lmu = as.character(substitute(lmu))
if (is.Numeric(iphi))
- if (!is.Numeric(iphi, posit=TRUE) || any(iphi >= 1))
+ if (!is.Numeric(iphi, posit = TRUE) || any(iphi >= 1))
stop("'iphi' values must be inside the interval (0,1)")
if (!is.list(ephi)) ephi = list()
if (!is.list(emu)) emu = list()
new("vglmff",
- blurb=c("Zero-inflated binomial\n\n",
- "Links: ", namesof("phi", lphi, earg= ephi ), ", ",
- namesof("mu", lmu, earg= emu ), "\n",
- "Mean: (1-phi) * mu / (1 - (1-mu)^w)"),
- constraints=eval(substitute(expression({
+ blurb = c("Zero-inflated binomial\n\n",
+ "Links: ",
+ namesof("phi", lphi, earg = ephi ), ", ",
+ namesof("mu", lmu, earg = emu ), "\n",
+ "Mean: (1-phi) * mu / (1 - (1-mu)^w)"),
+ constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ initialize = eval(substitute(expression({
+ if (!all(w == 1))
+ extra$orig.w = w
+
+
{
NCOL = function (x)
if (is.array(x) && length(dim(x)) > 1 ||
is.data.frame(x)) ncol(x) else as.integer(1)
if (NCOL(y) == 1) {
- if (is.factor(y)) y = y != levels(y)[1]
+ if (is.factor(y)) y <- y != levels(y)[1]
nn = rep(1, n)
if (!all(y >= 0 & y <= 1))
stop("response values must be in [0, 1]")
- mustart = (0.5 + w * y) / (1 + w)
- no.successes = w * y
- if (any(abs(no.successes - round(no.successes)) > 0.001))
+ if (!length(mustart) && !length(etastart))
+ mustart = (0.5 + w * y) / (1 + w)
+
+
+ no.successes = y
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
stop("Number of successes must be integer-valued")
+
} else if (NCOL(y) == 2) {
- if (any(abs(y - round(y)) > 0.001))
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(y - round(y)) > 1.0e-8))
stop("Count data must be integer-valued")
- nn = y[,1] + y[,2]
- y = ifelse(nn > 0, y[,1]/nn, 0)
- w = w * nn
- mustart = (0.5 + nn * y) / (1 + nn)
- } else
- stop("Response not of the right form (1 or 2 columns required)")
+ y = round(y)
+ nvec = y[, 1] + y[, 2]
+ y = ifelse(nvec > 0, y[, 1] / nvec, 0)
+ w = w * nvec
+ if (!length(mustart) && !length(etastart))
+ mustart = (0.5 + nvec * y) / (1 + nvec)
+ } else {
+ stop("for the binomialff family, response 'y' must be a ",
+ "vector of 0 and 1's\n",
+ "or a factor (first level = fail, other levels = success),\n",
+ "or a 2-column matrix where col 1 is the no. of ",
+ "successes and col 2 is the no. of failures")
+ }
+
}
- predictors.names = c( namesof("phi", .lphi, earg= .ephi, tag=FALSE),
- namesof("mu", .lmu, earg= .emu, tag=FALSE))
- phi.init = if (length( .iphi)) .iphi else {
- sum(w[y==0]) / sum(w)
+ predictors.names =
+ c(namesof("phi", .lphi, earg = .ephi, tag = FALSE),
+ namesof("mu", .lmu, earg = .emu, tag = FALSE))
+ phi.init = if (length( .iphi )) .iphi else {
+ sum(w[y == 0]) / sum(w)
}
phi.init[phi.init <= 0.0] = 0.1 # Last resort
- phi.init[phi.init >= 1.0] = 0.1 # Last resort
- mustart = cbind(rep(phi.init, len=n), mustart) # 1st coln not a real mu
- }), list( .lphi=lphi, .lmu=lmu,
- .ephi=ephi, .emu=emu,
- .iphi=iphi ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
- phi = eta2theta(eta[,1], .lphi, earg= .ephi )
- mubin = eta2theta(eta[,2], .lmu, earg= .emu )
+ phi.init[phi.init >= 1.0] = 0.9 # Last resort
+ if ( length(mustart) && !length(etastart))
+ mustart = cbind(rep(phi.init, len = n),
+ mustart) # 1st coln not a real mu
+ }), list( .lphi = lphi, .lmu = lmu,
+ .ephi = ephi, .emu=emu,
+ .iphi = iphi ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ phi = eta2theta(eta[, 1], .lphi, earg = .ephi )
+ mubin = eta2theta(eta[, 2], .lmu, earg = .emu )
(1-phi) * mubin
- }, list( .lphi=lphi, .lmu=lmu,
- .ephi=ephi, .emu=emu ))),
- last=eval(substitute(expression({
- misc$link = c("phi" = .lphi, "mu" = .lmu)
+ }, list( .lphi = lphi, .lmu = lmu,
+ .ephi = ephi, .emu=emu ))),
+ last = eval(substitute(expression({
+ misc$link = c("phi" = .lphi, "mu" = .lmu)
misc$earg = list("phi" = .ephi, "mu" = .emu )
- if (intercept.only && all(w==w[1])) {
- phi = eta2theta(eta[1,1], .lphi, earg= .ephi )
- mubin = eta2theta(eta[1,2], .lmu, earg= .emu )
+ if (intercept.only && all(w == w[1])) {
+ phi = eta2theta(eta[1,1], .lphi, earg = .ephi )
+ mubin = eta2theta(eta[1,2], .lmu, earg = .emu )
misc$p0 = phi + (1-phi) * (1-mubin)^w[1] # P(Y=0)
}
- }), list( .lphi=lphi, .lmu=lmu,
- .ephi=ephi, .emu=emu ))),
- link=eval(substitute(function(mu, extra=NULL) {
- cbind(theta2eta(mu[,1], .lphi, earg= .ephi ),
- theta2eta(mu[,2], .lmu, earg= .emu ))
- }, list( .lphi=lphi, .lmu=lmu,
- .ephi=ephi, .emu=emu ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals=FALSE, eta, extra=NULL) {
- phi = eta2theta(eta[,1], .lphi, earg= .ephi )
- mubin = eta2theta(eta[,2], .lmu, earg= .emu )
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(dzibinom(x=round(w*y), size=w, prob=mubin, log=TRUE, phi=phi))
+ }), list( .lphi = lphi, .lmu = lmu,
+ .ephi = ephi, .emu = emu ))),
+ link = eval(substitute(function(mu, extra = NULL) {
+ cbind(theta2eta(mu[, 1], .lphi, earg = .ephi ),
+ theta2eta(mu[, 2], .lmu, earg = .emu ))
+ }, list( .lphi = lphi, .lmu = lmu,
+ .ephi = ephi, .emu=emu ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ phi = eta2theta(eta[, 1], .lphi, earg = .ephi )
+ mubin = eta2theta(eta[, 2], .lmu, earg = .emu )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(dzibinom(x=round(w*y), size=w, prob=mubin, log = TRUE, phi = phi))
}
- }, list( .lphi=lphi, .lmu=lmu,
- .ephi=ephi, .emu=emu ))),
- vfamily=c("zibinomial"),
- deriv=eval(substitute(expression({
- phi = eta2theta(eta[,1], .lphi, earg= .ephi )
- mubin = eta2theta(eta[,2], .lmu, earg= .emu )
+ }, list( .lphi = lphi, .lmu = lmu,
+ .ephi = ephi, .emu=emu ))),
+ vfamily = c("zibinomial"),
+ deriv = eval(substitute(expression({
+ phi = eta2theta(eta[, 1], .lphi, earg = .ephi )
+ mubin = eta2theta(eta[, 2], .lmu, earg = .emu )
prob0 = (1-mubin)^w # Actually q^w
tmp8 = phi + (1-phi)*prob0
- index = (y==0)
+ index = (y == 0)
dl.dphi = (1-prob0) / tmp8
dl.dphi[!index] = -1 / (1-phi[!index])
dl.dmubin = -w * (1-phi) * (1-mubin)^(w-1) / tmp8
dl.dmubin[!index] = w[!index] * (y[!index]/mubin[!index] -
(1-y[!index]) / (1-mubin[!index]))
- dphi.deta = dtheta.deta(phi, .lphi, earg= .ephi )
- dmubin.deta = dtheta.deta(mubin, .lmu, earg= .emu )
+ dphi.deta = dtheta.deta(phi, .lphi, earg = .ephi )
+ dmubin.deta = dtheta.deta(mubin, .lmu, earg = .emu )
ans = cbind(dl.dphi * dphi.deta, dl.dmubin * dmubin.deta)
if (.lmu == "logit") {
ans[!index,2] = w[!index] * (y[!index] - mubin[!index])
}
ans
- }), list( .lphi=lphi, .lmu=lmu,
- .ephi=ephi, .emu=emu ))),
- weight=eval(substitute(expression({
- wz = matrix(as.numeric(NA), nrow=n, ncol=dimm(M))
+ }), list( .lphi = lphi, .lmu = lmu,
+ .ephi = ephi, .emu=emu ))),
+ weight = eval(substitute(expression({
+ wz = matrix(as.numeric(NA), nrow = n, ncol = dimm(M))
d2l.dphi2 = (1-mubin^w) / ((1-phi) * tmp8)
d2l.dmubin2 = w * (1-phi) * ((1 - mubin * (1-mubin)^(w-1)) /
(mubin*(1-mubin)) - mubin^(w-2) * (w*phi-tmp8) / tmp8)
@@ -1009,32 +1110,32 @@ dposnegbin = function(x, munb, k, log=FALSE) {
wz[ind6,iam(2,2,M)] = .Machine$double.eps
}
wz
- }), list( .lphi=lphi, .lmu=lmu,
- .ephi=ephi, .emu=emu ))))
+ }), list( .lphi = lphi, .lmu = lmu,
+ .ephi = ephi, .emu=emu ))))
}
-dzibinom = function(x, size, prob, log = FALSE, phi=0) {
+dzibinom = function(x, size, prob, log = FALSE, phi = 0) {
if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
rm(log)
LLL = max(length(x), length(size), length(prob), length(phi))
- if (length(x) != LLL) x = rep(x, len=LLL);
- if (length(size) != LLL) size = rep(size, len=LLL);
- if (length(prob) != LLL) prob = rep(prob, len=LLL);
- if (length(phi) != LLL) phi = rep(phi, len=LLL);
- ans = dbinom(x=x, size=size, prob=prob, log=TRUE)
+ if (length(x) != LLL) x = rep(x, len = LLL);
+ if (length(size) != LLL) size = rep(size, len = LLL);
+ if (length(prob) != LLL) prob = rep(prob, len = LLL);
+ if (length(phi) != LLL) phi = rep(phi, len = LLL);
+ ans = dbinom(x=x, size=size, prob=prob, log = TRUE)
if (!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
stop("'phi' must be between 0 and 1 inclusive")
if (log.arg) {
- ifelse(x==0, log(phi + (1-phi) * exp(ans)), log1p(-phi) + ans)
+ ifelse(x == 0, log(phi + (1-phi) * exp(ans)), log1p(-phi) + ans)
} else {
- ifelse(x==0, phi + (1-phi) * exp(ans) , (1-phi) * exp(ans))
+ ifelse(x == 0, phi + (1-phi) * exp(ans) , (1-phi) * exp(ans))
}
}
-pzibinom = function(q, size, prob, lower.tail = TRUE, log.p = FALSE, phi=0) {
+pzibinom = function(q, size, prob, lower.tail = TRUE, log.p = FALSE, phi = 0) {
ans = pbinom(q, size, prob, lower.tail = lower.tail, log.p = log.p)
phi = rep(phi, length=length(ans))
if (!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
@@ -1042,7 +1143,7 @@ pzibinom = function(q, size, prob, lower.tail = TRUE, log.p = FALSE, phi=0) {
phi + (1-phi) * ans
}
-qzibinom = function(p, size, prob, lower.tail = TRUE, log.p = FALSE, phi=0) {
+qzibinom = function(p, size, prob, lower.tail = TRUE, log.p = FALSE, phi = 0) {
nn = max(length(p), length(size), length(prob), length(phi))
p = rep(p, len=nn)
size = rep(size, len=nn)
@@ -1057,8 +1158,8 @@ qzibinom = function(p, size, prob, lower.tail = TRUE, log.p = FALSE, phi=0) {
ans
}
-rzibinom = function(n, size, prob, phi=0) {
- if (!is.Numeric(n, positive=TRUE, integer=TRUE, allow=1))
+rzibinom = function(n, size, prob, phi = 0) {
+ if (!is.Numeric(n, positive=TRUE, integer=TRUE, allow = 1))
stop("n must be a single positive integer")
ans = rbinom(n, size, prob)
phi = rep(phi, len=length(ans))
@@ -1076,7 +1177,9 @@ rzibinom = function(n, size, prob, phi=0) {
-dzinegbin = function(x, phi, size, prob=NULL, munb=NULL, log=FALSE) {
+
+
+dzinegbin = function(x, phi, size, prob = NULL, munb = NULL, log = FALSE) {
if (length(munb)) {
if (length(prob))
stop("'prob' and 'munb' both specified")
@@ -1090,11 +1193,13 @@ dzinegbin = function(x, phi, size, prob=NULL, munb=NULL, log=FALSE) {
if (!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
stop("'phi' must be between 0 and 1 inclusive")
phi = rep(phi, length=length(ans))
- if (log.arg) ifelse(x==0, log(phi+(1-phi)*exp(ans)), log1p(-phi) + ans) else
- ifelse(x==0, phi+(1-phi)* ans, (1-phi) * ans)
+ if (log.arg)
+ ifelse(x == 0, log(phi+(1-phi)*exp(ans)), log1p(-phi) + ans) else
+ ifelse(x == 0, phi+(1-phi)* ans, (1-phi) * ans)
}
-pzinegbin = function(q, phi, size, prob=NULL, munb=NULL) {
+
+pzinegbin = function(q, phi, size, prob = NULL, munb = NULL) {
if (length(munb)) {
if (length(prob))
stop("'prob' and 'munb' both specified")
@@ -1106,17 +1211,18 @@ pzinegbin = function(q, phi, size, prob=NULL, munb=NULL) {
phi + (1-phi) * ans
}
-qzinegbin = function(p, phi, size, prob=NULL, munb=NULL) {
+
+qzinegbin = function(p, phi, size, prob = NULL, munb = NULL) {
if (length(munb)) {
if (length(prob))
stop("'prob' and 'munb' both specified")
prob <- size/(size + munb)
}
LLL = max(length(p), length(prob), length(phi), length(size))
- if (length(p) != LLL) p = rep(p, len=LLL)
- if (length(phi) != LLL) phi = rep(phi, len=LLL);
- if (length(prob) != LLL) prob = rep(prob, len=LLL)
- if (length(size) != LLL) size = rep(size, len=LLL);
+ if (length(p) != LLL) p = rep(p, len = LLL)
+ if (length(phi) != LLL) phi = rep(phi, len = LLL);
+ if (length(prob) != LLL) prob = rep(prob, len = LLL)
+ if (length(size) != LLL) size = rep(size, len = LLL);
if (!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
stop("'phi' must be between 0 and 1 inclusive")
@@ -1128,7 +1234,8 @@ qzinegbin = function(p, phi, size, prob=NULL, munb=NULL) {
ans
}
-rzinegbin = function(n, phi, size, prob=NULL, munb=NULL) {
+
+rzinegbin = function(n, phi, size, prob = NULL, munb = NULL) {
if (length(munb)) {
if (length(prob))
stop("'prob' and 'munb' both specified")
@@ -1136,7 +1243,7 @@ rzinegbin = function(n, phi, size, prob=NULL, munb=NULL) {
}
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
stop("bad input for argument 'n'") else n
ans = rnbinom(n=use.n, size=size, prob=prob)
@@ -1156,11 +1263,11 @@ zinegbinomial.control <- function(save.weight=TRUE, ...)
- zinegbinomial = function(lphi="logit", lmunb = "loge", lk = "loge",
- ephi=list(), emunb =list(), ek = list(),
- iphi = NULL, ik = NULL, zero = -3, method.init=1,
- shrinkage.init=0.95,
- nsimEIM=200)
+ zinegbinomial = function(lphi = "logit", lmunb = "loge", lk = "loge",
+ ephi = list(), emunb = list(), ek = list(),
+ iphi = NULL, ik = NULL, zero = -3,
+ method.init = 1, shrinkage.init = 0.95,
+ nsimEIM = 200)
{
@@ -1168,13 +1275,13 @@ zinegbinomial.control <- function(save.weight=TRUE, ...)
stop("'iphi' must contain values in (0,1)")
if (length(ik) && !is.Numeric(ik, positiv=TRUE))
stop("'ik' must contain positive values only")
- if (!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
method.init > 3) stop("argument 'method.init' must be 1, 2 or 3")
- if (!is.Numeric(nsimEIM, allow=1, integ=TRUE))
+ if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE))
stop("'nsimEIM' must be a positive integer")
if (nsimEIM <= 10)
warning("'nsimEIM' should be greater than 10, say")
- if (!is.Numeric(shrinkage.init, allow=1) || shrinkage.init < 0 ||
+ if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
if (mode(lmunb) != "character" && mode(lmunb) != "name")
@@ -1188,32 +1295,32 @@ zinegbinomial.control <- function(save.weight=TRUE, ...)
if (!is.list(ek)) ek = list()
new("vglmff",
- blurb=c("Zero-inflated negative binomial\n\n",
- "Links: ",
- namesof("phi", lphi, earg= ephi, tag=FALSE), ", ",
- namesof("munb", lmunb, earg= emunb, tag=FALSE), ", ",
- namesof("k", lk, earg= ek, tag=FALSE), "\n",
- "Mean: (1-phi) * munb"),
- constraints=eval(substitute(expression({
- temp752 = .zero
- if (length(temp752) && all(temp752 == -3))
- temp752 = 3*(1:ncol(y))
- constraints = cm.zero.vgam(constraints, x, temp752, M)
- }), list( .zero=zero ))),
- initialize=eval(substitute(expression({
+ blurb = c("Zero-inflated negative binomial\n\n",
+ "Links: ",
+ namesof("phi", lphi , earg = ephi, tag = FALSE), ", ",
+ namesof("munb", lmunb, earg = emunb, tag = FALSE), ", ",
+ namesof("k", lk, earg = ek, tag = FALSE), "\n",
+ "Mean: (1-phi) * munb"),
+ constraints = eval(substitute(expression({
+
+ dotzero <- .zero
+ Musual <- 3
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
y = as.matrix(y)
extra$NOS = NOS = ncoly = ncol(y) # Number of species
if (length(dimnames(y)))
extra$dimnamesy2 = dimnames(y)[[2]]
- mynames1 = if (NOS==1) "phi" else paste("phi", 1:NOS, sep="")
- mynames2 = if (NOS==1) "munb" else paste("munb", 1:NOS, sep="")
- mynames3 = if (NOS==1) "k" else paste("k", 1:NOS, sep="")
+ mynames1 = if (NOS == 1) "phi" else paste("phi", 1:NOS, sep = "")
+ mynames2 = if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = "")
+ mynames3 = if (NOS == 1) "k" else paste("k", 1:NOS, sep = "")
predictors.names =
- c(namesof(mynames1, .lphi, earg= .ephi, tag= FALSE),
- namesof(mynames2, .lmunb, earg= .emunb, tag= FALSE),
- namesof(mynames3, .lk, earg= .ek, tag= FALSE))
- predictors.names = predictors.names[interleave.VGAM(3*NOS, M=3)]
+ c(namesof(mynames1, .lphi, earg = .ephi, tag = FALSE),
+ namesof(mynames2, .lmunb, earg = .emunb, tag = FALSE),
+ namesof(mynames3, .lk, earg = .ek, tag = FALSE))
+ predictors.names = predictors.names[interleave.VGAM(3*NOS, M = 3)]
if (!length(etastart)) {
mu.init = if ( .method.init == 3) {
y + 1/16
@@ -1232,7 +1339,7 @@ zinegbinomial.control <- function(save.weight=TRUE, ...)
matrix( .iphi, n, ncoly, byrow=TRUE) else {
phi.init = y
for(iii in 1:ncol(y))
- phi.init[,iii] = sum(w[y[,iii]==0]) / sum(w)
+ phi.init[,iii] = sum(w[y[,iii] == 0]) / sum(w)
phi.init[phi.init <= 0.02] = 0.02 # Last resort
phi.init[phi.init >= 0.98] = 0.98 # Last resort
phi.init
@@ -1250,7 +1357,7 @@ zinegbinomial.control <- function(save.weight=TRUE, ...)
dnbinom(y[index], mu= muvec[index], size=kval)
ell0 = log(tmp8)
ell1 = log1p(-phivec[!index]) + dnbinom(y[!index],
- mu= muvec[!index], size=kval, log=TRUE)
+ mu= muvec[!index], size=kval, log = TRUE)
sum(w[index] * ell0) + sum(w[!index] * ell1)
}
k.grid = 2^((-6):6)
@@ -1259,37 +1366,38 @@ zinegbinomial.control <- function(save.weight=TRUE, ...)
kay.init[,spp.] = getMaxMin(k.grid,
objfun=zinegbin.Loglikfun,
y=y[,spp.], x=x, w=w,
- extraargs= list(phi=phi.init[,spp.],
+ extraargs= list(phi = phi.init[,spp.],
mu=mu.init[,spp.]))
}
kay.init
}
- etastart = cbind(theta2eta(phi.init, .lphi, earg= .ephi),
- theta2eta(mu.init, .lmunb, earg= .emunb),
- theta2eta(kay.init, .lk, earg= .ek))
- etastart = etastart[,interleave.VGAM(ncol(etastart),M=3)]
+ etastart = cbind(theta2eta(phi.init, .lphi, earg = .ephi),
+ theta2eta(mu.init, .lmunb, earg = .emunb),
+ theta2eta(kay.init, .lk, earg = .ek))
+ etastart = etastart[, interleave.VGAM(ncol(etastart),M = 3)]
}
- }), list( .lphi=lphi, .lmunb=lmunb, .lk=lk,
- .iphi=iphi, .ik=ik,
- .ephi=ephi, .emunb=emunb, .ek=ek,
- .sinit=shrinkage.init,
- .method.init=method.init ))),
- inverse=eval(substitute(function(eta, extra=NULL) {
+ }), list( .lphi = lphi, .lmunb = lmunb, .lk = lk,
+ .ephi = ephi, .emunb = emunb, .ek = ek,
+ .iphi = iphi, .ik = ik,
+ .sinit = shrinkage.init, .method.init = method.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
NOS = extra$NOS
- phi = eta2theta(eta[,3*(1:NOS)-2,drop=FALSE], .lphi, earg= .ephi )
- munb = eta2theta(eta[,3*(1:NOS)-1,drop=FALSE], .lmunb, earg= .emunb )
+ phi = eta2theta(eta[,3*(1:NOS)-2, drop = FALSE],
+ .lphi, earg = .ephi )
+ munb = eta2theta(eta[,3*(1:NOS)-1, drop = FALSE],
+ .lmunb, earg = .emunb )
fv.matrix = (1 - phi) * munb
if (length(extra$dimnamesy2))
- dimnames(fv.matrix) = list(dimnames(phi)[[1]], extra$dimnamesy2)
+ dimnames(fv.matrix) = list(dimnames(phi)[[1]], extra$dimnamesy2)
fv.matrix
- }, list( .lphi=lphi, .lk=lk, .lmunb=lmunb,
- .ephi=ephi, .emunb=emunb, .ek=ek ))),
- last=eval(substitute(expression({
+ }, list( .lphi = lphi, .lk = lk, .lmunb = lmunb,
+ .ephi = ephi, .ek = ek, .emunb = emunb ))),
+ last = eval(substitute(expression({
misc$link = c(rep( .lphi, length=NOS), rep( .lmunb, length=NOS),
rep( .lk, length=NOS))
temp.names = c(mynames1, mynames2, mynames3)
- temp.names = temp.names[interleave.VGAM(3*NOS, M=3)]
+ temp.names = temp.names[interleave.VGAM(3*NOS, M = 3)]
names(misc$link) = temp.names
misc$earg = vector("list", 3*NOS)
names(misc$earg) = temp.names
@@ -1302,40 +1410,43 @@ zinegbinomial.control <- function(save.weight=TRUE, ...)
misc$nsimEIM = .nsimEIM
misc$expected = TRUE
if (intercept.only) {
- phi = eta2theta(eta[1,3*(1:NOS)-2], .lphi, earg= .ephi)
- munb = eta2theta(eta[1,3*(1:NOS)-1], .lmunb, earg= .emunb )
- kval = eta2theta(eta[1,3*(1:NOS)], .lk, earg= .ek)
+ phi = eta2theta(eta[1,3*(1:NOS)-2], .lphi, earg = .ephi)
+ munb = eta2theta(eta[1,3*(1:NOS)-1], .lmunb, earg = .emunb )
+ kval = eta2theta(eta[1,3*(1:NOS)], .lk, earg = .ek)
misc$prob0 = phi + (1-phi) * (kval / (kval + munb))^kval # P(Y=0)
}
- }), list( .lphi=lphi, .lmunb=lmunb, .lk=lk,
- .ephi=ephi, .emunb=emunb, .ek=ek, .nsimEIM=nsimEIM,
- .method.init=method.init ))),
- loglikelihood=eval(substitute(
- function(mu,y,w,residuals=FALSE, eta,extra=NULL) {
+ }), list( .lphi = lphi, .lmunb = lmunb, .lk = lk,
+ .ephi = ephi, .emunb = emunb, .ek = ek,
+ .nsimEIM = nsimEIM, .method.init = method.init ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
NOS = extra$NOS
- phi = eta2theta(eta[,3*(1:NOS)-2,drop=FALSE], .lphi, earg= .ephi )
- munb = eta2theta(eta[,3*(1:NOS)-1,drop=FALSE], .lmunb, earg= .emunb )
- kmat = eta2theta(eta[,3*(1:NOS), drop=FALSE], .lk, earg= .ek )
- if (residuals) stop("loglikelihood residuals not implemented yet") else {
- sum(w*dzinegbin(x=y, phi=phi, munb=munb, size=kmat, log=TRUE))
+ phi = eta2theta(eta[,3*(1:NOS)-2, drop = FALSE], .lphi, earg = .ephi )
+ munb = eta2theta(eta[,3*(1:NOS)-1, drop = FALSE], .lmunb, earg = .emunb )
+ kmat = eta2theta(eta[,3*(1:NOS), drop = FALSE], .lk, earg = .ek )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w*dzinegbin(x=y, phi = phi, munb=munb, size=kmat, log = TRUE))
}
- }, list( .lphi=lphi, .lmunb=lmunb, .lk=lk,
- .ephi=ephi, .emunb=emunb, .ek=ek ))),
- vfamily=c("zinegbinomial"),
- deriv=eval(substitute(expression({
+ }, list( .lphi = lphi, .lmunb = lmunb, .lk = lk,
+ .ephi = ephi, .emunb = emunb, .ek = ek ))),
+ vfamily = c("zinegbinomial"),
+ deriv = eval(substitute(expression({
NOS = extra$NOS
- phi = eta2theta(eta[,3*(1:NOS)-2,drop=FALSE], .lphi, earg= .ephi )
- munb = eta2theta(eta[,3*(1:NOS)-1,drop=FALSE], .lmunb, earg= .emunb )
- kmat = eta2theta(eta[,3*(1:NOS), drop=FALSE], .lk, earg= .ek )
- dphi.deta = dtheta.deta(phi, .lphi, earg= .ephi )
- dmunb.deta = dtheta.deta(munb, .lmunb, earg= .emunb )
- dk.deta = dtheta.deta(kmat, .lk, earg= .ek )
- dthetas.detas = (cbind(dphi.deta, dmunb.deta,
- dk.deta))[,interleave.VGAM(3*NOS, M=3)]
+ phi = eta2theta(eta[,3*(1:NOS)-2, drop = FALSE], .lphi, earg = .ephi )
+ munb = eta2theta(eta[,3*(1:NOS)-1, drop = FALSE], .lmunb, earg = .emunb )
+ kmat = eta2theta(eta[,3*(1:NOS), drop = FALSE], .lk, earg = .ek )
+ dphi.deta = dtheta.deta(phi, .lphi, earg = .ephi )
+ dmunb.deta = dtheta.deta(munb, .lmunb, earg = .emunb )
+ dk.deta = dtheta.deta(kmat, .lk, earg = .ek )
+ dthetas.detas =
+ (cbind(dphi.deta,
+ dmunb.deta,
+ dk.deta))[, interleave.VGAM(3*NOS, M = 3)]
d3 = deriv3(~ log(phi. + (1 - phi.) * (kmat. /(kmat. + munb. ))^kmat.),
c("phi.", "munb.", "kmat."), hessian=FALSE)
- dl.dthetas = matrix(0, n, M) # M=3*NOS; for all species
+ dl.dthetas = matrix(0, n, M) # M = 3*NOS; for all species
for(spp. in 1:NOS) {
index = (y[,spp.] == 0)
if (!sum(index) || !sum(!index))
@@ -1361,18 +1472,18 @@ zinegbinomial.control <- function(save.weight=TRUE, ...)
cbind(dl.dphi, dl.dmunb, dl.dk)
}
w * dl.dthetas * dthetas.detas
- }), list( .lphi=lphi, .lmunb=lmunb, .lk=lk,
- .ephi=ephi, .emunb=emunb, .ek=ek ))),
- weight=eval(substitute(expression({
+ }), list( .lphi = lphi, .lmunb = lmunb, .lk = lk,
+ .ephi = ephi, .emunb = emunb, .ek = ek ))),
+ weight = eval(substitute(expression({
wz = matrix(0, n, 3*(M-1))
- ind8 = iam(NA, NA, M=M, both=TRUE, diag=TRUE)
- ind1 = iam(NA, NA, M=3, both=TRUE, diag=TRUE)
+ ind8 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ ind1 = iam(NA, NA, M = 3, both = TRUE, diag = TRUE)
for(spp. in 1:NOS) {
run.varcov = 0
sdl.dthetas = matrix(0, n, 3)
for(ii in 1:( .nsimEIM )) {
- ysim = rzinegbin(n=n, phi=phi[,spp.],
+ ysim = rzinegbin(n=n, phi = phi[,spp.],
size=kmat[,spp.], mu=munb[,spp.])
index = (ysim == 0)
@@ -1406,14 +1517,257 @@ zinegbinomial.control <- function(save.weight=TRUE, ...)
for(jay in 1:3)
for(kay in jay:3) {
- cptr = iam((spp.-1)*3+jay, (spp.-1)*3+kay, M=M)
- wz[,cptr] = wz1[,iam(jay, kay, M=3)]
+ cptr = iam((spp.-1)*3+jay, (spp.-1)*3+kay, M = M)
+ wz[,cptr] = wz1[,iam(jay, kay, M = 3)]
}
}
w * wz
- }), list( .lphi=lphi, .ephi=ephi, .nsimEIM=nsimEIM ))))
+ }), list( .lphi = lphi, .ephi = ephi, .nsimEIM = nsimEIM ))))
}
+
+
+ zipoissonff <- function(llambda = "loge", lprobp = "logit",
+ elambda = list(), eprobp = list(),
+ ilambda = NULL, iprobp = NULL, method.init = 1,
+ shrinkage.init = 0.8, zero = -2)
+{
+ lprobp. <- lprobp
+ eprobp. <- eprobp
+ iprobp. <- iprobp
+
+ if (mode(llambda) != "character" && mode(llambda) != "name")
+ llambda <- as.character(substitute(llambda))
+ if (mode(lprobp.) != "character" && mode(lprobp.) != "name")
+ lphi <- as.character(substitute(lprobp.))
+
+ if (is.Numeric(ilambda))
+ if (!is.Numeric(ilambda, posit = TRUE))
+ stop("'ilambda' values must be positive")
+ if (is.Numeric(iprobp.))
+ if (!is.Numeric(iprobp., posit = TRUE) ||
+ any(iprobp. >= 1))
+ stop("'iprobp' values must be inside the interval (0,1)")
+ if (!is.list(elambda)) elambda <- list()
+ if (!is.list(eprobp.)) eprobp. <- list()
+
+ if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+ method.init > 2)
+ stop("argument 'method.init' must be 1 or 2")
+ if (!is.Numeric(shrinkage.init, allow = 1) ||
+ shrinkage.init < 0 ||
+ shrinkage.init > 1)
+ stop("bad input for argument 'shrinkage.init'")
+
+ new("vglmff",
+ blurb = c("Zero-inflated Poisson\n\n",
+ "Links: ",
+ namesof("lambda", llambda, earg = elambda), ", ",
+ namesof("probp", lprobp., earg = eprobp.), "\n",
+ "Mean: probp * lambda"),
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 2
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ zero = .zero)
+ }, list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ y <- cbind(y)
+
+ ncoly <- ncol(y)
+ Musual <- 2
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+ if (any(round(y) != y))
+ stop("responses must be integer-valued")
+
+ mynames1 <- paste("lambda", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames2 <- paste("probp", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ c(namesof(mynames1, .llambda, earg = .elambda, tag = FALSE),
+ namesof(mynames2, .lprobp., earg = .eprobp., tag = FALSE))
+ predictors.names <- predictors.names[interleave.VGAM(M, M = ncoly)]
+
+
+ if (!length(etastart)) {
+
+ mat1 <- matrix(if (length( .ilambda )) theta2eta( .ilambda,
+ .llambda, earg = .elambda) else 0,
+ n, ncoly, byrow = TRUE)
+ mat2 <- matrix(if (length( .iprobp. )) theta2eta( .iprobp.,
+ .lprobp., earg = .eprobp.) else 0,
+ n, ncoly, byrow = TRUE)
+
+ for (jay in 1:ncoly) {
+ yjay <- y[, jay]
+
+ Phi.init <- 0.75 * sum(w[yjay > 0]) / sum(w)
+ Phi.init[Phi.init <= 0.02] = 0.02 # Last resort
+ Phi.init[Phi.init >= 0.98] = 0.98 # Last resort
+
+ if ( length(mustart)) {
+ mustart <- matrix(mustart, n, ncoly) # Make sure right size
+ Lambda.init <- mustart / (1 - Phi.init)
+ } else if ( .method.init == 2) {
+ mymean <- weighted.mean(yjay[yjay > 0], w[yjay > 0]) + 1/16
+ Lambda.init <- (1 - .sinit) * (yjay + 1/8) + .sinit * mymean
+ } else {
+ use.this <- median(yjay[yjay > 0]) + 1 / 16
+ Lambda.init <- (1 - .sinit) * (yjay + 1/8) + .sinit * use.this
+ }
+
+ zipois.Loglikfun <- function(phival, y, x, w, extraargs) {
+ sum(w * dzipois(x=y, phi = phival,
+ lambda = extraargs$lambda,
+ log = TRUE))
+ }
+ phi.grid <- seq(0.02, 0.98, len = 21)
+ Phimat.init <- getMaxMin(phi.grid,
+ objfun=zipois.Loglikfun,
+ y=y, x=x, w=w,
+ extraargs = list(lambda = Lambda.init))
+ if (length(mustart)) {
+ Lambda.init <- Lambda.init / (1 - Phimat.init)
+ }
+
+ if (!length( .ilambda ))
+ mat1[, jay] <- Lambda.init
+ if (!length( .iprobp. ))
+ mat2[, jay] <- Phimat.init
+ }
+
+ etastart <- cbind(theta2eta(mat1, .llambda, .elambda ),
+ theta2eta(mat2, .lprobp., .eprobp. ))
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+
+ mustart <- NULL # Since etastart has been computed.
+ }
+ }), list( .lprobp. = lprobp., .llambda = llambda,
+ .eprobp. = eprobp., .elambda = elambda,
+ .iprobp. = iprobp., .ilambda = ilambda,
+ .method.init = method.init, .sinit = shrinkage.init ))),
+ inverse = eval(substitute(function(eta, extra = NULL) {
+ ncoly <- extra$ncoly
+ lambda <- eta2theta(eta[, 2*(1:ncoly) - 1], .llambda, earg = .elambda )
+ probp. <- eta2theta(eta[, 2*(1:ncoly) ], .lprobp., earg = .eprobp. )
+ probp. * lambda
+ }, list( .lprobp. = lprobp., .llambda = llambda,
+ .eprobp. = eprobp., .elambda = elambda ))),
+ last = eval(substitute(expression({
+ Musual <- extra$Musual
+ misc$link <- c(rep( .llambda, length = ncoly),
+ rep( .lprobp., length = ncoly))
+ misc$link <- misc$link[interleave.VGAM(Musual * ncoly, M = Musual)]
+ temp.names <- c(mynames1, mynames2)
+ temp.names <- temp.names[interleave.VGAM(Musual * ncoly, M = Musual)]
+ names(misc$link) <- temp.names
+
+
+ misc$earg <- vector("list", Musual * ncoly)
+ names(misc$earg) <- temp.names
+ for(ii in 1:ncoly) {
+ misc$earg[[Musual*ii-1]] <- .elambda
+ misc$earg[[Musual*ii ]] <- .eprobp.
+ }
+
+ misc$Musual <- Musual
+ misc$method.init <- .method.init
+
+ misc$prob0 <- (1 - probp.) + probp. * exp(-lambda) # P(Y=0)
+ }), list( .lprobp. = lprobp., .llambda = llambda,
+ .eprobp. = eprobp., .elambda = elambda,
+ .method.init = method.init ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ smallno <- 100 * .Machine$double.eps
+ ncoly <- extra$ncoly
+ lambda <- eta2theta(eta[, 2*(1:ncoly) - 1], .llambda, earg = .elambda )
+ probp. <- eta2theta(eta[, 2*(1:ncoly) ], .lprobp., earg = .eprobp. )
+
+ probp. <- pmax(probp., smallno)
+ probp. <- pmin(probp., 1.0 - smallno)
+
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * dzipois(x=y, phi = 1 - probp., lambda = lambda, log = TRUE))
+ }
+ }, list( .lprobp. = lprobp., .llambda = llambda,
+ .eprobp. = eprobp., .elambda = elambda ))),
+ vfamily = c("zipoissonff"),
+ deriv = eval(substitute(expression({
+ Musual <- extra$Musual
+ ncoly <- extra$ncoly
+ lambda <- eta2theta(eta[, 2*(1:ncoly) - 1], .llambda, earg = .elambda )
+ probp. <- eta2theta(eta[, 2*(1:ncoly) ], .lprobp., earg = .eprobp. )
+
+ smallno <- 100 * .Machine$double.eps
+ probp. <- pmax(probp., smallno)
+ probp. <- pmin(probp., 1.0 - smallno)
+
+ dlambda.deta <- dtheta.deta(lambda, .llambda, earg = .elambda )
+ dprobp..deta <- dtheta.deta(probp., .lprobp., earg = .eprobp. )
+
+ tmp8 <- 1 + probp. * expm1(-lambda)
+ ind0 <- (y == 0)
+ dl.dlambda <- -probp. * exp(-lambda) / tmp8
+ dl.dlambda[!ind0] <- (y[!ind0] - lambda[!ind0]) / lambda[!ind0]
+ dl.dprobp. <- expm1(-lambda) / tmp8
+ dl.dprobp.[!ind0] <- 1 / probp.[!ind0]
+
+ ans <- w * cbind(dl.dlambda * dlambda.deta,
+ dl.dprobp. * dprobp..deta)
+ if (FALSE && .llambda == "loge" &&
+ (any(lambda[!ind0] < .Machine$double.eps))) {
+ ans[!ind0, 2] <- w[!ind0] * (y[!ind0] - lambda[!ind0])
+ }
+ ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans
+ }), list( .lprobp. = lprobp., .llambda = llambda,
+ .eprobp. = eprobp., .elambda = elambda ))),
+ weight = eval(substitute(expression({
+
+
+ wz <- matrix(0, nrow = n, ncol = M + M-1)
+ d2l.dlambda2 <- ( probp.) / lambda -
+ probp. * (1 - probp.) * exp(-lambda) / tmp8
+ d2l.dprobp.2 <- -expm1(-lambda) / (( probp.) * tmp8)
+ d2l.dphilambda <- -exp(-lambda) / tmp8
+
+ if (ncoly == 1) { # Make sure these are matrices
+ d2l.dlambda2 <- cbind(d2l.dlambda2)
+ d2l.dprobp.2 <- cbind(d2l.dprobp.2)
+ dlambda.deta <- cbind(dlambda.deta)
+ dprobp..deta <- cbind(dprobp..deta)
+ d2l.dphilambda <- cbind(d2l.dphilambda)
+ }
+
+ for (ii in 1:ncoly) {
+ wz[, iam(2*ii - 1, 2*ii - 1, M)] <- d2l.dlambda2[, ii] *
+ dlambda.deta[, ii]^2
+ wz[, iam(2*ii , 2*ii , M)] <- d2l.dprobp.2[, ii] *
+ dprobp..deta[, ii]^2
+ wz[, iam(2*ii - 1, 2*ii , M)] <- d2l.dphilambda[, ii] *
+ dprobp..deta[, ii] *
+ dlambda.deta[, ii]
+ if (FALSE && .llambda == "loge" &&
+ (any(lambda[!ind0] < .Machine$double.eps))) {
+ ind5 <- !ind0 & (lambda < .Machine$double.eps)
+ if (any(ind5))
+ wz[ind5,iam(1, 1, M)] <- (1 - probp.[ind5]) * .Machine$double.eps
+ }
+ }
+
+ w * wz
+ }), list( .llambda = llambda ))))
+}
+
+
+
diff --git a/R/fitted.vlm.R b/R/fitted.vlm.R
index 7a09e5e..e8d618f 100644
--- a/R/fitted.vlm.R
+++ b/R/fitted.vlm.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/formula.vlm.q b/R/formula.vlm.q
index 3c099bd..19f33be 100644
--- a/R/formula.vlm.q
+++ b/R/formula.vlm.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/generic.q b/R/generic.q
index 057ff49..f78e504 100644
--- a/R/generic.q
+++ b/R/generic.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
add1.vgam <- function(...)
diff --git a/R/links.q b/R/links.q
index 6b53fcf..93867f4 100644
--- a/R/links.q
+++ b/R/links.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -16,16 +17,18 @@
-TypicalVGAMfamilyFunction <- function(lsigma="loge", esigma=list(),
- isigma=NULL, parallel=TRUE,
+TypicalVGAMfamilyFunction <- function(lsigma = "loge", esigma = list(),
+ isigma = NULL, parallel = TRUE,
shrinkage.init = 0.95,
- nointercept = NULL, method.init=1,
- nsimEIM=100, zero=NULL) {
+ nointercept = NULL, method.init = 1,
+ prob.x = c(0.15, 0.85),
+ oim = FALSE,
+ nsimEIM = 100, zero = NULL) {
NULL
}
TypicalVGAMlinkFunction <- function(theta,
- earg=list(), inverse=FALSE, deriv=0, short=TRUE, tag=FALSE) {
+ earg = list(), inverse=FALSE, deriv=0, short=TRUE, tag=FALSE) {
NULL
}
@@ -33,21 +36,21 @@ TypicalVGAMlinkFunction <- function(theta,
namesof <- function(theta,
link,
- earg=list(),
+ earg = list(),
tag=FALSE,
short=TRUE)
{
string <- paste(link,
- "(theta=theta, earg=earg, short=short, tag=tag)", sep="")
+ "(theta=theta, earg=earg, short=short, tag=tag)", sep = "")
calls <- parse(text=string)[[1]]
ans <- eval(calls)
return(ans)
}
-theta2eta <- function(theta, link, earg=list()) {
- string <- paste(link, "(theta=theta, earg=earg)", sep="")
+theta2eta <- function(theta, link, earg = list()) {
+ string <- paste(link, "(theta=theta, earg=earg)", sep = "")
calls <- parse(text=string)[[1]]
eval(calls)
}
@@ -55,7 +58,7 @@ theta2eta <- function(theta, link, earg=list()) {
-eta2theta <- function(theta, link="identity", earg=list()) {
+eta2theta <- function(theta, link = "identity", earg = list()) {
if (is.null(link))
link <- "identity"
@@ -63,7 +66,7 @@ eta2theta <- function(theta, link="identity", earg=list()) {
llink <- length(link)
if (llink == 1) {
- string <- paste(link, "(theta=theta, earg=earg, inverse=TRUE)", sep="")
+ string <- paste(link, "(theta=theta, earg=earg, inverse=TRUE)", sep = "")
calls <- parse(text=string)[[1]]
return(eval(calls))
} else
@@ -78,7 +81,7 @@ eta2theta <- function(theta, link="identity", earg=list()) {
is.list(earg[[iii]])) earg[[iii]] else earg
string = paste(link[iii],
"(theta=theta[,iii], earg=use.earg, inverse=TRUE)",
- sep="")
+ sep = "")
calls <- parse(text=string)[[1]]
ans <- cbind(ans, eval(calls))
}
@@ -93,7 +96,7 @@ eta2theta <- function(theta, link="identity", earg=list()) {
for(iii in 1:llink) {
string = paste(link[iii],
"(theta=theta[iii], earg=earg, inverse=TRUE)",
- sep="")
+ sep = "")
calls <- parse(text=string)[[1]]
ans <- c(ans, eval(calls))
}
@@ -105,18 +108,18 @@ eta2theta <- function(theta, link="identity", earg=list()) {
-dtheta.deta <- function(theta, link, earg=list()) {
+dtheta.deta <- function(theta, link, earg = list()) {
- string <- paste(link, "(theta=theta, earg=earg, deriv=1)", sep="")
+ string <- paste(link, "(theta=theta, earg=earg, deriv=1)", sep = "")
calls <- parse(text=string)[[1]]
eval(calls)
}
-d2theta.deta2 <- function(theta, link, earg=list())
+d2theta.deta2 <- function(theta, link, earg = list())
{
- string <- paste(link, "(theta=theta, earg=earg, deriv=2)", sep="")
+ string <- paste(link, "(theta=theta, earg=earg, deriv=2)", sep = "")
calls <- parse(text=string)[[1]]
eval(calls)
}
@@ -133,13 +136,13 @@ d2theta.deta2 <- function(theta, link, earg=list())
"golf", "polf", "nbolf", "nbolf2")
-loglog <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+loglog <- function(theta, earg = list(), inverse=FALSE, deriv=0,
short=TRUE, tag=FALSE)
{
if (is.character(theta)) {
string <- if (short)
- paste("loglog(",theta,")", sep="") else
- paste("log(log(",theta,"))", sep="")
+ paste("loglog(",theta,")", sep = "") else
+ paste("log(log(",theta,"))", sep = "")
if (tag)
string <- paste("Log-Log:", string)
return(string)
@@ -166,13 +169,13 @@ loglog <- function(theta, earg=list(), inverse=FALSE, deriv=0,
-cloglog <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+cloglog <- function(theta, earg = list(), inverse=FALSE, deriv=0,
short=TRUE, tag=FALSE)
{
if (is.character(theta)) {
string <- if (short)
- paste("cloglog(",theta,")", sep="") else
- paste("log(-log(1-",theta,"))", sep="")
+ paste("cloglog(",theta,")", sep = "") else
+ paste("log(-log(1-",theta,"))", sep = "")
if (tag)
string <- paste("Complementary log-log:", string)
return(string)
@@ -202,13 +205,13 @@ cloglog <- function(theta, earg=list(), inverse=FALSE, deriv=0,
-probit <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+probit <- function(theta, earg = list(), inverse=FALSE, deriv=0,
short=TRUE, tag=FALSE)
{
if (is.character(theta)) {
string <- if (short)
- paste("probit(",theta,")", sep="") else
- paste("qnorm(", theta, ")", sep="")
+ paste("probit(",theta,")", sep = "") else
+ paste("qnorm(", theta, ")", sep = "")
if (tag)
string <- paste("Probit:", string)
return(string)
@@ -262,13 +265,13 @@ probit <- function(theta, earg=list(), inverse=FALSE, deriv=0,
-loge <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+loge <- function(theta, earg = list(), inverse=FALSE, deriv=0,
short=TRUE, tag=FALSE)
{
if (is.character(theta)) {
string <- if (short)
- paste("log(",theta,")", sep="") else
- paste("log(", theta, ")", sep="")
+ paste("log(",theta,")", sep = "") else
+ paste("log(", theta, ")", sep = "")
if (tag)
string <- paste("Log:", string)
return(string)
@@ -292,7 +295,7 @@ loge <- function(theta, earg=list(), inverse=FALSE, deriv=0,
-identity <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+identity <- function(theta, earg = list(), inverse=FALSE, deriv=0,
short=TRUE, tag=FALSE)
{
if (is.character(theta)) {
@@ -315,11 +318,11 @@ identity <- function(theta, earg=list(), inverse=FALSE, deriv=0,
}
}
-nidentity <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+nidentity <- function(theta, earg = list(), inverse=FALSE, deriv=0,
short=TRUE, tag=FALSE)
{
if (is.character(theta)) {
- string <- paste("-", theta, sep="")
+ string <- paste("-", theta, sep = "")
if (tag)
string <- paste("Negative-Identity:", string)
return(string)
@@ -339,11 +342,11 @@ nidentity <- function(theta, earg=list(), inverse=FALSE, deriv=0,
}
-reciprocal <- function(theta, earg=list(), inverse.arg=FALSE, deriv=0,
+reciprocal <- function(theta, earg = list(), inverse.arg=FALSE, deriv=0,
short=TRUE, tag=FALSE)
{
if (is.character(theta)) {
- string <- paste("1/",theta, sep="")
+ string <- paste("1/",theta, sep = "")
if (tag)
string <- paste("Reciprocal:", string)
return(string)
@@ -365,13 +368,13 @@ reciprocal <- function(theta, earg=list(), inverse.arg=FALSE, deriv=0,
}
-nloge <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+nloge <- function(theta, earg = list(), inverse=FALSE, deriv=0,
short=TRUE, tag=FALSE)
{
if (is.character(theta)) {
string <- if (short)
- paste("-log(",theta,")", sep="") else
- paste("-log(", theta, ")", sep="")
+ paste("-log(",theta,")", sep = "") else
+ paste("-log(", theta, ")", sep = "")
if (tag)
string <- paste("Negative log:", string)
return(string)
@@ -394,11 +397,11 @@ nloge <- function(theta, earg=list(), inverse=FALSE, deriv=0,
-nreciprocal <- function(theta, earg=list(), inverse.arg=FALSE, deriv=0,
+nreciprocal <- function(theta, earg = list(), inverse.arg=FALSE, deriv=0,
short=TRUE, tag=FALSE)
{
if (is.character(theta)) {
- string <- paste("-1/",theta, sep="")
+ string <- paste("-1/",theta, sep = "")
if (tag)
string <- paste("Negative reciprocal:", string)
return(string)
@@ -420,12 +423,12 @@ nreciprocal <- function(theta, earg=list(), inverse.arg=FALSE, deriv=0,
}
-natural.ig <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+natural.ig <- function(theta, earg = list(), inverse=FALSE, deriv=0,
short=TRUE, tag=FALSE)
{
if (is.character(theta)) {
- string <- paste("-1/",theta, sep="")
+ string <- paste("-1/",theta, sep = "")
if (tag)
string <- paste("Negative inverse:", string)
return(string)
@@ -448,13 +451,13 @@ natural.ig <- function(theta, earg=list(), inverse=FALSE, deriv=0,
-rhobit <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+rhobit <- function(theta, earg = list(), inverse=FALSE, deriv=0,
short=TRUE, tag=FALSE)
{
if (is.character(theta)) {
string <- if (short)
- paste("rhobit(",theta,")", sep="") else
- paste("log((1+", theta, ")/(1-", theta, "))", sep="")
+ paste("rhobit(",theta,")", sep = "") else
+ paste("log((1+", theta, ")/(1-", theta, "))", sep = "")
if (tag)
string <- paste("Rhobit:", string)
return(string)
@@ -484,13 +487,13 @@ rhobit <- function(theta, earg=list(), inverse=FALSE, deriv=0,
-fisherz <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+fisherz <- function(theta, earg = list(), inverse=FALSE, deriv=0,
short=TRUE, tag=FALSE)
{
if (is.character(theta)) {
string <- if (short)
- paste("fisherz(",theta,")", sep="") else
- paste("(1/2)log((1+", theta, ")/(1-", theta, "))", sep="")
+ paste("fisherz(",theta,")", sep = "") else
+ paste("(1/2)log((1+", theta, ")/(1-", theta, "))", sep = "")
if (tag)
string <- paste("Fisher's Z transformation:", string)
return(string)
@@ -521,7 +524,7 @@ fisherz <- function(theta, earg=list(), inverse=FALSE, deriv=0,
-fsqrt <- function(theta, earg=list(min=0, max=1, mux=sqrt(2)),
+fsqrt <- function(theta, earg = list(min=0, max=1, mux=sqrt(2)),
inverse=FALSE, deriv=0, short=TRUE, tag=FALSE)
{
min=0; max=1; mux=sqrt(2)
@@ -536,11 +539,11 @@ fsqrt <- function(theta, earg=list(min=0, max=1, mux=sqrt(2)),
if (is.character(theta)) {
string <- if (short)
- paste("fsqrt(",theta,")", sep="") else {
+ paste("fsqrt(",theta,")", sep = "") else {
if (abs(mux-sqrt(2)) < 1.0e-10)
- paste("sqrt(2*",theta,") - sqrt(2*(1-",theta,"))", sep="") else
+ paste("sqrt(2*",theta,") - sqrt(2*(1-",theta,"))", sep = "") else
paste(as.character(mux),
- " * (sqrt(",theta,"-",min,") - sqrt(",max,"-",theta,"))", sep="")
+ " * (sqrt(",theta,"-",min,") - sqrt(",max,"-",theta,"))", sep = "")
}
if (tag)
string <- paste("Folded Square Root:", string)
@@ -573,7 +576,7 @@ fsqrt <- function(theta, earg=list(min=0, max=1, mux=sqrt(2)),
-powl <- function(theta, earg=list(power=1), inverse=FALSE, deriv=0,
+powl <- function(theta, earg = list(power=1), inverse=FALSE, deriv=0,
short=TRUE, tag=FALSE)
{
@@ -587,9 +590,9 @@ powl <- function(theta, earg=list(power=1), inverse=FALSE, deriv=0,
if (is.character(theta)) {
string <- if (short)
- paste("powl(",theta,", earg=list(power=", as.character(exponent),
- "))", sep="") else
- paste(theta, "^(", as.character(exponent), ")", sep="")
+ paste("powl(",theta,", earg = list(power = ", as.character(exponent),
+ "))", sep = "") else
+ paste(theta, "^(", as.character(exponent), ")", sep = "")
if (tag)
string <- paste("Power:", string)
return(string)
@@ -615,7 +618,7 @@ powl <- function(theta, earg=list(power=1), inverse=FALSE, deriv=0,
}
-elogit <- function(theta, earg=list(min=0, max=1), inverse=FALSE, deriv=0,
+elogit <- function(theta, earg = list(min=0, max=1), inverse=FALSE, deriv=0,
short=TRUE, tag=FALSE)
{
if (!length(earg) || is.list(earg)) {
@@ -631,11 +634,11 @@ elogit <- function(theta, earg=list(min=0, max=1), inverse=FALSE, deriv=0,
if (is.character(theta)) {
string <- if (short) {
if (A != 0 || B != 1)
- paste("elogit(",theta,", earg=list(min=",A,
- ", max=",B,"))",sep="") else
- paste("elogit(",theta,")",sep="")
+ paste("elogit(",theta,", earg = list(min = ",A,
+ ", max = ",B,"))",sep = "") else
+ paste("elogit(",theta,")",sep = "")
} else
- paste("log((",theta,"-min)/(max-",theta,"))", sep="")
+ paste("log((",theta,"-min)/(max-",theta,"))", sep = "")
if (tag)
string <- paste("Extended logit:", string)
return(string)
@@ -659,13 +662,13 @@ elogit <- function(theta, earg=list(min=0, max=1), inverse=FALSE, deriv=0,
- logit <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+ logit <- function(theta, earg = list(), inverse=FALSE, deriv=0,
short=TRUE, tag=FALSE)
{
if (is.character(theta)) {
string <- if (short)
- paste("logit(",theta,")", sep="") else
- paste("log(",theta,"/(1-",theta,"))", sep="")
+ paste("logit(",theta,")", sep = "") else
+ paste("log(",theta,"/(1-",theta,"))", sep = "")
if (tag)
string <- paste("Logit:", string)
return(string)
@@ -694,13 +697,13 @@ elogit <- function(theta, earg=list(min=0, max=1), inverse=FALSE, deriv=0,
}
-logc <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+logc <- function(theta, earg = list(), inverse=FALSE, deriv=0,
short=TRUE, tag=FALSE)
{
if (is.character(theta)) {
string <- if (short)
- paste("logc(",theta,")", sep="") else
- paste("log(1-",theta,")", sep="")
+ paste("logc(",theta,")", sep = "") else
+ paste("log(1-",theta,")", sep = "")
if (tag)
string <- paste("Log Complementary:", string)
return(string)
@@ -726,7 +729,7 @@ logc <- function(theta, earg=list(), inverse=FALSE, deriv=0,
-logoff <- function(theta, earg=list(offset=0), inverse=FALSE, deriv=0,
+logoff <- function(theta, earg = list(offset=0), inverse=FALSE, deriv=0,
short=TRUE, tag=FALSE)
{
if (!length(earg) || is.list(earg)) {
@@ -741,8 +744,8 @@ logoff <- function(theta, earg=list(offset=0), inverse=FALSE, deriv=0,
if (is.character(theta)) {
string <- if (short)
paste("logoff(",theta,
- ", list(offset=",as.character(offset),"))", sep="") else
- paste("log(", as.character(offset), "+", theta, ")", sep="")
+ ", list(offset = ",as.character(offset),"))", sep = "") else
+ paste("log(", as.character(offset), "+", theta, ")", sep = "")
if (tag)
string <- paste("Log with offset:", string)
return(string)
@@ -771,8 +774,8 @@ nlogoff <- function(theta, earg=0, inverse=FALSE, deriv=0,
stop("bad input for argument earg")
if (is.character(theta)) {
string <- if (short)
- paste("nlogoff(",theta,",",as.character(offset),")", sep="") else
- paste("log(", as.character(offset), "-", theta, ")", sep="")
+ paste("nlogoff(",theta,",",as.character(offset),")", sep = "") else
+ paste("log(", as.character(offset), "-", theta, ")", sep = "")
if (tag)
string <- paste("Negative-log with offset:", string)
return(string)
@@ -793,14 +796,14 @@ nlogoff <- function(theta, earg=0, inverse=FALSE, deriv=0,
-cauchit <- function(theta, earg=list(bvalue= .Machine$double.eps),
+cauchit <- function(theta, earg = list(bvalue= .Machine$double.eps),
inverse=FALSE, deriv=0,
short=TRUE, tag=FALSE)
{
if (is.character(theta)) {
string <- if (short)
- paste("cauchit(",theta,")", sep="") else
- paste("tan(pi*(",theta,"-0.5))", sep="")
+ paste("cauchit(",theta,")", sep = "") else
+ paste("tan(pi*(",theta,"-0.5))", sep = "")
if (tag)
string <- paste("Cauchit:", string)
return(string)
@@ -825,7 +828,7 @@ cauchit <- function(theta, earg=list(bvalue= .Machine$double.eps),
-golf <- function(theta, earg=list(lambda=1), inverse=FALSE, deriv=0,
+golf <- function(theta, earg = list(lambda=1), inverse=FALSE, deriv=0,
short=TRUE, tag=FALSE)
{
@@ -849,22 +852,22 @@ golf <- function(theta, earg=list(lambda=1), inverse=FALSE, deriv=0,
string <- if (short) {
lenl = length(lambda) > 1
lenc = length(cutpoint) > 1
- paste("golf(",theta,", earg=list(lambda=",
+ paste("golf(",theta,", earg = list(lambda = ",
if (lenl) "c(" else "",
ToString(lambda),
if (lenl) ")" else "",
if (is.Numeric(cutpoint))
- paste(", cutpoint=",
+ paste(", cutpoint = ",
if (lenc) "c(" else "",
ToString(cutpoint),
if (lenc) ")" else "",
- sep="") else "",
- "))", sep="") } else {
+ sep = "") else "",
+ "))", sep = "") } else {
if (is.Numeric(cutpoint)) {
paste("-3*log(1-qnorm(",theta,")/(3*sqrt(lambda)))",
- " + log(cutpoint)", sep="")
+ " + log(cutpoint)", sep = "")
} else {
- paste("-3*log(1-qnorm(",theta,")/(3*sqrt(lambda)))", sep="")
+ paste("-3*log(1-qnorm(",theta,")/(3*sqrt(lambda)))", sep = "")
}
}
if (tag)
@@ -879,7 +882,7 @@ golf <- function(theta, earg=list(lambda=1), inverse=FALSE, deriv=0,
answer = thmat
for(ii in 1:ncol(thmat))
answer[,ii] = Recall(theta=thmat[,ii],
- earg=list(lambda=lambda[ii],
+ earg = list(lambda=lambda[ii],
cutpoint = if (is.Numeric(cutpoint)) cutpoint[ii] else NULL),
inverse=inverse, deriv=deriv)
return(answer)
@@ -899,7 +902,7 @@ golf <- function(theta, earg=list(lambda=1), inverse=FALSE, deriv=0,
} else {
smallno = 1 * .Machine$double.eps
Theta = theta
- Theta = pmin(Theta, 1 - smallno) # Since theta==1 is a possibility
+ Theta = pmin(Theta, 1 - smallno) # Since theta == 1 is a possibility
Theta = pmax(Theta, smallno) # Since theta==0 is a possibility
Ql = qnorm(Theta)
switch(deriv+1, {
@@ -930,13 +933,13 @@ polf <- function(theta, earg=stop("'earg' must be given"),
if (is.character(theta)) {
string <- if (short) {
lenc = length(cutpoint) > 1
- paste("polf(",theta,", earg=list(cutpoint=",
+ paste("polf(",theta,", earg = list(cutpoint = ",
if (lenc) "c(" else "",
ToString(cutpoint),
if (lenc) ")" else "",
- "))", sep="")
+ "))", sep = "")
} else
- paste("2*log(0.5*qnorm(",theta,") + sqrt(cutpoint+7/8))", sep="")
+ paste("2*log(0.5*qnorm(",theta,") + sqrt(cutpoint+7/8))", sep = "")
if (tag)
string <- paste("Poisson-ordinal link function:", string)
return(string)
@@ -971,7 +974,7 @@ polf <- function(theta, earg=stop("'earg' must be given"),
smallno = 1 * .Machine$double.eps
SMALLNO = 1 * .Machine$double.xmin
Theta = theta
- Theta = pmin(Theta, 1 - smallno) # Since theta==1 is a possibility
+ Theta = pmin(Theta, 1 - smallno) # Since theta == 1 is a possibility
Theta = pmax(Theta, smallno) # Since theta==0 is a possibility
Ql = qnorm(Theta)
switch(deriv+1, {
@@ -1008,18 +1011,18 @@ nbolf <- function(theta, earg=stop("'earg' must be given"),
string <- if (short) {
lenc = length(cutpoint) > 1
lenk = length(kay) > 1
- paste("nbolf(",theta,", earg=list(cutpoint=",
+ paste("nbolf(",theta,", earg = list(cutpoint = ",
if (lenc) "c(" else "",
ToString(cutpoint),
if (lenc) ")" else "",
- ", k=",
+ ", k = ",
if (lenk) "c(" else "",
ToString(kay),
if (lenk) ")" else "",
- "))", sep="")
+ "))", sep = "")
} else
paste("2*log(sqrt(k) * sinh(qnorm(",theta,")/(2*sqrt(k)) + ",
- "asinh(sqrt(cutpoint/k))))", sep="")
+ "asinh(sqrt(cutpoint/k))))", sep = "")
if (tag)
string <- paste("Negative binomial-ordinal link function:", string)
return(string)
@@ -1032,7 +1035,7 @@ nbolf <- function(theta, earg=stop("'earg' must be given"),
answer = thmat
for(ii in 1:ncol(thmat))
answer[,ii] = Recall(theta=thmat[,ii],
- earg=list(cutpoint=cutpoint[ii], k=kay[ii]),
+ earg = list(cutpoint=cutpoint[ii], k=kay[ii]),
inverse=inverse, deriv=deriv)
return(answer)
}
@@ -1053,7 +1056,7 @@ nbolf <- function(theta, earg=stop("'earg' must be given"),
smallno = 1 * .Machine$double.eps
SMALLNO = 1 * .Machine$double.xmin
Theta = theta
- Theta = pmin(Theta, 1 - smallno) # Since theta==1 is a possibility
+ Theta = pmin(Theta, 1 - smallno) # Since theta == 1 is a possibility
Theta = pmax(Theta, smallno) # Since theta==0 is a possibility
if (cutpoint == 0) {
switch(deriv+1, {
@@ -1104,17 +1107,17 @@ nbolf2 <- function(theta, earg=stop("'earg' must be given"),
string <- if (short) {
lenc = length(cutpoint) > 1
lenk = length(kay) > 1
- paste("nbolf2(",theta,", earg=list(cutpoint=",
+ paste("nbolf2(",theta,", earg = list(cutpoint = ",
if (lenc) "c(" else "",
ToString(cutpoint),
if (lenc) ")" else "",
- ", k=",
+ ", k = ",
if (lenk) "c(" else "",
ToString(kay),
if (lenk) ")" else "",
- "))", sep="")
+ "))", sep = "")
} else
- paste("3*log(<a complicated expression>)", sep="")
+ paste("3*log(<a complicated expression>)", sep = "")
if (tag)
string = paste("Negative binomial-ordinal link function 2:", string)
return(string)
@@ -1126,7 +1129,7 @@ nbolf2 <- function(theta, earg=stop("'earg' must be given"),
answer = thmat
for(ii in 1:ncol(thmat))
answer[,ii] = Recall(theta=thmat[,ii],
- earg=list(cutpoint=cutpoint[ii], k=kay[ii]),
+ earg = list(cutpoint=cutpoint[ii], k=kay[ii]),
inverse=inverse, deriv=deriv)
return(answer)
}
@@ -1152,7 +1155,7 @@ nbolf2 <- function(theta, earg=stop("'earg' must be given"),
theta2 = invfun = pnorm(-ans) # pnorm(-x) = 1-pnorm(x)
for(ii in 1:4) {
theta2[,ii] = Recall(theta=theta2[,ii],
- earg=list(cutpoint=cutpoint, k=kay),
+ earg = list(cutpoint=cutpoint, k=kay),
inverse=FALSE, deriv=deriv)
}
rankmat = t(apply(abs(theta2 - theta), 1, rank))
@@ -1168,7 +1171,7 @@ nbolf2 <- function(theta, earg=stop("'earg' must be given"),
smallno = 1 * .Machine$double.eps
SMALLNO = 1 * .Machine$double.xmin
Theta = theta
- Theta = pmin(Theta, 1 - smallno) # Since theta==1 is a possibility
+ Theta = pmin(Theta, 1 - smallno) # Since theta == 1 is a possibility
Theta = pmax(Theta, smallno) # Since theta==0 is a possibility
if (cutpoint == 0) {
switch(deriv+1, {
@@ -1218,7 +1221,7 @@ Cut = function(y, breaks=c(-Inf, quantile(c(y), prob = (1:4)/4))) {
answer = if (ncol(y) > 1) matrix(temp, nrow(y), ncol(y)) else temp
if (ncol(y) > 1) {
ynames = dimnames(y)[[2]]
- if (!length(ynames)) ynames = paste("Y", 1:ncol(y), sep="")
+ if (!length(ynames)) ynames = paste("Y", 1:ncol(y), sep = "")
xnames = dimnames(y)[[1]]
if (!length(xnames)) xnames = as.character(1:nrow(y))
dimnames(answer) = list(xnames, ynames)
diff --git a/R/logLik.vlm.q b/R/logLik.vlm.q
index 3e8faef..3a71b27 100644
--- a/R/logLik.vlm.q
+++ b/R/logLik.vlm.q
@@ -1,36 +1,77 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+
logLik.vlm <- function(object, ...)
object at criterion$loglikelihood
+
if (!isGeneric("logLik"))
setGeneric("logLik", function(object, ...) standardGeneric("logLik"))
setMethod("logLik", "vlm", function(object, ...)
logLik.vlm(object, ...))
+
setMethod("logLik", "vglm", function(object, ...)
logLik.vlm(object, ...))
+
setMethod("logLik", "vgam", function(object, ...)
logLik.vlm(object, ...))
-if (TRUE) {
-constraints.vlm <- function(object, all=TRUE, which, ...)
- if (all) slot(object, "constraints") else
- slot(object, "constraints")[[which]]
+
+
+constraints.vlm <- function(object,
+ type = c("vlm", "lm"),
+ all = TRUE, which, ...) {
+
+
+ type <- match.arg(type, c("vlm","lm"))[1]
+
+ Hlist <-
+ ans <- slot(object, "constraints") # For "vlm"
+
+ if (type == "lm") {
+ oassign.LM <- object at misc$orig.assign
+
+ x.LM <- model.matrix(object)
+ att.x.LM <- attr(x.LM, "assign")
+ names.att.x.LM <- names(att.x.LM)
+ ppp <- length(names.att.x.LM)
+
+ ans <- vector("list", ppp)
+ for (ii in 1:ppp) {
+ col.ptr <- (oassign.LM[[ii]])[1] # 20110114
+ ans[[ii]] <- (Hlist[[col.ptr]])
+ }
+ names(ans) <- names.att.x.LM
+ } # End of "lm"
+
+ if (all) ans else ans[[which]]
+}
+
if (!isGeneric("constraints"))
-setGeneric("constraints", function(object, ...) standardGeneric("constraints"))
+ setGeneric("constraints", function(object, ...)
+ standardGeneric("constraints"))
+
setMethod("constraints", "vlm", function(object, ...)
constraints.vlm(object, ...))
-}
+
+
+
+
+
diff --git a/R/model.matrix.vglm.q b/R/model.matrix.vglm.q
index e25ca7e..f50b21c 100644
--- a/R/model.matrix.vglm.q
+++ b/R/model.matrix.vglm.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/mux.q b/R/mux.q
index abe54e6..cd77b65 100644
--- a/R/mux.q
+++ b/R/mux.q
@@ -1,10 +1,11 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
-mux34 <- function(xmat, cc, symmetric=FALSE)
+mux34 <- function(xmat, cc, symmetric = FALSE)
{
@@ -15,15 +16,15 @@ mux34 <- function(xmat, cc, symmetric=FALSE)
RRR <- d[2]
if (length(cc) == 1) cc = matrix(cc, 1, 1)
if (!is.matrix(cc)) stop("'cc' is not a matrix")
- c(dotC(name="VGAM_C_mux34", as.double(xmat), as.double(cc),
+ c(dotC(name = "VGAM_C_mux34", as.double(xmat), as.double(cc),
as.integer(nnn), as.integer(RRR),
- as.integer(symmetric), ans=as.double(rep(0.0, nnn)),
- NAOK=TRUE)$ans)
+ as.integer(symmetric), ans = as.double(rep(0.0, nnn)),
+ NAOK = TRUE)$ans)
}
if(FALSE)
-mux34 <- function(xmat, cc, symmetric=FALSE)
+mux34 <- function(xmat, cc, symmetric = FALSE)
{
if (!is.matrix(xmat))
@@ -33,10 +34,10 @@ mux34 <- function(xmat, cc, symmetric=FALSE)
R <- d[2]
if (length(cc) == 1) cc = matrix(cc, 1, 1)
if (!is.matrix(cc)) stop("'cc' is not a matrix")
- c(dotFortran(name="vgamf90mux34", as.double(xmat), as.double(cc),
+ c(dotFortran(name = "vgamf90mux34", as.double(xmat), as.double(cc),
as.integer(n), as.integer(R),
- as.integer(symmetric), ans=as.double(rep(0.0, n)),
- NAOK=TRUE)$ans)
+ as.integer(symmetric), ans = as.double(rep(0.0, n)),
+ NAOK = TRUE)$ans)
}
@@ -59,27 +60,27 @@ mux2 <- function(cc, xmat)
if (d[2] != p || d[3] != n)
stop("dimension size inconformable")
ans <- rep(as.numeric(NA), n*M)
- fred <- dotC(name="mux2", as.double(cc), as.double(t(xmat)),
- ans=as.double(ans), as.integer(p), as.integer(n),
- as.integer(M), NAOK=TRUE)
- matrix(fred$ans,n,M,byrow=TRUE)
+ fred <- dotC(name = "mux2", as.double(cc), as.double(t(xmat)),
+ ans = as.double(ans), as.integer(p), as.integer(n),
+ as.integer(M), NAOK = TRUE)
+ matrix(fred$ans,n,M,byrow = TRUE)
}
-mux22 <- function(cc, xmat, M, upper=FALSE, as.matrix=FALSE)
+mux22 <- function(cc, xmat, M, upper = FALSE, as.matrix = FALSE)
{
n <- ncol(cc)
- index <- iam(NA, NA, M, both=TRUE, diag=TRUE)
+ index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
dimm.value <- nrow(cc) # Usually M or M(M+1)/2
ans <- rep(as.numeric(NA), n*M)
- fred <- dotC(name="mux22", as.double(cc), as.double(t(xmat)),
- ans=as.double(ans), as.integer(dimm.value),
+ fred <- dotC(name = "mux22", as.double(cc), as.double(t(xmat)),
+ ans = as.double(ans), as.integer(dimm.value),
as.integer(index$row), as.integer(index$col),
- as.integer(n), as.integer(M), wk=double(M*M),
- as.integer(as.numeric(upper)), NAOK=TRUE)
+ as.integer(n), as.integer(M), wk = double(M*M),
+ as.integer(as.numeric(upper)), NAOK = TRUE)
if (!as.matrix) fred$ans else {
dim(fred$ans) <- c(M, n)
t(fred$ans)
@@ -88,7 +89,7 @@ mux22 <- function(cc, xmat, M, upper=FALSE, as.matrix=FALSE)
-mux5 <- function(cc, x, M, matrix.arg=FALSE)
+mux5 <- function(cc, x, M, matrix.arg = FALSE)
{
@@ -103,19 +104,20 @@ mux5 <- function(cc, x, M, matrix.arg=FALSE)
cc <- t(cc)
} else {
n <- dimcc[3]
- if (dimcc[1]!=dimcc[2] || dimx[1]!=dimcc[1] ||
- (length(dimx)==3 && dimx[3]!=dimcc[3]))
+ if (dimcc[1]!= dimcc[2] || dimx[1]!= dimcc[1] ||
+ (length(dimx) == 3 && dimx[3]!= dimcc[3]))
stop('input nonconformable')
neltscci <- M*(M+1)/2
}
if (is.matrix(x))
x <- array(x,c(M,r,n))
- index.M <- iam(NA, NA, M, both=TRUE, diag=TRUE)
- index.r <- iam(NA, NA, r, both=TRUE, diag=TRUE)
+ index.M <- iam(NA, NA, M, both = TRUE, diag = TRUE)
+ index.r <- iam(NA, NA, r, both = TRUE, diag = TRUE)
size <- if (matrix.arg) dimm(r)*n else r*r*n
- fred <- dotC(name="mux5", as.double(cc), as.double(x), ans=double(size),
+ fred <- dotC(name = "mux5", as.double(cc), as.double(x),
+ ans = double(size),
as.integer(M), as.integer(n), as.integer(r),
as.integer(neltscci),
as.integer(dimm(r)),
@@ -123,7 +125,7 @@ mux5 <- function(cc, x, M, matrix.arg=FALSE)
double(M*M), double(r*r),
as.integer(index.M$row), as.integer(index.M$col),
as.integer(index.r$row), as.integer(index.r$col),
- ok3=as.integer(1), NAOK=TRUE)
+ ok3 = as.integer(1), NAOK = TRUE)
if (fred$ok3 == 0) stop("can only handle matrix.arg == 1")
@@ -142,16 +144,17 @@ mux55 <- function(evects, evals, M)
d <- dim(evects)
n <- ncol(evals)
- if (d[1]!=M || d[2]!=M || d[3]!=n || nrow(evals)!=M || ncol(evals)!=n)
+ if (d[1]!= M || d[2]!= M || d[3]!= n ||
+ nrow(evals)!= M || ncol(evals)!= n)
stop("input nonconformable")
MM12 <- M*(M+1)/2 # The answer is a full-matrix
- index <- iam(NA, NA, M, both=TRUE, diag=TRUE)
+ index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
- fred <- dotC(name="mux55", as.double(evects), as.double(evals),
- ans=double(MM12 * n),
+ fred <- dotC(name = "mux55", as.double(evects), as.double(evals),
+ ans = double(MM12 * n),
double(M*M), double(M*M),
as.integer(index$row), as.integer(index$col),
- as.integer(M), as.integer(n), NAOK=TRUE)
+ as.integer(M), as.integer(n), NAOK = TRUE)
dim(fred$ans) <- c(MM12, n)
fred$ans
}
@@ -162,7 +165,8 @@ mux7 <- function(cc, x)
dimx <- dim(x)
dimcc <- dim(cc)
- if (dimx[1]!=dimcc[2] || (length(dimx)==3 && dimx[3]!=dimcc[3]))
+ if (dimx[1]!= dimcc[2] ||
+ (length(dimx) == 3 && dimx[3]!= dimcc[3]))
stop('input nonconformable')
M <- dimcc[1]
qq <- dimcc[2]
@@ -172,9 +176,10 @@ mux7 <- function(cc, x)
x <- array(x,c(qq,r,n))
ans <- array(NA, c(M,r,n))
- fred <- dotC(name="mux7", as.double(cc), as.double(x), ans=as.double(ans),
+ fred <- dotC(name = "mux7", as.double(cc), as.double(x),
+ ans = as.double(ans),
as.integer(M), as.integer(qq), as.integer(n),
- as.integer(r), NAOK=TRUE)
+ as.integer(r), NAOK = TRUE)
array(fred$ans,c(M,r,n))
}
@@ -188,14 +193,15 @@ mux9 <- function(cc, xmat)
dimxmat <- dim(xmat)
dimcc <- dim(cc)
- if (dimcc[1]!=dimcc[2] || dimxmat[1]!=dimcc[3] || dimxmat[2]!=dimcc[1])
+ if (dimcc[1]!= dimcc[2] || dimxmat[1]!= dimcc[3] || dimxmat[2]!= dimcc[1])
stop('input nonconformable')
M <- dimcc[1]
n <- dimcc[3]
ans <- matrix(as.numeric(NA),n,M)
- fred <- dotC(name="mux9", as.double(cc), as.double(xmat), ans=as.double(ans),
- as.integer(M), as.integer(n), NAOK=TRUE)
+ fred <- dotC(name = "mux9", as.double(cc), as.double(xmat),
+ ans = as.double(ans),
+ as.integer(M), as.integer(n), NAOK = TRUE)
matrix(fred$ans,n,M)
}
@@ -209,33 +215,34 @@ mux11 <- function(cc, xmat)
M <- dcc[1]
R <- d[2]
n <- dcc[3]
- if (M!=dcc[2] || d[1]!=n*M)
+ if (M!= dcc[2] || d[1]!= n*M)
stop("input inconformable")
Xmat <- array(c(t(xmat)), c(R,M,n))
Xmat <- aperm(Xmat, c(2,1,3)) # Xmat becomes M x R x n
mat <- mux7(cc, Xmat) # mat is M x R x n
mat <- aperm(mat, c(2,1,3)) # mat becomes R x M x n
- mat <- matrix(c(mat), n*M, R, byrow=TRUE)
+ mat <- matrix(c(mat), n*M, R, byrow = TRUE)
mat
}
-mux111 <- function(cc, xmat, M, upper=TRUE)
+mux111 <- function(cc, xmat, M, upper = TRUE)
{
R <- ncol(xmat)
n <- nrow(xmat) / M
- index <- iam(NA, NA, M, both=TRUE, diag=TRUE)
+ index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
dimm.value <- nrow(cc) # M or M(M+1)/2
- fred <- dotC(name="mux111", as.double(cc), b=as.double(t(xmat)), as.integer(M),
- as.integer(R), as.integer(n), wk=double(M*M),
- wk2=double(M*R), as.integer(index$row),
+ fred <- dotC(name = "mux111", as.double(cc), b = as.double(t(xmat)),
+ as.integer(M),
+ as.integer(R), as.integer(n), wk = double(M*M),
+ wk2 = double(M*R), as.integer(index$row),
as.integer(index$col), as.integer(dimm.value),
- as.integer(as.numeric(upper)), NAOK=TRUE)
+ as.integer(as.numeric(upper)), NAOK = TRUE)
ans <- fred$b
dim(ans) <- c(R, nrow(xmat))
@@ -256,9 +263,9 @@ mux15 <- function(cc, xmat)
stop("argument 'cc' is not symmetric")
ans <- rep(as.numeric(NA),n*M*M)
- fred <- dotC(name="mux15", as.double(cc), as.double(t(xmat)),
- ans=as.double(ans), as.integer(M),
- as.integer(n), NAOK=TRUE)
+ fred <- dotC(name = "mux15", as.double(cc), as.double(t(xmat)),
+ ans = as.double(ans), as.integer(M),
+ as.integer(n), NAOK = TRUE)
array(fred$ans,c(M,M,n))
}
@@ -271,14 +278,14 @@ vforsub <- function(cc, b, M, n)
- index <- iam(NA, NA, M, both=TRUE, diag=TRUE)
+ index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
dimm.value <- nrow(cc) # M or M(M+1)/2
- fred <- dotC(name="vforsub", as.double(cc), b=as.double(t(b)),
- as.integer(M), as.integer(n), wk=double(M*M),
+ fred <- dotC(name = "vforsub", as.double(cc), b = as.double(t(b)),
+ as.integer(M), as.integer(n), wk = double(M*M),
as.integer(index$row), as.integer(index$col),
- as.integer(dimm.value), NAOK=TRUE)
+ as.integer(dimm.value), NAOK = TRUE)
dim(fred$b) <- c(M, n)
fred$b
@@ -289,46 +296,50 @@ vforsub <- function(cc, b, M, n)
vbacksub <- function(cc, b, M, n)
{
- index <- iam(NA, NA, M, both=TRUE, diag=TRUE)
+ index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
dimm.value <- nrow(cc)
- if (nrow(b)!=M || ncol(b)!=n)
+ if (nrow(b)!= M || ncol(b)!= n)
stop("dimension size inconformable")
- fred <- dotC(name="vbacksub", as.double(cc), b=as.double(b),
- as.integer(M), as.integer(n), wk=double(M*M),
+ fred <- dotC(name = "vbacksub", as.double(cc), b = as.double(b),
+ as.integer(M), as.integer(n), wk = double(M*M),
as.integer(index$row), as.integer(index$col),
- as.integer(dimm.value), NAOK=TRUE)
+ as.integer(dimm.value), NAOK = TRUE)
- if (M==1) fred$b else {
+ if (M == 1) fred$b else {
dim(fred$b) <- c(M,n)
t(fred$b)
}
}
-vchol <- function(cc, M, n, silent=FALSE)
+vchol <- function(cc, M, n, silent = FALSE, callno = 0)
{
- index <- iam(NA, NA, M, both=TRUE, diag=TRUE)
+
+
+ index <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
cc <- t(cc)
MM <- nrow(cc) # cc is big enough to hold its Cholesky decom.
- fred <- dotC(name="vchol", cc=as.double(cc), as.integer(M),
- as.integer(n), ok=integer(n),
- wk=double(M*M), as.integer(index$row),
+ fred <- dotC(name = "vchol", cc = as.double(cc), as.integer(M),
+ as.integer(n), ok = integer(n),
+ wk = double(M*M), as.integer(index$row),
as.integer(index$col),
as.integer(MM),
- NAOK=TRUE)
+ NAOK = TRUE)
- failed <- fred$ok != 1
+ failed <- (fred$ok != 1)
if ((correction.needed <- any(failed))) {
index <- (1:n)[failed]
if (!silent) {
if (length(index) < 11)
- warning(paste("weight matri", ifelse(length(index)>1, "ces ","x "),
- paste(index, collapse=", "), " not positive-definite", sep=""))
+ warning("weight matri",
+ ifelse(length(index) > 1, "ces ","x "),
+ paste(index, collapse = ", "),
+ " not positive-definite")
}
}
@@ -336,12 +347,13 @@ vchol <- function(cc, M, n, silent=FALSE)
dim(ans) <- c(MM, n)
if (correction.needed) {
- temp <- cc[, index, drop=FALSE]
- tmp777 <- vchol.greenstadt(temp, M=M, silent=silent)
+ temp <- cc[, index, drop = FALSE]
+ tmp777 <- vchol.greenstadt(temp, M = M, silent = silent,
+ callno = callno + 1)
if (length(index) == n) {
- ans = tmp777[1:nrow(ans),,drop=FALSE] # was tmp777 prior to 7/3/03
+ ans = tmp777[1:nrow(ans),,drop = FALSE]
} else {
@@ -355,7 +367,8 @@ vchol <- function(cc, M, n, silent=FALSE)
-vchol.greenstadt <- function(cc, M, silent=FALSE)
+vchol.greenstadt <- function(cc, M, silent = FALSE,
+ callno = 0)
{
@@ -365,25 +378,54 @@ vchol.greenstadt <- function(cc, M, silent=FALSE)
n <- dim(cc)[2]
if (!silent)
- cat(paste("Applying Greenstadt modification to", n, "matrices\n"))
+ cat(paste("Applying Greenstadt modification to ", n, " matri",
+ ifelse(n > 1, "ces", "x"), "\n", sep = ""))
+
+
+
- temp <- veigen(cc, M=M) # , mat=TRUE)
- dim(temp$vectors) <- c(M, M, n) # Make sure (when M=1) for mux5
- dim(temp$values) <- c(M, n) # Make sure (when M=1) for mux5
- zero <- temp$values == 0
- neg <- temp$values < 0
- pos <- temp$values > 0
+ temp <- veigen(cc, M = M) # , mat = TRUE)
+ dim(temp$vectors) <- c(M, M, n) # Make sure (when M = 1) for mux5
+ dim(temp$values) <- c(M, n) # Make sure (when M = 1) for mux5
+
+ is.neg <- (temp$values < .Machine$double.eps)
+ is.pos <- (temp$values > .Machine$double.eps)
+ zilch <- (!is.pos & !is.neg)
temp$values <- abs(temp$values)
- small.value <- quantile(temp$values[!zero], prob=0.15)
- temp$values[zero] <- small.value
+ temp.small.value <- quantile(temp$values[!zilch], prob = 0.15)
+ if (callno > 2) {
+ temp.small.value <- abs(temp.small.value) * 1.50^callno
+
+
+ small.value <- temp.small.value
+
+
+ temp$values[zilch] <- small.value
+
+ }
+
+
+ if (callno > 9) {
+ warning("taking drastic action; setting all wz to ",
+ "scaled versions of the order-M identity matrix")
+
+ cc2mean <- abs(colMeans(cc[1:M, , drop = FALSE]))
+ temp$values <- matrix(cc2mean, M, n, byrow = TRUE)
+ temp$vectors <- array(c(diag(M)), c(M, M, n))
+ }
+
+
+
+ temp3 <- mux55(temp$vectors, temp$values, M = M) #, matrix.arg = TRUE)
+ ans <- vchol(t(temp3), M = M, n = n, silent = silent,
+ callno = callno + 1) #, matrix.arg = TRUE)
+
- temp3 <- mux55(temp$vectors, temp$values, M=M) # , matrix.arg=TRUE)
- ans <- vchol(t(temp3), M=M, n=n, silent=silent) # , matrix.arg=TRUE)
- if (nrow(ans) == MM) ans else ans[1:MM, , drop=FALSE]
+ if (nrow(ans) == MM) ans else ans[1:MM, , drop = FALSE]
}
@@ -391,9 +433,9 @@ vchol.greenstadt <- function(cc, M, silent=FALSE)
myf = function(x) {
dotFortran("VGAM_F90_fill9",
- x=as.double(x), lenx=as.integer(length(x)),
- answer=as.double(x),
- NAOK=TRUE)$answer
+ x = as.double(x), lenx = as.integer(length(x)),
+ answer = as.double(x),
+ NAOK = TRUE)$answer
}
diff --git a/R/plot.vglm.q b/R/plot.vglm.q
index f2342e6..7372114 100644
--- a/R/plot.vglm.q
+++ b/R/plot.vglm.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/predict.vgam.q b/R/predict.vgam.q
index ffca04d..d6e5ed8 100644
--- a/R/predict.vgam.q
+++ b/R/predict.vgam.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/predict.vglm.q b/R/predict.vglm.q
index aa0a1d7..ea5d6d0 100644
--- a/R/predict.vglm.q
+++ b/R/predict.vglm.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/predict.vlm.q b/R/predict.vlm.q
index 110221b..afb53fc 100644
--- a/R/predict.vlm.q
+++ b/R/predict.vlm.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/print.vglm.q b/R/print.vglm.q
index c777ac1..5eb7d08 100644
--- a/R/print.vglm.q
+++ b/R/print.vglm.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
print.vglm <- function(x, ...) {
diff --git a/R/print.vlm.q b/R/print.vlm.q
index 40b5b75..2f06c69 100644
--- a/R/print.vlm.q
+++ b/R/print.vlm.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/qrrvglm.control.q b/R/qrrvglm.control.q
index 6315c48..0f19001 100644
--- a/R/qrrvglm.control.q
+++ b/R/qrrvglm.control.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
qrrvglm.control = function(Rank=1,
@@ -115,7 +116,7 @@ qrrvglm.control = function(Rank=1,
save.weight = FALSE,
SD.Cinit = SD.Cinit,
SmallNo = SmallNo,
- Structural.zero = NULL,
+ szero = NULL,
Svd.arg = TRUE, Alpha=0.5, Uncorrelated.lv = TRUE,
trace = trace,
Use.Init.Poisson.QO=as.logical(Use.Init.Poisson.QO)[1],
diff --git a/R/qtplot.q b/R/qtplot.q
index dad53ac..1f3cb29 100644
--- a/R/qtplot.q
+++ b/R/qtplot.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/residuals.vlm.q b/R/residuals.vlm.q
index 658a2bf..fe301c8 100644
--- a/R/residuals.vlm.q
+++ b/R/residuals.vlm.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/rrvglm.R b/R/rrvglm.R
index 45bab6a..088fbc4 100644
--- a/R/rrvglm.R
+++ b/R/rrvglm.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/rrvglm.control.q b/R/rrvglm.control.q
index f6081b2..a36dbf7 100644
--- a/R/rrvglm.control.q
+++ b/R/rrvglm.control.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -10,8 +11,8 @@ rrvglm.control = function(Rank = 1,
Uncorrelated.lv=FALSE,
Wmat=NULL,
Svd.arg=FALSE,
- Index.corner = if (length(Structural.zero))
- head((1:1000)[-Structural.zero], Rank) else 1:Rank,
+ Index.corner = if (length(szero))
+ head((1:1000)[-szero], Rank) else 1:Rank,
Ainit=NULL,
Alpha=0.5,
Bestof = 1,
@@ -19,7 +20,7 @@ rrvglm.control = function(Rank = 1,
Etamat.colmax = 10,
SD.Ainit = 0.02,
SD.Cinit = 0.02,
- Structural.zero = NULL,
+ szero = NULL,
Norrr = ~ 1,
trace = FALSE,
Use.Init.Poisson.QO=FALSE,
@@ -47,12 +48,13 @@ rrvglm.control = function(Rank = 1,
stop("bad input for 'SD.Ainit'")
if (!is.Numeric(SD.Cinit, posit=TRUE, allow=1))
stop("bad input for 'SD.Cinit'")
- if (!is.Numeric(Etamat.colmax, posit=TRUE, allow=1) || Etamat.colmax < Rank)
+ if (!is.Numeric(Etamat.colmax, posit=TRUE, allow=1) ||
+ Etamat.colmax < Rank)
stop("bad input for 'Etamat.colmax'")
- if (length(Structural.zero) && (any(round(Structural.zero) != Structural.zero)
- || any(Structural.zero<1)))
- stop("bad input for the argument 'Structural.zero'")
+ if (length(szero) && (any(round(szero) != szero)
+ || any(szero<1)))
+ stop("bad input for the argument 'szero'")
Quadratic = FALSE
@@ -69,8 +71,8 @@ rrvglm.control = function(Rank = 1,
if (Corner && (Svd.arg || Uncorrelated.lv || length(Wmat)))
stop("cannot have Corner=TRUE and either Svd=TRUE or Uncorrelated.lv=TRUE or Wmat")
- if (Corner && length(intersect(Structural.zero, Index.corner)))
- stop("cannot have Structural.zero and Index.corner having common values")
+ if (Corner && length(intersect(szero, Index.corner)))
+ stop("cannot have szero and Index.corner having common values")
if (length(Index.corner) != Rank)
stop("length(Index.corner) != Rank")
@@ -103,7 +105,7 @@ rrvglm.control = function(Rank = 1,
SD.Ainit = SD.Ainit,
SD.Cinit = SD.Cinit,
Etamat.colmax = Etamat.colmax,
- Structural.zero = Structural.zero,
+ szero = szero,
Svd.arg=Svd.arg,
Use.Init.Poisson.QO=Use.Init.Poisson.QO),
checkwz=checkwz,
diff --git a/R/rrvglm.fit.q b/R/rrvglm.fit.q
index 1860081..9e4e6a6 100644
--- a/R/rrvglm.fit.q
+++ b/R/rrvglm.fit.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -201,16 +202,27 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
eval(rrr.init.expression)
+
if (length(etastart)) {
eta <- etastart
mu <- if (length(mustart)) mustart else
- slot(family, "inverse")(eta, extra)
- } else {
- if (length(mustart))
- mu <- mustart
- eta <- slot(family, "link")(mu, extra)
+ if (length(body(slot(family, "inverse"))))
+ slot(family, "inverse")(eta, extra) else
+ warning("argument 'etastart' assigned a value ",
+ "but there is no 'inverse' slot to use it")
+ }
+
+ if (length(mustart)) {
+ mu <- mustart
+ if (length(body(slot(family, "link")))) {
+ eta <- slot(family, "link")(mu, extra)
+ } else {
+ warning("argument 'mustart' assigned a value ",
+ "but there is no 'link' slot to use it")
+ }
}
+
M <- if (is.matrix(eta)) ncol(eta) else 1
if (is.character(rrcontrol$Dzero)) {
@@ -294,8 +306,8 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
if (control$Corner)
Amat[control$Index.corner,] = diag(Rank)
- if (length(control$Structural.zero))
- Amat[control$Structural.zero,] = 0
+ if (length(control$szero))
+ Amat[control$szero,] = 0
rrcontrol$Ainit = control$Ainit = Amat # Good for valt()
rrcontrol$Cinit = control$Cinit = Cmat # Good for valt()
@@ -507,7 +519,14 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
- df.residual <- nrow_X_vlm - rank - (if(control$Quadratic) Rank*p2 else 0)
+
+ elts.tildeA = (M - Rank - length(control$szero)) * Rank
+ no.dpar = 0
+ df.residual <- nrow_X_vlm - rank -
+ (if(control$Quadratic) Rank*p2 else 0) -
+ no.dpar - elts.tildeA
+
+
fit <- list(assign=asgn,
coefficients=coefs,
constraints = if (control$Quadratic) B.list else Blist,
diff --git a/R/s.q b/R/s.q
index d01370a..aa80db9 100644
--- a/R/s.q
+++ b/R/s.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/s.vam.q b/R/s.vam.q
index ad6378e..ccd71f9 100644
--- a/R/s.vam.q
+++ b/R/s.vam.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/smart.R b/R/smart.R
index 6bf7ef4..99d9313 100644
--- a/R/smart.R
+++ b/R/smart.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -14,159 +15,167 @@
-smartpredenv = new.env()
-smart.mode.is <- function(mode.arg=NULL) {
- if (!length(mode.arg)) {
- if (exists(".smart.prediction", env=smartpredenv)) {
- get(".smart.prediction.mode", env=smartpredenv)
- } else {
- "neutral"
- }
+
+
+
+
+smartpredenv <- new.env()
+
+
+smart.mode.is <- function(mode.arg = NULL) {
+ if (!length(mode.arg)) {
+ if (exists(".smart.prediction", envir = VGAM:::smartpredenv)) {
+ get(".smart.prediction.mode", envir = VGAM:::smartpredenv)
} else {
- if (mode.arg != "neutral" && mode.arg != "read" && mode.arg != "write")
-stop("argument \"mode.arg\" must be one of \"neutral\", \"read\" or \"write\"")
- if (exists(".smart.prediction", env=smartpredenv)) {
- get(".smart.prediction.mode", env=smartpredenv)==mode.arg
- } else {
- mode.arg=="neutral"
- }
+ "neutral"
+ }
+ } else {
+ if (mode.arg != "neutral" &&
+ mode.arg != "read" &&
+ mode.arg != "write")
+ stop("argument \"mode.arg\" must be one of",
+ " \"neutral\", \"read\" or \"write\"")
+ if (exists(".smart.prediction", envir = VGAM:::smartpredenv)) {
+ get(".smart.prediction.mode", envir = VGAM:::smartpredenv) ==
+ mode.arg
+ } else {
+ mode.arg == "neutral"
}
+ }
}
-setup.smart <- function(mode.arg, smart.prediction=NULL, max.smart=30) {
- actual <- if (mode.arg=="write") vector("list", max.smart) else
- if (mode.arg=="read") smart.prediction else
- stop("value of mode.arg unrecognized")
+setup.smart <- function(mode.arg, smart.prediction = NULL,
+ max.smart = 30) {
+ actual <- if (mode.arg == "write") vector("list", max.smart) else
+ if (mode.arg == "read") smart.prediction else
+ stop("value of 'mode.arg' unrecognized")
- wrapup.smart() # make sure
+ wrapup.smart() # make sure
- if (length(actual)) {
- # Double check that smart.prediction is not trivial (in "read" mode)
- # If it is trivial then ignore it. This saves testing whether
- # length(object$smart.prediction) > 0 in the predict methods function
+ if (length(actual)) {
- assign(".smart.prediction", actual, envir = smartpredenv)
- assign(".smart.prediction.counter", 0, envir = smartpredenv)
- assign(".smart.prediction.mode", mode.arg, envir = smartpredenv)
- assign(".max.smart", max.smart, envir = smartpredenv)
- assign(".smart.prediction", actual, envir = smartpredenv)
- }
+ assign(".smart.prediction", actual, envir = VGAM:::smartpredenv)
+ assign(".smart.prediction.counter", 0, envir = VGAM:::smartpredenv)
+ assign(".smart.prediction.mode", mode.arg, envir = VGAM:::smartpredenv)
+ assign(".max.smart", max.smart, envir = VGAM:::smartpredenv)
+ assign(".smart.prediction", actual, envir = VGAM:::smartpredenv)
+ }
}
wrapup.smart <- function() {
- if (exists(".smart.prediction", envir = smartpredenv))
- rm(".smart.prediction", envir = smartpredenv)
- if (exists(".smart.prediction.counter", envir = smartpredenv))
- rm(".smart.prediction.counter", envir = smartpredenv)
- if (exists(".smart.prediction.mode", envir = smartpredenv))
- rm(".smart.prediction.mode", envir = smartpredenv)
- if (exists(".max.smart", envir = smartpredenv))
- rm(".max.smart", envir = smartpredenv)
+ if (exists(".smart.prediction", envir = VGAM:::smartpredenv))
+ rm(".smart.prediction", envir = VGAM:::smartpredenv)
+ if (exists(".smart.prediction.counter", envir = VGAM:::smartpredenv))
+ rm(".smart.prediction.counter", envir = VGAM:::smartpredenv)
+ if (exists(".smart.prediction.mode", envir = VGAM:::smartpredenv))
+ rm(".smart.prediction.mode", envir = VGAM:::smartpredenv)
+ if (exists(".max.smart", envir = VGAM:::smartpredenv))
+ rm(".max.smart", envir = VGAM:::smartpredenv)
}
get.smart.prediction <- function() {
- smart.prediction.counter <- get(".smart.prediction.counter",
- envir = smartpredenv)
- max.smart <- get(".max.smart", envir = smartpredenv)
-
- if (smart.prediction.counter > 0) {
- # Save this on the object for smart prediction later
- smart.prediction <- get(".smart.prediction", envir = smartpredenv)
- if (max.smart >= (smart.prediction.counter+1))
- for(i in max.smart:(smart.prediction.counter+1))
- smart.prediction[[i]] <- NULL
- smart.prediction
- } else
- NULL
+ smart.prediction.counter <- get(".smart.prediction.counter",
+ envir = VGAM:::smartpredenv)
+ max.smart <- get(".max.smart", envir = VGAM:::smartpredenv)
+
+ if (smart.prediction.counter > 0) {
+ # Save this on the object for smart prediction later
+ smart.prediction <- get(".smart.prediction", envir = VGAM:::smartpredenv)
+ if (max.smart >= (smart.prediction.counter + 1))
+ for(i in max.smart:(smart.prediction.counter + 1))
+ smart.prediction[[i]] <- NULL
+ smart.prediction
+ } else
+ NULL
}
put.smart <- function(smart) {
- # Puts the info, if possible, in frame 1.
- # Does not returns whether it did it or not.
- # Write the info to frame 0 as well
- max.smart <- get(".max.smart", envir = smartpredenv)
- smart.prediction.counter <- get(".smart.prediction.counter",
- envir = smartpredenv)
- smart.prediction <- get(".smart.prediction", envir = smartpredenv)
- smart.prediction.counter <- smart.prediction.counter + 1
+ max.smart <- get(".max.smart", envir = VGAM:::smartpredenv)
+ smart.prediction.counter <- get(".smart.prediction.counter",
+ envir = VGAM:::smartpredenv)
+ smart.prediction <- get(".smart.prediction", envir = VGAM:::smartpredenv)
+ smart.prediction.counter <- smart.prediction.counter + 1
- if (smart.prediction.counter > max.smart) {
- # if list is too small, make it larger
- max.smart <- max.smart + (inc.smart <- 10) # can change inc.smart
- smart.prediction <- c(smart.prediction, vector("list", inc.smart))
- assign(".max.smart", max.smart, envir = smartpredenv)
- }
+ if (smart.prediction.counter > max.smart) {
+ # if list is too small, make it larger
+ max.smart <- max.smart + (inc.smart <- 10) # can change inc.smart
+ smart.prediction <- c(smart.prediction, vector("list", inc.smart))
+ assign(".max.smart", max.smart, envir = VGAM:::smartpredenv)
+ }
- smart.prediction[[smart.prediction.counter]] <- smart
- assign(".smart.prediction", smart.prediction, envir = smartpredenv)
- assign(".smart.prediction.counter", smart.prediction.counter,
- envir = smartpredenv)
+ smart.prediction[[smart.prediction.counter]] <- smart
+ assign(".smart.prediction", smart.prediction, envir = VGAM:::smartpredenv)
+ assign(".smart.prediction.counter", smart.prediction.counter,
+ envir = VGAM:::smartpredenv)
}
get.smart <- function() {
- # Returns one list component of information
- smart.prediction <- get(".smart.prediction", envir = smartpredenv)
- smart.prediction.counter <- get(".smart.prediction.counter",
- envir = smartpredenv)
- smart.prediction.counter <- smart.prediction.counter + 1
- assign(".smart.prediction.counter", smart.prediction.counter,
- envir = smartpredenv)
- smart <- smart.prediction[[smart.prediction.counter]]
- smart
+ # Returns one list component of information
+ smart.prediction <- get(".smart.prediction", envir = VGAM:::smartpredenv)
+ smart.prediction.counter <- get(".smart.prediction.counter",
+ envir = VGAM:::smartpredenv)
+ smart.prediction.counter <- smart.prediction.counter + 1
+ assign(".smart.prediction.counter", smart.prediction.counter,
+ envir = VGAM:::smartpredenv)
+ smart <- smart.prediction[[smart.prediction.counter]]
+ smart
}
smart.expression <- expression({
- # This expression only works if the first argument of the smart
- # function is "x", e.g., smartfun(x, ...)
- # Nb. .smart.match.call is the name of the smart function.
- smart <- get.smart()
- assign(".smart.prediction.mode", "neutral", envir = smartpredenv)
+ smart <- get.smart()
+ assign(".smart.prediction.mode", "neutral", envir = VGAM:::smartpredenv)
- .smart.match.call = as.character(smart$match.call)
- smart$match.call = NULL # Kill it off for the do.call
+ .smart.match.call <- as.character(smart$match.call)
+ smart$match.call <- NULL # Kill it off for the do.call
- ans.smart <- do.call(.smart.match.call[1], c(list(x=x), smart))
- assign(".smart.prediction.mode", "read", envir = smartpredenv)
+ ans.smart <- do.call(.smart.match.call[1], c(list(x=x), smart))
+ assign(".smart.prediction.mode", "read", envir = VGAM:::smartpredenv)
- ans.smart
+ ans.smart
})
is.smart <- function(object) {
- if (is.function(object)) {
- if (is.logical(a <- attr(object, "smart"))) a else FALSE
+ if (is.function(object)) {
+ if (is.logical(a <- attr(object, "smart"))) a else FALSE
+ } else {
+ if (length(slotNames(object))) {
+ if (length(object at smart.prediction) == 1 &&
+ is.logical(object at smart.prediction$smart.arg))
+ object at smart.prediction$smart.arg else
+ any(slotNames(object) == "smart.prediction")
} else {
- if (length(slotNames(object))) {
- if (length(object at smart.prediction) == 1 &&
- is.logical(object at smart.prediction$smart.arg))
- object at smart.prediction$smart.arg else
- any(slotNames(object) == "smart.prediction")
- } else {
- if (length(object$smart.prediction) == 1 &&
- is.logical(object$smart.prediction$smart.arg))
- object$smart.prediction$smart.arg else
- any(names(object) == "smart.prediction")
- }
+ if (length(object$smart.prediction) == 1 &&
+ is.logical(object$smart.prediction$smart.arg))
+ object$smart.prediction$smart.arg else
+ any(names(object) == "smart.prediction")
}
+ }
}
+
+
+
+
+
+
library(splines)
@@ -188,7 +197,7 @@ function (x, df = NULL, knots = NULL, degree = 3, intercept = FALSE,
if (!missing(Boundary.knots)) {
Boundary.knots <- sort(Boundary.knots)
outside <- (ol <- x < Boundary.knots[1]) | (or <- x >
- Boundary.knots[2])
+ Boundary.knots[2L])
} else outside <- FALSE
ord <- 1 + (degree <- as.integer(degree))
if (ord <= 1)
@@ -211,19 +220,19 @@ function (x, df = NULL, knots = NULL, degree = 3, intercept = FALSE,
warning(
"some 'x' values beyond boundary knots may cause ill-conditioned bases")
derivs <- 0:degree
- scalef <- gamma(1:ord)
+ scalef <- gamma(1L:ord)
basis <- array(0, c(length(x), length(Aknots) - degree -
- 1))
+ 1L))
if (any(ol)) {
- k.pivot <- Boundary.knots[1]
- xl <- cbind(1, outer(x[ol] - k.pivot, 1:degree, "^"))
+ k.pivot <- Boundary.knots[1L]
+ xl <- cbind(1, outer(x[ol] - k.pivot, 1L:degree, "^"))
tt <- spline.des(Aknots, rep(k.pivot, ord), ord,
derivs)$design
basis[ol, ] <- xl %*% (tt/scalef)
}
if (any(or)) {
- k.pivot <- Boundary.knots[2]
- xr <- cbind(1, outer(x[or] - k.pivot, 1:degree, "^"))
+ k.pivot <- Boundary.knots[2L]
+ xr <- cbind(1, outer(x[or] - k.pivot, 1L:degree, "^"))
tt <- spline.des(Aknots, rep(k.pivot, ord), ord,
derivs)$design
basis[or, ] <- xr %*% (tt/scalef)
@@ -233,31 +242,36 @@ function (x, df = NULL, knots = NULL, degree = 3, intercept = FALSE,
ord)$design
} else basis <- spline.des(Aknots, x, ord)$design
if (!intercept)
- basis <- basis[, -1, drop = FALSE]
+ basis <- basis[, -1L, drop = FALSE]
n.col <- ncol(basis)
if (nas) {
nmat <- matrix(NA, length(nax), n.col)
nmat[!nax, ] <- basis
basis <- nmat
}
- dimnames(basis) <- list(nx, 1:n.col)
- a <- list(degree = degree, knots = if (is.null(knots)) numeric(0) else knots,
+ dimnames(basis) <- list(nx, 1L:n.col)
+ a <- list(degree = degree, knots = if (is.null(knots)) numeric(0L) else knots,
Boundary.knots = Boundary.knots, intercept = intercept)
attributes(basis) <- c(attributes(basis), a)
- class(basis) <- c("bs", "basis")
+ class(basis) <- c("bs", "basis", "matrix")
if (smart.mode.is("write"))
- put.smart(list(df=df,
- knots=knots,
- degree=degree,
- intercept=intercept,
- Boundary.knots=Boundary.knots,
- match.call=match.call()))
+ put.smart(list(df = df,
+ knots = knots,
+ degree = degree,
+ intercept = intercept,
+ Boundary.knots = Boundary.knots,
+ match.call = match.call()))
basis
}
attr(bs, "smart") <- TRUE
+
+
+
+
+
ns <-
function (x, df = NULL, knots = NULL, intercept = FALSE, Boundary.knots = range(x))
{
@@ -273,8 +287,8 @@ function (x, df = NULL, knots = NULL, intercept = FALSE, Boundary.knots = range(
x <- x[!nax]
if (!missing(Boundary.knots)) {
Boundary.knots <- sort(Boundary.knots)
- outside <- (ol <- x < Boundary.knots[1]) | (or <- x >
- Boundary.knots[2])
+ outside <- (ol <- x < Boundary.knots[1L]) | (or <- x >
+ Boundary.knots[2L])
} else outside <- FALSE
if (!missing(df) && missing(knots)) {
nIknots <- df - 1 - intercept
@@ -283,31 +297,30 @@ function (x, df = NULL, knots = NULL, intercept = FALSE, Boundary.knots = range(
warning("'df' was too small; have used ", 1 + intercept)
}
knots <- if (nIknots > 0) {
- knots <- seq(0, 1, length = nIknots + 2)[-c(1, nIknots +
- 2)]
+ knots <- seq.int(0, 1, length.out = nIknots + 2L)[-c(1L, nIknots +
+ 2L)]
stats::quantile(x[!outside], knots)
}
} else nIknots <- length(knots)
Aknots <- sort(c(rep(Boundary.knots, 4), knots))
if (any(outside)) {
- basis <- array(0, c(length(x), nIknots + 4))
+ basis <- array(0, c(length(x), nIknots + 4L))
if (any(ol)) {
- k.pivot <- Boundary.knots[1]
+ k.pivot <- Boundary.knots[1L]
xl <- cbind(1, x[ol] - k.pivot)
- tt <- spline.des(Aknots, rep(k.pivot, 2), 4, c(0,
+ tt <- spline.des(Aknots, rep(k.pivot, 2L), 4, c(0,
1))$design
basis[ol, ] <- xl %*% tt
}
if (any(or)) {
- k.pivot <- Boundary.knots[2]
+ k.pivot <- Boundary.knots[2L]
xr <- cbind(1, x[or] - k.pivot)
- tt <- spline.des(Aknots, rep(k.pivot, 2), 4, c(0,
+ tt <- spline.des(Aknots, rep(k.pivot, 2L), 4, c(0,
1))$design
basis[or, ] <- xr %*% tt
}
if (any(inside <- !outside))
- basis[inside, ] <- spline.des(Aknots, x[inside],
- 4)$design
+ basis[inside, ] <- spline.des(Aknots, x[inside], 4)$design
} else basis <- spline.des(Aknots, x, 4)$design
const <- spline.des(Aknots, Boundary.knots, 4, c(2, 2))$design
if (!intercept) {
@@ -315,7 +328,7 @@ function (x, df = NULL, knots = NULL, intercept = FALSE, Boundary.knots = range(
basis <- basis[, -1, drop = FALSE]
}
qr.const <- qr(t(const))
- basis <- as.matrix((t(qr.qty(qr.const, t(basis))))[, -(1:2),
+ basis <- as.matrix((t(qr.qty(qr.const, t(basis))))[, -(1L:2L),
drop = FALSE])
n.col <- ncol(basis)
if (nas) {
@@ -323,18 +336,18 @@ function (x, df = NULL, knots = NULL, intercept = FALSE, Boundary.knots = range(
nmat[!nax, ] <- basis
basis <- nmat
}
- dimnames(basis) <- list(nx, 1:n.col)
+ dimnames(basis) <- list(nx, 1L:n.col)
a <- list(degree = 3, knots = if (is.null(knots)) numeric(0) else knots,
Boundary.knots = Boundary.knots, intercept = intercept)
attributes(basis) <- c(attributes(basis), a)
- class(basis) <- c("ns", "basis")
+ class(basis) <- c("ns", "basis", "matrix")
if (smart.mode.is("write"))
- put.smart(list(df=df,
- knots=knots,
- intercept=intercept,
- Boundary.knots=Boundary.knots,
- match.call=match.call()))
+ put.smart(list(df = df,
+ knots = knots,
+ intercept = intercept,
+ Boundary.knots = Boundary.knots,
+ match.call = match.call()))
basis
}
@@ -343,6 +356,10 @@ attr(ns, "smart") <- TRUE
+
+
+
+
poly <-
function (x, ..., degree = 1, coefs = NULL, raw = FALSE)
{
@@ -356,8 +373,8 @@ function (x, ..., degree = 1, coefs = NULL, raw = FALSE)
dots <- list(...)
if (nd <- length(dots)) {
- if (nd == 1 && length(dots[[1]]) == 1)
- degree <- dots[[1]] else
+ if (nd == 1 && length(dots[[1]]) == 1L)
+ degree <- dots[[1L]] else
return(polym(x, ..., degree = degree, raw = raw))
}
if (is.matrix(x)) {
@@ -367,37 +384,46 @@ function (x, ..., degree = 1, coefs = NULL, raw = FALSE)
if (degree < 1)
stop("'degree' must be at least 1")
+
+
# At prediction time x may be less than the degree
if (smart.mode.is("write") || smart.mode.is("neutral"))
if (degree >= length(x))
stop("degree must be less than number of points")
+
+
+
if (any(is.na(x)))
stop("missing values are not allowed in 'poly'")
n <- degree + 1
if (raw) {
- if (degree >= length(x))
- stop("'degree' must be less than number of points")
- Z <- outer(x, 1:degree, "^")
- colnames(Z) <- 1:degree
- attr(Z, "degree") <- 1:degree
+ if (degree >= length(unique(x)))
+ stop("'degree' must be less than number of unique points")
+ Z <- outer(x, 1L:degree, "^")
+ colnames(Z) <- 1L:degree
+ attr(Z, "degree") <- 1L:degree
class(Z) <- c("poly", "matrix")
return(Z)
}
if (is.null(coefs)) {
- if (degree >= length(x))
- stop("'degree' must be less than number of points")
+ if (degree >= length(unique(x)))
+ stop("'degree' must be less than number of unique points")
xbar <- mean(x)
x <- x - xbar
X <- outer(x, seq_len(n) - 1, "^")
QR <- qr(X)
+
+ if (QR$rank < degree)
+ stop("'degree' must be less than number of unique points")
+
z <- QR$qr
z <- z * (row(z) == col(z))
raw <- qr.qy(QR, z)
norm2 <- colSums(raw^2)
- alpha <- (colSums(x * raw^2)/norm2 + xbar)[1:degree]
+ alpha <- (colSums(x * raw^2)/norm2 + xbar)[1L:degree]
Z <- raw/rep(sqrt(norm2), each = length(x))
- colnames(Z) <- 1:n - 1
+ colnames(Z) <- 1L:n - 1L
Z <- Z[, -1, drop = FALSE]
attr(Z, "degree") <- 1:degree
attr(Z, "coefs") <- list(alpha = alpha, norm2 = c(1,
@@ -408,28 +434,33 @@ function (x, ..., degree = 1, coefs = NULL, raw = FALSE)
norm2 <- coefs$norm2
Z <- matrix(, length(x), n)
Z[, 1] <- 1
- Z[, 2] <- x - alpha[1]
+ Z[, 2] <- x - alpha[1L]
if (degree > 1)
for (i in 2:degree) Z[, i + 1] <- (x - alpha[i]) *
Z[, i] - (norm2[i + 1]/norm2[i]) * Z[, i - 1]
- Z <- Z/rep(sqrt(norm2[-1]), each = length(x))
+ Z <- Z/rep(sqrt(norm2[-1L]), each = length(x))
colnames(Z) <- 0:degree
Z <- Z[, -1, drop = FALSE]
- attr(Z, "degree") <- 1:degree
+ attr(Z, "degree") <- 1L:degree
attr(Z, "coefs") <- list(alpha = alpha, norm2 = norm2)
class(Z) <- c("poly", "matrix")
}
if (smart.mode.is("write"))
- put.smart(list(degree=degree, coefs=attr(Z, "coefs"),
- raw=FALSE, # raw is changed above
- match.call=match.call()))
+ put.smart(list(degree = degree,
+ coefs = attr(Z, "coefs"),
+ raw = FALSE, # raw is changed above
+ match.call = match.call()))
Z
}
attr(poly, "smart") <- TRUE
+
+
+
+
scale.default <-
function (x, center = TRUE, scale = TRUE)
{
@@ -443,22 +474,22 @@ function (x, center = TRUE, scale = TRUE)
if (is.logical(center)) {
if (center) {
center <- colMeans(x, na.rm = TRUE)
- x <- sweep(x, 2, center)
+ x <- sweep(x, 2L, center, check.margin = FALSE)
}
} else if (is.numeric(center) && (length(center) == nc))
- x <- sweep(x, 2, center) else
+ x <- sweep(x, 2L, center, check.margin = FALSE) else
stop("length of 'center' must equal the number of columns of 'x'")
if (is.logical(scale)) {
if (scale) {
f <- function(v) {
v <- v[!is.na(v)]
- sqrt(sum(v^2)/max(1, length(v) - 1))
+ sqrt(sum(v^2)/max(1, length(v) - 1L))
}
- scale <- apply(x, 2, f)
- x <- sweep(x, 2, scale, "/")
+ scale <- apply(x, 2L, f)
+ x <- sweep(x, 2L, scale, "/", check.margin = FALSE)
}
} else if (is.numeric(scale) && length(scale) == nc)
- x <- sweep(x, 2, scale, "/") else
+ x <- sweep(x, 2L, scale, "/", check.margin = FALSE) else
stop("length of 'scale' must equal the number of columns of 'x'")
if (is.numeric(center))
attr(x, "scaled:center") <- center
@@ -466,8 +497,8 @@ function (x, center = TRUE, scale = TRUE)
attr(x, "scaled:scale") <- scale
if (smart.mode.is("write")) {
- put.smart(list(center=center, scale=scale,
- match.call=match.call()))
+ put.smart(list(center = center, scale = scale,
+ match.call = match.call()))
}
x
diff --git a/R/step.vglm.q b/R/step.vglm.q
index 517740e..f6e809d 100644
--- a/R/step.vglm.q
+++ b/R/step.vglm.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
step.vglm <- function(fit, ...)
diff --git a/R/summary.vgam.q b/R/summary.vgam.q
index bf62ee8..dcca22b 100644
--- a/R/summary.vgam.q
+++ b/R/summary.vgam.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/summary.vglm.q b/R/summary.vglm.q
index aa339cc..db3e71f 100644
--- a/R/summary.vglm.q
+++ b/R/summary.vglm.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/summary.vlm.q b/R/summary.vlm.q
index 6f53a77..0ff5ccb 100644
--- a/R/summary.vlm.q
+++ b/R/summary.vlm.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/uqo.R b/R/uqo.R
index 5180f61..ca0ba0f 100644
--- a/R/uqo.R
+++ b/R/uqo.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -315,7 +316,7 @@ callduqof = function(sitescores, etamat, ymat, wvec, modelno, nice31, xmat,
control = Control
itol = othint[14]
inited = if (is.R()) {
- if (exists(".VGAM.UQO.etamat", envir = VGAMenv)) 1 else 0
+ if (exists(".VGAM.UQO.etamat", envir = VGAM:::VGAMenv)) 1 else 0
} else 0 # 0 means fortran initializes the etamat
othint[5] = inited # Replacement
usethiseta = if (inited==1)
@@ -497,11 +498,11 @@ uqo.fit <- function(x, y, w=rep(1, len=nrow(x)),
"gamma2"=5,
0) # stop("can't fit this model using fast algorithm")
if (!modelno) stop("the family function does not work with uqo()")
- if (modelno == 1) modelno = get("modelno", envir = VGAMenv)
+ if (modelno == 1) modelno = get("modelno", envir = VGAM:::VGAMenv)
rmfromVGAMenv(c("etamat", "beta"), prefix=".VGAM.UQO.")
- cqofastok = if (is.R()) (exists("CQO.FastAlgorithm", envir = VGAMenv) &&
- get("CQO.FastAlgorithm", envir = VGAMenv)) else
+ cqofastok = if (is.R()) (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv) &&
+ get("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)) else
(exists("CQO.FastAlgorithm", inherits=TRUE) && CQO.FastAlgorithm)
if (!cqofastok)
stop("can't fit this model using fast algorithm")
diff --git a/R/vgam.R b/R/vgam.R
index 75fd774..e3acf18 100644
--- a/R/vgam.R
+++ b/R/vgam.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/vgam.control.q b/R/vgam.control.q
index 71a0dde..ac4b5a2 100644
--- a/R/vgam.control.q
+++ b/R/vgam.control.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/vgam.fit.q b/R/vgam.fit.q
index 2e72295..e089dcd 100644
--- a/R/vgam.fit.q
+++ b/R/vgam.fit.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -107,11 +108,20 @@ vgam.fit <- function(x, y, w, mf,
if (length(etastart)) {
eta <- etastart
mu <- if (length(mustart)) mustart else
- slot(family, "inverse")(eta, extra)
- } else {
- if (length(mustart))
- mu <- mustart
- eta <- slot(family, "link")(mu, extra)
+ if (length(body(slot(family, "inverse"))))
+ slot(family, "inverse")(eta, extra) else
+ warning("argument 'etastart' assigned a value ",
+ "but there is no 'inverse' slot to use it")
+ }
+
+ if (length(mustart)) {
+ mu <- mustart
+ if (length(body(slot(family, "link")))) {
+ eta <- slot(family, "link")(mu, extra)
+ } else {
+ warning("argument 'mustart' assigned a value ",
+ "but there is no 'link' slot to use it")
+ }
}
M <- if (is.matrix(eta)) ncol(eta) else 1
diff --git a/R/vgam.match.q b/R/vgam.match.q
index db3bb7c..390c694 100644
--- a/R/vgam.match.q
+++ b/R/vgam.match.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/vglm.R b/R/vglm.R
index 4473df7..1273c12 100644
--- a/R/vglm.R
+++ b/R/vglm.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/vglm.control.q b/R/vglm.control.q
index b3c72ef..afe87e8 100644
--- a/R/vglm.control.q
+++ b/R/vglm.control.q
@@ -1,15 +1,21 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
-.min.criterion.VGAM <- c("deviance"=TRUE, "loglikelihood"=FALSE, "AIC"=TRUE,
- "Likelihood"=FALSE, "rss"=TRUE, "coefficients"=TRUE)
+.min.criterion.VGAM <-
+ c("deviance" = TRUE,
+ "loglikelihood" = FALSE,
+ "AIC" = TRUE,
+ "Likelihood" = FALSE,
+ "rss" = TRUE,
+ "coefficients" = TRUE)
-vlm.control <- function(save.weight=TRUE, tol=1e-7, method="qr",
- checkwz=TRUE, wzepsilon = .Machine$double.eps^0.75,
+vlm.control <- function(save.weight = TRUE, tol=1e-7, method="qr",
+ checkwz = TRUE, wzepsilon = .Machine$double.eps^0.75,
...) {
if (tol <= 0) {
warning("tol not positive; using 1e-7 instead")
@@ -17,7 +23,7 @@ vlm.control <- function(save.weight=TRUE, tol=1e-7, method="qr",
}
if (!is.logical(checkwz) || length(checkwz) != 1)
stop("bad input for 'checkwz'")
- if (!is.Numeric(wzepsilon, allow=1, positive=TRUE))
+ if (!is.Numeric(wzepsilon, allow=1, positive = TRUE))
stop("bad input for 'wzepsilon'")
list(save.weight=save.weight, tol=tol, method=method,
@@ -26,14 +32,14 @@ vlm.control <- function(save.weight=TRUE, tol=1e-7, method="qr",
}
-vglm.control <- function(checkwz=TRUE,
+vglm.control <- function(checkwz = TRUE,
criterion = names(.min.criterion.VGAM),
epsilon=1e-7,
- half.stepsizing=TRUE,
+ half.stepsizing = TRUE,
maxit=30,
stepsize=1,
- save.weight=FALSE,
- trace=FALSE,
+ save.weight = FALSE,
+ trace = FALSE,
wzepsilon = .Machine$double.eps^0.75,
xij=NULL,
...)
@@ -50,7 +56,7 @@ vglm.control <- function(checkwz=TRUE,
if (!is.logical(checkwz) || length(checkwz) != 1)
stop("bad input for 'checkwz'")
- if (!is.Numeric(wzepsilon, allow=1, positive=TRUE))
+ if (!is.Numeric(wzepsilon, allow=1, positive = TRUE))
stop("bad input for 'wzepsilon'")
convergence <- expression({
@@ -62,15 +68,15 @@ vglm.control <- function(checkwz=TRUE,
abs(old.crit-new.crit)/(abs(old.crit)+epsilon) > epsilon && iter<maxit)
})
- if (!is.Numeric(epsilon, allow=1, posit=TRUE)) {
+ if (!is.Numeric(epsilon, allow=1, posit = TRUE)) {
warning("bad input for 'epsilon'; using 0.00001 instead")
epsilon <- 0.00001
}
- if (!is.Numeric(maxit, allow=1, posit=TRUE, integ=TRUE)) {
+ if (!is.Numeric(maxit, allow=1, posit = TRUE, integ = TRUE)) {
warning("bad input for 'maxit'; using 20 instead")
maxit <- 20
}
- if (!is.Numeric(stepsize, allow=1, posit=TRUE)) {
+ if (!is.Numeric(stepsize, allow=1, posit = TRUE)) {
warning("bad input for 'stepsize'; using 1 instead")
stepsize <- 1
}
@@ -98,10 +104,10 @@ vcontrol.expression <- expression({
mylist <- family at vfamily
for(i in length(mylist):1) {
for(ii in 1:2) {
- temp <- paste(if(ii==1) "" else paste(function.name, ".", sep=""),
+ temp <- paste(if(ii == 1) "" else paste(function.name, ".", sep=""),
mylist[i], ".control", sep="")
- tempexists = if (is.R()) exists(temp, envir = VGAMenv) else
- exists(temp, inherit=TRUE)
+ tempexists = if (is.R()) exists(temp, envir = VGAM:::VGAMenv) else
+ exists(temp, inherit = TRUE)
if (tempexists) {
temp <- get(temp)
temp <- temp(...)
@@ -132,7 +138,7 @@ vcontrol.expression <- expression({
for(ii in 1:2) {
- temp <- paste(if(ii==1) "" else paste(function.name, ".", sep=""),
+ temp <- paste(if(ii == 1) "" else paste(function.name, ".", sep=""),
family at vfamily[1],
".", control$criterion, ".control", sep="")
if (exists(temp, inherit=T)) {
diff --git a/R/vglm.fit.q b/R/vglm.fit.q
index 9e80486..697d601 100644
--- a/R/vglm.fit.q
+++ b/R/vglm.fit.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
@@ -190,11 +191,20 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
if (length(etastart)) {
eta <- etastart
mu <- if (length(mustart)) mustart else
- slot(family, "inverse")(eta, extra)
- } else {
- if (length(mustart))
- mu <- mustart
- eta <- slot(family, "link")(mu, extra)
+ if (length(body(slot(family, "inverse"))))
+ slot(family, "inverse")(eta, extra) else
+ warning("argument 'etastart' assigned a value ",
+ "but there is no 'inverse' slot to use it")
+ }
+
+ if (length(mustart)) {
+ mu <- mustart
+ if (length(body(slot(family, "link")))) {
+ eta <- slot(family, "link")(mu, extra)
+ } else {
+ warning("argument 'mustart' assigned a value ",
+ "but there is no 'link' slot to use it")
+ }
}
@@ -206,7 +216,7 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
eval(slot(family, "constraints"))
- Blist <- process.constraints(constraints, x, M, specialCM=specialCM)
+ Blist <- process.constraints(constraints, x, M, specialCM = specialCM)
ncolBlist <- unlist(lapply(Blist, ncol))
diff --git a/R/vlm.R b/R/vlm.R
index 6da229e..fdca4f3 100644
--- a/R/vlm.R
+++ b/R/vlm.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/vlm.wfit.q b/R/vlm.wfit.q
index ea87269..5625fca 100644
--- a/R/vlm.wfit.q
+++ b/R/vlm.wfit.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/vsmooth.spline.q b/R/vsmooth.spline.q
index cefd53e..9a36714 100644
--- a/R/vsmooth.spline.q
+++ b/R/vsmooth.spline.q
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/R/zzz.R b/R/zzz.R
index a630d09..61570a9 100644
--- a/R/zzz.R
+++ b/R/zzz.R
@@ -1,5 +1,6 @@
# These functions are
-# Copyright (C) 1998-2010 T.W. Yee, University of Auckland. All rights reserved.
+# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# All rights reserved.
diff --git a/data/alclevels.R b/data/alclevels.R
new file mode 100644
index 0000000..2ef9cf5
--- /dev/null
+++ b/data/alclevels.R
@@ -0,0 +1,22 @@
+alclevels <-
+structure(list(Monday = c(121L, 97L, 60L, 55L, 25L, 19L, 13L,
+20L, 5L, 7L, 7L, 8L, 10L, 9L, 22L, 39L, 28L, 46L, 53L, 74L, 74L,
+84L, 90L, 110L), Tuesday = c(98L, 92L, 69L, 60L, 38L, 10L, 9L,
+6L, 8L, 8L, 13L, 10L, 13L, 26L, 41L, 48L, 48L, 59L, 100L, 119L,
+135L, 154L, 143L, 169L), Wednesday = c(165L, 157L, 107L, 75L,
+48L, 20L, 9L, 9L, 12L, 14L, 20L, 20L, 8L, 32L, 31L, 62L, 71L,
+98L, 117L, 155L, 283L, 326L, 345L, 363L), Thursday = c(324L,
+278L, 229L, 238L, 145L, 56L, 55L, 42L, 29L, 28L, 36L, 32L, 39L,
+37L, 46L, 69L, 85L, 141L, 185L, 289L, 508L, 610L, 765L, 899L),
+ Friday = c(827L, 619L, 410L, 401L, 223L, 139L, 70L, 40L,
+ 40L, 38L, 38L, 27L, 37L, 27L, 42L, 59L, 55L, 136L, 223L,
+ 335L, 591L, 866L, 976L, 1265L), Saturday = c(1379L, 1327L,
+ 979L, 693L, 346L, 188L, 155L, 160L, 79L, 44L, 52L, 44L, 73L,
+ 41L, 58L, 75L, 96L, 154L, 236L, 337L, 490L, 754L, 1026L,
+ 1179L), Sunday = c(1332L, 1356L, 1011L, 718L, 410L, 287L,
+ 213L, 200L, 96L, 58L, 69L, 39L, 59L, 45L, 53L, 70L, 95L,
+ 130L, 121L, 146L, 166L, 131L, 114L, 159L)), .Names = c("Monday",
+"Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"
+), class = "data.frame", row.names = c("0", "1", "2", "3", "4",
+"5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15",
+"16", "17", "18", "19", "20", "21", "22", "23"))
diff --git a/data/alcoff.R b/data/alcoff.R
new file mode 100644
index 0000000..600d3e1
--- /dev/null
+++ b/data/alcoff.R
@@ -0,0 +1,22 @@
+alcoff <-
+structure(list(Monday = c(121L, 97L, 60L, 55L, 25L, 19L, 13L,
+20L, 5L, 7L, 7L, 8L, 10L, 9L, 22L, 39L, 28L, 46L, 53L, 74L, 74L,
+84L, 90L, 110L), Tuesday = c(98L, 92L, 69L, 60L, 38L, 10L, 9L,
+6L, 8L, 8L, 13L, 10L, 13L, 26L, 41L, 48L, 48L, 59L, 100L, 119L,
+135L, 154L, 143L, 169L), Wednesday = c(165L, 157L, 107L, 75L,
+48L, 20L, 9L, 9L, 12L, 14L, 20L, 20L, 8L, 32L, 31L, 62L, 71L,
+98L, 117L, 155L, 283L, 326L, 345L, 363L), Thursday = c(324L,
+278L, 229L, 238L, 145L, 56L, 55L, 42L, 29L, 28L, 36L, 32L, 39L,
+37L, 46L, 69L, 85L, 141L, 185L, 289L, 508L, 610L, 765L, 899L),
+ Friday = c(827L, 619L, 410L, 401L, 223L, 139L, 70L, 40L,
+ 40L, 38L, 38L, 27L, 37L, 27L, 42L, 59L, 55L, 136L, 223L,
+ 335L, 591L, 866L, 976L, 1265L), Saturday = c(1379L, 1327L,
+ 979L, 693L, 346L, 188L, 155L, 160L, 79L, 44L, 52L, 44L, 73L,
+ 41L, 58L, 75L, 96L, 154L, 236L, 337L, 490L, 754L, 1026L,
+ 1179L), Sunday = c(1332L, 1356L, 1011L, 718L, 410L, 287L,
+ 213L, 200L, 96L, 58L, 69L, 39L, 59L, 45L, 53L, 70L, 95L,
+ 130L, 121L, 146L, 166L, 131L, 114L, 159L)), .Names = c("Monday",
+"Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"
+), class = "data.frame", row.names = c("0", "1", "2", "3", "4",
+"5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15",
+"16", "17", "18", "19", "20", "21", "22", "23"))
diff --git a/data/azprocedure.R b/data/azprocedure.R
new file mode 100644
index 0000000..9b6f457
--- /dev/null
+++ b/data/azprocedure.R
@@ -0,0 +1,2289 @@
+azprocedure <-
+structure(list(hospital = c(3.60000014305115, 6.7000002861023,
+2.5, 6.5, 3.70000004768372, 4.30000019073486, 6.5, 5.19999980926514,
+3.70000004768372, 4.30000019073486, 3.60000014305115, 5.19999980926514,
+6.80000019073486, 2.40000009536743, 3.09999990463257, 2.5, 3.09999990463257,
+3.70000004768372, 6, 6.80000019073486, 6.7000002861023, 5.19999980926514,
+2.5, 2.5, 5.19999980926514, 3.60000014305115, 3.70000004768372,
+2.5, 6.7000002861023, 2.5, 4.30000019073486, 3.60000014305115,
+6.7000002861023, 6.80000019073486, 3.09999990463257, 6, 3.09999990463257,
+4.30000019073486, 6, 6.5, 6, 2.5, 4.30000019073486, 6.5, 6, 3.60000014305115,
+2.5, 3.19999980926514, 2.70000004768372, 3.60000014305115, 6.80000019073486,
+3.19999980926514, 3.19999980926514, 4.30000019073486, 4.30000019073486,
+0.100000001490116, 2.5, 2.5, 2.40000009536743, 9.10000038146973,
+6.80000019073486, 3.70000004768372, 5.19999980926514, 3.70000004768372,
+2.5, 6.5, 5.19999980926514, 5.19999980926514, 2.5, 6.5, 4.30000019073486,
+2.5, 3.09999990463257, 6.7000002861023, 3.09999990463257, 3.19999980926514,
+4.30000019073486, 6, 6.5, 2.5, 5.19999980926514, 5.19999980926514,
+6.5, 2.5, 6.7000002861023, 2.5, 5.19999980926514, 3.60000014305115,
+3.09999990463257, 3.60000014305115, 2.70000004768372, 2.40000009536743,
+6.5, 3.60000014305115, 9.10000038146973, 4.30000019073486, 6,
+3.60000014305115, 3.60000014305115, 2.5, 3.19999980926514, 9.10000038146973,
+3.60000014305115, 6.5, 4.30000019073486, 3.09999990463257, 3.19999980926514,
+2.5, 2.5, 2.5, 3.19999980926514, 6.5, 6.5, 5.19999980926514,
+3.09999990463257, 3.19999980926514, 3.09999990463257, 6.7000002861023,
+6.7000002861023, 2.5, 6.5, 2.5, 2.5, 5.19999980926514, 2.70000004768372,
+3.09999990463257, 6.5, 5.19999980926514, 2.5, 2.5, 6.5, 2.5,
+2.5, 3.09999990463257, 2.5, 5.19999980926514, 6.7000002861023,
+4.30000019073486, 3.09999990463257, 6, 2.40000009536743, 2.5,
+4.30000019073486, 3.09999990463257, 5.19999980926514, 2.5, 6.5,
+3.09999990463257, 2.5, 2.5, 9.10000038146973, 6, 6.7000002861023,
+6.5, 6, 2.5, 5.19999980926514, 2.5, 2.40000009536743, 2.5, 9.10000038146973,
+5.19999980926514, 5.19999980926514, 9.10000038146973, 2.5, 5.19999980926514,
+6.5, 3.09999990463257, 6.5, 3.60000014305115, 5.19999980926514,
+3.09999990463257, 6, 2.5, 2.5, 6.5, 2.5, 2.5, 9.10000038146973,
+5.19999980926514, 3.70000004768372, 6.7000002861023, 6.5, 3.60000014305115,
+5.19999980926514, 3.09999990463257, 6.7000002861023, 4.30000019073486,
+3.09999990463257, 6.5, 3.5, 5.19999980926514, 2.5, 5.19999980926514,
+2.5, 3.09999990463257, 3.09999990463257, 6.5, 6, 2.5, 6.7000002861023,
+2.5, 3.60000014305115, 3.70000004768372, 2.40000009536743, 6.5,
+2.70000004768372, 6.7000002861023, 6, 5.19999980926514, 4.09999990463257,
+3.09999990463257, 6.5, 5.19999980926514, 3.70000004768372, 4.30000019073486,
+6.7000002861023, 5.19999980926514, 4.30000019073486, 2.5, 3.60000014305115,
+2.5, 6.7000002861023, 5.19999980926514, 2.40000009536743, 4.09999990463257,
+3.09999990463257, 2.5, 5.19999980926514, 3.5, 2.5, 6.7000002861023,
+6, 6.5, 5.19999980926514, 5.19999980926514, 5.19999980926514,
+6.7000002861023, 2.5, 6.5, 5.19999980926514, 2.40000009536743,
+4.30000019073486, 4.30000019073486, 3.60000014305115, 2.40000009536743,
+5.19999980926514, 3.09999990463257, 3.09999990463257, 5.19999980926514,
+2.5, 2.5, 3.60000014305115, 3.19999980926514, 6, 2.5, 3.19999980926514,
+3.09999990463257, 3.70000004768372, 6.7000002861023, 2.5, 3.70000004768372,
+6.7000002861023, 3.09999990463257, 3.70000004768372, 3.60000014305115,
+6.7000002861023, 6.5, 2.70000004768372, 5.19999980926514, 6.80000019073486,
+6, 6.5, 6.7000002861023, 6, 6.80000019073486, 3.70000004768372,
+2.5, 3.60000014305115, 5.19999980926514, 3.60000014305115, 3.09999990463257,
+6.5, 6, 4.30000019073486, 6, 6.7000002861023, 3.60000014305115,
+2.70000004768372, 3.70000004768372, 2.5, 2.5, 2.5, 5.19999980926514,
+3.09999990463257, 2.70000004768372, 4.09999990463257, 6.5, 6.7000002861023,
+6.80000019073486, 3.09999990463257, 3.60000014305115, 2.40000009536743,
+4.30000019073486, 6.5, 6.80000019073486, 2.5, 4.30000019073486,
+3.60000014305115, 3.60000014305115, 2.5, 2.5, 2.5, 6.5, 6, 5.19999980926514,
+5.19999980926514, 3.09999990463257, 2.5, 6.5, 3.09999990463257,
+3.09999990463257, 5.19999980926514, 3.70000004768372, 6.5, 5.19999980926514,
+3.5, 2.5, 6.5, 3.19999980926514, 9.10000038146973, 3.60000014305115,
+5.19999980926514, 6.5, 3.70000004768372, 6.5, 6.7000002861023,
+3.5, 6.5, 3.5, 6.5, 3.09999990463257, 2.5, 2.5, 3.60000014305115,
+3.70000004768372, 2.70000004768372, 3.5, 2.5, 2.5, 6, 2.5, 6.7000002861023,
+2.40000009536743, 4.30000019073486, 2.40000009536743, 6, 2.70000004768372,
+3.60000014305115, 6, 6.7000002861023, 2.5, 3.60000014305115,
+3.09999990463257, 2.5, 6.80000019073486, 4.09999990463257, 5.19999980926514,
+3.60000014305115, 6.80000019073486, 2.5, 5.19999980926514, 2.40000009536743,
+2.5, 6, 5.19999980926514, 5.19999980926514, 3.09999990463257,
+3.60000014305115, 2.5, 2.40000009536743, 5.19999980926514, 2.5,
+5.19999980926514, 5.19999980926514, 3.09999990463257, 2.5, 4.30000019073486,
+2.5, 5.19999980926514, 3.5, 2.5, 3.09999990463257, 3.09999990463257,
+4.09999990463257, 5.19999980926514, 2.5, 2.70000004768372, 3.70000004768372,
+9.10000038146973, 5.19999980926514, 3.60000014305115, 2.5, 2.5,
+9.10000038146973, 2.70000004768372, 2.5, 3.09999990463257, 5.19999980926514,
+3.19999980926514, 6, 2.70000004768372, 3.09999990463257, 2.5,
+6, 3.09999990463257, 6.5, 2.5, 3.09999990463257, 3.70000004768372,
+6.5, 3.09999990463257, 5.19999980926514, 9.10000038146973, 6,
+5.19999980926514, 2.5, 3.70000004768372, 3.19999980926514, 3.70000004768372,
+4.09999990463257, 2.5, 2.5, 2.5, 3.60000014305115, 2.5, 5.19999980926514,
+5.19999980926514, 2.5, 5.19999980926514, 5.19999980926514, 2.5,
+2.40000009536743, 6, 2.5, 6, 2.5, 2.5, 3.09999990463257, 2.5,
+3.60000014305115, 6.5, 3.70000004768372, 3.09999990463257, 2.40000009536743,
+2.5, 5.19999980926514, 6.5, 2.5, 3.70000004768372, 4.30000019073486,
+2.5, 9.10000038146973, 2.40000009536743, 6.5, 6, 2.5, 9.10000038146973,
+2.5, 2.5, 3.09999990463257, 2.5, 5.19999980926514, 2.5, 2.5,
+2.5, 6, 3.09999990463257, 2.70000004768372, 3.09999990463257,
+2.5, 2.40000009536743, 2.40000009536743, 5.19999980926514, 9.10000038146973,
+5.19999980926514, 6.7000002861023, 2.5, 3.09999990463257, 2.5,
+3.5, 3.09999990463257, 9.10000038146973, 3.09999990463257, 2.5,
+9.10000038146973, 2.5, 3.09999990463257, 6.7000002861023, 3.09999990463257,
+6, 2.5, 2.5, 2.5, 6, 2.40000009536743, 6.5, 2.70000004768372,
+5.19999980926514, 6.80000019073486, 4.30000019073486, 4.30000019073486,
+6.5, 3.09999990463257, 3.60000014305115, 3.60000014305115, 5.19999980926514,
+6.7000002861023, 2.5, 2.5, 6.7000002861023, 9.10000038146973,
+5.19999980926514, 5.19999980926514, 2.70000004768372, 2.5, 3.09999990463257,
+6.80000019073486, 2.5, 6.5, 6.7000002861023, 3.09999990463257,
+4.30000019073486, 3.09999990463257, 3.09999990463257, 2.70000004768372,
+2.5, 5.19999980926514, 6.7000002861023, 2.5, 2.5, 3.60000014305115,
+3.09999990463257, 2.5, 5.19999980926514, 2.5, 3.09999990463257,
+2.5, 3.09999990463257, 2.5, 2.5, 2.70000004768372, 3.09999990463257,
+5.19999980926514, 2.5, 2.5, 6.5, 6.80000019073486, 2.40000009536743,
+2.5, 5.19999980926514, 2.5, 9.10000038146973, 5.19999980926514,
+2.5, 2.40000009536743, 5.19999980926514, 3.09999990463257, 3.60000014305115,
+2.5, 2.5, 2.5, 3.09999990463257, 6.5, 6.7000002861023, 2.40000009536743,
+2.70000004768372, 6.5, 4.30000019073486, 9.10000038146973, 3.09999990463257,
+4.30000019073486, 3.09999990463257, 3.09999990463257, 3.60000014305115,
+3.09999990463257, 6, 2.5, 2.5, 2.5, 2.40000009536743, 3.09999990463257,
+5.19999980926514, 6.7000002861023, 5.19999980926514, 2.70000004768372,
+3.19999980926514, 4.30000019073486, 3.09999990463257, 2.5, 6.5,
+5.19999980926514, 3.09999990463257, 2.5, 3.09999990463257, 3.60000014305115,
+5.19999980926514, 4.09999990463257, 2.5, 5.19999980926514, 6,
+5.19999980926514, 5.19999980926514, 3.09999990463257, 6.5, 6.5,
+2.5, 5.19999980926514, 6.5, 3.19999980926514, 2.5, 6.5, 3.70000004768372,
+6.5, 9.10000038146973, 3.60000014305115, 3.09999990463257, 3.70000004768372,
+3.09999990463257, 2.5, 2.5, 2.5, 6.7000002861023, 5.19999980926514,
+6, 5.19999980926514, 5.19999980926514, 3.09999990463257, 2.5,
+2.5, 2.5, 6, 3.09999990463257, 2.5, 2.5, 3.09999990463257, 4.30000019073486,
+6, 6, 6.80000019073486, 3.60000014305115, 3.5, 2.5, 2.70000004768372,
+3.09999990463257, 6.5, 6, 3.09999990463257, 3.60000014305115,
+6, 5.19999980926514, 5.19999980926514, 2.40000009536743, 6.5,
+2.5, 2.5, 2.5, 5.19999980926514, 2.5, 6.7000002861023, 4.09999990463257,
+6.5, 2.5, 5.19999980926514, 6.5, 6.5, 2.5, 6.5, 5.19999980926514,
+3.09999990463257, 6.5, 2.70000004768372, 9.10000038146973, 2.5,
+2.5, 3.70000004768372, 2.5, 2.5, 3.70000004768372, 6.7000002861023,
+5.19999980926514, 2.40000009536743, 3.09999990463257, 2.5, 3.09999990463257,
+3.09999990463257, 5.19999980926514, 6, 6.5, 5.19999980926514,
+6.7000002861023, 2.5, 5.19999980926514, 6.7000002861023, 2.5,
+5.19999980926514, 5.19999980926514, 4.30000019073486, 3.09999990463257,
+5.19999980926514, 2.40000009536743, 2.5, 2.5, 3.60000014305115,
+6.7000002861023, 6, 3.09999990463257, 4.30000019073486, 2.5,
+6.7000002861023, 2.5, 4.09999990463257, 2.5, 3.09999990463257,
+4.30000019073486, 6.5, 2.5, 3.19999980926514, 6.5, 3.09999990463257,
+3.60000014305115, 3.19999980926514, 3.70000004768372, 6, 6.5,
+3.09999990463257, 6.5, 6.80000019073486, 2.70000004768372, 5.19999980926514,
+2.5, 5.19999980926514, 6, 5.19999980926514, 3.60000014305115,
+2.70000004768372, 5.19999980926514, 3.09999990463257, 3.60000014305115,
+9.10000038146973, 3.09999990463257, 6, 3.09999990463257, 6.5,
+6, 2.5, 2.5, 2.5, 5.19999980926514, 2.5, 9.10000038146973, 5.19999980926514,
+2.40000009536743, 5.19999980926514, 5.19999980926514, 5.19999980926514,
+5.19999980926514, 5.19999980926514, 6.5, 5.19999980926514, 2.5,
+5.19999980926514, 2.5, 5.19999980926514, 2.5, 6.5, 6.5, 6.7000002861023,
+2.40000009536743, 3.09999990463257, 6.5, 2.5, 5.19999980926514,
+3.60000014305115, 4.30000019073486, 2.70000004768372, 2.40000009536743,
+6.5, 6, 5.19999980926514, 6.80000019073486, 6, 9.10000038146973,
+4.30000019073486, 6, 6, 3.09999990463257, 2.5, 4.30000019073486,
+2.5, 5.19999980926514, 5.19999980926514, 3.09999990463257, 3.70000004768372,
+6.5, 3.60000014305115, 2.5, 2.5, 2.70000004768372, 5.19999980926514,
+2.5, 2.5, 6.5, 3.19999980926514, 2.5, 9.10000038146973, 6, 6.7000002861023,
+5.19999980926514, 2.5, 2.5, 6, 9.10000038146973, 2.5, 5.19999980926514,
+5.19999980926514, 2.5, 5.19999980926514, 5.19999980926514, 6.5,
+2.5, 4.09999990463257, 2.5, 6.7000002861023, 4.09999990463257,
+2.5, 2.5, 6.5, 5.19999980926514, 9.10000038146973, 5.19999980926514,
+5.19999980926514, 2.5, 6, 5.19999980926514, 6.7000002861023,
+2.5, 3.70000004768372, 5.19999980926514, 3.09999990463257, 4.09999990463257,
+6.7000002861023, 2.70000004768372, 3.09999990463257, 6.5, 4.30000019073486,
+6.7000002861023, 9.10000038146973, 6, 2.5, 3.09999990463257,
+3.19999980926514, 6.7000002861023, 2.5, 3.09999990463257, 2.5,
+3.09999990463257, 2.40000009536743, 3.09999990463257, 3.09999990463257,
+3.70000004768372, 9.10000038146973, 6.5, 3.60000014305115, 2.5,
+5.19999980926514, 2.5, 6, 3.60000014305115, 3.09999990463257,
+4.09999990463257, 4.09999990463257, 6.5, 2.40000009536743, 9.10000038146973,
+3.09999990463257, 5.19999980926514, 6, 2.70000004768372, 6.7000002861023,
+4.30000019073486, 3.5, 9.10000038146973, 5.19999980926514, 6.80000019073486,
+6.5, 5.19999980926514, 5.19999980926514, 3.19999980926514, 5.19999980926514,
+2.5, 6, 5.19999980926514, 2.70000004768372, 3.09999990463257,
+5.19999980926514, 5.19999980926514, 5.19999980926514, 2.5, 2.5,
+6.80000019073486, 6, 2.40000009536743, 2.70000004768372, 9.10000038146973,
+6.80000019073486, 6.5, 5.19999980926514, 5.19999980926514, 3.09999990463257,
+3.09999990463257, 4.30000019073486, 3.70000004768372, 2.5, 2.5,
+6, 6.5, 3.09999990463257, 5.19999980926514, 6, 9.10000038146973,
+6, 5.19999980926514, 4.30000019073486, 9.10000038146973, 2.5,
+3.09999990463257, 6.7000002861023, 6, 5.19999980926514, 3.60000014305115,
+2.5, 3.70000004768372, 3.09999990463257, 6.5, 4.09999990463257,
+6.7000002861023, 2.5, 2.70000004768372, 5.19999980926514, 2.5,
+5.19999980926514, 2.40000009536743, 4.09999990463257, 2.5, 3.5,
+5.19999980926514, 5.19999980926514, 3.70000004768372, 9.10000038146973,
+6.5, 3.5, 3.09999990463257, 6, 5.19999980926514, 3.09999990463257,
+4.09999990463257, 2.5, 5.19999980926514, 5.19999980926514, 5.19999980926514,
+3.09999990463257, 5.19999980926514, 4.30000019073486, 6.80000019073486,
+3.09999990463257, 2.5, 5.19999980926514, 2.5, 3.70000004768372,
+6, 5.19999980926514, 5.19999980926514, 2.5, 2.5, 6.80000019073486,
+5.19999980926514, 3.09999990463257, 5.19999980926514, 4.09999990463257,
+3.60000014305115, 6, 3.09999990463257, 2.5, 3.09999990463257,
+3.09999990463257, 2.5, 6, 5.19999980926514, 4.30000019073486,
+3.09999990463257, 3.09999990463257, 5.19999980926514, 2.5, 6.80000019073486,
+2.5, 5.19999980926514, 2.5, 3.09999990463257, 6.7000002861023,
+3.60000014305115, 2.5, 6.5, 3.70000004768372, 3.09999990463257,
+5.19999980926514, 4.09999990463257, 6, 6, 3.70000004768372, 6,
+2.70000004768372, 2.70000004768372, 3.19999980926514, 6, 2.5,
+5.19999980926514, 6.5, 6.5, 3.19999980926514, 3.09999990463257,
+6, 2.70000004768372, 6, 6, 3.60000014305115, 2.5, 9.10000038146973,
+2.5, 6.5, 3.60000014305115, 5.19999980926514, 6.5, 2.70000004768372,
+6.7000002861023, 2.5, 6, 3.19999980926514, 0.100000001490116,
+2.70000004768372, 5.19999980926514, 5.19999980926514, 6.7000002861023,
+2.5, 2.5, 3.19999980926514, 5.19999980926514, 3.09999990463257,
+6, 2.5, 2.5, 2.5, 2.5, 6.5, 2.40000009536743, 2.5, 6.5, 6.5,
+2.5, 6.5, 3.09999990463257, 4.09999990463257, 5.19999980926514,
+6.5, 2.5, 6.5, 5.19999980926514, 6, 6.5, 2.5, 3.70000004768372,
+3.09999990463257, 2.5, 2.5, 6.80000019073486, 3.09999990463257,
+2.5, 2.40000009536743, 5.19999980926514, 6.7000002861023, 6.7000002861023,
+5.19999980926514, 2.5, 5.19999980926514, 6.5, 3.09999990463257,
+3.09999990463257, 6.5, 5.19999980926514, 4.30000019073486, 6.80000019073486,
+4.30000019073486, 2.5, 6, 2.5, 4.30000019073486, 6.5, 2.70000004768372,
+2.5, 5.19999980926514, 6.7000002861023, 2.5, 3.09999990463257,
+3.09999990463257, 6.5, 5.19999980926514, 3.70000004768372, 6.7000002861023,
+5.19999980926514, 3.09999990463257, 2.70000004768372, 6.5, 9.10000038146973,
+6, 6.5, 3.70000004768372, 6.80000019073486, 4.30000019073486,
+3.09999990463257, 5.19999980926514, 5.19999980926514, 3.09999990463257,
+3.09999990463257, 3.09999990463257, 6.7000002861023, 6, 3.60000014305115,
+2.70000004768372, 3.70000004768372, 6, 3.60000014305115, 5.19999980926514,
+5.19999980926514, 6.5, 3.60000014305115, 3.09999990463257, 3.19999980926514,
+9.10000038146973, 6.5, 6.5, 6, 2.5, 6.80000019073486, 2.40000009536743,
+6, 9.10000038146973, 6.5, 6.7000002861023, 2.5, 6.7000002861023,
+6.5, 3.60000014305115, 3.70000004768372, 6.5, 6.5, 4.30000019073486,
+4.09999990463257, 2.5, 3.09999990463257, 2.70000004768372, 6.5,
+3.60000014305115, 3.5, 6.5, 2.40000009536743, 6.5, 5.19999980926514,
+5.19999980926514, 4.30000019073486, 2.40000009536743, 6, 3.19999980926514,
+6.5, 3.09999990463257, 6.7000002861023, 2.5, 6, 2.70000004768372,
+4.30000019073486, 6.5, 3.60000014305115, 2.70000004768372, 6.7000002861023,
+3.09999990463257, 6.7000002861023, 6.80000019073486, 6.5, 2.5,
+3.70000004768372, 3.5, 5.19999980926514, 2.70000004768372, 4.30000019073486,
+3.09999990463257, 3.70000004768372, 2.5, 6, 6, 6.7000002861023,
+6.80000019073486, 5.19999980926514, 3.70000004768372, 5.19999980926514,
+2.5, 5.19999980926514, 5.19999980926514, 2.70000004768372, 9.10000038146973,
+6.5, 3.60000014305115, 3.09999990463257, 3.70000004768372, 3.19999980926514,
+2.5, 2.70000004768372, 6, 5.19999980926514, 6.7000002861023,
+2.40000009536743, 6.7000002861023, 3.09999990463257, 6.5, 2.5,
+6.5, 5.19999980926514, 5.19999980926514, 3.09999990463257, 3.60000014305115,
+2.5, 4.09999990463257, 6.80000019073486, 6.7000002861023, 2.5,
+2.5, 6, 6.80000019073486, 3.19999980926514, 5.19999980926514,
+5.19999980926514, 2.5, 6.5, 9.10000038146973, 2.5, 2.5, 3.09999990463257,
+3.5, 6.5, 5.19999980926514, 4.09999990463257, 6.5, 3.09999990463257,
+2.5, 5.19999980926514, 2.40000009536743, 3.70000004768372, 2.5,
+3.60000014305115, 6.5, 9.10000038146973, 3.19999980926514, 6.80000019073486,
+9.10000038146973, 6, 3.09999990463257, 6.7000002861023, 5.19999980926514,
+9.10000038146973, 2.70000004768372, 2.40000009536743, 2.40000009536743,
+2.5, 5.19999980926514, 6.5, 6.7000002861023, 3.60000014305115,
+6.7000002861023, 3.60000014305115, 4.30000019073486, 6.80000019073486,
+3.70000004768372, 2.5, 3.09999990463257, 2.5, 2.5, 5.19999980926514,
+2.70000004768372, 6, 2.5, 3.60000014305115, 2.40000009536743,
+3.60000014305115, 2.70000004768372, 3.09999990463257, 6.80000019073486,
+6.80000019073486, 6.5, 3.19999980926514, 2.5, 2.5, 2.5, 2.70000004768372,
+2.40000009536743, 3.60000014305115, 2.70000004768372, 4.09999990463257,
+6.5, 3.09999990463257, 3.70000004768372, 6, 5.19999980926514,
+6.5, 5.19999980926514, 3.60000014305115, 3.09999990463257, 6,
+2.5, 3.60000014305115, 6.80000019073486, 6, 2.5, 9.10000038146973,
+6.7000002861023, 3.09999990463257, 2.5, 5.19999980926514, 2.5,
+6, 2.5, 6.80000019073486, 4.09999990463257, 6.5, 6.5, 2.5, 6.7000002861023,
+2.5, 6.7000002861023, 6.7000002861023, 6, 2.5, 2.70000004768372,
+6.5, 3.70000004768372, 3.60000014305115, 6.7000002861023, 4.30000019073486,
+3.60000014305115, 6.5, 3.09999990463257, 3.09999990463257, 2.5,
+3.09999990463257, 3.09999990463257, 3.70000004768372, 6.5, 6,
+3.09999990463257, 9.10000038146973, 3.09999990463257, 2.5, 3.19999980926514,
+2.5, 3.60000014305115, 2.5, 2.40000009536743, 2.70000004768372,
+6.5, 4.30000019073486, 9.10000038146973, 6, 5.19999980926514,
+6.7000002861023, 2.5, 3.19999980926514, 6.5, 2.5, 9.10000038146973,
+2.5, 9.10000038146973, 6.5, 2.40000009536743, 6.7000002861023,
+2.70000004768372, 3.60000014305115, 4.09999990463257, 6.5, 6.5,
+6.7000002861023, 3.60000014305115, 9.10000038146973, 3.19999980926514,
+3.70000004768372, 9.10000038146973, 5.19999980926514, 3.09999990463257,
+3.09999990463257, 3.5, 3.09999990463257, 5.19999980926514, 3.09999990463257,
+2.5, 3.09999990463257, 6.7000002861023, 3.60000014305115, 3.60000014305115,
+2.5, 6.5, 2.5, 6.7000002861023, 6.5, 6.80000019073486, 3.09999990463257,
+6.7000002861023, 5.19999980926514, 2.5, 6.7000002861023, 3.09999990463257,
+6.5, 6, 2.5, 2.5, 2.70000004768372, 9.10000038146973, 9.10000038146973,
+3.09999990463257, 2.40000009536743, 3.09999990463257, 3.09999990463257,
+3.19999980926514, 5.19999980926514, 5.19999980926514, 6.80000019073486,
+6, 3.09999990463257, 6, 6.5, 6.80000019073486, 3.19999980926514,
+6.5, 5.19999980926514, 6.5, 6.5, 5.19999980926514, 3.60000014305115,
+3.19999980926514, 3.60000014305115, 2.5, 3.60000014305115, 5.19999980926514,
+6.7000002861023, 5.19999980926514, 6, 6.7000002861023, 2.40000009536743,
+9.10000038146973, 5.19999980926514, 9.10000038146973, 5.19999980926514,
+2.70000004768372, 3.70000004768372, 6.5, 2.70000004768372, 3.09999990463257,
+3.19999980926514, 3.09999990463257, 3.09999990463257, 6.5, 3.19999980926514,
+2.5, 9.10000038146973, 2.5, 3.09999990463257, 3.70000004768372,
+4.09999990463257, 6.7000002861023, 2.5, 6.7000002861023, 3.09999990463257,
+3.60000014305115, 6.7000002861023, 4.30000019073486, 4.09999990463257,
+2.40000009536743, 6.80000019073486, 4.30000019073486, 4.30000019073486,
+3.09999990463257, 3.09999990463257, 3.5, 5.19999980926514, 2.5,
+9.10000038146973, 9.10000038146973, 3.09999990463257, 3.60000014305115,
+3.70000004768372, 2.5, 3.09999990463257, 5.19999980926514, 6.5,
+3.09999990463257, 2.5, 4.30000019073486, 2.5, 3.70000004768372,
+4.30000019073486, 4.30000019073486, 2.5, 3.60000014305115, 3.60000014305115,
+6.80000019073486, 3.60000014305115, 6.5, 3.09999990463257, 9.10000038146973,
+2.70000004768372, 5.19999980926514, 3.09999990463257, 6.5, 6.80000019073486,
+6, 2.5, 3.09999990463257, 3.09999990463257, 3.60000014305115,
+2.5, 6.7000002861023, 3.70000004768372, 2.70000004768372, 4.09999990463257,
+2.70000004768372, 3.70000004768372, 2.5, 5.19999980926514, 6.5,
+3.5, 5.19999980926514, 3.19999980926514, 3.5, 3.09999990463257,
+3.60000014305115, 2.70000004768372, 5.19999980926514, 3.60000014305115,
+5.19999980926514, 9.10000038146973, 2.5, 6, 3.60000014305115,
+2.40000009536743, 3.70000004768372, 2.70000004768372, 5.19999980926514,
+2.5, 2.5, 6, 6, 6.5, 6.7000002861023, 3.5, 3.19999980926514,
+2.5, 3.60000014305115, 6.7000002861023, 6.5, 2.5, 4.30000019073486,
+9.10000038146973, 4.09999990463257, 6.7000002861023, 3.19999980926514,
+2.70000004768372, 3.60000014305115, 3.60000014305115, 2.5, 6.80000019073486,
+6, 5.19999980926514, 2.40000009536743, 2.5, 2.5, 2.40000009536743,
+6.7000002861023, 3.19999980926514, 6, 5.19999980926514, 5.19999980926514,
+9.10000038146973, 5.19999980926514, 6.5, 5.19999980926514, 2.5,
+6.80000019073486, 4.09999990463257, 5.19999980926514, 9.10000038146973,
+2.40000009536743, 4.30000019073486, 5.19999980926514, 5.19999980926514,
+3.09999990463257, 4.30000019073486, 2.70000004768372, 2.5, 5.19999980926514,
+4.30000019073486, 3.60000014305115, 6.5, 6, 5.19999980926514,
+5.19999980926514, 6.7000002861023, 9.10000038146973, 4.30000019073486,
+2.5, 2.5, 5.19999980926514, 3.60000014305115, 6, 6, 6.5, 2.5,
+2.5, 2.5, 9.10000038146973, 6.7000002861023, 2.70000004768372,
+6.5, 9.10000038146973, 6.80000019073486, 6, 2.70000004768372,
+3.70000004768372, 2.5, 5.19999980926514, 5.19999980926514, 3.60000014305115,
+3.70000004768372, 2.5, 6.80000019073486, 2.5, 6, 5.19999980926514,
+3.60000014305115, 6.5, 6.5, 6, 3.70000004768372, 5.19999980926514,
+2.40000009536743, 6, 5.19999980926514, 3.09999990463257, 6.7000002861023,
+3.60000014305115, 6.7000002861023, 9.10000038146973, 9.10000038146973,
+6.7000002861023, 6.5, 4.30000019073486, 6.7000002861023, 3.60000014305115,
+2.70000004768372, 5.19999980926514, 3.09999990463257, 2.70000004768372,
+3.60000014305115, 2.70000004768372, 3.70000004768372, 0.100000001490116,
+6.5, 5.19999980926514, 6.5, 3.70000004768372, 5.19999980926514,
+6.5, 3.09999990463257, 4.30000019073486, 3.70000004768372, 3.09999990463257,
+2.5, 6.5, 6.5, 9.10000038146973, 6.5, 6, 3.60000014305115, 6.7000002861023,
+3.60000014305115, 4.09999990463257, 6.5, 4.30000019073486, 2.5,
+3.19999980926514, 2.70000004768372, 6.7000002861023, 6.5, 6.7000002861023,
+6.7000002861023, 3.09999990463257, 2.5, 6.5, 4.30000019073486,
+3.70000004768372, 2.5, 5.19999980926514, 2.5, 3.09999990463257,
+2.5, 5.19999980926514, 4.09999990463257, 6, 6, 6, 3.09999990463257,
+6.80000019073486, 3.09999990463257, 2.40000009536743, 3.70000004768372,
+2.5, 2.5, 6.5, 3.19999980926514, 6.7000002861023, 5.19999980926514,
+4.30000019073486, 3.60000014305115, 4.09999990463257, 6.5, 4.09999990463257,
+2.5, 4.09999990463257, 5.19999980926514, 9.10000038146973, 2.5,
+5.19999980926514, 6, 6.5, 4.30000019073486, 2.5, 9.10000038146973,
+6.80000019073486, 3.19999980926514, 2.40000009536743, 6.5, 2.5,
+4.30000019073486, 2.40000009536743, 3.19999980926514, 6.5, 2.5,
+6, 2.5, 6.80000019073486, 5.19999980926514, 6, 3.60000014305115,
+2.5, 2.5, 3.60000014305115, 2.5, 2.5, 2.5, 2.40000009536743,
+2.40000009536743, 5.19999980926514, 3.19999980926514, 4.30000019073486,
+5.19999980926514, 3.19999980926514, 3.5, 6, 6.80000019073486,
+5.19999980926514, 4.09999990463257, 6, 9.10000038146973, 5.19999980926514,
+2.5, 9.10000038146973, 3.09999990463257, 3.09999990463257, 2.70000004768372,
+6.5, 3.19999980926514, 5.19999980926514, 2.70000004768372, 3.09999990463257,
+2.70000004768372, 3.70000004768372, 2.70000004768372, 3.09999990463257,
+6, 6, 6.80000019073486, 4.09999990463257, 6.5, 2.40000009536743,
+3.60000014305115, 0.100000001490116, 6, 2.5, 2.70000004768372,
+2.40000009536743, 6.7000002861023, 4.09999990463257, 2.40000009536743,
+3.09999990463257, 3.70000004768372, 3.60000014305115, 6.80000019073486,
+6.5, 5.19999980926514, 4.09999990463257, 3.09999990463257, 3.09999990463257,
+2.70000004768372, 2.5, 3.5, 6, 6.7000002861023, 6.5, 9.10000038146973,
+6, 9.10000038146973, 5.19999980926514, 5.19999980926514, 2.70000004768372,
+3.70000004768372, 4.30000019073486, 3.19999980926514, 9.10000038146973,
+5.19999980926514, 3.60000014305115, 3.60000014305115, 3.09999990463257,
+6.7000002861023, 6.7000002861023, 2.5, 3.70000004768372, 5.19999980926514,
+5.19999980926514, 6.80000019073486, 2.5, 2.5, 3.70000004768372,
+6.5, 4.30000019073486, 3.70000004768372, 5.19999980926514, 5.19999980926514,
+2.5, 2.5, 3.70000004768372, 2.5, 2.5, 3.19999980926514, 2.40000009536743,
+5.19999980926514, 6.5, 9.10000038146973, 3.5, 5.19999980926514,
+6.5, 6.5, 2.5, 6, 5.19999980926514, 5.19999980926514, 5.19999980926514,
+6, 2.5, 2.5, 3.09999990463257, 5.19999980926514, 6.5, 6, 6.5,
+6.80000019073486, 9.10000038146973, 2.70000004768372, 2.5, 2.70000004768372,
+2.40000009536743, 3.60000014305115, 3.19999980926514, 3.70000004768372,
+2.5, 9.10000038146973, 4.09999990463257, 3.70000004768372, 3.60000014305115,
+6.7000002861023, 2.5, 3.09999990463257, 3.70000004768372, 2.70000004768372,
+3.60000014305115, 2.5, 2.5, 2.5, 2.5, 6.80000019073486, 2.40000009536743,
+2.70000004768372, 9.10000038146973, 2.5, 6.5, 6.5, 6.7000002861023,
+3.70000004768372, 3.60000014305115, 3.60000014305115, 0.100000001490116,
+3.09999990463257, 6, 3.09999990463257, 4.09999990463257, 6.7000002861023,
+3.09999990463257, 2.5, 2.70000004768372, 3.09999990463257, 4.09999990463257,
+6.5, 3.19999980926514, 2.5, 3.60000014305115, 2.40000009536743,
+4.09999990463257, 2.5, 3.5, 3.09999990463257, 2.5, 5.19999980926514,
+4.30000019073486, 3.5, 3.19999980926514, 6.5, 5.19999980926514,
+3.19999980926514, 6.5, 6.7000002861023, 5.19999980926514, 3.09999990463257,
+3.60000014305115, 6, 3.09999990463257, 6.7000002861023, 6.7000002861023,
+2.5, 6.5, 6.7000002861023, 6, 3.19999980926514, 6.7000002861023,
+6.80000019073486, 3.09999990463257, 2.70000004768372, 5.19999980926514,
+4.30000019073486, 6.5, 5.19999980926514, 6.5, 3.09999990463257,
+5.19999980926514, 5.19999980926514, 2.5, 6.5, 6.5, 2.5, 2.5,
+4.30000019073486, 5.19999980926514, 2.70000004768372, 6.5, 6.80000019073486,
+5.19999980926514, 6.80000019073486, 5.19999980926514, 5.19999980926514,
+5.19999980926514, 6.5, 9.10000038146973, 2.5, 2.5, 4.30000019073486,
+5.19999980926514, 3.09999990463257, 6.5, 6.7000002861023, 2.40000009536743,
+2.5, 2.5, 6.5, 3.19999980926514, 2.70000004768372, 3.09999990463257,
+2.5, 2.40000009536743, 3.09999990463257, 5.19999980926514, 4.30000019073486,
+5.19999980926514, 5.19999980926514, 2.70000004768372, 5.19999980926514,
+5.19999980926514, 2.70000004768372, 3.5, 4.30000019073486, 6.5,
+3.60000014305115, 2.5, 4.09999990463257, 2.5, 5.19999980926514,
+3.09999990463257, 3.09999990463257, 3.09999990463257, 6.5, 2.70000004768372,
+2.70000004768372, 2.5, 5.19999980926514, 6.7000002861023, 3.09999990463257,
+9.10000038146973, 6.5, 0.100000001490116, 3.19999980926514, 2.5,
+6.5, 3.09999990463257, 5.19999980926514, 6.80000019073486, 6.7000002861023,
+2.5, 4.30000019073486, 6.5, 6.5, 6.80000019073486, 5.19999980926514,
+6.5, 3.60000014305115, 6.80000019073486, 6.5, 3.60000014305115,
+9.10000038146973, 4.09999990463257, 6.7000002861023, 2.5, 6.7000002861023,
+4.30000019073486, 2.70000004768372, 2.70000004768372, 3.5, 6.80000019073486,
+2.70000004768372, 3.60000014305115, 3.19999980926514, 4.09999990463257,
+2.5, 6.5, 5.19999980926514, 3.09999990463257, 2.5, 2.5, 2.70000004768372,
+2.5, 2.5, 6.80000019073486, 5.19999980926514, 3.09999990463257,
+3.5, 6.5, 5.19999980926514, 4.09999990463257, 2.5, 3.70000004768372,
+2.5, 3.09999990463257, 3.09999990463257, 0.100000001490116, 5.19999980926514,
+6, 0.100000001490116, 6, 6.5, 6.5, 3.09999990463257, 9.10000038146973,
+6.7000002861023, 6.80000019073486, 2.5, 6.5, 3.60000014305115,
+2.40000009536743, 6, 6, 3.09999990463257, 2.5, 6.5, 3.19999980926514,
+4.30000019073486, 2.5, 6.5, 2.70000004768372, 5.19999980926514,
+9.10000038146973, 3.09999990463257, 2.70000004768372, 3.70000004768372,
+5.19999980926514, 3.60000014305115, 2.5, 3.09999990463257, 6.5,
+5.19999980926514, 6.7000002861023, 9.10000038146973, 4.30000019073486,
+4.09999990463257, 6.7000002861023, 3.60000014305115, 3.09999990463257,
+4.30000019073486, 5.19999980926514, 5.19999980926514, 6.5, 6.5,
+5.19999980926514, 6, 4.30000019073486, 2.5, 2.70000004768372,
+2.5, 3.60000014305115, 2.5, 3.19999980926514, 6.80000019073486,
+2.5, 3.60000014305115, 6.80000019073486, 3.70000004768372, 3.09999990463257,
+6.7000002861023, 2.5, 4.30000019073486, 2.5, 6, 6, 3.09999990463257,
+2.5, 5.19999980926514, 2.5, 6.7000002861023, 3.5, 3.60000014305115,
+6.5, 4.09999990463257, 6.5, 6.5, 5.19999980926514, 5.19999980926514,
+2.70000004768372, 4.30000019073486, 3.09999990463257, 2.5, 3.19999980926514,
+2.5, 2.5, 6.5, 2.5, 6, 2.5, 2.5, 5.19999980926514, 5.19999980926514,
+4.30000019073486, 6.7000002861023, 6.5, 2.5, 6.5, 3.60000014305115,
+6, 5.19999980926514, 5.19999980926514, 5.19999980926514, 6.80000019073486,
+2.5, 2.40000009536743, 6.80000019073486, 6.7000002861023, 6.5,
+6.5, 6.7000002861023, 6.5, 6, 3.70000004768372, 3.5, 5.19999980926514,
+3.09999990463257, 3.09999990463257, 3.09999990463257, 3.09999990463257,
+3.09999990463257, 3.5, 3.09999990463257, 3.09999990463257, 3.19999980926514,
+2.5, 5.19999980926514, 3.09999990463257, 6.5, 6.5, 3.19999980926514,
+2.5, 3.60000014305115, 6.5, 3.70000004768372, 9.10000038146973,
+3.09999990463257, 5.19999980926514, 4.30000019073486, 2.40000009536743,
+6, 3.60000014305115, 6.7000002861023, 4.30000019073486, 6.80000019073486,
+2.70000004768372, 2.5, 6.5, 6.5, 2.40000009536743, 3.09999990463257,
+3.70000004768372, 4.30000019073486, 5.19999980926514, 5.19999980926514,
+4.09999990463257, 5.19999980926514, 3.19999980926514, 6.5, 6.5,
+6, 6.5, 6.80000019073486, 4.09999990463257, 2.40000009536743,
+6.7000002861023, 3.60000014305115, 2.40000009536743, 3.60000014305115,
+4.09999990463257, 3.09999990463257, 6.7000002861023, 6.5, 3.09999990463257,
+5.19999980926514, 6.5, 3.09999990463257, 2.5, 6.5, 3.09999990463257,
+2.5, 6.7000002861023, 3.60000014305115, 4.09999990463257, 5.19999980926514,
+2.40000009536743, 6.5, 2.5, 6.7000002861023, 3.09999990463257,
+3.70000004768372, 6.5, 4.09999990463257, 2.5, 2.5, 3.60000014305115,
+3.19999980926514, 2.5, 5.19999980926514, 6.5, 2.5, 2.70000004768372,
+3.19999980926514, 2.70000004768372, 3.09999990463257, 4.09999990463257,
+3.60000014305115, 2.40000009536743, 6.5, 9.10000038146973, 9.10000038146973,
+2.40000009536743, 4.30000019073486, 6.80000019073486, 2.40000009536743,
+5.19999980926514, 9.10000038146973, 3.60000014305115, 3.60000014305115,
+9.10000038146973, 3.09999990463257, 2.40000009536743, 3.70000004768372,
+5.19999980926514, 6.80000019073486, 3.09999990463257, 3.60000014305115,
+5.19999980926514, 3.70000004768372, 6.7000002861023, 5.19999980926514,
+3.09999990463257, 5.19999980926514, 6.5, 9.10000038146973, 9.10000038146973,
+9.10000038146973, 2.5, 4.09999990463257, 6.7000002861023, 2.5,
+3.09999990463257, 3.09999990463257, 6.7000002861023, 6.7000002861023,
+3.09999990463257, 6.5, 3.5, 6.5, 3.09999990463257, 2.5, 3.09999990463257,
+3.09999990463257, 3.70000004768372, 3.70000004768372, 3.60000014305115,
+3.09999990463257, 4.30000019073486, 4.30000019073486, 6.5, 5.19999980926514,
+6.5, 3.09999990463257, 2.70000004768372, 6, 3.60000014305115,
+5.19999980926514, 4.30000019073486, 3.60000014305115, 3.70000004768372,
+2.5, 6, 6.5, 6.7000002861023, 5.19999980926514, 6.80000019073486,
+3.60000014305115, 3.19999980926514, 2.5, 6.5, 6.5, 5.19999980926514,
+2.40000009536743, 2.5, 4.30000019073486, 3.09999990463257, 2.70000004768372,
+3.60000014305115, 2.70000004768372, 2.40000009536743, 2.5, 6,
+2.5, 2.5, 2.70000004768372, 6.5, 6.7000002861023, 6.7000002861023,
+3.19999980926514, 2.5, 3.09999990463257, 2.70000004768372, 5.19999980926514,
+9.10000038146973, 6.80000019073486, 2.5, 2.40000009536743, 3.60000014305115,
+3.09999990463257, 3.09999990463257, 4.30000019073486, 3.09999990463257,
+5.19999980926514, 6, 6.5, 2.70000004768372, 3.19999980926514,
+6, 5.19999980926514, 3.09999990463257, 3.09999990463257, 4.30000019073486,
+5.19999980926514, 2.5, 3.09999990463257, 4.30000019073486, 2.5,
+6, 2.5, 6.7000002861023, 2.5, 5.19999980926514, 3.70000004768372,
+6.7000002861023, 6.5, 3.09999990463257, 2.40000009536743, 6.7000002861023,
+2.5, 2.40000009536743, 6.5, 3.19999980926514, 6.5, 2.5, 6.7000002861023,
+2.5, 3.19999980926514, 3.70000004768372, 2.5, 2.5, 5.19999980926514,
+6.5, 3.70000004768372, 5.19999980926514, 2.5, 2.5, 2.40000009536743,
+6.7000002861023, 6.80000019073486, 6.5, 2.40000009536743, 6.7000002861023,
+3.70000004768372, 6, 3.09999990463257, 9.10000038146973, 6.7000002861023,
+6.5, 3.60000014305115, 2.5, 6.5, 2.70000004768372, 3.09999990463257,
+6.80000019073486, 2.70000004768372, 4.30000019073486, 4.30000019073486,
+3.09999990463257, 4.30000019073486, 2.40000009536743, 6.5, 6.5,
+5.19999980926514, 4.30000019073486, 3.09999990463257, 6.5, 2.40000009536743,
+3.09999990463257, 5.19999980926514, 2.5, 9.10000038146973, 2.5,
+2.70000004768372, 2.5, 2.5, 3.09999990463257, 2.5, 2.40000009536743,
+6.5, 2.5, 3.09999990463257, 5.19999980926514, 3.09999990463257,
+3.09999990463257, 6.80000019073486, 6.5, 3.60000014305115, 3.60000014305115,
+2.5, 2.70000004768372, 2.5, 6, 6, 5.19999980926514, 6, 3.5, 4.30000019073486,
+6, 6.7000002861023, 2.40000009536743, 9.10000038146973, 5.19999980926514,
+2.5, 4.30000019073486, 3.09999990463257, 2.5, 6.5, 3.09999990463257,
+4.09999990463257, 6.7000002861023, 3.70000004768372, 6.7000002861023,
+2.40000009536743, 4.30000019073486, 2.70000004768372, 2.40000009536743,
+4.30000019073486, 3.09999990463257, 3.09999990463257, 6.80000019073486,
+3.19999980926514, 5.19999980926514, 2.40000009536743, 2.5, 6.5,
+2.70000004768372, 2.70000004768372, 5.19999980926514, 3.70000004768372,
+3.60000014305115, 6.80000019073486, 3.60000014305115, 6, 6.5,
+5.19999980926514, 2.40000009536743, 9.10000038146973, 6.5, 3.70000004768372,
+3.09999990463257, 3.5, 3.19999980926514, 3.09999990463257, 6.80000019073486,
+4.09999990463257, 3.19999980926514, 9.10000038146973, 6.7000002861023,
+6.5, 3.09999990463257, 6.7000002861023, 2.70000004768372, 6.5,
+6, 2.40000009536743, 3.70000004768372, 2.40000009536743, 5.19999980926514,
+9.10000038146973, 3.19999980926514, 6.5, 6.7000002861023, 2.5,
+6.5, 6.7000002861023, 5.19999980926514, 6, 6.5, 6.5, 3.09999990463257,
+4.30000019073486, 5.19999980926514, 3.19999980926514, 6, 5.19999980926514,
+6.5, 2.5, 5.19999980926514, 2.70000004768372, 9.10000038146973,
+2.40000009536743, 5.19999980926514, 3.19999980926514, 5.19999980926514,
+2.5, 3.09999990463257, 3.09999990463257, 2.5, 2.70000004768372,
+2.40000009536743, 5.19999980926514, 6.5, 6.5, 3.60000014305115,
+2.40000009536743, 6.7000002861023, 2.70000004768372, 3.70000004768372,
+5.19999980926514, 3.70000004768372, 3.60000014305115, 3.09999990463257,
+6.80000019073486, 6.7000002861023, 2.40000009536743, 2.5, 3.70000004768372,
+2.40000009536743, 3.70000004768372, 2.40000009536743, 3.19999980926514,
+3.09999990463257, 3.09999990463257, 3.19999980926514, 4.30000019073486,
+3.09999990463257, 3.70000004768372, 4.30000019073486, 6.5, 9.10000038146973,
+2.40000009536743, 3.09999990463257, 2.40000009536743, 6, 6.5,
+6.5, 5.19999980926514, 2.40000009536743, 5.19999980926514, 3.09999990463257,
+6.5, 6.5, 3.70000004768372, 6.5, 3.09999990463257, 4.30000019073486,
+5.19999980926514, 9.10000038146973, 2.5, 5.19999980926514, 5.19999980926514,
+9.10000038146973, 2.5, 2.5, 3.09999990463257, 4.09999990463257,
+2.40000009536743, 0.100000001490116, 4.30000019073486, 3.60000014305115,
+2.70000004768372, 6.5, 2.40000009536743, 3.60000014305115, 6.7000002861023,
+5.19999980926514, 3.5, 2.70000004768372, 3.09999990463257, 2.70000004768372,
+6, 6.5, 3.09999990463257, 2.70000004768372, 0.100000001490116,
+6.5, 5.19999980926514, 5.19999980926514, 5.19999980926514, 6.5,
+3.5, 6.80000019073486, 3.09999990463257, 3.60000014305115, 5.19999980926514,
+3.70000004768372, 9.10000038146973, 5.19999980926514, 2.5, 9.10000038146973,
+6.7000002861023, 9.10000038146973, 3.09999990463257, 3.60000014305115,
+3.09999990463257, 2.5, 5.19999980926514, 5.19999980926514, 6,
+5.19999980926514, 3.09999990463257, 3.60000014305115, 9.10000038146973,
+2.70000004768372, 6.5, 5.19999980926514, 5.19999980926514, 6.7000002861023,
+5.19999980926514, 6.80000019073486, 6.5, 6, 6.5, 2.40000009536743,
+6.80000019073486, 3.60000014305115, 3.60000014305115, 3.09999990463257,
+3.09999990463257, 5.19999980926514, 2.5, 5.19999980926514, 2.5,
+6.5, 3.5, 2.5, 4.09999990463257, 2.5, 2.5, 5.19999980926514,
+3.60000014305115, 2.5, 3.60000014305115, 3.60000014305115, 3.09999990463257,
+4.09999990463257, 6.80000019073486, 3.19999980926514, 6, 3.60000014305115,
+9.10000038146973, 5.19999980926514, 3.09999990463257, 5.19999980926514,
+6.5, 6.7000002861023, 5.19999980926514, 0.100000001490116, 2.5,
+2.5, 9.10000038146973, 2.70000004768372, 2.40000009536743, 9.10000038146973,
+6, 6.5, 3.09999990463257, 2.5, 3.09999990463257, 3.70000004768372,
+3.60000014305115, 6.5, 3.09999990463257, 3.5, 3.09999990463257,
+2.5, 2.5, 3.09999990463257, 3.09999990463257, 3.70000004768372,
+6.7000002861023, 3.60000014305115, 3.09999990463257, 6.5, 6.5,
+2.40000009536743, 6.7000002861023, 6.5, 3.09999990463257, 2.70000004768372,
+6.7000002861023, 6.5, 3.70000004768372, 6.5, 3.09999990463257,
+2.5, 4.30000019073486, 3.70000004768372, 6.5, 2.40000009536743,
+6.7000002861023, 3.09999990463257, 6.5, 6.7000002861023, 2.5,
+2.70000004768372, 2.5, 6.80000019073486, 4.09999990463257, 2.70000004768372,
+2.70000004768372, 6.5, 3.70000004768372, 2.70000004768372, 3.09999990463257,
+2.5, 6.80000019073486, 2.70000004768372, 3.09999990463257, 3.09999990463257,
+3.09999990463257, 3.60000014305115, 3.09999990463257, 6.7000002861023,
+3.19999980926514, 4.09999990463257, 2.70000004768372, 6.80000019073486,
+5.19999980926514, 6.5, 6.7000002861023, 2.5, 2.40000009536743,
+2.5, 6.5, 4.09999990463257, 4.09999990463257, 4.09999990463257,
+5.19999980926514, 3.09999990463257, 5.19999980926514, 6.7000002861023,
+6.7000002861023, 3.19999980926514, 5.19999980926514, 6.7000002861023,
+2.70000004768372, 5.19999980926514, 6.5, 5.19999980926514, 2.40000009536743,
+6.5, 2.70000004768372, 3.09999990463257, 5.19999980926514, 2.5,
+2.5, 5.19999980926514, 3.09999990463257, 2.5, 2.70000004768372,
+5.19999980926514, 6.5, 3.19999980926514, 3.60000014305115, 2.40000009536743,
+3.09999990463257, 3.60000014305115, 5.19999980926514, 3.09999990463257,
+3.09999990463257, 6.5, 9.10000038146973, 6.5, 2.5, 3.09999990463257,
+6.5, 4.30000019073486, 3.60000014305115, 6, 3.60000014305115,
+4.30000019073486, 6.5, 9.10000038146973, 5.19999980926514, 6.5,
+2.5, 3.5, 6.7000002861023, 6.5, 3.09999990463257, 2.70000004768372,
+3.09999990463257, 2.40000009536743, 3.5, 6.5, 5.19999980926514,
+3.60000014305115, 4.30000019073486, 3.70000004768372, 6.7000002861023,
+5.19999980926514, 2.5, 6.7000002861023, 2.70000004768372, 3.5,
+6, 3.19999980926514, 4.09999990463257, 2.5, 3.60000014305115,
+6.5, 5.19999980926514, 4.09999990463257, 3.60000014305115, 2.40000009536743,
+3.5, 4.30000019073486, 2.70000004768372, 3.60000014305115, 2.70000004768372,
+5.19999980926514, 2.40000009536743, 5.19999980926514, 3.5, 4.09999990463257,
+6, 4.09999990463257, 3.5, 6, 3.09999990463257, 5.19999980926514,
+3.60000014305115, 3.5, 2.40000009536743, 5.19999980926514, 2.5,
+6.5, 2.5, 2.5, 2.5, 4.09999990463257, 5.19999980926514, 6.7000002861023,
+3.09999990463257, 6.5, 6.5, 6.80000019073486, 5.19999980926514,
+6.7000002861023, 3.19999980926514, 3.5, 2.5, 3.09999990463257,
+5.19999980926514, 6.5, 6.80000019073486, 2.70000004768372, 2.5,
+3.70000004768372, 2.5, 2.40000009536743, 6.7000002861023, 3.19999980926514,
+3.09999990463257, 5.19999980926514, 2.5, 6.5, 2.5, 2.5, 3.60000014305115,
+6.5, 3.09999990463257, 3.09999990463257, 3.60000014305115, 6.7000002861023,
+6, 2.5, 3.09999990463257, 6.5, 9.10000038146973, 2.40000009536743,
+6.80000019073486, 6.7000002861023, 3.70000004768372, 0.100000001490116,
+6.5, 3.09999990463257, 3.70000004768372, 3.70000004768372, 2.5,
+5.19999980926514, 5.19999980926514, 6.5, 3.09999990463257, 3.09999990463257,
+2.5, 6.80000019073486, 6.5, 2.40000009536743, 6.7000002861023,
+3.09999990463257, 3.09999990463257, 6.7000002861023, 4.30000019073486,
+3.19999980926514, 6.5, 9.10000038146973, 9.10000038146973, 2.70000004768372,
+3.19999980926514, 3.5, 3.09999990463257, 2.5, 4.09999990463257,
+5.19999980926514, 3.09999990463257, 4.09999990463257, 6, 2.70000004768372,
+3.09999990463257, 6.7000002861023, 3.19999980926514, 6.5, 2.5,
+3.09999990463257, 4.09999990463257, 3.60000014305115, 3.60000014305115,
+3.09999990463257, 3.09999990463257, 6.5, 3.19999980926514, 3.09999990463257,
+6.80000019073486, 5.19999980926514, 4.30000019073486, 3.60000014305115,
+3.60000014305115, 6.7000002861023, 6.5, 9.10000038146973, 9.10000038146973,
+3.09999990463257, 3.09999990463257, 3.09999990463257, 2.5, 0.100000001490116,
+2.40000009536743, 3.19999980926514, 6, 4.30000019073486, 3.70000004768372,
+6.80000019073486, 3.09999990463257, 4.09999990463257, 4.30000019073486,
+3.19999980926514, 3.09999990463257, 2.70000004768372, 0.100000001490116,
+3.70000004768372, 6.80000019073486, 3.5, 2.70000004768372, 2.5,
+3.19999980926514, 4.09999990463257, 3.09999990463257, 6.80000019073486,
+2.5, 2.70000004768372, 6.80000019073486, 6.5, 3.19999980926514,
+3.19999980926514, 6, 9.10000038146973, 3.19999980926514, 5.19999980926514,
+2.70000004768372, 3.19999980926514, 3.19999980926514, 3.5, 6.5,
+5.19999980926514, 3.09999990463257, 4.30000019073486, 6.5, 4.30000019073486,
+3.09999990463257, 3.70000004768372, 6.7000002861023, 3.60000014305115,
+3.19999980926514, 6.5, 2.5, 6.5, 6.7000002861023, 6, 4.30000019073486,
+3.70000004768372, 6.7000002861023, 5.19999980926514, 5.19999980926514,
+3.09999990463257, 3.60000014305115, 3.09999990463257, 6, 6.5,
+3.19999980926514, 2.70000004768372, 5.19999980926514, 6.5, 6.5,
+3.60000014305115, 2.40000009536743, 5.19999980926514, 6.7000002861023,
+5.19999980926514, 3.09999990463257, 2.5, 6.80000019073486, 5.19999980926514,
+4.09999990463257, 3.09999990463257, 2.5, 2.5, 3.09999990463257,
+6.7000002861023, 3.5, 5.19999980926514, 3.60000014305115, 6.7000002861023,
+5.19999980926514, 5.19999980926514, 2.70000004768372, 4.30000019073486,
+6.7000002861023, 3.60000014305115, 6.80000019073486, 2.70000004768372,
+4.09999990463257, 6.7000002861023, 3.19999980926514, 3.19999980926514,
+2.5, 6.7000002861023, 3.70000004768372, 3.09999990463257, 6.5,
+2.5, 6.80000019073486, 5.19999980926514, 2.40000009536743, 2.70000004768372,
+6.5, 3.19999980926514, 6.5, 6.5, 3.5, 2.40000009536743, 3.5,
+3.60000014305115, 4.30000019073486, 5.19999980926514, 2.40000009536743,
+3.09999990463257, 3.09999990463257, 6.5, 4.30000019073486, 3.19999980926514,
+4.09999990463257, 6.7000002861023, 2.40000009536743, 6.7000002861023,
+3.09999990463257, 2.5, 9.10000038146973, 2.40000009536743, 2.5,
+3.70000004768372, 3.09999990463257, 3.19999980926514, 3.09999990463257,
+2.70000004768372, 6.7000002861023, 3.70000004768372, 5.19999980926514,
+6.5, 6.7000002861023, 5.19999980926514, 3.09999990463257, 6.5,
+3.60000014305115, 2.70000004768372, 6.5, 2.70000004768372, 3.60000014305115,
+5.19999980926514, 6, 3.60000014305115, 6, 2.5, 6.7000002861023,
+5.19999980926514, 6.5, 3.60000014305115, 3.70000004768372, 6.7000002861023,
+4.09999990463257, 6.7000002861023, 3.09999990463257, 5.19999980926514,
+9.10000038146973, 2.70000004768372, 6.7000002861023, 3.60000014305115,
+4.09999990463257, 3.5, 3.5, 3.70000004768372, 6, 2.40000009536743,
+4.30000019073486, 6.5, 3.09999990463257, 3.60000014305115, 2.40000009536743,
+6.7000002861023, 5.19999980926514, 2.40000009536743, 3.60000014305115,
+2.5, 3.19999980926514, 3.60000014305115, 3.09999990463257, 2.40000009536743,
+3.09999990463257, 3.09999990463257, 6.7000002861023, 3.60000014305115,
+3.19999980926514, 3.19999980926514, 3.60000014305115, 3.09999990463257,
+6, 6.7000002861023, 4.30000019073486, 5.19999980926514, 6.80000019073486,
+3.19999980926514, 3.19999980926514, 4.30000019073486, 3.70000004768372,
+3.09999990463257, 5.19999980926514, 2.70000004768372, 6.7000002861023,
+6.7000002861023, 3.5, 2.40000009536743, 2.40000009536743, 3.70000004768372,
+3.60000014305115, 5.19999980926514, 3.09999990463257, 3.09999990463257,
+3.09999990463257, 3.19999980926514, 6.7000002861023, 2.5, 6,
+4.09999990463257, 2.40000009536743, 2.5, 2.70000004768372, 3.09999990463257,
+3.09999990463257, 3.09999990463257, 3.19999980926514, 2.5, 5.19999980926514,
+5.19999980926514, 4.30000019073486, 2.70000004768372, 6.5, 3.19999980926514,
+4.30000019073486, 5.19999980926514, 6.5, 9.10000038146973, 3.09999990463257,
+3.09999990463257, 3.5, 2.5, 6.7000002861023, 3.09999990463257,
+3.60000014305115, 6.5, 6.7000002861023, 3.09999990463257, 3.19999980926514,
+2.70000004768372, 2.70000004768372, 9.10000038146973, 5.19999980926514,
+6.80000019073486, 3.19999980926514, 6, 3.70000004768372, 5.19999980926514,
+3.60000014305115, 2.70000004768372, 2.70000004768372, 3.19999980926514,
+3.09999990463257, 3.09999990463257, 2.70000004768372, 6.5, 2.5,
+6.7000002861023, 3.70000004768372, 2.40000009536743, 3.09999990463257,
+3.60000014305115, 3.09999990463257, 6, 3.19999980926514, 3.60000014305115,
+6, 3.19999980926514, 9.10000038146973, 2.70000004768372, 2.5,
+2.70000004768372, 3.60000014305115, 3.19999980926514, 5.19999980926514,
+2.5, 2.70000004768372, 2.5, 3.60000014305115, 6.7000002861023,
+2.40000009536743, 6.5, 5.19999980926514, 2.5, 2.40000009536743,
+4.30000019073486, 6, 3.09999990463257, 3.09999990463257, 2.70000004768372,
+6.7000002861023, 2.5, 6.7000002861023, 4.09999990463257, 6, 5.19999980926514,
+6.5, 2.5, 6.80000019073486, 2.70000004768372, 2.5, 6.7000002861023,
+6.80000019073486, 9.10000038146973, 2.40000009536743, 6.5, 6,
+6.5, 3.19999980926514, 3.19999980926514, 2.5, 3.09999990463257,
+5.19999980926514, 2.40000009536743, 3.19999980926514, 3.09999990463257,
+3.09999990463257, 2.70000004768372, 4.09999990463257, 3.09999990463257,
+3.70000004768372, 6.5, 3.70000004768372, 2.40000009536743, 5.19999980926514,
+4.09999990463257, 3.60000014305115, 2.40000009536743, 6.7000002861023,
+6.5, 4.09999990463257, 5.19999980926514, 3.19999980926514, 3.19999980926514,
+6.5, 4.30000019073486, 6, 6.5, 6.5, 5.19999980926514, 2.5, 6.5,
+3.09999990463257, 3.09999990463257, 2.70000004768372, 5.19999980926514,
+3.60000014305115, 5.19999980926514, 0.100000001490116, 2.70000004768372,
+4.30000019073486, 4.30000019073486, 3.5, 2.70000004768372, 3.09999990463257,
+3.09999990463257, 6, 3.60000014305115, 6.7000002861023, 3.09999990463257,
+3.60000014305115, 5.19999980926514, 6.80000019073486, 2.5, 5.19999980926514,
+3.70000004768372, 2.40000009536743, 3.09999990463257, 5.19999980926514,
+4.09999990463257, 4.30000019073486, 3.19999980926514, 5.19999980926514,
+2.5, 6.80000019073486, 2.40000009536743, 6.5, 3.09999990463257,
+6.7000002861023, 2.40000009536743, 5.19999980926514, 0.100000001490116,
+6.80000019073486, 5.19999980926514, 4.30000019073486, 5.19999980926514,
+9.10000038146973, 6.7000002861023, 4.09999990463257, 6, 2.40000009536743,
+3.19999980926514, 6.7000002861023, 3.60000014305115, 5.19999980926514,
+3.19999980926514, 3.19999980926514, 2.70000004768372, 5.19999980926514,
+3.70000004768372, 6.7000002861023, 6.7000002861023, 6.5, 2.40000009536743,
+5.19999980926514, 3.09999990463257, 5.19999980926514, 9.10000038146973,
+6.5, 6.5, 3.70000004768372, 5.19999980926514, 6.5, 4.09999990463257,
+2.5, 2.70000004768372, 3.5, 3.19999980926514, 3.5, 9.10000038146973,
+3.70000004768372, 6, 5.19999980926514, 3.19999980926514, 3.19999980926514,
+3.19999980926514, 2.40000009536743, 6.5, 6.5, 9.10000038146973,
+2.70000004768372, 3.19999980926514, 2.5, 0.100000001490116, 3.09999990463257,
+6.5, 9.10000038146973, 3.09999990463257, 6.7000002861023, 6.5,
+4.30000019073486, 6.5, 5.19999980926514, 3.19999980926514, 6.5,
+3.60000014305115, 3.70000004768372, 3.09999990463257, 2.40000009536743,
+6.5, 6.7000002861023, 3.70000004768372, 5.19999980926514, 2.70000004768372,
+2.5, 5.19999980926514, 5.19999980926514, 5.19999980926514, 3.09999990463257,
+3.09999990463257, 3.19999980926514, 4.30000019073486, 3.09999990463257,
+2.70000004768372, 6, 3.19999980926514, 5.19999980926514, 3.60000014305115,
+9.10000038146973, 5.19999980926514, 3.09999990463257, 3.60000014305115,
+6.7000002861023, 6.5, 6.7000002861023, 6, 2.5, 2.70000004768372,
+6, 6.80000019073486, 6.80000019073486, 2.5, 3.19999980926514,
+4.09999990463257, 5.19999980926514, 5.19999980926514, 2.5, 3.09999990463257,
+5.19999980926514, 4.30000019073486, 2.70000004768372, 6.5, 5.19999980926514,
+2.40000009536743, 4.09999990463257, 2.70000004768372, 2.40000009536743,
+2.5, 9.10000038146973, 3.19999980926514, 6.5, 3.09999990463257,
+4.09999990463257, 2.70000004768372, 4.30000019073486, 3.19999980926514,
+6.7000002861023, 6.80000019073486, 9.10000038146973, 3.09999990463257,
+3.19999980926514, 4.09999990463257, 5.19999980926514, 2.5, 6,
+2.70000004768372, 2.5, 2.70000004768372, 3.09999990463257, 4.09999990463257,
+6.7000002861023, 9.10000038146973, 3.09999990463257, 5.19999980926514,
+4.30000019073486, 2.70000004768372, 5.19999980926514, 6.5, 5.19999980926514
+), sex = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L,
+1L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L,
+1L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 0L,
+1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L,
+1L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L,
+1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L,
+1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L,
+1L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L,
+0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L,
+0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L,
+1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L,
+0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L,
+1L, 1L, 1L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L,
+0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L,
+0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L,
+0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
+1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L,
+0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L,
+0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L,
+0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L,
+1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 0L,
+1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
+1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L,
+1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 0L,
+0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 0L,
+0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L,
+0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L,
+1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 0L,
+0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L,
+1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+0L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L,
+0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L,
+1L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L,
+0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L,
+0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L,
+1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 0L,
+0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L,
+0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L,
+0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 0L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L,
+1L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L,
+0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L,
+0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L,
+0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 0L, 1L, 0L, 1L,
+1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L,
+1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 1L,
+1L, 1L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 0L,
+0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L,
+1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L,
+0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 1L,
+1L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 0L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 0L,
+0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L,
+0L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L,
+0L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L,
+1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L,
+0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 1L,
+1L, 1L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L,
+0L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 1L, 0L, 0L,
+0L, 1L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 0L,
+1L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 0L,
+0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L,
+0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L,
+1L, 0L, 1L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L,
+1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 1L,
+1L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 0L, 0L,
+0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L,
+1L, 0L, 0L, 1L, 0L, 0L, 1L, 1L), los = c(67L, 53L, 51L, 30L,
+43L, 43L, 32L, 47L, 50L, 37L, 32L, 42L, 33L, 28L, 50L, 17L, 22L,
+30L, 30L, 25L, 40L, 46L, 38L, 32L, 13L, 9L, 8L, 34L, 7L, 7L,
+7L, 6L, 6L, 5L, 5L, 5L, 42L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 3L,
+3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 27L, 11L, 2L, 2L, 2L,
+2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L,
+13L, 1L, 1L, 1L, 1L, 1L, 9L, 16L, 12L, 7L, 6L, 5L, 5L, 4L, 4L,
+4L, 4L, 4L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L,
+2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L,
+2L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 26L, 8L, 25L, 12L, 7L,
+7L, 7L, 6L, 5L, 5L, 5L, 5L, 14L, 13L, 4L, 4L, 4L, 4L, 4L, 4L,
+4L, 4L, 4L, 4L, 4L, 11L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 2L,
+2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L,
+2L, 2L, 2L, 2L, 2L, 10L, 1L, 1L, 1L, 1L, 1L, 1L, 19L, 17L, 8L,
+8L, 8L, 7L, 6L, 6L, 6L, 5L, 5L, 5L, 4L, 4L, 4L, 4L, 4L, 4L, 4L,
+3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 2L, 2L, 2L, 2L, 2L,
+2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 10L, 10L,
+1L, 1L, 1L, 1L, 9L, 9L, 9L, 8L, 8L, 23L, 7L, 7L, 6L, 6L, 6L,
+9L, 9L, 14L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 13L, 19L, 4L, 4L,
+4L, 4L, 4L, 4L, 4L, 21L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L,
+3L, 3L, 3L, 11L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L,
+2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 15L, 8L,
+8L, 12L, 12L, 7L, 7L, 21L, 17L, 6L, 6L, 6L, 6L, 6L, 5L, 5L, 30L,
+4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 3L, 3L, 3L, 3L, 3L, 3L, 3L,
+3L, 3L, 3L, 3L, 8L, 11L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L,
+2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 10L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 10L, 9L, 9L, 8L, 7L, 6L,
+5L, 5L, 4L, 4L, 4L, 4L, 4L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L,
+16L, 8L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L,
+2L, 10L, 10L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 13L, 26L, 8L, 8L, 8L, 7L, 16L, 28L, 6L, 6L, 6L,
+6L, 9L, 9L, 19L, 25L, 25L, 5L, 14L, 29L, 4L, 4L, 4L, 4L, 4L,
+8L, 12L, 3L, 3L, 3L, 3L, 3L, 11L, 2L, 2L, 2L, 2L, 2L, 2L, 2L,
+2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 36L, 24L, 10L, 1L, 1L,
+1L, 1L, 1L, 10L, 10L, 5L, 5L, 13L, 4L, 4L, 4L, 12L, 3L, 3L, 3L,
+7L, 11L, 6L, 6L, 6L, 9L, 14L, 5L, 5L, 5L, 6L, 6L, 6L, 9L, 4L,
+11L, 15L, 8L, 8L, 3L, 3L, 3L, 3L, 3L, 3L, 8L, 8L, 7L, 7L, 2L,
+2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L,
+1L, 1L, 1L, 18L, 10L, 10L, 10L, 9L, 4L, 35L, 8L, 8L, 7L, 16L,
+3L, 3L, 3L, 7L, 2L, 2L, 2L, 6L, 6L, 6L, 6L, 6L, 6L, 5L, 5L, 5L,
+5L, 5L, 5L, 5L, 5L, 9L, 9L, 4L, 4L, 4L, 4L, 4L, 4L, 11L, 8L,
+3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 8L, 7L, 2L, 2L, 2L,
+2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 6L, 1L, 1L, 1L, 1L, 4L, 4L,
+4L, 4L, 8L, 8L, 8L, 7L, 16L, 3L, 3L, 23L, 12L, 7L, 2L, 6L, 9L,
+5L, 5L, 5L, 5L, 5L, 6L, 6L, 6L, 13L, 4L, 4L, 4L, 4L, 4L, 4L,
+22L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 8L, 8L, 2L,
+2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 24L, 13L, 6L, 5L, 5L,
+10L, 10L, 1L, 1L, 1L, 1L, 10L, 5L, 5L, 9L, 4L, 4L, 8L, 7L, 7L,
+3L, 3L, 3L, 23L, 12L, 7L, 7L, 4L, 2L, 2L, 6L, 6L, 29L, 10L, 10L,
+1L, 5L, 5L, 5L, 6L, 13L, 4L, 4L, 8L, 3L, 3L, 3L, 3L, 3L, 3L,
+8L, 7L, 7L, 7L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 5L,
+5L, 9L, 10L, 10L, 10L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+10L, 5L, 9L, 4L, 8L, 8L, 8L, 7L, 3L, 40L, 7L, 4L, 4L, 4L, 4L,
+4L, 15L, 2L, 2L, 2L, 2L, 2L, 2L, 6L, 9L, 18L, 5L, 5L, 5L, 5L,
+6L, 9L, 4L, 4L, 4L, 4L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 8L, 3L, 3L,
+3L, 8L, 7L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 15L, 6L,
+5L, 1L, 1L, 1L, 1L, 1L, 1L, 31L, 10L, 10L, 5L, 5L, 4L, 12L, 12L,
+18L, 7L, 16L, 3L, 11L, 11L, 4L, 4L, 2L, 2L, 10L, 10L, 9L, 13L,
+36L, 9L, 9L, 6L, 6L, 9L, 8L, 30L, 4L, 4L, 4L, 22L, 22L, 3L, 3L,
+3L, 3L, 3L, 3L, 8L, 8L, 7L, 7L, 3L, 3L, 3L, 3L, 3L, 7L, 15L,
+6L, 6L, 6L, 6L, 6L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 23L, 6L, 16L,
+9L, 10L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 21L, 2L, 2L, 2L, 2L, 2L,
+2L, 2L, 2L, 2L, 4L, 4L, 4L, 11L, 25L, 3L, 3L, 7L, 7L, 4L, 4L,
+4L, 4L, 8L, 13L, 13L, 24L, 11L, 19L, 2L, 2L, 6L, 6L, 20L, 20L,
+1L, 5L, 5L, 5L, 5L, 5L, 17L, 6L, 10L, 9L, 15L, 15L, 4L, 4L, 4L,
+4L, 4L, 3L, 3L, 3L, 3L, 3L, 3L, 7L, 7L, 3L, 3L, 3L, 3L, 3L, 8L,
+8L, 12L, 6L, 2L, 2L, 2L, 2L, 2L, 16L, 5L, 5L, 5L, 5L, 5L, 9L,
+10L, 10L, 5L, 5L, 5L, 1L, 1L, 5L, 9L, 2L, 2L, 2L, 2L, 2L, 2L,
+2L, 2L, 2L, 2L, 2L, 4L, 4L, 4L, 4L, 4L, 8L, 7L, 3L, 3L, 3L, 3L,
+7L, 2L, 4L, 4L, 8L, 2L, 2L, 6L, 1L, 1L, 9L, 9L, 5L, 5L, 14L,
+17L, 6L, 17L, 8L, 8L, 4L, 4L, 4L, 3L, 3L, 3L, 3L, 3L, 3L, 3L,
+3L, 3L, 3L, 3L, 12L, 7L, 3L, 3L, 3L, 19L, 8L, 8L, 7L, 15L, 28L,
+11L, 6L, 6L, 6L, 2L, 17L, 6L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 1L,
+1L, 1L, 1L, 5L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 4L, 4L,
+4L, 4L, 4L, 6L, 4L, 4L, 8L, 7L, 7L, 3L, 3L, 3L, 3L, 3L, 15L,
+4L, 4L, 4L, 4L, 34L, 2L, 10L, 10L, 6L, 13L, 1L, 1L, 5L, 17L,
+6L, 8L, 8L, 8L, 4L, 4L, 14L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L,
+7L, 12L, 12L, 7L, 3L, 3L, 7L, 6L, 6L, 6L, 2L, 2L, 2L, 2L, 2L,
+2L, 2L, 22L, 5L, 14L, 5L, 5L, 5L, 5L, 1L, 2L, 2L, 2L, 2L, 2L,
+2L, 2L, 2L, 4L, 4L, 4L, 6L, 11L, 4L, 3L, 3L, 3L, 23L, 11L, 11L,
+7L, 26L, 4L, 4L, 4L, 2L, 2L, 10L, 1L, 1L, 1L, 10L, 10L, 1L, 9L,
+9L, 5L, 5L, 5L, 17L, 6L, 6L, 6L, 10L, 15L, 8L, 4L, 4L, 4L, 4L,
+14L, 14L, 3L, 3L, 3L, 12L, 7L, 3L, 3L, 22L, 19L, 8L, 7L, 6L,
+6L, 6L, 2L, 2L, 9L, 16L, 5L, 10L, 5L, 5L, 5L, 1L, 20L, 5L, 13L,
+2L, 2L, 2L, 2L, 2L, 2L, 4L, 4L, 4L, 8L, 8L, 7L, 7L, 22L, 18L,
+18L, 3L, 3L, 3L, 7L, 2L, 15L, 4L, 4L, 4L, 4L, 4L, 17L, 17L, 17L,
+17L, 2L, 10L, 25L, 16L, 16L, 1L, 1L, 1L, 5L, 5L, 5L, 1L, 9L,
+9L, 9L, 25L, 15L, 6L, 6L, 6L, 10L, 15L, 4L, 4L, 14L, 14L, 14L,
+14L, 14L, 26L, 14L, 3L, 3L, 3L, 3L, 3L, 18L, 7L, 3L, 13L, 13L,
+13L, 7L, 6L, 6L, 2L, 2L, 2L, 2L, 2L, 9L, 12L, 12L, 12L, 12L,
+12L, 12L, 47L, 6L, 16L, 5L, 5L, 5L, 5L, 14L, 11L, 11L, 11L, 11L,
+11L, 14L, 13L, 13L, 13L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 4L,
+4L, 10L, 10L, 10L, 10L, 10L, 10L, 12L, 12L, 7L, 9L, 9L, 9L, 9L,
+9L, 8L, 3L, 2L, 2L, 15L, 4L, 4L, 17L, 8L, 8L, 8L, 8L, 8L, 8L,
+8L, 8L, 8L, 8L, 8L, 8L, 8L, 11L, 6L, 9L, 1L, 1L, 12L, 12L, 1L,
+1L, 1L, 1L, 5L, 5L, 5L, 7L, 7L, 7L, 7L, 7L, 10L, 10L, 9L, 9L,
+15L, 15L, 15L, 6L, 10L, 6L, 9L, 32L, 8L, 8L, 4L, 14L, 3L, 3L,
+3L, 3L, 7L, 7L, 7L, 7L, 7L, 7L, 3L, 13L, 13L, 13L, 13L, 19L,
+8L, 4L, 4L, 4L, 6L, 6L, 2L, 2L, 2L, 9L, 12L, 12L, 12L, 12L, 12L,
+16L, 5L, 14L, 5L, 14L, 20L, 11L, 11L, 11L, 11L, 11L, 11L, 11L,
+11L, 19L, 13L, 13L, 13L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 4L, 4L,
+6L, 10L, 10L, 10L, 10L, 10L, 4L, 4L, 7L, 7L, 7L, 11L, 3L, 3L,
+18L, 9L, 9L, 9L, 9L, 9L, 9L, 9L, 9L, 9L, 9L, 9L, 8L, 35L, 3L,
+7L, 2L, 4L, 8L, 8L, 8L, 8L, 8L, 8L, 8L, 8L, 8L, 34L, 2L, 10L,
+9L, 16L, 12L, 12L, 1L, 5L, 7L, 7L, 7L, 7L, 7L, 7L, 7L, 7L, 7L,
+7L, 10L, 9L, 5L, 15L, 15L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 15L, 8L,
+8L, 8L, 8L, 7L, 22L, 11L, 14L, 14L, 14L, 14L, 14L, 14L, 3L, 3L,
+3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 7L, 16L, 7L, 7L, 7L, 3L, 3L,
+22L, 13L, 13L, 11L, 4L, 12L, 12L, 12L, 6L, 5L, 64L, 5L, 5L, 5L,
+5L, 5L, 5L, 1L, 1L, 11L, 11L, 11L, 5L, 13L, 13L, 2L, 2L, 2L,
+2L, 4L, 19L, 11L, 10L, 10L, 10L, 10L, 10L, 10L, 10L, 10L, 10L,
+4L, 7L, 9L, 9L, 9L, 9L, 9L, 9L, 9L, 9L, 9L, 8L, 11L, 11L, 12L,
+26L, 2L, 4L, 17L, 8L, 8L, 8L, 8L, 8L, 8L, 8L, 10L, 18L, 12L,
+12L, 1L, 1L, 5L, 5L, 7L, 7L, 7L, 7L, 7L, 18L, 17L, 6L, 6L, 21L,
+2L, 32L, 8L, 8L, 8L, 8L, 8L, 7L, 4L, 14L, 14L, 14L, 14L, 14L,
+14L, 5L, 5L, 14L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 7L, 22L, 8L, 7L,
+7L, 11L, 11L, 6L, 6L, 6L, 6L, 2L, 9L, 9L, 12L, 12L, 12L, 12L,
+5L, 5L, 5L, 5L, 5L, 5L, 11L, 11L, 11L, 14L, 5L, 13L, 2L, 2L,
+2L, 2L, 4L, 4L, 6L, 10L, 10L, 10L, 10L, 10L, 10L, 10L, 10L, 10L,
+10L, 10L, 4L, 3L, 3L, 18L, 10L, 10L, 10L, 3L, 9L, 9L, 9L, 9L,
+9L, 9L, 9L, 9L, 9L, 8L, 8L, 8L, 8L, 3L, 3L, 11L, 15L, 8L, 8L,
+8L, 8L, 8L, 8L, 8L, 8L, 8L, 13L, 2L, 10L, 19L, 18L, 12L, 1L,
+5L, 5L, 7L, 7L, 7L, 7L, 7L, 7L, 7L, 7L, 10L, 21L, 9L, 9L, 24L,
+15L, 15L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 9L, 15L, 15L, 2L,
+2L, 7L, 7L, 7L, 4L, 22L, 14L, 14L, 14L, 5L, 14L, 3L, 3L, 3L,
+3L, 23L, 23L, 12L, 7L, 7L, 13L, 13L, 13L, 13L, 13L, 48L, 11L,
+30L, 6L, 6L, 12L, 12L, 12L, 6L, 5L, 5L, 25L, 14L, 6L, 11L, 11L,
+11L, 13L, 13L, 13L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L,
+19L, 11L, 10L, 10L, 10L, 10L, 10L, 10L, 10L, 36L, 27L, 10L, 10L,
+10L, 3L, 29L, 9L, 9L, 9L, 9L, 9L, 9L, 9L, 9L, 8L, 11L, 11L, 26L,
+15L, 4L, 4L, 4L, 24L, 8L, 8L, 8L, 8L, 8L, 8L, 8L, 8L, 8L, 11L,
+19L, 5L, 2L, 10L, 6L, 25L, 25L, 20L, 16L, 16L, 12L, 1L, 1L, 1L,
+1L, 5L, 7L, 7L, 7L, 7L, 7L, 7L, 10L, 9L, 15L, 15L, 6L, 10L, 6L,
+6L, 6L, 6L, 6L, 9L, 9L, 7L, 14L, 14L, 31L, 3L, 3L, 3L, 3L, 3L,
+18L, 12L, 7L, 7L, 3L, 3L, 13L, 13L, 35L, 7L, 11L, 11L, 11L, 11L,
+11L, 6L, 6L, 20L, 2L, 12L, 12L, 12L, 12L, 5L, 5L, 5L, 25L, 5L,
+20L, 20L, 6L, 6L, 6L, 11L, 11L, 11L, 11L, 11L, 11L, 11L, 2L,
+2L, 2L, 4L, 4L, 4L, 6L, 11L, 10L, 10L, 10L, 10L, 10L, 10L, 10L,
+10L, 21L, 12L, 7L, 27L, 18L, 10L, 10L, 9L, 9L, 9L, 9L, 9L, 9L,
+9L, 9L, 9L, 9L, 9L, 9L, 19L, 8L, 8L, 11L, 4L, 4L, 17L, 17L, 8L,
+8L, 8L, 8L, 8L, 8L, 8L, 8L, 5L, 5L, 10L, 10L, 25L, 16L, 29L,
+12L, 12L, 1L, 1L, 1L, 1L, 7L, 7L, 7L, 7L, 7L, 7L, 7L, 10L, 15L,
+15L, 34L, 17L, 10L, 6L, 6L, 6L, 6L, 6L, 9L, 9L, 7L, 7L, 14L,
+14L, 14L, 14L, 3L, 3L, 7L, 22L, 4L, 13L, 11L, 11L, 11L, 15L,
+4L, 6L, 6L, 20L, 9L, 12L, 12L, 12L, 12L, 12L, 12L, 16L, 9L, 5L,
+5L, 5L, 20L, 11L, 11L, 13L, 13L, 2L, 2L, 2L, 2L, 2L, 4L, 4L,
+6L, 6L, 6L, 10L, 10L, 10L, 10L, 10L, 10L, 23L, 18L, 18L, 10L,
+10L, 10L, 10L, 10L, 14L, 3L, 3L, 3L, 9L, 9L, 9L, 9L, 9L, 9L,
+9L, 44L, 8L, 20L, 2L, 2L, 15L, 4L, 17L, 8L, 8L, 8L, 8L, 8L, 8L,
+8L, 8L, 8L, 8L, 8L, 8L, 13L, 30L, 5L, 10L, 12L, 12L, 1L, 1L,
+1L, 16L, 7L, 7L, 7L, 7L, 7L, 7L, 10L, 15L, 15L, 17L, 17L, 6L,
+10L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 9L, 9L, 9L, 2L, 8L, 7L, 14L,
+14L, 14L, 14L, 5L, 14L, 3L, 7L, 22L, 13L, 8L, 11L, 11L, 11L,
+11L, 4L, 4L, 6L, 9L, 9L, 12L, 12L, 12L, 16L, 5L, 68L, 14L, 8L,
+8L, 20L, 6L, 11L, 11L, 11L, 11L, 11L, 11L, 11L, 21L, 24L, 19L,
+13L, 13L, 13L, 13L, 2L, 2L, 2L, 2L, 2L, 4L, 4L, 19L, 19L, 11L,
+10L, 10L, 10L, 10L, 10L, 10L, 3L, 18L, 10L, 10L, 40L, 3L, 9L,
+9L, 9L, 9L, 9L, 9L, 9L, 9L, 9L, 9L, 9L, 8L, 35L, 11L, 26L, 13L,
+4L, 4L, 4L, 17L, 8L, 8L, 8L, 8L, 8L, 8L, 8L, 8L, 8L, 8L, 8L,
+13L, 7L, 10L, 12L, 16L, 16L, 16L, 16L, 12L, 1L, 5L, 5L, 7L, 7L,
+7L, 7L, 7L, 7L, 7L, 7L, 9L, 11L, 15L, 15L, 15L, 15L, 15L, 17L,
+13L, 2L, 19L, 7L, 7L, 7L, 10L, 14L, 14L, 14L, 14L, 14L, 14L,
+14L, 3L, 3L, 3L, 7L, 7L, 13L, 13L, 13L, 13L, 11L, 30L, 4L, 20L,
+9L, 8L, 8L, 12L, 12L, 12L, 12L, 12L, 12L, 5L, 29L, 25L, 5L, 5L,
+8L, 8L, 8L, 8L, 6L, 11L, 11L, 11L, 11L, 11L, 11L, 11L, 11L, 2L,
+2L, 2L, 2L, 4L, 4L, 4L, 4L, 4L, 19L, 6L, 22L, 11L, 10L, 10L,
+10L, 10L, 10L, 10L, 10L, 10L, 3L, 4L, 7L, 11L, 14L, 3L, 10L,
+10L, 10L, 10L, 3L, 9L, 9L, 9L, 9L, 9L, 9L, 9L, 15L, 4L, 17L,
+8L, 8L, 8L, 8L, 8L, 8L, 8L, 13L, 7L, 7L, 7L, 16L, 21L, 1L, 16L,
+12L, 12L, 1L, 1L, 7L, 7L, 21L, 10L, 20L, 15L, 6L, 13L, 2L, 2L,
+8L, 8L, 10L, 10L, 14L, 14L, 5L, 14L, 3L, 3L, 7L, 7L, 9L, 9L,
+13L, 11L, 11L, 4L, 4L, 17L, 6L, 8L, 8L, 12L, 12L, 12L, 5L, 8L,
+8L, 8L, 7L, 6L, 6L, 11L, 11L, 11L, 11L, 13L, 13L, 13L, 2L, 2L,
+2L, 17L, 17L, 19L, 6L, 22L, 10L, 10L, 3L, 12L, 14L, 18L, 10L,
+14L, 9L, 9L, 9L, 9L, 9L, 9L, 9L, 19L, 8L, 35L, 11L, 26L, 15L,
+15L, 8L, 8L, 8L, 8L, 8L, 8L, 8L, 8L, 8L, 8L, 7L, 7L, 16L, 16L,
+16L, 16L, 12L, 12L, 12L, 7L, 7L, 7L, 7L, 10L, 10L, 9L, 2L, 2L,
+11L, 15L, 15L, 15L, 6L, 6L, 9L, 9L, 2L, 2L, 2L, 2L, 8L, 8L, 8L,
+7L, 7L, 7L, 7L, 10L, 14L, 14L, 3L, 18L, 7L, 9L, 9L, 9L, 15L,
+4L, 11L, 11L, 11L, 11L, 65L, 6L, 9L, 9L, 9L, 12L, 12L, 12L, 12L,
+12L, 12L, 12L, 12L, 12L, 12L, 47L, 5L, 8L, 8L, 8L, 8L, 1L, 7L,
+17L, 11L, 11L, 11L, 11L, 11L, 11L, 5L, 13L, 2L, 2L, 2L, 6L, 6L,
+11L, 10L, 10L, 10L, 10L, 10L, 10L, 3L, 7L, 7L, 3L, 18L, 10L,
+10L, 9L, 9L, 9L, 9L, 9L, 9L, 9L, 9L, 9L, 83L, 8L, 2L, 17L, 8L,
+8L, 8L, 8L, 8L, 8L, 13L, 7L, 16L, 5L, 5L, 10L, 1L, 1L, 5L, 7L,
+9L, 2L, 2L, 24L, 11L, 15L, 15L, 6L, 9L, 9L, 9L, 2L, 8L, 7L, 7L,
+7L, 14L, 14L, 14L, 14L, 5L, 3L, 18L, 12L, 6L, 7L, 9L, 4L, 4L,
+13L, 13L, 13L, 13L, 11L, 11L, 11L, 4L, 4L, 4L, 4L, 8L, 12L, 12L,
+12L, 12L, 12L, 14L, 20L, 16L, 8L, 8L, 12L, 20L, 20L, 7L, 6L,
+11L, 11L, 22L, 15L, 13L, 13L, 4L, 4L, 22L, 10L, 10L, 10L, 10L,
+10L, 10L, 3L, 3L, 10L, 9L, 9L, 9L, 9L, 9L, 9L, 19L, 13L, 15L,
+4L, 8L, 8L, 8L, 8L, 7L, 7L, 5L, 5L, 5L, 5L, 10L, 12L, 16L, 16L,
+16L, 12L, 7L, 7L, 7L, 7L, 7L, 24L, 15L, 15L, 9L, 8L, 7L, 23L,
+23L, 10L, 10L, 14L, 14L, 14L, 14L, 3L, 3L, 18L, 10L, 10L, 7L,
+22L, 9L, 9L, 9L, 15L, 15L, 4L, 13L, 13L, 13L, 15L, 17L, 11L,
+4L, 6L, 8L, 8L, 12L, 12L, 12L, 5L, 9L, 5L, 8L, 8L, 8L, 7L, 7L,
+17L, 6L, 11L, 11L, 11L, 11L, 13L, 28L, 15L, 2L, 2L, 4L, 19L,
+19L, 11L, 10L, 10L, 10L, 10L, 10L, 10L, 10L, 14L, 22L, 14L, 18L,
+18L, 10L, 10L, 14L, 14L, 3L, 3L, 3L, 9L, 9L, 9L, 9L, 9L, 9L,
+9L, 11L, 13L, 13L, 15L, 4L, 17L, 8L, 8L, 8L, 8L, 8L, 8L, 8L,
+7L, 7L, 11L, 5L, 10L, 20L, 12L, 12L, 12L, 7L, 13L, 2L, 11L, 11L,
+15L, 15L, 6L, 2L, 19L, 18L, 7L, 7L, 27L, 10L, 14L, 3L, 3L, 6L,
+6L, 18L, 10L, 7L, 22L, 4L, 4L, 4L, 13L, 11L, 11L, 6L, 6L, 20L,
+20L, 20L, 8L, 8L, 16L, 5L, 5L, 24L, 8L, 8L, 8L, 8L, 6L, 6L, 6L,
+11L, 11L, 11L, 11L, 11L, 19L, 9L, 9L, 10L, 10L, 10L, 10L, 10L,
+10L, 14L, 14L, 10L, 10L, 10L, 5L, 14L, 9L, 9L, 9L, 8L, 13L, 12L,
+8L, 7L, 5L, 12L, 12L, 20L, 11L, 12L, 10L, 13L, 11L, 15L, 15L,
+15L, 15L, 15L, 10L, 6L, 9L, 9L, 2L, 8L, 14L, 9L, 9L, 9L, 9L,
+14L, 14L, 18L, 10L, 9L, 9L, 13L, 13L, 8L, 8L, 8L, 11L, 4L, 8L,
+12L, 16L, 8L, 8L, 12L, 7L, 7L, 15L, 17L, 6L, 11L, 15L, 13L, 23L,
+6L, 6L, 14L, 14L, 9L, 9L, 9L, 10L, 10L, 32L, 14L, 10L, 3L, 13L,
+39L, 22L, 13L, 13L, 2L, 15L, 12L, 8L, 8L, 16L, 25L, 20L, 20L,
+11L, 11L, 7L, 21L, 10L, 10L, 10L, 10L, 10L, 6L, 9L, 9L, 18L,
+7L, 7L, 18L, 9L, 9L, 14L, 6L, 18L, 10L, 9L, 9L, 9L, 4L, 4L, 17L,
+25L, 20L, 8L, 16L, 8L, 8L, 8L, 15L, 15L, 6L, 6L, 13L, 13L, 6L,
+22L, 14L, 14L, 14L, 14L, 40L, 40L, 27L, 22L, 10L, 10L, 9L, 9L,
+9L, 19L, 8L, 11L, 17L, 12L, 8L, 8L, 7L, 7L, 7L, 7L, 7L, 11L,
+16L, 10L, 12L, 11L, 11L, 12L, 12L, 12L, 9L, 13L, 13L, 11L, 15L,
+10L, 9L, 9L, 2L, 18L, 7L, 14L, 14L, 9L, 9L, 9L, 6L, 4L, 8L, 11L,
+11L, 15L, 6L, 25L, 8L, 12L, 8L, 8L, 20L, 15L, 15L, 17L, 6L, 11L,
+2L, 4L, 22L, 14L, 14L, 9L, 10L, 10L, 10L, 14L, 14L, 18L, 10L,
+10L, 13L, 9L, 9L, 19L, 8L, 8L, 8L, 12L, 8L, 8L, 7L, 5L, 5L, 12L,
+20L, 20L, 11L, 12L, 7L, 10L, 10L, 13L, 7L, 27L, 18L, 18L, 14L,
+36L, 9L, 9L, 9L, 9L, 14L, 10L, 7L, 9L, 9L, 15L, 15L, 13L, 8L,
+11L, 11L, 4L, 17L, 8L, 16L, 16L, 12L, 8L, 7L, 17L, 6L, 11L, 15L,
+23L, 23L, 19L, 6L, 14L, 14L, 14L, 9L, 10L, 10L, 3L, 10L, 9L,
+9L, 9L, 8L, 13L, 39L, 12L, 8L, 8L, 8L, 8L, 7L, 16L, 16L, 5L,
+20L, 16L, 11L, 11L, 16L, 7L, 8L, 13L, 9L, 9L, 9L, 19L, 18L, 18L,
+7L, 10L, 18L, 10L, 9L, 18L, 9L, 17L, 8L, 11L, 7L, 16L, 12L, 8L,
+12L, 15L, 15L, 17L, 6L, 11L, 11L, 11L, 24L, 23L, 11L, 14L, 14L,
+14L, 14L, 14L, 14L, 22L, 13L, 9L, 12L, 12L, 11L, 21L, 29L, 12L,
+20L, 11L, 11L, 20L, 13L, 13L, 10L, 10L, 10L, 10L, 2L, 27L, 26L,
+17L, 11L, 11L, 20L, 20L, 7L, 16L, 7L, 8L, 8L, 8L, 8L, 12L, 15L,
+15L, 15L, 17L, 6L, 6L, 9L, 32L, 4L, 14L, 5L, 14L, 13L, 22L, 11L,
+12L, 12L, 11L, 12L, 19L, 18L, 18L, 7L, 23L, 18L, 9L, 6L, 9L,
+15L, 8L, 4L, 25L, 8L, 12L, 12L, 7L, 15L, 15L, 15L, 15L, 6L, 14L,
+9L, 14L, 13L, 8L, 11L, 11L, 16L, 11L, 5L, 8L, 2L, 11L, 11L, 19L,
+19L, 10L, 10L, 9L, 6L, 10L, 9L, 8L, 8L, 26L, 8L, 16L, 17L, 6L,
+6L, 10L, 14L, 3L, 3L, 9L, 12L, 11L, 3L, 11L, 13L, 11L, 19L, 10L,
+9L, 18L, 10L, 10L, 10L, 8L, 25L, 9L, 12L, 12L, 7L, 16L, 15L,
+6L, 9L, 9L, 5L, 14L, 14L, 14L, 40L, 3L, 13L, 11L, 11L, 12L, 24L,
+5L, 11L, 11L, 19L, 2L, 18L, 18L, 10L, 10L, 4L, 17L, 8L, 8L, 25L,
+24L, 15L, 15L, 15L, 5L, 14L, 14L, 14L, 14L, 12L, 12L, 11L, 16L,
+20L, 20L, 20L, 8L, 8L, 13L, 13L, 11L, 10L, 19L, 10L, 4L, 17L,
+8L, 16L, 12L, 15L, 17L, 11L, 23L, 14L, 9L, 22L, 13L, 7L, 8L,
+20L, 11L, 11L, 11L, 11L, 8L, 10L, 10L, 9L, 10L, 18L, 10L, 9L,
+17L, 16L, 12L, 7L, 16L, 14L, 11L, 12L, 5L, 29L, 9L, 17L, 8L,
+16L, 16L, 12L, 15L, 17L, 6L, 9L, 9L, 10L, 11L, 10L, 9L, 9L, 16L,
+10L, 9L, 12L, 24L, 12L, 15L, 9L, 8L, 8L, 10L, 15L, 4L, 14L, 3L,
+12L, 11L, 8L, 10L, 17L, 17L, 12L, 17L, 20L, 13L, 11L, 10L, 26L,
+10L, 13L, 11L, 13L, 13L, 13L, 13L, 14L, 19L), admit = c(1L, 1L,
+0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 1L, 1L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L,
+1L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L,
+1L, 1L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 0L,
+1L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 0L,
+1L, 1L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L,
+0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L,
+1L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L,
+1L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 0L, 0L,
+1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 1L,
+0L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
+0L, 1L, 0L, 1L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L,
+0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 0L, 0L,
+0L, 0L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L,
+0L, 0L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 0L, 0L, 1L,
+1L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L,
+0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L,
+0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 0L, 0L,
+1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L,
+1L, 1L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 0L, 1L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 0L,
+0L, 0L, 1L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L,
+0L, 0L, 1L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L,
+0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 0L,
+0L, 1L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 1L,
+1L, 1L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L,
+0L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L,
+1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L,
+0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L,
+0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L,
+0L, 0L, 1L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L,
+0L, 0L, 1L, 1L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 1L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 0L,
+0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 0L,
+1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+1L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L,
+1L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L,
+0L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 1L,
+1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 1L,
+1L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
+0L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L,
+1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 0L,
+1L, 0L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 0L, 1L, 0L, 0L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L,
+1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L,
+0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 1L,
+0L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 1L, 1L,
+1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L,
+1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L,
+1L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 1L, 0L, 0L, 1L,
+1L, 1L, 1L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L,
+0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L,
+0L, 0L, 1L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 1L, 0L, 1L, 0L, 1L,
+1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L,
+1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 0L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 0L, 1L, 0L, 0L, 0L,
+0L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L,
+0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L,
+0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 0L,
+0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 1L, 0L,
+1L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 1L,
+1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L,
+0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 1L,
+1L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L,
+0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L,
+1L, 0L, 1L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 0L,
+0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 1L,
+0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L,
+1L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 1L,
+0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 0L,
+0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 0L, 0L,
+1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 1L,
+0L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 0L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 0L,
+0L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 1L,
+0L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L,
+1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 0L, 1L, 1L, 1L,
+0L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 1L, 0L, 0L, 1L,
+1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L,
+1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 0L,
+1L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
+1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 0L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L,
+0L, 0L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L,
+0L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 0L, 1L,
+0L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 1L,
+1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L,
+0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L,
+1L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 1L, 0L,
+0L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 0L,
+1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 0L,
+1L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 0L, 0L, 1L, 0L, 1L, 0L,
+1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 1L, 1L,
+1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 0L, 1L,
+1L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L,
+1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 0L,
+1L, 0L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L,
+0L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 0L,
+0L, 1L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 0L,
+0L, 1L, 0L, 1L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+1L, 1L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 1L,
+1L, 0L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L,
+0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 0L,
+0L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 1L,
+1L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L,
+1L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L,
+1L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L,
+0L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L,
+1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L,
+1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L,
+1L, 1L, 0L, 0L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 1L,
+0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 1L, 0L,
+1L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L,
+1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 0L, 1L,
+1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 0L,
+0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 1L,
+1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 1L,
+1L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L,
+0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+0L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 0L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L,
+1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 1L,
+0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 0L, 1L, 0L, 0L, 1L, 0L, 1L, 1L,
+1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L,
+1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 0L, 0L,
+0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L,
+0L, 0L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L), procedure = c(1L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+1L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L,
+0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L,
+1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L,
+0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L,
+0L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L,
+1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L,
+0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
+1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L,
+1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L,
+1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L,
+0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L,
+0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 1L, 0L, 1L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
+1L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
+0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 1L, 1L,
+1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 0L,
+0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 0L,
+0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L,
+0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L,
+0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L,
+0L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L,
+1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 0L,
+0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L,
+1L, 0L, 1L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L,
+1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L,
+1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 0L, 1L,
+1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L,
+1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L,
+1L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L,
+0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L,
+0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L,
+1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L,
+0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L,
+0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L,
+0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L,
+0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L,
+1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L,
+0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L), age75 = c(0L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 1L, 0L, 0L,
+0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L,
+1L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 0L, 0L,
+1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
+0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 0L,
+1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
+0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 0L, 0L,
+0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 0L,
+0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 1L,
+0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L,
+0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L,
+1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L,
+1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 0L,
+0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
+1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L,
+0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L,
+0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 0L, 1L, 0L, 0L,
+1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
+0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 1L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L,
+1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L,
+0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L,
+1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L,
+0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 1L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L,
+1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L,
+1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L,
+0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L,
+0L, 0L, 0L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
+1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L,
+1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L,
+1L, 0L, 0L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L,
+1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L,
+1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L,
+1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 0L,
+0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L,
+1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+1L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L,
+0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L,
+0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L,
+1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L,
+1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 1L,
+0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L,
+0L, 1L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L,
+1L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 0L,
+0L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
+0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L,
+0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L,
+1L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L,
+0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 1L, 0L,
+0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L,
+1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 0L,
+0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 0L,
+0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 0L, 0L,
+0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L,
+0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L,
+0L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 1L,
+0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 1L,
+1L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 0L, 0L,
+0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 1L,
+0L, 1L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L,
+0L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L,
+0L, 1L, 0L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L,
+1L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L,
+0L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
+1L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 0L, 0L,
+0L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 0L,
+1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L,
+1L, 0L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 0L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
+1L, 1L, 1L, 1L)), .Names = c("hospital", "sex", "los", "admit",
+"procedure", "age75"), val.labels = c("", "vl_sex", "", "vl_admit",
+"vl_procedure", ""), var.labels = c("ENCRYPTED FACILITY CODE",
+"Gender: 1=Male", "LOS", "Severity:1=Urgent/Emerg", "1=CABG;0=PTCA",
+"Age>75"), row.names = c("1", "2", "3", "4", "5", "6", "7", "8",
+"9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19",
+"20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30",
+"31", "32", "33", "34", "35", "36", "37", "38", "39", "40", "41",
+"42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52",
+"53", "54", "55", "56", "57", "58", "59", "60", "61", "62", "63",
+"64", "65", "66", "67", "68", "69", "70", "71", "72", "73", "74",
+"75", "76", "77", "78", "79", "80", "81", "82", "83", "84", "85",
+"86", "87", "88", "89", "90", "91", "92", "93", "94", "95", "96",
+"97", "98", "99", "100", "101", "102", "103", "104", "105", "106",
+"107", "108", "109", "110", "111", "112", "113", "114", "115",
+"116", "117", "118", "119", "120", "121", "122", "123", "124",
+"125", "126", "127", "128", "129", "130", "131", "132", "133",
+"134", "135", "136", "137", "138", "139", "140", "141", "142",
+"143", "144", "145", "146", "147", "148", "149", "150", "151",
+"152", "153", "154", "155", "156", "157", "158", "159", "160",
+"161", "162", "163", "164", "165", "166", "167", "168", "169",
+"170", "171", "172", "173", "174", "175", "176", "177", "178",
+"179", "180", "181", "182", "183", "184", "185", "186", "187",
+"188", "189", "190", "191", "192", "193", "194", "195", "196",
+"197", "198", "199", "200", "201", "202", "203", "204", "205",
+"206", "207", "208", "209", "210", "211", "212", "213", "214",
+"215", "216", "217", "218", "219", "220", "221", "222", "223",
+"224", "225", "226", "227", "228", "229", "230", "231", "232",
+"233", "234", "235", "236", "237", "238", "239", "240", "241",
+"242", "243", "244", "245", "246", "247", "248", "249", "250",
+"251", "252", "253", "254", "255", "256", "257", "258", "259",
+"260", "261", "262", "263", "264", "265", "266", "267", "268",
+"269", "270", "271", "272", "273", "274", "275", "276", "277",
+"278", "279", "280", "281", "282", "283", "284", "285", "286",
+"287", "288", "289", "290", "291", "292", "293", "294", "295",
+"296", "297", "298", "299", "300", "301", "302", "303", "304",
+"305", "306", "307", "308", "309", "310", "311", "312", "313",
+"314", "315", "316", "317", "318", "319", "320", "321", "322",
+"323", "324", "325", "326", "327", "328", "329", "330", "331",
+"332", "333", "334", "335", "336", "337", "338", "339", "340",
+"341", "342", "343", "344", "345", "346", "347", "348", "349",
+"350", "351", "352", "353", "354", "355", "356", "357", "358",
+"359", "360", "361", "362", "363", "364", "365", "366", "367",
+"368", "369", "370", "371", "372", "373", "374", "375", "376",
+"377", "378", "379", "380", "381", "382", "383", "384", "385",
+"386", "387", "388", "389", "390", "391", "392", "393", "394",
+"395", "396", "397", "398", "399", "400", "401", "402", "403",
+"404", "405", "406", "407", "408", "409", "410", "411", "412",
+"413", "414", "415", "416", "417", "418", "419", "420", "421",
+"422", "423", "424", "425", "426", "427", "428", "429", "430",
+"431", "432", "433", "434", "435", "436", "437", "438", "439",
+"440", "441", "442", "443", "444", "445", "446", "447", "448",
+"449", "450", "451", "452", "453", "454", "455", "456", "457",
+"458", "459", "460", "461", "462", "463", "464", "465", "466",
+"467", "468", "469", "470", "471", "472", "473", "474", "475",
+"476", "477", "478", "479", "480", "481", "482", "483", "484",
+"485", "486", "487", "488", "489", "490", "491", "492", "493",
+"494", "495", "496", "497", "498", "499", "500", "501", "502",
+"503", "504", "505", "506", "507", "508", "509", "510", "511",
+"512", "513", "514", "515", "516", "517", "518", "519", "520",
+"521", "522", "523", "524", "525", "526", "527", "528", "529",
+"530", "531", "532", "533", "534", "535", "536", "537", "538",
+"539", "540", "541", "542", "543", "544", "545", "546", "547",
+"548", "549", "550", "551", "552", "553", "554", "555", "556",
+"557", "558", "559", "560", "561", "562", "563", "564", "565",
+"566", "567", "568", "569", "570", "571", "572", "573", "574",
+"575", "576", "577", "578", "579", "580", "581", "582", "583",
+"584", "585", "586", "587", "588", "589", "590", "591", "592",
+"593", "594", "595", "596", "597", "598", "599", "600", "601",
+"602", "603", "604", "605", "606", "607", "608", "609", "610",
+"611", "612", "613", "614", "615", "616", "617", "618", "619",
+"620", "621", "622", "623", "624", "625", "626", "627", "628",
+"629", "630", "631", "632", "633", "634", "635", "636", "637",
+"638", "639", "640", "641", "642", "643", "644", "645", "646",
+"647", "648", "649", "650", "651", "652", "653", "654", "655",
+"656", "657", "658", "659", "660", "661", "662", "663", "664",
+"665", "666", "667", "668", "669", "670", "671", "672", "673",
+"674", "675", "676", "677", "678", "679", "680", "681", "682",
+"683", "684", "685", "686", "687", "688", "689", "690", "691",
+"692", "693", "694", "695", "696", "697", "698", "699", "700",
+"701", "702", "703", "704", "705", "706", "707", "708", "709",
+"710", "711", "712", "713", "714", "715", "716", "717", "718",
+"719", "720", "721", "722", "723", "724", "725", "726", "727",
+"728", "729", "730", "731", "732", "733", "734", "735", "736",
+"737", "738", "739", "740", "741", "742", "743", "744", "745",
+"746", "747", "748", "749", "750", "751", "752", "753", "754",
+"755", "756", "757", "758", "759", "760", "761", "762", "763",
+"764", "765", "766", "767", "768", "769", "770", "771", "772",
+"773", "774", "775", "776", "777", "778", "779", "780", "781",
+"782", "783", "784", "785", "786", "787", "788", "789", "790",
+"791", "792", "793", "794", "795", "796", "797", "798", "799",
+"800", "801", "802", "803", "804", "805", "806", "807", "808",
+"809", "810", "811", "812", "813", "814", "815", "816", "817",
+"818", "819", "820", "821", "822", "823", "824", "825", "826",
+"827", "828", "829", "830", "831", "832", "833", "834", "835",
+"836", "837", "838", "839", "840", "841", "842", "843", "844",
+"845", "846", "847", "848", "849", "850", "851", "852", "853",
+"854", "855", "856", "857", "858", "859", "860", "861", "862",
+"863", "864", "865", "866", "867", "868", "869", "870", "871",
+"872", "873", "874", "875", "876", "877", "878", "879", "880",
+"881", "882", "883", "884", "885", "886", "887", "888", "889",
+"890", "891", "892", "893", "894", "895", "896", "897", "898",
+"899", "900", "901", "902", "903", "904", "905", "906", "907",
+"908", "909", "910", "911", "912", "913", "914", "915", "916",
+"917", "918", "919", "920", "921", "922", "923", "924", "925",
+"926", "927", "928", "929", "930", "931", "932", "933", "934",
+"935", "936", "937", "938", "939", "940", "941", "942", "943",
+"944", "945", "946", "947", "948", "949", "950", "951", "952",
+"953", "954", "955", "956", "957", "958", "959", "960", "961",
+"962", "963", "964", "965", "966", "967", "968", "969", "970",
+"971", "972", "973", "974", "975", "976", "977", "978", "979",
+"980", "981", "982", "983", "984", "985", "986", "987", "988",
+"989", "990", "991", "992", "993", "994", "995", "996", "997",
+"998", "999", "1000", "1001", "1002", "1003", "1004", "1005",
+"1006", "1007", "1008", "1009", "1010", "1011", "1012", "1013",
+"1014", "1015", "1016", "1017", "1018", "1019", "1020", "1021",
+"1022", "1023", "1024", "1025", "1026", "1027", "1028", "1029",
+"1030", "1031", "1032", "1033", "1034", "1035", "1036", "1037",
+"1038", "1039", "1040", "1041", "1042", "1043", "1044", "1045",
+"1046", "1047", "1048", "1049", "1050", "1051", "1052", "1053",
+"1054", "1055", "1056", "1057", "1058", "1059", "1060", "1061",
+"1062", "1063", "1064", "1065", "1066", "1067", "1068", "1069",
+"1070", "1071", "1072", "1073", "1074", "1075", "1076", "1077",
+"1078", "1079", "1080", "1081", "1082", "1083", "1084", "1085",
+"1086", "1087", "1088", "1089", "1090", "1091", "1092", "1093",
+"1094", "1095", "1096", "1097", "1098", "1099", "1100", "1101",
+"1102", "1103", "1104", "1105", "1106", "1107", "1108", "1109",
+"1110", "1111", "1112", "1113", "1114", "1115", "1116", "1117",
+"1118", "1119", "1120", "1121", "1122", "1123", "1124", "1125",
+"1126", "1127", "1128", "1129", "1130", "1131", "1132", "1133",
+"1134", "1135", "1136", "1137", "1138", "1139", "1140", "1141",
+"1142", "1143", "1144", "1145", "1146", "1147", "1148", "1149",
+"1150", "1151", "1152", "1153", "1154", "1155", "1156", "1157",
+"1158", "1159", "1160", "1161", "1162", "1163", "1164", "1165",
+"1166", "1167", "1168", "1169", "1170", "1171", "1172", "1173",
+"1174", "1175", "1176", "1177", "1178", "1179", "1180", "1181",
+"1182", "1183", "1184", "1185", "1186", "1187", "1188", "1189",
+"1190", "1191", "1192", "1193", "1194", "1195", "1196", "1197",
+"1198", "1199", "1200", "1201", "1202", "1203", "1204", "1205",
+"1206", "1207", "1208", "1209", "1210", "1211", "1212", "1213",
+"1214", "1215", "1216", "1217", "1218", "1219", "1220", "1221",
+"1222", "1223", "1224", "1225", "1226", "1227", "1228", "1229",
+"1230", "1231", "1232", "1233", "1234", "1235", "1236", "1237",
+"1238", "1239", "1240", "1241", "1242", "1243", "1244", "1245",
+"1246", "1247", "1248", "1249", "1250", "1251", "1252", "1253",
+"1254", "1255", "1256", "1257", "1258", "1259", "1260", "1261",
+"1262", "1263", "1264", "1265", "1266", "1267", "1268", "1269",
+"1270", "1271", "1272", "1273", "1274", "1275", "1276", "1277",
+"1278", "1279", "1280", "1281", "1282", "1283", "1284", "1285",
+"1286", "1287", "1288", "1289", "1290", "1291", "1292", "1293",
+"1294", "1295", "1296", "1297", "1298", "1299", "1300", "1301",
+"1302", "1303", "1304", "1305", "1306", "1307", "1308", "1309",
+"1310", "1311", "1312", "1313", "1314", "1315", "1316", "1317",
+"1318", "1319", "1320", "1321", "1322", "1323", "1324", "1325",
+"1326", "1327", "1328", "1329", "1330", "1331", "1332", "1333",
+"1334", "1335", "1336", "1337", "1338", "1339", "1340", "1341",
+"1342", "1343", "1344", "1345", "1346", "1347", "1348", "1349",
+"1350", "1351", "1352", "1353", "1354", "1355", "1356", "1357",
+"1358", "1359", "1360", "1361", "1362", "1363", "1364", "1365",
+"1366", "1367", "1368", "1369", "1370", "1371", "1372", "1373",
+"1374", "1375", "1376", "1377", "1378", "1379", "1380", "1381",
+"1382", "1383", "1384", "1385", "1386", "1387", "1388", "1389",
+"1390", "1391", "1392", "1393", "1394", "1395", "1396", "1397",
+"1398", "1399", "1400", "1401", "1402", "1403", "1404", "1405",
+"1406", "1407", "1408", "1409", "1410", "1411", "1412", "1413",
+"1414", "1415", "1416", "1417", "1418", "1419", "1420", "1421",
+"1422", "1423", "1424", "1425", "1426", "1427", "1428", "1429",
+"1430", "1431", "1432", "1433", "1434", "1435", "1436", "1437",
+"1438", "1439", "1440", "1441", "1442", "1443", "1444", "1445",
+"1446", "1447", "1448", "1449", "1450", "1451", "1452", "1453",
+"1454", "1455", "1456", "1457", "1458", "1459", "1460", "1461",
+"1462", "1463", "1464", "1465", "1466", "1467", "1468", "1469",
+"1470", "1471", "1472", "1473", "1474", "1475", "1476", "1477",
+"1478", "1479", "1480", "1481", "1482", "1483", "1484", "1485",
+"1486", "1487", "1488", "1489", "1490", "1491", "1492", "1493",
+"1494", "1495", "1496", "1497", "1498", "1499", "1500", "1501",
+"1502", "1503", "1504", "1505", "1506", "1507", "1508", "1509",
+"1510", "1511", "1512", "1513", "1514", "1515", "1516", "1517",
+"1518", "1519", "1520", "1521", "1522", "1523", "1524", "1525",
+"1526", "1527", "1528", "1529", "1530", "1531", "1532", "1533",
+"1534", "1535", "1536", "1537", "1538", "1539", "1540", "1541",
+"1542", "1543", "1544", "1545", "1546", "1547", "1548", "1549",
+"1550", "1551", "1552", "1553", "1554", "1555", "1556", "1557",
+"1558", "1559", "1560", "1561", "1562", "1563", "1564", "1565",
+"1566", "1567", "1568", "1569", "1570", "1571", "1572", "1573",
+"1574", "1575", "1576", "1577", "1578", "1579", "1580", "1581",
+"1582", "1583", "1584", "1585", "1586", "1587", "1588", "1589",
+"1590", "1591", "1592", "1593", "1594", "1595", "1596", "1597",
+"1598", "1599", "1600", "1601", "1602", "1603", "1604", "1605",
+"1606", "1607", "1608", "1609", "1610", "1611", "1612", "1613",
+"1614", "1615", "1616", "1617", "1618", "1619", "1620", "1621",
+"1622", "1623", "1624", "1625", "1626", "1627", "1628", "1629",
+"1630", "1631", "1632", "1633", "1634", "1635", "1636", "1637",
+"1638", "1639", "1640", "1641", "1642", "1643", "1644", "1645",
+"1646", "1647", "1648", "1649", "1650", "1651", "1652", "1653",
+"1654", "1655", "1656", "1657", "1658", "1659", "1660", "1661",
+"1662", "1663", "1664", "1665", "1666", "1667", "1668", "1669",
+"1670", "1671", "1672", "1673", "1674", "1675", "1676", "1677",
+"1678", "1679", "1680", "1681", "1682", "1683", "1684", "1685",
+"1686", "1687", "1688", "1689", "1690", "1691", "1692", "1693",
+"1694", "1695", "1696", "1697", "1698", "1699", "1700", "1701",
+"1702", "1703", "1704", "1705", "1706", "1707", "1708", "1709",
+"1710", "1711", "1712", "1713", "1714", "1715", "1716", "1717",
+"1718", "1719", "1720", "1721", "1722", "1723", "1724", "1725",
+"1726", "1727", "1728", "1729", "1730", "1731", "1732", "1733",
+"1734", "1735", "1736", "1737", "1738", "1739", "1740", "1741",
+"1742", "1743", "1744", "1745", "1746", "1747", "1748", "1749",
+"1750", "1751", "1752", "1753", "1754", "1755", "1756", "1757",
+"1758", "1759", "1760", "1761", "1762", "1763", "1764", "1765",
+"1766", "1767", "1768", "1769", "1770", "1771", "1772", "1773",
+"1774", "1775", "1776", "1777", "1778", "1779", "1780", "1781",
+"1782", "1783", "1784", "1785", "1786", "1787", "1788", "1789",
+"1790", "1791", "1792", "1793", "1794", "1795", "1796", "1797",
+"1798", "1799", "1800", "1801", "1802", "1803", "1804", "1805",
+"1806", "1807", "1808", "1809", "1810", "1811", "1812", "1813",
+"1814", "1815", "1816", "1817", "1818", "1819", "1820", "1821",
+"1822", "1823", "1824", "1825", "1826", "1827", "1828", "1829",
+"1830", "1831", "1832", "1833", "1834", "1835", "1836", "1837",
+"1838", "1839", "1840", "1841", "1842", "1843", "1844", "1845",
+"1846", "1847", "1848", "1849", "1850", "1851", "1852", "1853",
+"1854", "1855", "1856", "1857", "1858", "1859", "1860", "1861",
+"1862", "1863", "1864", "1865", "1866", "1867", "1868", "1869",
+"1870", "1871", "1872", "1873", "1874", "1875", "1876", "1877",
+"1878", "1879", "1880", "1881", "1882", "1883", "1884", "1885",
+"1886", "1887", "1888", "1889", "1890", "1891", "1892", "1893",
+"1894", "1895", "1896", "1897", "1898", "1899", "1900", "1901",
+"1902", "1903", "1904", "1905", "1906", "1907", "1908", "1909",
+"1910", "1911", "1912", "1913", "1914", "1915", "1916", "1917",
+"1918", "1919", "1920", "1921", "1922", "1923", "1924", "1925",
+"1926", "1927", "1928", "1929", "1930", "1931", "1932", "1933",
+"1934", "1935", "1936", "1937", "1938", "1939", "1940", "1941",
+"1942", "1943", "1944", "1945", "1946", "1947", "1948", "1949",
+"1950", "1951", "1952", "1953", "1954", "1955", "1956", "1957",
+"1958", "1959", "1960", "1961", "1962", "1963", "1964", "1965",
+"1966", "1967", "1968", "1969", "1970", "1971", "1972", "1973",
+"1974", "1975", "1976", "1977", "1978", "1979", "1980", "1981",
+"1982", "1983", "1984", "1985", "1986", "1987", "1988", "1989",
+"1990", "1991", "1992", "1993", "1994", "1995", "1996", "1997",
+"1998", "1999", "2000", "2001", "2002", "2003", "2004", "2005",
+"2006", "2007", "2008", "2009", "2010", "2011", "2012", "2013",
+"2014", "2015", "2016", "2017", "2018", "2019", "2020", "2021",
+"2022", "2023", "2024", "2025", "2026", "2027", "2028", "2029",
+"2030", "2031", "2032", "2033", "2034", "2035", "2036", "2037",
+"2038", "2039", "2040", "2041", "2042", "2043", "2044", "2045",
+"2046", "2047", "2048", "2049", "2050", "2051", "2052", "2053",
+"2054", "2055", "2056", "2057", "2058", "2059", "2060", "2061",
+"2062", "2063", "2064", "2065", "2066", "2067", "2068", "2069",
+"2070", "2071", "2072", "2073", "2074", "2075", "2076", "2077",
+"2078", "2079", "2080", "2081", "2082", "2083", "2084", "2085",
+"2086", "2087", "2088", "2089", "2090", "2091", "2092", "2093",
+"2094", "2095", "2096", "2097", "2098", "2099", "2100", "2101",
+"2102", "2103", "2104", "2105", "2106", "2107", "2108", "2109",
+"2110", "2111", "2112", "2113", "2114", "2115", "2116", "2117",
+"2118", "2119", "2120", "2121", "2122", "2123", "2124", "2125",
+"2126", "2127", "2128", "2129", "2130", "2131", "2132", "2133",
+"2134", "2135", "2136", "2137", "2138", "2139", "2140", "2141",
+"2142", "2143", "2144", "2145", "2146", "2147", "2148", "2149",
+"2150", "2151", "2152", "2153", "2154", "2155", "2156", "2157",
+"2158", "2159", "2160", "2161", "2162", "2163", "2164", "2165",
+"2166", "2167", "2168", "2169", "2170", "2171", "2172", "2173",
+"2174", "2175", "2176", "2177", "2178", "2179", "2180", "2181",
+"2182", "2183", "2184", "2185", "2186", "2187", "2188", "2189",
+"2190", "2191", "2192", "2193", "2194", "2195", "2196", "2197",
+"2198", "2199", "2200", "2201", "2202", "2203", "2204", "2205",
+"2206", "2207", "2208", "2209", "2210", "2211", "2212", "2213",
+"2214", "2215", "2216", "2217", "2218", "2219", "2220", "2221",
+"2222", "2223", "2224", "2225", "2226", "2227", "2228", "2229",
+"2230", "2231", "2232", "2233", "2234", "2235", "2236", "2237",
+"2238", "2239", "2240", "2241", "2242", "2243", "2244", "2245",
+"2246", "2247", "2248", "2249", "2250", "2251", "2252", "2253",
+"2254", "2255", "2256", "2257", "2258", "2259", "2260", "2261",
+"2262", "2263", "2264", "2265", "2266", "2267", "2268", "2269",
+"2270", "2271", "2272", "2273", "2274", "2275", "2276", "2277",
+"2278", "2279", "2280", "2281", "2282", "2283", "2284", "2285",
+"2286", "2287", "2288", "2289", "2290", "2291", "2292", "2293",
+"2294", "2295", "2296", "2297", "2298", "2299", "2300", "2301",
+"2302", "2303", "2304", "2305", "2306", "2307", "2308", "2309",
+"2310", "2311", "2312", "2313", "2314", "2315", "2316", "2317",
+"2318", "2319", "2320", "2321", "2322", "2323", "2324", "2325",
+"2326", "2327", "2328", "2329", "2330", "2331", "2332", "2333",
+"2334", "2335", "2336", "2337", "2338", "2339", "2340", "2341",
+"2342", "2343", "2344", "2345", "2346", "2347", "2348", "2349",
+"2350", "2351", "2352", "2353", "2354", "2355", "2356", "2357",
+"2358", "2359", "2360", "2361", "2362", "2363", "2364", "2365",
+"2366", "2367", "2368", "2369", "2370", "2371", "2372", "2373",
+"2374", "2375", "2376", "2377", "2378", "2379", "2380", "2381",
+"2382", "2383", "2384", "2385", "2386", "2387", "2388", "2389",
+"2390", "2391", "2392", "2393", "2394", "2395", "2396", "2397",
+"2398", "2399", "2400", "2401", "2402", "2403", "2404", "2405",
+"2406", "2407", "2408", "2409", "2410", "2411", "2412", "2413",
+"2414", "2415", "2416", "2417", "2418", "2419", "2420", "2421",
+"2422", "2423", "2424", "2425", "2426", "2427", "2428", "2429",
+"2430", "2431", "2432", "2433", "2434", "2435", "2436", "2437",
+"2438", "2439", "2440", "2441", "2442", "2443", "2444", "2445",
+"2446", "2447", "2448", "2449", "2450", "2451", "2452", "2453",
+"2454", "2455", "2456", "2457", "2458", "2459", "2460", "2461",
+"2462", "2463", "2464", "2465", "2466", "2467", "2468", "2469",
+"2470", "2471", "2472", "2473", "2474", "2475", "2476", "2477",
+"2478", "2479", "2480", "2481", "2482", "2483", "2484", "2485",
+"2486", "2487", "2488", "2489", "2490", "2491", "2492", "2493",
+"2494", "2495", "2496", "2497", "2498", "2499", "2500", "2501",
+"2502", "2503", "2504", "2505", "2506", "2507", "2508", "2509",
+"2510", "2511", "2512", "2513", "2514", "2515", "2516", "2517",
+"2518", "2519", "2520", "2521", "2522", "2523", "2524", "2525",
+"2526", "2527", "2528", "2529", "2530", "2531", "2532", "2533",
+"2534", "2535", "2536", "2537", "2538", "2539", "2540", "2541",
+"2542", "2543", "2544", "2545", "2546", "2547", "2548", "2549",
+"2550", "2551", "2552", "2553", "2554", "2555", "2556", "2557",
+"2558", "2559", "2560", "2561", "2562", "2563", "2564", "2565",
+"2566", "2567", "2568", "2569", "2570", "2571", "2572", "2573",
+"2574", "2575", "2576", "2577", "2578", "2579", "2580", "2581",
+"2582", "2583", "2584", "2585", "2586", "2587", "2588", "2589",
+"2590", "2591", "2592", "2593", "2594", "2595", "2596", "2597",
+"2598", "2599", "2600", "2601", "2602", "2603", "2604", "2605",
+"2606", "2607", "2608", "2609", "2610", "2611", "2612", "2613",
+"2614", "2615", "2616", "2617", "2618", "2619", "2620", "2621",
+"2622", "2623", "2624", "2625", "2626", "2627", "2628", "2629",
+"2630", "2631", "2632", "2633", "2634", "2635", "2636", "2637",
+"2638", "2639", "2640", "2641", "2642", "2643", "2644", "2645",
+"2646", "2647", "2648", "2649", "2650", "2651", "2652", "2653",
+"2654", "2655", "2656", "2657", "2658", "2659", "2660", "2661",
+"2662", "2663", "2664", "2665", "2666", "2667", "2668", "2669",
+"2670", "2671", "2672", "2673", "2674", "2675", "2676", "2677",
+"2678", "2679", "2680", "2681", "2682", "2683", "2684", "2685",
+"2686", "2687", "2688", "2689", "2690", "2691", "2692", "2693",
+"2694", "2695", "2696", "2697", "2698", "2699", "2700", "2701",
+"2702", "2703", "2704", "2705", "2706", "2707", "2708", "2709",
+"2710", "2711", "2712", "2713", "2714", "2715", "2716", "2717",
+"2718", "2719", "2720", "2721", "2722", "2723", "2724", "2725",
+"2726", "2727", "2728", "2729", "2730", "2731", "2732", "2733",
+"2734", "2735", "2736", "2737", "2738", "2739", "2740", "2741",
+"2742", "2743", "2744", "2745", "2746", "2747", "2748", "2749",
+"2750", "2751", "2752", "2753", "2754", "2755", "2756", "2757",
+"2758", "2759", "2760", "2761", "2762", "2763", "2764", "2765",
+"2766", "2767", "2768", "2769", "2770", "2771", "2772", "2773",
+"2774", "2775", "2776", "2777", "2778", "2779", "2780", "2781",
+"2782", "2783", "2784", "2785", "2786", "2787", "2788", "2789",
+"2790", "2791", "2792", "2793", "2794", "2795", "2796", "2797",
+"2798", "2799", "2800", "2801", "2802", "2803", "2804", "2805",
+"2806", "2807", "2808", "2809", "2810", "2811", "2812", "2813",
+"2814", "2815", "2816", "2817", "2818", "2819", "2820", "2821",
+"2822", "2823", "2824", "2825", "2826", "2827", "2828", "2829",
+"2830", "2831", "2832", "2833", "2834", "2835", "2836", "2837",
+"2838", "2839", "2840", "2841", "2842", "2843", "2844", "2845",
+"2846", "2847", "2848", "2849", "2850", "2851", "2852", "2853",
+"2854", "2855", "2856", "2857", "2858", "2859", "2860", "2861",
+"2862", "2863", "2864", "2865", "2866", "2867", "2868", "2869",
+"2870", "2871", "2872", "2873", "2874", "2875", "2876", "2877",
+"2878", "2879", "2880", "2881", "2882", "2883", "2884", "2885",
+"2886", "2887", "2888", "2889", "2890", "2891", "2892", "2893",
+"2894", "2895", "2896", "2897", "2898", "2899", "2900", "2901",
+"2902", "2903", "2904", "2905", "2906", "2907", "2908", "2909",
+"2910", "2911", "2912", "2913", "2914", "2915", "2916", "2917",
+"2918", "2919", "2920", "2921", "2922", "2923", "2924", "2925",
+"2926", "2927", "2928", "2929", "2930", "2931", "2932", "2933",
+"2934", "2935", "2936", "2937", "2938", "2939", "2940", "2941",
+"2942", "2943", "2944", "2945", "2946", "2947", "2948", "2949",
+"2950", "2951", "2952", "2953", "2954", "2955", "2956", "2957",
+"2958", "2959", "2960", "2961", "2962", "2963", "2964", "2965",
+"2966", "2967", "2968", "2969", "2970", "2971", "2972", "2973",
+"2974", "2975", "2976", "2977", "2978", "2979", "2980", "2981",
+"2982", "2983", "2984", "2985", "2986", "2987", "2988", "2989",
+"2990", "2991", "2992", "2993", "2994", "2995", "2996", "2997",
+"2998", "2999", "3000", "3001", "3002", "3003", "3004", "3005",
+"3006", "3007", "3008", "3009", "3010", "3011", "3012", "3013",
+"3014", "3015", "3016", "3017", "3018", "3019", "3020", "3021",
+"3022", "3023", "3024", "3025", "3026", "3027", "3028", "3029",
+"3030", "3031", "3032", "3033", "3034", "3035", "3036", "3037",
+"3038", "3039", "3040", "3041", "3042", "3043", "3044", "3045",
+"3046", "3047", "3048", "3049", "3050", "3051", "3052", "3053",
+"3054", "3055", "3056", "3057", "3058", "3059", "3060", "3061",
+"3062", "3063", "3064", "3065", "3066", "3067", "3068", "3069",
+"3070", "3071", "3072", "3073", "3074", "3075", "3076", "3077",
+"3078", "3079", "3080", "3081", "3082", "3083", "3084", "3085",
+"3086", "3087", "3088", "3089", "3090", "3091", "3092", "3093",
+"3094", "3095", "3096", "3097", "3098", "3099", "3100", "3101",
+"3102", "3103", "3104", "3105", "3106", "3107", "3108", "3109",
+"3110", "3111", "3112", "3113", "3114", "3115", "3116", "3117",
+"3118", "3119", "3120", "3121", "3122", "3123", "3124", "3125",
+"3126", "3127", "3128", "3129", "3130", "3131", "3132", "3133",
+"3134", "3135", "3136", "3137", "3138", "3139", "3140", "3141",
+"3142", "3143", "3144", "3145", "3146", "3147", "3148", "3149",
+"3150", "3151", "3152", "3153", "3154", "3155", "3156", "3157",
+"3158", "3159", "3160", "3161", "3162", "3163", "3164", "3165",
+"3166", "3167", "3168", "3169", "3170", "3171", "3172", "3173",
+"3174", "3175", "3176", "3177", "3178", "3179", "3180", "3181",
+"3182", "3183", "3184", "3185", "3186", "3187", "3188", "3189",
+"3190", "3191", "3192", "3193", "3194", "3195", "3196", "3197",
+"3198", "3199", "3200", "3201", "3202", "3203", "3204", "3205",
+"3206", "3207", "3208", "3209", "3210", "3211", "3212", "3213",
+"3214", "3215", "3216", "3217", "3218", "3219", "3220", "3221",
+"3222", "3223", "3224", "3225", "3226", "3227", "3228", "3229",
+"3230", "3231", "3232", "3233", "3234", "3235", "3236", "3237",
+"3238", "3239", "3240", "3241", "3242", "3243", "3244", "3245",
+"3246", "3247", "3248", "3249", "3250", "3251", "3252", "3253",
+"3254", "3255", "3256", "3257", "3258", "3259", "3260", "3261",
+"3262", "3263", "3264", "3265", "3266", "3267", "3268", "3269",
+"3270", "3271", "3272", "3273", "3274", "3275", "3276", "3277",
+"3278", "3279", "3280", "3281", "3282", "3283", "3284", "3285",
+"3286", "3287", "3288", "3289", "3290", "3291", "3292", "3293",
+"3294", "3295", "3296", "3297", "3298", "3299", "3300", "3301",
+"3302", "3303", "3304", "3305", "3306", "3307", "3308", "3309",
+"3310", "3311", "3312", "3313", "3314", "3315", "3316", "3317",
+"3318", "3319", "3320", "3321", "3322", "3323", "3324", "3325",
+"3326", "3327", "3328", "3329", "3330", "3331", "3332", "3333",
+"3334", "3335", "3336", "3337", "3338", "3339", "3340", "3341",
+"3342", "3343", "3344", "3345", "3346", "3347", "3348", "3349",
+"3350", "3351", "3352", "3353", "3354", "3355", "3356", "3357",
+"3358", "3359", "3360", "3361", "3362", "3363", "3364", "3365",
+"3366", "3367", "3368", "3369", "3370", "3371", "3372", "3373",
+"3374", "3375", "3376", "3377", "3378", "3379", "3380", "3381",
+"3382", "3383", "3384", "3385", "3386", "3387", "3388", "3389",
+"3390", "3391", "3392", "3393", "3394", "3395", "3396", "3397",
+"3398", "3399", "3400", "3401", "3402", "3403", "3404", "3405",
+"3406", "3407", "3408", "3409", "3410", "3411", "3412", "3413",
+"3414", "3415", "3416", "3417", "3418", "3419", "3420", "3421",
+"3422", "3423", "3424", "3425", "3426", "3427", "3428", "3429",
+"3430", "3431", "3432", "3433", "3434", "3435", "3436", "3437",
+"3438", "3439", "3440", "3441", "3442", "3443", "3444", "3445",
+"3446", "3447", "3448", "3449", "3450", "3451", "3452", "3453",
+"3454", "3455", "3456", "3457", "3458", "3459", "3460", "3461",
+"3462", "3463", "3464", "3465", "3466", "3467", "3468", "3469",
+"3470", "3471", "3472", "3473", "3474", "3475", "3476", "3477",
+"3478", "3479", "3480", "3481", "3482", "3483", "3484", "3485",
+"3486", "3487", "3488", "3489", "3490", "3491", "3492", "3493",
+"3494", "3495", "3496", "3497", "3498", "3499", "3500", "3501",
+"3502", "3503", "3504", "3505", "3506", "3507", "3508", "3509",
+"3510", "3511", "3512", "3513", "3514", "3515", "3516", "3517",
+"3518", "3519", "3520", "3521", "3522", "3523", "3524", "3525",
+"3526", "3527", "3528", "3529", "3530", "3531", "3532", "3533",
+"3534", "3535", "3536", "3537", "3538", "3539", "3540", "3541",
+"3542", "3543", "3544", "3545", "3546", "3547", "3548", "3549",
+"3550", "3551", "3552", "3553", "3554", "3555", "3556", "3557",
+"3558", "3559", "3560", "3561", "3562", "3563", "3564", "3565",
+"3566", "3567", "3568", "3569", "3570", "3571", "3572", "3573",
+"3574", "3575", "3576", "3577", "3578", "3579", "3580", "3581",
+"3582", "3583", "3584", "3585", "3586", "3587", "3588", "3589"
+), class = "data.frame", label.table = list(NULL, structure(c(0,
+1), .Names = c("Female", "Male")), NULL, structure(c(0, 1), .Names = c("Elective",
+"Emer/Urg")), structure(c(0, 1), .Names = c("PTCA", "CABG")),
+ NULL))
diff --git a/data/crashbc.R b/data/crashbc.R
new file mode 100644
index 0000000..4bb5d22
--- /dev/null
+++ b/data/crashbc.R
@@ -0,0 +1,18 @@
+crashbc <-
+structure(list(Monday = c(0L, 0L, 0L, 0L, 0L, 0L, 1L, 12L, 20L,
+5L, 9L, 3L, 5L, 8L, 6L, 11L, 17L, 10L, 4L, 6L, 1L, 0L, 1L, 1L
+), Tuesday = c(0L, 0L, 0L, 0L, 0L, 3L, 3L, 5L, 28L, 4L, 1L, 8L,
+4L, 9L, 2L, 13L, 16L, 23L, 8L, 3L, 2L, 0L, 1L, 0L), Wednesday = c(0L,
+0L, 0L, 0L, 0L, 2L, 5L, 17L, 23L, 6L, 6L, 1L, 5L, 6L, 3L, 13L,
+10L, 14L, 11L, 4L, 2L, 1L, 0L, 0L), Thursday = c(0L, 0L, 0L,
+0L, 1L, 1L, 4L, 17L, 26L, 3L, 2L, 5L, 9L, 4L, 5L, 16L, 12L, 25L,
+9L, 9L, 1L, 3L, 1L, 2L), Friday = c(0L, 0L, 0L, 0L, 0L, 1L, 5L,
+7L, 16L, 7L, 4L, 2L, 5L, 5L, 4L, 14L, 16L, 12L, 7L, 4L, 4L, 2L,
+0L, 1L), Saturday = c(1L, 0L, 0L, 0L, 0L, 0L, 0L, 4L, 10L, 6L,
+10L, 6L, 5L, 7L, 6L, 7L, 10L, 5L, 2L, 2L, 1L, 0L, 0L, 1L), Sunday = c(0L,
+0L, 2L, 1L, 0L, 0L, 0L, 1L, 5L, 8L, 7L, 14L, 3L, 3L, 7L, 5L,
+4L, 3L, 7L, 1L, 1L, 0L, 0L, 0L)), .Names = c("Monday", "Tuesday",
+"Wednesday", "Thursday", "Friday", "Saturday", "Sunday"), class = "data.frame", row.names = c("0",
+"1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12",
+"13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23"
+))
diff --git a/data/crashf.R b/data/crashf.R
new file mode 100644
index 0000000..60d0342
--- /dev/null
+++ b/data/crashf.R
@@ -0,0 +1,18 @@
+crashf <-
+structure(list(Monday = c(1L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 2L,
+1L, 1L, 2L, 2L, 5L, 2L, 0L, 3L, 3L, 1L, 1L, 2L, 2L, 2L, 0L),
+ Tuesday = c(0L, 0L, 0L, 1L, 1L, 1L, 1L, 2L, 2L, 0L, 4L, 2L,
+ 2L, 1L, 1L, 3L, 5L, 2L, 1L, 1L, 2L, 4L, 0L, 1L), Wednesday = c(0L,
+ 0L, 2L, 1L, 0L, 1L, 2L, 1L, 1L, 2L, 3L, 2L, 2L, 2L, 1L, 2L,
+ 2L, 6L, 2L, 1L, 2L, 2L, 0L, 1L), Thursday = c(1L, 5L, 0L,
+ 2L, 2L, 2L, 1L, 1L, 6L, 1L, 2L, 2L, 4L, 5L, 0L, 3L, 5L, 4L,
+ 4L, 2L, 4L, 0L, 1L, 0L), Friday = c(2L, 6L, 2L, 2L, 0L, 0L,
+ 0L, 0L, 1L, 4L, 0L, 4L, 4L, 1L, 4L, 2L, 6L, 1L, 4L, 6L, 2L,
+ 1L, 3L, 1L), Saturday = c(1L, 5L, 2L, 3L, 2L, 0L, 0L, 0L,
+ 0L, 0L, 4L, 1L, 3L, 4L, 3L, 3L, 3L, 6L, 4L, 3L, 4L, 1L, 6L,
+ 5L), Sunday = c(4L, 1L, 2L, 4L, 2L, 0L, 3L, 0L, 1L, 0L, 2L,
+ 2L, 2L, 1L, 2L, 4L, 5L, 5L, 1L, 3L, 3L, 1L, 2L, 0L)), .Names = c("Monday",
+"Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"
+), class = "data.frame", row.names = c("0", "1", "2", "3", "4",
+"5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15",
+"16", "17", "18", "19", "20", "21", "22", "23"))
diff --git a/data/crashi.R b/data/crashi.R
new file mode 100644
index 0000000..51ac861
--- /dev/null
+++ b/data/crashi.R
@@ -0,0 +1,21 @@
+crashi <-
+structure(list(Monday = c(16L, 13L, 5L, 6L, 7L, 12L, 37L, 66L,
+117L, 67L, 67L, 80L, 75L, 77L, 84L, 112L, 115L, 127L, 63L, 47L,
+25L, 34L, 24L, 28L), Tuesday = c(10L, 11L, 8L, 4L, 6L, 14L, 37L,
+79L, 138L, 81L, 70L, 80L, 85L, 69L, 87L, 136L, 110L, 130L, 69L,
+63L, 46L, 42L, 26L, 23L), Wednesday = c(22L, 15L, 16L, 8L, 11L,
+14L, 32L, 92L, 132L, 68L, 62L, 50L, 86L, 84L, 98L, 134L, 138L,
+140L, 91L, 53L, 62L, 49L, 35L, 20L), Thursday = c(12L, 23L, 13L,
+12L, 16L, 18L, 45L, 75L, 138L, 75L, 76L, 80L, 87L, 90L, 85L,
+156L, 144L, 149L, 97L, 57L, 55L, 53L, 52L, 49L), Friday = c(29L,
+23L, 24L, 19L, 11L, 19L, 32L, 73L, 122L, 72L, 72L, 74L, 94L,
+90L, 104L, 158L, 146L, 155L, 142L, 67L, 68L, 85L, 67L, 61L),
+ Saturday = c(55L, 42L, 37L, 31L, 35L, 27L, 21L, 40L, 59L,
+ 59L, 84L, 114L, 101L, 105L, 103L, 120L, 106L, 104L, 83L,
+ 69L, 70L, 62L, 54L, 69L), Sunday = c(55L, 64L, 64L, 45L,
+ 35L, 35L, 36L, 33L, 36L, 45L, 57L, 86L, 93L, 80L, 96L, 103L,
+ 90L, 97L, 64L, 52L, 44L, 33L, 18L, 29L)), .Names = c("Monday",
+"Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"
+), class = "data.frame", row.names = c("0", "1", "2", "3", "4",
+"5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15",
+"16", "17", "18", "19", "20", "21", "22", "23"))
diff --git a/data/crashmc.R b/data/crashmc.R
new file mode 100644
index 0000000..ba1ffc1
--- /dev/null
+++ b/data/crashmc.R
@@ -0,0 +1,19 @@
+crashmc <-
+structure(list(Monday = c(1L, 0L, 0L, 2L, 0L, 1L, 6L, 8L, 12L,
+9L, 12L, 7L, 13L, 5L, 10L, 12L, 10L, 21L, 8L, 3L, 2L, 3L, 0L,
+1L), Tuesday = c(0L, 1L, 0L, 0L, 0L, 0L, 2L, 16L, 21L, 6L, 8L,
+9L, 10L, 4L, 10L, 15L, 13L, 18L, 10L, 6L, 5L, 4L, 0L, 1L), Wednesday = c(0L,
+0L, 1L, 0L, 0L, 1L, 5L, 10L, 16L, 8L, 6L, 5L, 6L, 12L, 10L, 13L,
+21L, 18L, 7L, 11L, 12L, 6L, 1L, 2L), Thursday = c(0L, 1L, 2L,
+0L, 0L, 0L, 4L, 7L, 13L, 6L, 8L, 9L, 7L, 9L, 12L, 18L, 26L, 26L,
+16L, 10L, 5L, 3L, 3L, 1L), Friday = c(5L, 1L, 1L, 0L, 0L, 2L,
+4L, 8L, 18L, 5L, 4L, 8L, 8L, 10L, 14L, 18L, 24L, 24L, 21L, 10L,
+8L, 7L, 5L, 7L), Saturday = c(1L, 3L, 3L, 0L, 0L, 1L, 1L, 1L,
+3L, 7L, 14L, 21L, 24L, 22L, 16L, 24L, 17L, 18L, 13L, 10L, 6L,
+5L, 5L, 7L), Sunday = c(1L, 1L, 1L, 1L, 0L, 2L, 0L, 0L, 3L, 8L,
+7L, 23L, 22L, 16L, 20L, 23L, 21L, 14L, 13L, 6L, 5L, 3L, 3L, 1L
+)), .Names = c("Monday", "Tuesday", "Wednesday", "Thursday",
+"Friday", "Saturday", "Sunday"), class = "data.frame", row.names = c("0",
+"1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12",
+"13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23"
+))
diff --git a/data/crashp.R b/data/crashp.R
new file mode 100644
index 0000000..4b26d34
--- /dev/null
+++ b/data/crashp.R
@@ -0,0 +1,18 @@
+crashp <-
+structure(list(Monday = c(0L, 2L, 0L, 0L, 2L, 1L, 2L, 3L, 8L,
+4L, 3L, 12L, 7L, 7L, 7L, 15L, 9L, 16L, 1L, 3L, 1L, 1L, 0L, 0L
+), Tuesday = c(0L, 0L, 0L, 0L, 0L, 0L, 2L, 9L, 11L, 10L, 7L,
+6L, 13L, 5L, 4L, 26L, 11L, 10L, 8L, 3L, 4L, 0L, 2L, 1L), Wednesday = c(1L,
+1L, 0L, 0L, 0L, 0L, 5L, 2L, 22L, 4L, 6L, 7L, 9L, 4L, 6L, 27L,
+16L, 11L, 8L, 6L, 6L, 2L, 1L, 1L), Thursday = c(0L, 0L, 1L, 2L,
+0L, 0L, 0L, 5L, 14L, 6L, 5L, 6L, 11L, 10L, 5L, 22L, 11L, 10L,
+7L, 3L, 7L, 2L, 7L, 6L), Friday = c(0L, 1L, 1L, 1L, 1L, 0L, 1L,
+5L, 14L, 6L, 6L, 5L, 7L, 5L, 6L, 19L, 15L, 16L, 19L, 2L, 7L,
+8L, 5L, 3L), Saturday = c(3L, 4L, 2L, 1L, 0L, 0L, 0L, 5L, 3L,
+3L, 9L, 6L, 10L, 5L, 8L, 14L, 11L, 5L, 11L, 5L, 6L, 7L, 5L, 6L
+), Sunday = c(8L, 7L, 3L, 3L, 1L, 1L, 3L, 1L, 1L, 2L, 4L, 3L,
+4L, 2L, 3L, 6L, 3L, 3L, 7L, 4L, 4L, 1L, 0L, 1L)), .Names = c("Monday",
+"Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"
+), class = "data.frame", row.names = c("0", "1", "2", "3", "4",
+"5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15",
+"16", "17", "18", "19", "20", "21", "22", "23"))
diff --git a/data/crashtr.R b/data/crashtr.R
new file mode 100644
index 0000000..7eb0505
--- /dev/null
+++ b/data/crashtr.R
@@ -0,0 +1,18 @@
+crashtr <-
+structure(list(Monday = c(0L, 1L, 0L, 0L, 3L, 3L, 6L, 8L, 10L,
+10L, 10L, 11L, 4L, 4L, 4L, 5L, 11L, 6L, 3L, 1L, 1L, 1L, 0L, 2L
+), Tuesday = c(1L, 1L, 3L, 0L, 0L, 1L, 3L, 12L, 11L, 9L, 8L,
+6L, 7L, 9L, 7L, 13L, 5L, 5L, 0L, 1L, 1L, 1L, 0L, 2L), Wednesday = c(1L,
+1L, 2L, 2L, 3L, 0L, 2L, 13L, 5L, 7L, 10L, 10L, 7L, 6L, 11L, 7L,
+10L, 9L, 4L, 2L, 1L, 3L, 1L, 0L), Thursday = c(1L, 0L, 1L, 1L,
+4L, 5L, 8L, 7L, 2L, 8L, 8L, 11L, 8L, 10L, 9L, 5L, 7L, 5L, 3L,
+0L, 4L, 3L, 2L, 2L), Friday = c(0L, 2L, 1L, 1L, 2L, 4L, 5L, 8L,
+7L, 10L, 7L, 10L, 7L, 3L, 9L, 11L, 17L, 7L, 10L, 3L, 1L, 2L,
+4L, 3L), Saturday = c(2L, 0L, 3L, 0L, 3L, 1L, 1L, 2L, 3L, 1L,
+6L, 4L, 2L, 7L, 2L, 3L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L), Sunday = c(1L,
+2L, 2L, 3L, 0L, 0L, 2L, 2L, 2L, 3L, 1L, 5L, 3L, 1L, 4L, 1L, 3L,
+4L, 0L, 1L, 2L, 0L, 0L, 0L)), .Names = c("Monday", "Tuesday",
+"Wednesday", "Thursday", "Friday", "Saturday", "Sunday"), class = "data.frame", row.names = c("0",
+"1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12",
+"13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23"
+))
diff --git a/data/gala.R b/data/gala.R
new file mode 100644
index 0000000..27cbaec
--- /dev/null
+++ b/data/gala.R
@@ -0,0 +1,28 @@
+gala <-
+structure(list(Species = c(58, 31, 3, 25, 2, 18, 24, 10, 8, 2,
+97, 93, 58, 5, 40, 347, 51, 2, 104, 108, 12, 70, 280, 237, 444,
+62, 285, 44, 16, 21), Endemics = c(23, 21, 3, 9, 1, 11, 0, 7,
+4, 2, 26, 35, 17, 4, 19, 89, 23, 2, 37, 33, 9, 30, 65, 81, 95,
+28, 73, 16, 8, 12), Area = c(25.09, 1.24, 0.21, 0.1, 0.05, 0.34,
+0.08, 2.33, 0.03, 0.18, 58.27, 634.49, 0.57, 0.78, 17.35, 4669.32,
+129.49, 0.01, 59.56, 17.95, 0.23, 4.89, 551.62, 572.33, 903.82,
+24.08, 170.92, 1.84, 1.24, 2.85), Elevation = c(346, 109, 114,
+46, 77, 119, 93, 168, 71, 112, 198, 1494, 49, 227, 76, 1707,
+343, 25, 777, 458, 94, 367, 716, 906, 864, 259, 640, 147, 186,
+253), Nearest = c(0.6, 0.6, 2.8, 1.9, 1.9, 8, 6, 34.1, 0.4, 2.6,
+1.1, 4.3, 1.1, 4.6, 47.4, 0.7, 29.1, 3.3, 29.1, 10.7, 0.5, 4.4,
+45.2, 0.2, 0.6, 16.5, 2.6, 0.6, 6.8, 34.1), Scruz = c(0.6, 26.3,
+58.7, 47.4, 1.9, 8, 12, 290.2, 0.4, 50.2, 88.3, 95.3, 93.1, 62.2,
+92.2, 28.1, 85.9, 45.9, 119.6, 10.7, 0.6, 24.4, 66.6, 19.8, 0,
+16.5, 49.2, 9.6, 50.9, 254.7), Adjacent = c(1.84, 572.33, 0.78,
+0.18, 903.82, 1.84, 0.34, 2.85, 17.95, 0.1, 0.57, 4669.32, 58.27,
+0.21, 129.49, 634.49, 59.56, 0.1, 129.49, 0.03, 25.09, 572.33,
+0.57, 4.89, 0.52, 0.52, 0.1, 25.09, 17.95, 2.33)), .Names = c("Species",
+"Endemics", "Area", "Elevation", "Nearest", "Scruz", "Adjacent"
+), class = "data.frame", row.names = c("Baltra", "Bartolome",
+"Caldwell", "Champion", "Coamano", "Daphne.Major", "Daphne.Minor",
+"Darwin", "Eden", "Enderby", "Espanola", "Fernandina", "Gardner1",
+"Gardner2", "Genovesa", "Isabela", "Marchena", "Onslow", "Pinta",
+"Pinzon", "Las.Plazas", "Rabida", "SanCristobal", "SanSalvador",
+"SantaCruz", "SantaFe", "SantaMaria", "Seymour", "Tortuga", "Wolf"
+))
diff --git a/data/mmt.R b/data/mmt.R
new file mode 100644
index 0000000..73ebb91
--- /dev/null
+++ b/data/mmt.R
@@ -0,0 +1,328 @@
+mmt <-
+c(38.1, 32.4, 34.5, 20.7, 21.5, 23.1, 29.7, 36.6, 36.1, 20.6,
+20.4, 30.1, 38.7, 41.4, 37, 36, 37, 38, 23, 26.7, 27.5, 21.7,
+22.9, 26.2, 36.5, 41.8, 21.5, 19.2, 25, 28.9, 23.2, 31.5, 36.2,
+38.2, 26.4, 20.9, 21.5, 30.2, 33.4, 32.6, 22.2, 21.7, 30, 35.7,
+32.8, 39.3, 25.5, 23, 19.9, 21.3, 20.8, 21.7, 23.8, 29, 23.7,
+21.3, 28.5, 33.6, 34.6, 34.2, 27, 24.2, 19.9, 19.7, 21.5, 30.6,
+30, 19, 19.6, 20.6, 23.6, 17.9, 17.3, 21.4, 24.1, 20.9, 30.1,
+32.6, 21.3, 19.5, 19.9, 21, 25.4, 17.5, 20.4, 26.8, 25.8, 20.9,
+19.4, 25.8, 26.3, 29.6, 30.3, 23.6, 28.4, 20.7, 24.1, 27.3, 23.2,
+18.3, 24.6, 27.4, 20.4, 18.1, 25.2, 19.8, 21, 23.7, 19.6, 18.1,
+20.8, 26, 18.4, 22, 14.4, 19.9, 22.6, 13.7, 15.9, 21.2, 23.7,
+24, 17.2, 23.2, 25.2, 17.2, 16, 15.6, 13.4, 16, 16.8, 14.6, 19.4,
+21, 19.5, 18.5, 13.3, 13.7, 14.3, 14.1, 11.4, 13.6, 16.6, 17.6,
+14.6, 17.2, 14.4, 16.4, 17.3, 17.6, 17.2, 17.7, 14.2, 16.6, 15.7,
+13.7, 14.7, 13.1, 12.9, 15.4, 11.9, 15.2, 15.3, 16.5, 16.1, 11.7,
+11.2, 11.5, 10.8, 16.1, 14.8, 13.6, 13.8, 9.7, 10.7, 11, 15.3,
+15.3, 17, 16, 16.3, 15.7, 14.5, 10.8, 10.5, 13.4, 12.2, 13.2,
+13, 12.4, 13.1, 9.8, 10.5, 13.4, 11, 13.1, 15, 16.7, 16.1, 18.2,
+15.7, 17.7, 15.9, 15.1, 15.2, 14.7, 13.3, 14.5, 11.1, 13.1, 13.7,
+14.6, 12.9, 12.8, 15.2, 14.5, 17.2, 14.5, 14.4, 11, 13.1, 13.6,
+14.6, 12.7, 13.6, 12.7, 15.5, 17.4, 15.2, 14.2, 17.7, 19.2, 12.5,
+14.2, 15.3, 15.7, 17, 19, 13.1, 13.2, 13.2, 15.7, 14.1, 15.6,
+15.5, 15.9, 15.1, 16, 19.4, 21.5, 23.7, 18.7, 23.8, 18, 16.2,
+18.5, 20.6, 18.3, 22.5, 26.9, 19.4, 15.9, 20.5, 21.2, 19.5, 14.7,
+17.6, 15.8, 17.7, 14.3, 16.8, 18.6, 21.9, 21.4, 20.8, 14, 17,
+23, 26.4, 19.6, 22.7, 26.9, 14.7, 15.2, 19.8, 26.9, 20.2, 14.3,
+14.8, 18.5, 21.7, 21.4, 21.8, 18.2, 15.8, 15.3, 18.5, 19.2, 28.5,
+32.2, 21.8, 22.1, 20.7, 17, 24.7, 26.2, 29, 21.6, 17.1, 16.9,
+19.1, 24.7, 25.4, 19.8, 18.2, 16.3, 17, 17.7, 15.5, 14.7, 15.8,
+19.9, 20.4, 23.3, 20.2, 28.8, 31.2, 17.4, 18.5, 26.8, 34.3, 30.1,
+20.5, 20.5, 19.8, 27, 21, 33, 22.6, 28.3, 21.1, 19, 17.3, 27,
+30.2, 24.8, 17.9, 17.9, 20.7, 30.9, 36.2, 21, 20.2, 21.3, 24.2,
+21, 20.7, 17.8, 19.6, 22.6, 20.5, 24.1, 22.2, 27, 33.6, 26.6,
+20.6, 24.5, 19.8, 22.6, 29.2, 20.3, 23, 24.4, 38, 40.5, 24.2,
+20.2, 21.8, 27, 35.2, 25.2, 32.7, 35.9, 38.9, 26.5, 21.8, 37.9,
+43.3, 19, 19.7, 21.4, 32, 33.3, 22.2, 21.3, 20.8, 22.3, 22.5,
+21.4, 23, 35.1, 40.3, 39, 21.1, 25.4, 23.6, 28.1, 37, 39.3, 39.4,
+25.8, 27.7, 23, 24, 26.1, 21.8, 24.2, 22.3, 19.7, 20.8, 17.9,
+20.1, 20.9, 21.2, 20.4, 29, 34.7, 34, 30.9, 29.6, 26.1, 18.5,
+21.2, 22.4, 21.4, 30.5, 32, 32.7, 27.4, 18.9, 19.4, 22.2, 30.2,
+31.8, 31.4, 18.5, 22.2, 27.5, 25.8, 25.2, 19.1, 19.6, 21.4, 20.7,
+28.3, 23, 16.5, 18.8, 20, 20.6, 19.8, 20.3, 26.6, 21.5, 22.3,
+26.5, 27.1, 30, 28.2, 17.3, 16.8, 17.5, 19.2, 20.3, 20.6, 23.7,
+24.3, 26.4, 16, 14.5, 14.2, 16.5, 15.7, 16, 16.7, 19.8, 22.5,
+22.9, 18.8, 15.7, 14.7, 15.3, 15.3, 18, 15.1, 15.1, 13.6, 15.3,
+18, 18.6, 22, 22.1, 17.8, 15.2, 11.3, 13.5, 16.1, 16, 16.5, 16.7,
+19.6, 19.5, 18, 18.4, 12.9, 14.9, 12.4, 11.9, 11.7, 12.9, 12.9,
+12.4, 12.5, 15.7, 15.9, 14.3, 16.9, 14, 13.6, 13.7, 16, 13.1,
+13.8, 13.4, 11.3, 10.6, 13.9, 12.7, 12.5, 11.3, 11.6, 14, 13.7,
+15.2, 14, 11.4, 14.2, 15.2, 12.6, 10.2, 13.9, 13.7, 13.6, 13.2,
+15.4, 12.9, 11.9, 13.3, 13.3, 10.5, 10, 11, 8.3, 10.7, 11.2,
+11.7, 14.2, 14.5, 16, 16.7, 14.4, 17, 14.6, 11.6, 12.8, 14, 17.1,
+15.4, 15.5, 15.1, 16.9, 14.2, 15.9, 18.2, 19.5, 19.5, 19.8, 15.6,
+14.1, 17, 14.3, 13.1, 13.7, 18.2, 19.8, 22.3, 17.8, 15, 22.6,
+23.7, 15.8, 15, 23.5, 25.7, 26.5, 15.1, 13.9, 20.5, 21.4, 14.9,
+15, 18.9, 11.7, 15.5, 17.7, 11, 14.9, 13.1, 13.7, 15.4, 15.6,
+23.9, 23.7, 24.3, 15.9, 13.5, 15.3, 19.3, 20.5, 22.5, 23.2, 16.3,
+14.8, 13.7, 18.4, 12.3, 13.3, 16.2, 25.6, 24.1, 13.8, 15.9, 21.2,
+21.7, 15.6, 13.5, 15, 21.2, 25.4, 19, 15, 16.4, 15.6, 13.7, 22,
+17.4, 15.1, 14.6, 16.9, 15.9, 21.3, 29.7, 33, 22.7, 21.6, 30.3,
+19, 14.6, 15.9, 20.4, 29.3, 33.7, 35.2, 38.1, 24.5, 26.3, 25,
+17.9, 18.1, 23.3, 31.2, 32.8, 28, 19.7, 22.1, 16.3, 20.1, 21.7,
+19.6, 30.8, 36.8, 38.1, 17.6, 21.2, 17.2, 20.7, 26.1, 34.5, 18.6,
+20.1, 22.4, 19.1, 19.2, 28, 17.9, 18.1, 16, 18.5, 20.6, 33, 35.2,
+24, 26, 28, 23, 19, 19.1, 18, 19.4, 32.5, 32.6, 18.1, 19.7, 28,
+28, 23.8, 31.4, 22, 35.8, 23, 18.9, 25.4, 28, 19.2, 21.5, 28.8,
+37.3, 24.6, 21.3, 17.9, 17.5, 18.7, 18.1, 19.7, 22.1, 19.6, 18,
+19.9, 28.5, 23, 37, 19, 19.4, 35, 27.7, 23.9, 25.6, 27.7, 24.3,
+38.6, 41.2, 22, 22.5, 22.3, 25.5, 36.6, 35.7, 43.2, 28, 25.5,
+21.6, 21.8, 23.3, 34.3, 31.2, 43, 20.2, 20.3, 22.2, 24, 34.4,
+32.9, 21.6, 26.5, 33.2, 38.6, 24.4, 28.3, 35.2, 28.7, 35.6, 37.2,
+25.5, 31.5, 30.2, 29, 21.4, 28.5, 32.1, 23.5, 20, 21, 20, 21.8,
+24.3, 30.9, 21.6, 22.2, 20.2, 20.5, 17.6, 18.6, 17.7, 18, 18.6,
+18.6, 25.9, 19.8, 18.7, 17.9, 17.3, 18.1, 14.6, 14.1, 15.7, 16,
+21.6, 21.4, 23.8, 21.7, 16.6, 16.7, 18.1, 14.5, 14.7, 16.4, 16.5,
+22.7, 23.8, 24.6, 23.6, 19.8, 19.8, 18, 17.6, 19, 17.5, 17, 19.9,
+18.5, 18.7, 17.8, 17, 17.6, 18.7, 17, 18.8, 17.5, 19.5, 16.6,
+19.1, 21.2, 20.2, 17.9, 14.3, 16, 17.7, 18.6, 19.1, 13.7, 17.5,
+16.1, 15.2, 17.3, 14.9, 18.4, 17.2, 12.2, 13.9, 14, 15.3, 15,
+15.1, 16.6, 16.5, 16.1, 13.5, 13, 13.6, 11.8, 14.5, 14, 14.1,
+13.5, 12.1, 11.9, 11.9, 14.2, 14, 17.9, 19.1, 15.1, 13.8, 13.7,
+10, 10.3, 10.4, 12, 8.5, 14.4, 13.1, 14, 11.8, 13.9, 12.9, 11,
+11.8, 10.7, 11.2, 10.3, 12.2, 12.4, 14.7, 15.1, 15, 14, 15.2,
+14.2, 8.7, 11, 14.2, 15.2, 12.2, 13, 14.7, 17.2, 13.7, 14.8,
+13.8, 13.7, 13.1, 12.7, 12.3, 12.4, 13, 13.5, 13.5, 14.7, 13.4,
+15.9, 16, 15.3, 15.5, 18.2, 18.6, 18.5, 19.2, 14.2, 12.8, 13.1,
+15.4, 17.3, 14.5, 19.5, 20.2, 16.3, 13, 13.3, 13.3, 19, 16.8,
+19.2, 18.5, 19, 17.4, 15.3, 15.8, 18.7, 17.8, 16.5, 15.3, 13.2,
+12, 14, 9.5, 13, 17.6, 19.6, 17.2, 17.3, 17, 16.3, 17.3, 20.3,
+16.9, 13.5, 13.3, 20.7, 19.8, 20.8, 19.8, 19.5, 23.4, 14.2, 14.2,
+16.6, 17, 15.6, 14.8, 17.2, 20.2, 16.7, 18.2, 18.1, 22.5, 23.4,
+15.3, 13.7, 15.6, 18.9, 15.1, 16.6, 17.7, 23, 16.8, 21.3, 17.9,
+23.6, 25.3, 14.5, 16.9, 16.3, 17.7, 18, 21.7, 22.3, 15.5, 16.7,
+24.3, 23.5, 27.7, 18, 15.6, 19, 19.5, 20.1, 31.3, 15.6, 12.8,
+13.8, 15.8, 16.4, 17.6, 28.2, 25.5, 22.7, 28, 17, 22.6, 24.5,
+25.6, 27.5, 18.5, 18.2, 18.8, 17.8, 18.9, 24, 20.9, 16.8, 18.5,
+20.6, 24.9, 28.1, 25.7, 19.2, 32.7, 21.3, 20.4, 20.5, 22.8, 35.3,
+38.7, 21.4, 23.3, 22, 31.7, 23.4, 19.1, 19.5, 30, 32.4, 33.6,
+30, 27.7, 28.1, 23.7, 20.6, 32.3, 31.2, 21.5, 23, 21.9, 23, 21.6,
+27.2, 29.2, 32, 22.5, 23.9, 19.7, 19.5, 22, 22.1, 19.7, 19.4,
+30.5, 28.5, 20.2, 19, 20.2, 22.6, 28.4, 18.6, 20.3, 19, 26.2,
+30, 24.2, 23.4, 34.7, 20.4, 20.7, 20, 19.5, 31.6, 33.7, 23.5,
+24.6, 33.9, 22.4, 24.3, 29.5, 31.7, 32.9, 20.5, 28.5, 27.3, 19.5,
+21.4, 33.2, 35, 18.1, 17.2, 18.1, 19.5, 21.5, 29.2, 33.8, 20.8,
+17.5, 21, 27.4, 21.2, 27.1, 22.8, 18.7, 19, 19.7, 28.7, 25.5,
+18.4, 18.5, 23.6, 26.9, 28, 24.7, 21.5, 17.5, 16.1, 18.7, 24.5,
+27.6, 17.4, 18.2, 19.7, 18.5, 17.7, 20.7, 21.2, 17.2, 21.5, 22,
+23.2, 25.4, 24.6, 24.8, 25, 25, 18.2, 21.5, 16.2, 17.3, 15.5,
+16.4, 19.6, 18.5, 16.5, 16, 17.7, 19.2, 16.8, 16.5, 22.5, 22.7,
+23.5, 19, 15.5, 13.5, 14, 14.7, 15.6, 17, 18, 19.6, 17.1, 16.7,
+16.9, 18.2, 19.4, 22, 21.7, 22, 22.2, 20, 16.5, 15.6, 15, 16.6,
+18.7, 18.7, 18.1, 15.6, 20.1, 20.3, 15.2, 17.1, 16.3, 18.3, 18.3,
+19, 20, 15.7, 14.9, 13.1, 13.6, 13.8, 15, 16.9, 18.3, 16.4, 14,
+12.5, 12.8, 12.2, 14.2, 14.2, 15.5, 15.1, 14.4, 12.9, 10.8, 11.7,
+13.3, 12.4, 13.4, 10.6, 7, 12.6, 13.2, 12.1, 10.3, 15.7, 11.7,
+15.1, 15.8, 15, 11.6, 11.4, 10, 14.7, 13.6, 13.2, 11, 13, 12,
+10.6, 13.3, 14.5, 13.5, 15.9, 12.8, 11.7, 13, 14, 16, 17, 16.3,
+17.6, 19.5, 14, 17.5, 19.2, 18.5, 15.1, 15.4, 13.3, 13.1, 12.8,
+12, 13.3, 12.9, 15.4, 13.4, 15.1, 14.7, 14.9, 16.8, 15.8, 17,
+14.1, 16.5, 12.4, 12.7, 14.6, 17.6, 19.6, 17.3, 17.9, 17.8, 13,
+13.2, 17.1, 19.6, 16.3, 16.9, 22.2, 12, 14.2, 16.8, 16.9, 22,
+16.4, 14.2, 11.8, 15.2, 12.6, 13.2, 14.4, 15, 16.5, 13.3, 14.5,
+16.7, 13.8, 12.4, 13.7, 19.9, 24, 18.7, 18.9, 20.2, 19.9, 24,
+27.3, 24.2, 18, 21.3, 26.6, 18.5, 18.2, 21, 12, 12.2, 14.1, 16.5,
+15.5, 15, 20.8, 27.3, 28.4, 18.9, 18.2, 25.5, 15.9, 15.5, 21.4,
+23.2, 26.8, 23.9, 19.1, 15.2, 14.9, 21.3, 25.4, 28.7, 32.3, 25.7,
+19, 19.4, 15, 16.5, 16.3, 28.9, 27.4, 32.9, 27, 23.7, 22.8, 16.5,
+14, 18, 28.2, 32.8, 28, 22.2, 21.8, 18.7, 25.5, 27.6, 16.5, 15.9,
+16.8, 17.9, 23.1, 22.8, 24.2, 27.7, 21.3, 23.5, 17.9, 24.2, 20,
+23.7, 27, 24.6, 25, 20, 22.7, 22.6, 29.2, 27.8, 26.5, 18.9, 23.2,
+24.2, 28.1, 30.3, 25.6, 20.7, 19.6, 21.7, 28, 21.3, 21.4, 21.4,
+22.6, 21.7, 21.8, 19.8, 30.4, 36, 42.2, 21.1, 20, 18.4, 19.1,
+19.3, 18.8, 22.8, 20, 21.9, 20.1, 23.2, 34.5, 22.1, 21.6, 30,
+34, 22.2, 19.6, 25.4, 37.1, 30.2, 29.1, 21.1, 19.1, 22.2, 24.4,
+20.3, 28, 32.2, 23.4, 21.4, 33.5, 18.6, 19.2, 21.1, 29.5, 25.7,
+32.4, 28.5, 20.6, 25.2, 20.8, 17.5, 20.1, 22.4, 24, 21, 26.8,
+17.7, 19.2, 19.1, 26.4, 29.7, 30.2, 33, 33.3, 33, 36.1, 34, 34.7,
+20.4, 19.4, 21.3, 26.8, 28.5, 25.6, 28.6, 32.5, 18.7, 23.2, 23.7,
+27.2, 23.5, 19, 22.6, 30.1, 23.3, 19.7, 22.3, 21.6, 23.5, 22.6,
+24, 22.9, 22.4, 28.8, 30, 32.4, 33.8, 26.8, 18.6, 17, 15.3, 15.3,
+16.8, 16, 16.6, 17.2, 21.5, 19.7, 19.8, 21.2, 23.7, 23.9, 25.6,
+24.7, 20.2, 23.5, 22.2, 24.5, 25.5, 18.8, 18, 14.9, 18.3, 19.2,
+18.9, 19.2, 21.5, 18, 13.1, 14.9, 15.2, 13.5, 15.2, 14.2, 14.6,
+16, 15.1, 15.6, 17.5, 18.8, 20.4, 20.3, 19.1, 14.9, 14.6, 13.7,
+13, 13.4, 12.7, 14.2, 13.7, 12.7, 14.1, 12.8, 17.1, 14, 14.5,
+15.7, 14.1, 13.7, 13.3, 13.4, 11.9, 13.6, 13.3, 12.2, 10.8, 13,
+14.1, 14.4, 13.3, 15.7, 15.6, 19.3, 16.3, 19.4, 17.7, 16.9, 14.4,
+14.3, 13, 12.3, 12.8, 12.7, 10.8, 14.6, 13.3, 12.7, 12.9, 15.7,
+11.4, 12, 12.2, 11.8, 13.1, 15.5, 11.3, 12, 12.8, 11.6, 12.4,
+12.6, 17.4, 13.4, 11.3, 12.1, 12.7, 12.5, 13.2, 17.1, 15.9, 15,
+13.2, 12.6, 11.8, 12, 14, 13.7, 16.1, 19.8, 15.7, 13.2, 13.6,
+13.5, 11.7, 11.4, 15.4, 17.4, 15.3, 16.7, 16.5, 15.3, 18.8, 15.3,
+15.6, 18.7, 17.4, 12.6, 12.9, 13.5, 13.6, 13.7, 15.1, 14.3, 21.3,
+21.5, 23.2, 15.3, 19.1, 15.3, 14.8, 15.6, 14.6, 14.7, 13.7, 13.6,
+13.5, 14.6, 15.6, 20.7, 25.1, 19.7, 12.5, 14.1, 13.3, 14.7, 20.5,
+21.8, 16.2, 25.8, 26.6, 19.4, 21.1, 20.2, 20.9, 18.1, 26.3, 19.2,
+16.1, 17.9, 21.1, 25.7, 23.7, 19.7, 16.8, 14.3, 14.2, 20.5, 22.2,
+15.4, 17.2, 18, 20.5, 23.8, 24.1, 25.8, 19.4, 18.3, 22.3, 17.4,
+20.2, 27.5, 23.9, 22.8, 22.7, 21.5, 21.9, 30.3, 17.7, 26.8, 28.3,
+24.2, 18.2, 21.8, 17.8, 16.1, 18.7, 17.1, 15.7, 20.8, 26.2, 26.2,
+21.3, 20.5, 20.7, 19.9, 19.1, 21.7, 25.6, 23.4, 26.6, 23.5, 25,
+25.8, 28.1, 27.8, 23.2, 21, 21.1, 18.3, 19.6, 22.2, 30.6, 23.1,
+18.4, 22.4, 30.9, 25.2, 21.4, 20.4, 18.7, 22, 26.6, 20.4, 18.5,
+22.5, 32.7, 27.8, 19, 21, 18.5, 22.2, 23.3, 19.8, 24.7, 32.4,
+20, 18.2, 20.4, 25.5, 24.4, 21.5, 32.4, 27.2, 19.1, 19.7, 21.3,
+20.3, 21, 22.6, 21.5, 24.2, 25, 20.6, 19.6, 20, 29.5, 35.5, 21.1,
+23.7, 33.7, 21.8, 21.2, 23.7, 26.4, 21.8, 31.1, 38.3, 22.7, 21.5,
+20.8, 30, 34.5, 36.5, 28.9, 27.2, 25.5, 22.3, 21.8, 22.4, 19.7,
+19.4, 18.5, 24, 25.8, 25.5, 26.6, 34.5, 21.1, 20.4, 24.9, 21.9,
+25.5, 38, 36.4, 19, 18.4, 17.7, 21.3, 25.7, 30.5, 32.6, 23, 20.5,
+21.8, 26.4, 25.8, 36.7, 27.2, 27.7, 20.4, 18.8, 22.6, 32.7, 22.8,
+24.4, 32.6, 29.1, 18.3, 19.6, 28.6, 26.8, 30.7, 16.8, 16.1, 18.2,
+19.3, 19.8, 26.8, 17.5, 18.2, 24.9, 27.3, 24.8, 18, 16.8, 14.6,
+18.7, 18.8, 19.5, 21.1, 20, 18.5, 17, 16.2, 16.7, 14.9, 15.5,
+15.3, 15.7, 17.3, 17.1, 16.2, 15.4, 15, 19, 19.7, 19.7, 15.1,
+16.4, 21.7, 19.5, 17.2, 16.4, 18, 19.5, 16.2, 16, 14.4, 15.4,
+18.2, 13.8, 16.1, 13.7, 16.8, 15.9, 16.7, 14.6, 15.2, 14, 14.1,
+16, 16.7, 15.2, 13.3, 14, 15.9, 14.5, 14.2, 13, 13.7, 14.7, 14.6,
+13.9, 14.2, 16.8, 14.7, 12.1, 10.2, 10.8, 11.9, 11.8, 12, 12.7,
+13.6, 13, 13.1, 11.1, 12.8, 13.3, 11.7, 14.3, 15.4, 14.2, 13,
+13.1, 12.4, 11.2, 9.2, 12.4, 12.7, 13.7, 12.7, 15, 16.4, 14,
+13.2, 13.6, 15.4, 13.8, 15, 14.5, 14.8, 14, 8.2, 10, 11.2, 13.3,
+13.7, 15, 12.5, 15.3, 16.4, 17.6, 15, 14.8, 13.8, 14.8, 16.6,
+17.3, 12.2, 12.1, 13.9, 16.8, 16.8, 14.5, 15.1, 13.8, 13, 14.5,
+18, 20.2, 21, 16.5, 12.2, 15, 15.7, 17.7, 18.9, 14.9, 11.4, 15.4,
+15.2, 17.6, 17.2, 21.6, 18.7, 13.9, 13.3, 14.6, 16.5, 17.7, 13.6,
+15.2, 15.5, 16.5, 18.8, 13.6, 14, 13.8, 16, 19.3, 16.9, 14.3,
+16.7, 21.3, 19.4, 18.7, 14.7, 16.2, 15.5, 23.4, 22, 16.7, 14.4,
+16.4, 19.6, 18.3, 14.4, 14.8, 17.1, 15.2, 19.5, 21.1, 21.7, 14.6,
+18.2, 17.8, 23.5, 20.8, 15.7, 16.1, 21, 15, 16, 16, 14.8, 14.6,
+16.1, 21.6, 23.8, 27, 20.7, 19.7, 20, 28.8, 30, 33.1, 18.9, 16.3,
+19, 18.3, 21.2, 16.5, 16.7, 19.5, 29.1, 19.7, 22.6, 20.7, 14,
+15.5, 14.8, 19.2, 27.4, 18.5, 18.5, 31.5, 16.7, 15.7, 16.7, 22,
+25.7, 29.2, 27.7, 29.4, 19.9, 27.8, 31.2, 22.8, 17.6, 19.4, 27.5,
+18.4, 17.3, 17.5, 17.3, 21.9, 21.1, 17.9, 19.6, 23.6, 18.2, 20,
+17.5, 18.9, 18.7, 28.9, 21.6, 20.7, 21.8, 21.9, 35, 26, 20, 33.6,
+20.1, 23.5, 23.6, 24.5, 27, 17, 18.9, 21.8, 27.5, 22.8, 26.5,
+22.7, 22.5, 37.8, 22, 27.6, 21.2, 22.6, 18.7, 17.5, 17.7, 30.3,
+25.6, 32.7, 22.9, 21, 22.5, 25.7, 37.8, 21.3, 38.7, 21.5, 18.8,
+21.7, 30.6, 40, 20.8, 23.7, 22, 20.5, 23.3, 34.2, 36.7, 21.3,
+28.7, 27.6, 28.8, 37, 21.7, 22.7, 18.5, 25.4, 28.7, 18.4, 24.3,
+33, 16.5, 18, 22.1, 24.3, 24.7, 30.1, 22.7, 22.9, 16.4, 16.2,
+19, 27, 21.7, 16.7, 17.7, 21.6, 33.8, 31.8, 32, 17.6, 20.2, 17.3,
+17, 15.6, 17.1, 21.9, 20.1, 29.7, 20.7, 16.5, 15.8, 17.4, 19.4,
+21.1, 19.9, 28.6, 29.7, 29.7, 19.9, 17.4, 18.6, 23, 25.1, 26.6,
+19, 21.2, 16.5, 17.5, 18.2, 24, 22.8, 15.1, 16.1, 15.8, 17, 19.7,
+24.9, 26.9, 27.9, 18.5, 17, 16.4, 20.8, 16.7, 15.8, 18.5, 16.5,
+16.6, 17.4, 21.7, 22.7, 23.3, 22.5, 21.9, 17.9, 20.1, 16, 16.6,
+15.6, 15.2, 14.3, 15, 14.5, 16.9, 18.3, 18, 13.5, 13.7, 10.9,
+13.6, 15.2, 14.1, 13.2, 13.8, 14.6, 16.2, 14.5, 14.3, 15.6, 15.9,
+19.3, 16, 20.3, 15.8, 16.2, 12.8, 15, 13.4, 13.2, 13.8, 15.7,
+16.2, 14.9, 18.3, 15.3, 15.2, 14.2, 12, 13.7, 13.8, 12.3, 12.8,
+13.1, 14.5, 14, 13.3, 15.7, 16, 16.2, 18, 16.3, 15.8, 13.3, 13.7,
+11.5, 13.7, 12.3, 12.6, 12.7, 14.8, 14.9, 12, 12.8, 12.4, 13,
+13.1, 12.9, 13.8, 15.2, 13.1, 11.1, 10.8, 13.1, 15, 15.7, 16.4,
+14.1, 15.5, 14.8, 13.9, 15.4, 14.8, 10.9, 12.9, 12.8, 18.6, 14.6,
+18, 18.5, 11.8, 13.3, 14.5, 14, 13.3, 15, 18.4, 13.6, 15.3, 16.1,
+19, 15.5, 16.2, 14.9, 17.9, 10.1, 12.1, 13.1, 12.9, 15.4, 15.6,
+14, 14.2, 17.4, 14, 13.6, 15.7, 14, 15, 16.4, 15.1, 17.1, 18.9,
+23.8, 25.9, 15, 17, 19, 26.5, 26.9, 23.7, 30.7, 29.6, 25.9, 20.2,
+14.8, 13.9, 15.7, 22.8, 18.7, 17, 19, 14.1, 17.6, 19, 15.8, 12.9,
+14.5, 15.9, 21.9, 24.1, 19.4, 15.4, 17.6, 20.5, 19.4, 18, 16.8,
+15.8, 16, 22.1, 26.8, 26.3, 17.5, 19, 30.3, 32.7, 35, 36.1, 18.1,
+18.1, 16.8, 18.6, 20.5, 27.8, 18.8, 16.8, 17.2, 19.6, 18.8, 18.5,
+19.8, 30.5, 28.3, 32.6, 30.1, 19.8, 19.9, 23.8, 24.3, 17.4, 18.2,
+18.4, 16.9, 24, 32.8, 18.9, 15.5, 16, 16.6, 20.8, 19.6, 18.5,
+17.6, 20.8, 21.5, 25, 21.2, 22.6, 25.6, 25.1, 32.5, 27.8, 21.5,
+20.7, 19.9, 18.4, 22.3, 24.6, 25.6, 19, 18.7, 29.2, 31.8, 18.4,
+19.2, 30.8, 32.2, 22, 21.4, 20, 20.6, 22.2, 30, 40.4, 26.3, 39,
+30.6, 39.2, 37.8, 28.8, 30.8, 38.7, 25.2, 20.6, 21, 25.6, 36.2,
+24.9, 26.2, 20, 19.7, 20.3, 25.1, 29.1, 37.5, 39.2, 33.7, 19.5,
+19.9, 21, 20.3, 22.6, 32.9, 21.3, 19.5, 19.2, 22, 27.2, 32.2,
+25.4, 25, 25.2, 26.7, 25.3, 29.9, 27.6, 20.6, 21, 21.6, 24, 23.8,
+21.9, 29.4, 37.4, 21.5, 21.1, 21, 36.3, 28.7, 19.4, 20.3, 20.8,
+30.5, 30.3, 23.4, 25.7, 15.7, 19.3, 18.8, 17, 17.6, 18.5, 21,
+26.9, 30.6, 32.7, 29.2, 25.8, 28.2, 24.7, 30.6, 30.6, 29, 26.5,
+25.5, 24.9, 20.9, 23.4, 18.1, 20.7, 20.3, 21.1, 19.5, 20.4, 17.3,
+18.5, 21, 23.5, 25.7, 23.1, 24.8, 22.4, 17.4, 18.3, 26, 25.5,
+21.8, 21.2, 25.2, 21.7, 22.6, 24.3, 17, 20.7, 22.7, 25.4, 24.9,
+25, 20.5, 20.2, 19.4, 21.4, 25.5, 19.4, 18.8, 17, 15.5, 16.2,
+16.7, 20.1, 21.4, 16.4, 17.3, 17, 18.2, 17.5, 14.9, 18, 16.7,
+17.1, 16.8, 17.7, 18.4, 18.5, 15.5, 15.4, 17.5, 16.8, 18.9, 19.4,
+20.3, 19.3, 17, 16.4, 13.3, 13.6, 14.1, 14.8, 14.1, 17, 16, 16.5,
+14.9, 14.4, 13.5, 11.3, 15, 13, 14.9, 14.6, 14.1, 15.5, 14.4,
+12, 17.5, 13.3, 14.3, 14, 13.5, 13.3, 12.3, 12.6, 14.1, 16.6,
+17.3, 17.1, 16.1, 15, 15.4, 14.6, 14.6, 16.8, 16.5, 15.6, 14.8,
+15.2, 15.2, 15, 14.1, 15.5, 12.6, 12.9, 15.1, 13, 15.6, 13.4,
+14.4, 14.5, 15.4, 16.7, 16.1, 16.6, 16.5, 13.8, 13.4, 11.9, 10.7,
+13.2, 13.1, 12.8, 16.2, 18, 18.9, 18.3, 19.8, 19.3, 16.2, 17.6,
+17.5, 13.2, 13.2, 13.4, 14.2, 16.1, 14.2, 14.3, 14.5, 17.1, 18.2,
+22.2, 22.4, 15.6, 16.6, 14.5, 17.7, 21, 14.3, 20, 20.2, 19.2,
+19.8, 14.8, 14.2, 17.2, 17.2, 22.1, 15.4, 16.1, 14.8, 15.1, 20.2,
+22.3, 24.6, 18.4, 18.2, 23.4, 23.8, 20.1, 20.5, 19.5, 18.8, 19.8,
+26.2, 21.2, 21.2, 17.3, 20.5, 19.2, 18, 17.7, 23.7, 16.9, 23.2,
+22.3, 18.7, 24, 16.4, 18.8, 19, 26.7, 19.3, 19.5, 21.2, 17, 24.4,
+29, 19.2, 24.6, 23.4, 22.6, 21.1, 29.1, 36.1, 23.1, 16.5, 16.2,
+21.3, 25.4, 26.4, 21.1, 21.1, 19.7, 31.2, 34.5, 22.9, 13.5, 16.2,
+15.3, 19.2, 29.4, 27.7, 19.6, 16.8, 19.9, 20.4, 18.3, 16.7, 16.9,
+18.9, 17.6, 18.6, 21.4, 28.7, 28, 22.8, 17.2, 18.7, 19.9, 23,
+30.8, 31.7, 20.8, 20.8, 25.2, 31.3, 32.1, 34.3, 33.7, 28.6, 19.2,
+20.9, 21.1, 26.1, 26.4, 36.8, 26.3, 17.9, 21.3, 19.5, 20.1, 21.7,
+19.5, 24.8, 30.2, 31.6, 19, 21.3, 20.6, 22.8, 33.2, 24.7, 21.3,
+17.9, 18.7, 25.3, 31, 33.2, 31, 18.2, 19.1, 22.2, 20.3, 19.8,
+18.3, 27.5, 29.8, 31, 32.9, 35.5, 36, 36.6, 21.7, 19.8, 26.7,
+22.9, 19.8, 24.9, 24.8, 28.8, 32.7, 33.7, 24.7, 21.6, 26.2, 33.4,
+22.9, 31.2, 23.1, 24.4, 30.7, 38.8, 21.5, 20.7, 20.3, 21.3, 30,
+37.1, 21.1, 22, 34.4, 33.8, 37.8, 38.4, 32.8, 34.4, 26.4, 27.3,
+28, 30.7, 30.7, 23.4, 26.1, 26.1, 21.8, 18.9, 18.5, 17.5, 21.5,
+25.1, 21.9, 24.2, 24.2, 27.4, 21.3, 18, 18.7, 20, 20.9, 19.3,
+19.3, 22.2, 17.7, 21.9, 25.2, 25.1, 28.5, 27.6, 19.6, 18.5, 19.2,
+20.4, 22.8, 23, 20.3, 18, 18, 20.2, 18.1, 19.2, 17.8, 19.8, 20.3,
+18.3, 18.3, 20.2, 22, 24.3, 24.7, 25.4, 19.9, 23, 21.8, 19.3,
+18.5, 20.6, 17.2, 17.5, 19.1, 18, 18.9, 23.4, 19.3, 19.2, 21.2,
+21.5, 17.2, 16.8, 16, 18.9, 20.6, 15.9, 16.7, 18.4, 16.2, 16.2,
+15.8, 16.2, 18.3, 18.5, 19.4, 17, 14.6, 11, 12.4, 14, 10.8, 13.1,
+15, 13, 17.5, 16, 10.2, 13.7, 15.4, 13.6, 12.9, 14.2, 15, 15.2,
+18.2, 12.3, 13.8, 13.9, 14.6, 14.9, 12.6, 11.2, 12.9, 14.1, 12.8,
+15.8, 16, 16, 16.6, 16.6, 16, 12.7, 12.3, 11.8, 14.1, 12.6, 12.9,
+11.5, 12.9, 11.8, 11.9, 14.2, 12, 11.7, 11.8, 13, 11.4, 11, 12.3,
+13.1, 11.4, 16, 15.3, 13.2, 15.9, 16, 19.6, 15.2, 10.6, 14.8,
+15.8, 14.1, 14.4, 10.7, 13.2, 13, 13.2, 16, 13.2, 11.8, 11.6,
+13.4, 15.2, 15.5, 13.5, 14, 18.5, 14.9, 13.9, 15.9, 11.3, 12.2,
+13.8, 13.8, 11.3, 13, 13.6, 13.2, 16, 17.2, 14.7, 15.5, 15.2,
+16, 16.6, 17.8, 19.8, 20.6, 19, 19.5, 12.6, 14.3, 14.9, 13.7,
+14.2, 18.4, 15.4, 18.7, 16.9, 16.5, 20, 17.2, 15.8, 20, 14.3,
+14.2, 14.7, 22.4, 25.3, 21.5, 15, 13.8, 15.6, 22.8, 18, 16.6,
+20, 17.5, 18.1, 17.1, 18.4, 18.8, 21.8, 19.7, 26.1, 18.7, 20.8,
+29.7, 21, 17.3, 16.8, 19.4, 16.8, 21.1, 20.4, 26.1, 15, 17, 24,
+22.5, 16.3, 17.9, 17.7, 18.4, 25.6, 28.3, 15.1, 16.8, 19.6, 15.8,
+16.9, 17.8, 18.2, 19.2, 19, 18.4, 22, 16.4, 16.7, 17.4, 20.6,
+30.4, 30.3, 35.5, 18.8, 24.7, 27.3, 29.4, 32.5, 33.9, 25, 33.6,
+25.7, 19, 18.4, 22.1, 25.2, 23.9, 28.7, 31.1, 29.1, 25.6, 23.2,
+19, 17.1, 18, 20, 32.9, 32.1, 25.2, 18.5, 18.9, 20.8, 23.7, 28,
+35.2, 23.2, 18, 19.8, 31.9, 21.5, 33, 21.6, 31.2, 36.6, 19.6,
+23.1, 21.9, 21.1, 27.6, 27.3, 30.1, 33.5, 24.2, 21.4, 20.4, 20.3,
+18.5, 29.5, 25.4, 25.2, 21.6, 21.8, 25.4, 36.6, 36.6, 28.7, 27.2,
+25.5, 23.6, 25.6, 35.9, 19.1, 15, 20.7, 21.9, 18.2, 21.9, 25.3,
+29.6, 29.9, 24.4, 20.2, 23.5, 26, 25.9, 27.4, 31.5, 35.8, 23.9,
+27.8, 26, 31.1, 35.2, 27.8, 21.3, 21.5, 23, 24.4, 27.4, 23.8,
+25.2, 28.5, 25.3, 25.6, 24.1, 19.3, 22.7, 31.2, 25.8, 21.5, 20.3,
+18.9, 25.2, 26.8, 19.2, 21.5, 30.8, 18.6, 19.8, 21.3, 21.3, 29.9,
+32, 32.4, 32.5, 32.6, 31.5, 21, 20.6, 19.7, 19.7, 20.1, 17.8,
+22.1, 28.8, 26.8, 27.1, 19.4, 19.1, 22.5, 26, 22.2, 28.4, 25.2,
+20.9, 17.6, 17.2, 17.2, 18.7, 18.6, 21.8, 16.7, 17.9, 20.8, 21.2,
+17.5, 16.6, 17.5, 18.1, 18.5, 19.8, 23, 21.2, 21.2, 23.1, 20.2,
+18.1, 16.2, 18.7, 21.2, 19.3, 21.8, 19.9, 13.6, 14.7, 18, 19.3,
+17.7, 14.6, 14.5, 14.4, 15.4, 14.5, 16.2, 14.2, 13.2, 15.2, 17.4,
+13.7, 16.8, 17.7, 16.2, 13.2, 16.3, 16.3, 14.6, 13.9, 16, 18.4,
+19, 17.4, 16.2, 16.3, 16.8, 14.6, 14.2, 13.3, 14, 11.7, 12.9,
+12.7, 14.6, 15.1, 16.1, 16.2, 13, 12.4, 12.6, 9.7, 11.8, 15.8,
+13.6, 8.9, 12.2, 13.2, 15.2, 12.6, 12.9, 15.9, 15.4, 15, 14.2,
+15.7, 14.4, 14.7, 14.5, 15.5, 19.2, 18.2, 13.9, 15, 15.8, 15.7,
+15.6, 17.7, 18.8, 15.2, 14.5, 13.5, 13.4, 12.4, 15.5, 13.2, 14,
+14.1, 15.1, 17.2, 15.1, 14.5, 13.6, 13.1, 13.1, 13.3, 14.8, 15,
+16.4, 15.3, 13.8, 13.5, 12.7, 13.1, 15.8, 16, 14.4, 11.9, 13.2,
+12, 11.6, 12.1, 17.3, 17, 14.7, 17.8, 13.7, 16.6, 14.5, 13.6,
+15.8, 16.4, 16.8, 14.6, 15.7, 19.7, 20.7, 18.6, 13.7, 11.9, 14,
+14.6, 13.4, 16.6, 21, 23.2, 23.9, 17.8, 16.2, 22.1, 24.3, 19,
+16.5, 17.1, 17.3, 16.8, 21.7, 20.1, 15, 14.8, 19.8, 24, 14, 15.3,
+14.8, 16.8, 20.8, 12.9, 14.3, 18.9, 22.6, 24.7, 19.6, 20.2, 23.9,
+15.1, 17.1, 15.3, 16, 16.8, 19.8, 27.2, 29.1, 31.4, 33.2, 33.7,
+19.6, 31.8, 22.4, 21.4, 22, 21.6, 30.7, 30.2, 32.6, 26.6, 22,
+16.3, 17.2, 25.5, 32.1, 18.4, 21, 16.8, 15.6, 18.5, 21.5, 28.1,
+31.4, 20.9, 18.7, 19.9, 21.9, 19.1, 28.5, 26.2, 20.1, 19.9, 22.2,
+31.4, 24.1, 35.2, 36.9, 33.2, 35.7, 21.1, 21.6, 18.6, 18.8, 18.7,
+21.6, 22.4, 20.9, 23.3, 34.4, 20.7, 24.2, 22.2, 23.5, 30.3, 19.8,
+23.3, 29.3, 20.9, 37.6, 23.2, 25.1, 24.4, 24.6)
diff --git a/inst/doc/categoricalVGAM.pdf b/inst/doc/categoricalVGAM.pdf
index 859a3f7..9de2443 100644
Binary files a/inst/doc/categoricalVGAM.pdf and b/inst/doc/categoricalVGAM.pdf differ
diff --git a/man/CommonVGAMffArguments.Rd b/man/CommonVGAMffArguments.Rd
index ab1adab..e8a8fd6 100644
--- a/man/CommonVGAMffArguments.Rd
+++ b/man/CommonVGAMffArguments.Rd
@@ -6,14 +6,18 @@
Here is a description of some common and typical arguments found
in many \pkg{VGAM} family functions, e.g.,
\code{lsigma},
- \code{isigma}, \code{nsimEI}, \code{parallel} and \code{zero}.
+ \code{isigma},
+ \code{nsimEI},
+ \code{parallel} and
+ \code{zero}.
}
\usage{
TypicalVGAMfamilyFunction(lsigma = "loge", esigma = list(), isigma = NULL,
parallel = TRUE, shrinkage.init = 0.95,
nointercept = NULL, method.init = 1,
- nsimEIM = 100, zero = NULL)
+ prob.x = c(0.15, 0.85),
+ oim = FALSE, nsimEIM = 100, zero = NULL)
}
\arguments{
@@ -70,9 +74,11 @@ TypicalVGAMfamilyFunction(lsigma = "loge", esigma = list(), isigma = NULL,
mean over the observations), especially if the number of observations
is large.
+
Some \pkg{VGAM} family functions provide two algorithms for estimating
the EIM.
- If applicable, set \code{nsimEIM=NULL} to choose the other algorithm.
+ If applicable, set \code{nsimEIM = NULL} to choose the other algorithm.
+
}
\item{method.init}{
@@ -81,8 +87,8 @@ TypicalVGAMfamilyFunction(lsigma = "loge", esigma = list(), isigma = NULL,
parameter.
If failure to converge occurs try the next higher value, and
continue until success.
- For example, \code{method.init=1} might be the method of moments,
- and \code{method.init=2} might be another method.
+ For example, \code{method.init = 1} might be the method of moments,
+ and \code{method.init = 2} might be another method.
If no value of \code{method.init} works then it will be necessary
to use arguments such as \code{isigma}.
For many \pkg{VGAM} family functions it is advisable to try
@@ -93,26 +99,71 @@ TypicalVGAMfamilyFunction(lsigma = "loge", esigma = list(), isigma = NULL,
therefore care is needed to ensure the global solution is obtained.
So using all possible values that this argument supplies is a good idea.
+
+ }
+ \item{prob.x}{
+ Numeric, of length two.
+ The probabilites that define quantiles with respect to some vector,
+ usually an \code{x} of some sort.
+ This is used to create two subsets of data corresponding to `low' and
+ `high' values of x.
+ Each value is separately fed into the \code{probs} argument
+ of \code{\link[stats:quantile]{quantile}}.
+ If the data set size is small then it may be necessary to
+ increase/decrease slightly the first/second values respectively.
+
+
+ }
+ \item{oim}{
+ Logical.
+ Should the observed information matrices (OIMs) be used for
+ the working weights?
+ In general, setting \code{oim = TRUE} means the Newton-Raphson
+ algorithm, and \code{oim = FALSE} means Fisher-scoring.
+ The latter uses the EIM, and is usually recommended.
+ If \code{oim = TRUE} then \code{nsimEIM} is ignored.
+
}
\item{zero}{
- An integer specifying which linear/additive predictor is modelled as
- intercepts-only. That is,
- the regression coefficients are set to zero for all covariates
- except for the intercept.
- If \code{zero} is specified then it is a vector with values from the
- set \eqn{\{1,2,\ldots,M\}}.
- The value \code{zero=NULL} means model all linear/additive predictors
- as functions of the explanatory variables.
+ An integer specifying which linear/additive predictor is modelled
+ as intercepts-only. That is, the regression coefficients are
+ set to zero for all covariates except for the intercept.
+ If \code{zero} is specified then it is a vector with values
+ from the set \eqn{\{1,2,\ldots,M\}}.
+ The value \code{zero = NULL} means model all linear/additive
+ predictors as functions of the explanatory variables.
Here, \eqn{M} is the number of linear/additive predictors.
Some \pkg{VGAM} family functions allow the \code{zero} argument to
accept negative values; if so then its absolute value is recycled
- over each response. For example, \code{zero=-2} would mean,
+ over each (usual) response. For example, \code{zero = -2} for the
+ two-parameter negative binomial distribution would mean,
for each response, the second linear/additive predictor is modelled
- as intercepts-only. A specific example is the \eqn{k} parameter in
- \code{\link{negbinomial}}; this \pkg{VGAM} family function can handle
- a matrix of responses.
+ as intercepts-only. That is, for all the \eqn{k} parameters in
+ \code{\link{negbinomial}} (this \pkg{VGAM} family function can handle
+ a matrix of responses).
+
+
+ Suppose \code{zero = zerovec} where \code{zerovec} is a vector
+ of negative values. If \eqn{G} is the usual \eqn{M} value for
+ a univariate response then the actual values for argument \code{zero}
+ are all values in
+ \code{c(abs(zerovec), G + abs(zerovec), 2*G + abs(zerovec), ... )}
+ lying in the integer range \eqn{1} to \eqn{M}.
+ For example, setting \code{zero = -c(2, 3)} for a matrix response
+ of 4 columns with \code{\link{zinegbinomial}} (which
+ usually has \eqn{G = M = 3} for a univariate response)
+ would be equivalent to \code{zero = c(2, 3, 5, 6, 8, 9, 11, 12)}.
+ This example has \eqn{M = 12}.
+ Note that if \code{zerovec} contains negative values then their
+ absolute values should be elements from the set \code{1:G}.
+
+
+ Note: \code{zero} may have positive and negative values,
+ for example, setting \code{zero = c(-2, 3)} in the above example
+ would be equivalent to \code{zero = c(2, 3, 5, 8, 11)}.
+
}
\item{shrinkage.init}{
@@ -151,7 +202,7 @@ TypicalVGAMfamilyFunction(lsigma = "loge", esigma = list(), isigma = NULL,
argument of \code{\link{vglm}} and \code{\link{vgam}}.
See Example 5 below for an example.
If not sure, use, e.g., \code{constraints(fit)} and
- \code{coef(fit, matrix=TRUE)} to check the result of a fit \code{fit}.
+ \code{coef(fit, matrix = TRUE)} to check the result of a fit \code{fit}.
The arguments \code{zero} and \code{nointercept} can be inputted
@@ -181,6 +232,7 @@ TypicalVGAMfamilyFunction(lsigma = "loge", esigma = list(), isigma = NULL,
at a later date!
}
+
%\references{
%}
@@ -190,49 +242,55 @@ TypicalVGAMfamilyFunction(lsigma = "loge", esigma = list(), isigma = NULL,
}
\author{T. W. Yee}
+
%\note{
%
%}
+
\examples{
# Example 1
cumulative()
-cumulative(link="probit", reverse=TRUE, parallel=TRUE)
+cumulative(link = "probit", reverse = TRUE, parallel = TRUE)
# Example 2
-x = runif(n <- 1000)
-y = rweibull(n, shape=2+exp(1+x), scale = exp(-0.5))
-fit = vglm(y ~ x, weibull(lshape="logoff", eshape=list(offset=-2), zero=2))
-coef(fit, mat=TRUE)
-Coef(fit)
-
-# Example 3
-x = runif(n <- 500)
-y1 = rnbinom(n, mu=exp(3+x), size=exp(1)) # k is size
-y2 = rnbinom(n, mu=exp(2-x), size=exp(0))
-fit = vglm(cbind(y1,y2) ~ x, negbinomial(zero=-2)) # multivariate response
-coef(fit, matrix=TRUE)
+wdata <- data.frame(x = runif(nn <- 1000))
+wdata <- transform(wdata,
+ y = rweibull(nn, shape = 2 + exp(1+x), scale = exp(-0.5)))
+fit = vglm(y ~ x, weibull(lshape = "logoff", eshape = list(offset = -2),
+ zero = 2), wdata)
+coef(fit, mat = TRUE)
+
+# Example 3; multivariate (multiple) response
+ndata <- data.frame(x = runif(nn <- 500))
+ndata <- transform(ndata,
+ y1 = rnbinom(nn, mu = exp(3+x), size = exp(1)), # k is size
+ y2 = rnbinom(nn, mu = exp(2-x), size = exp(0)))
+fit <- vglm(cbind(y1, y2) ~ x, negbinomial(zero = -2), ndata)
+coef(fit, matrix = TRUE)
# Example 4
\dontrun{
# fit1 and fit2 are equivalent
-fit1 = vglm(ymatrix ~ x2 + x3 + x4 + x5,
- cumulative(parallel=FALSE ~ 1 + x3 + x5), mydataframe)
-fit2 = vglm(ymatrix ~ x2 + x3 + x4 + x5,
- cumulative(parallel=TRUE ~ x2 + x4), mydataframe)
+fit1 <- vglm(ymatrix ~ x2 + x3 + x4 + x5,
+ cumulative(parallel = FALSE ~ 1 + x3 + x5), mydataframe)
+fit2 <- vglm(ymatrix ~ x2 + x3 + x4 + x5,
+ cumulative(parallel = TRUE ~ x2 + x4), mydataframe)
}
# Example 5
-x = rnorm(n <- 200)
-y1 = rnorm(n, mean=1-3*x, sd=exp(1+0.2*x))
-y2 = rnorm(n, mean=1-3*x, sd=exp(1))
+gdata <- data.frame(x = rnorm(nn <- 200))
+gdata <- transform(gdata,
+ y1 = rnorm(nn, mean = 1 - 3*x, sd = exp(1 + 0.2*x)),
+ y2 = rnorm(nn, mean = 1 - 3*x, sd = exp(1)))
args(normal1)
-fit1 = vglm(y1 ~ x, normal1) # This is ok
-fit2 = vglm(y2 ~ x, normal1(zero=2)) # This is ok
+fit1 <- vglm(y1 ~ x, normal1, gdata) # This is ok
+fit2 <- vglm(y2 ~ x, normal1(zero = 2), gdata) # This is ok
# This creates potential conflict
-clist = list("(Intercept)"=diag(2), "x"=diag(2))
-fit3 = vglm(y2 ~ x, normal1(zero=2), constraints=clist) # Conflict!
-coef(fit3, matrix=TRUE) # Shows that clist[["x"]] was overwritten,
+clist <- list("(Intercept)" = diag(2), "x" = diag(2))
+fit3 <- vglm(y2 ~ x, normal1(zero = 2), gdata,
+ constraints = clist) # Conflict!
+coef(fit3, matrix = TRUE) # Shows that clist[["x"]] was overwritten,
constraints(fit3) # i.e., 'zero' seems to override the 'constraints' arg
}
\keyword{models}
diff --git a/man/Inv.gaussian.Rd b/man/Inv.gaussian.Rd
index a0054ba..b5c554f 100644
--- a/man/Inv.gaussian.Rd
+++ b/man/Inv.gaussian.Rd
@@ -10,15 +10,18 @@
}
\usage{
-dinv.gaussian(x, mu, lambda, log=FALSE)
+dinv.gaussian(x, mu, lambda, log = FALSE)
pinv.gaussian(q, mu, lambda)
rinv.gaussian(n, mu, lambda)
}
\arguments{
\item{x, q}{vector of quantiles.}
%%\item{p}{vector of probabilities.}
- \item{n}{number of observations. If \code{length(n) > 1} then the length
- is taken to be the number required. }
+ \item{n}{number of observations.
+ If \code{length(n) > 1} then the length
+ is taken to be the number required.
+
+ }
\item{mu}{the mean parameter.}
\item{lambda}{the \eqn{\lambda}{lambda} parameter.}
\item{log}{
@@ -33,6 +36,7 @@ rinv.gaussian(n, mu, lambda)
\code{pinv.gaussian} gives the distribution function, and
% \code{qinv.gaussian} gives the quantile function, and
\code{rinv.gaussian} generates random deviates.
+
}
\references{
Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
@@ -41,6 +45,7 @@ Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
Volume 1,
New York: Wiley.
+
Taraldsen, G. and Lindqvist, B. H. (2005)
The multiple roots simulation algorithm,
the inverse Gaussian distribution, and the
@@ -62,15 +67,16 @@ New York: Wiley.
}
\seealso{
\code{\link{inv.gaussianff}}.
+
+
}
\examples{
-\dontrun{
-x = seq(-0.05, 4, len=300)
-plot(x, dinv.gaussian(x, mu=1, lambda=1), type="l", col="blue", las=1,
- main="blue is density, red is cumulative distribution function")
-abline(h=0, col="blue", lty=2)
-lines(x, pinv.gaussian(x, mu=1, lambda=1), type="l", col="red")
-}
+\dontrun{ x <- seq(-0.05, 4, len = 300)
+plot(x, dinv.gaussian(x, mu = 1, lambda = 1), type = "l",
+     col = "blue", las = 1, main =
+ "blue is density, orange is cumulative distribution function")
+abline(h = 0, col = "black", lty = 2)
+lines(x, pinv.gaussian(x, mu = 1, lambda = 1), type = "l", col = "orange") }
}
\keyword{distribution}
diff --git a/man/Links.Rd b/man/Links.Rd
index d0f6572..4ae9c69 100644
--- a/man/Links.Rd
+++ b/man/Links.Rd
@@ -9,8 +9,8 @@
}
\usage{
-TypicalVGAMlinkFunction(theta, earg=list(), inverse=FALSE,
- deriv=0, short=TRUE, tag=FALSE)
+TypicalVGAMlinkFunction(theta, earg = list(), inverse = FALSE,
+ deriv = 0, short = TRUE, tag = FALSE)
}
\arguments{
\item{theta}{
@@ -29,6 +29,7 @@ TypicalVGAMlinkFunction(theta, earg=list(), inverse=FALSE,
always a list with \emph{named} components. See each specific link
function to find the component names for the list.
+
Almost all \pkg{VGAM} family functions with a single link
function have an argument (often called \code{earg}) which will
allow parameters to be inputted for that link function.
@@ -56,7 +57,7 @@ TypicalVGAMlinkFunction(theta, earg=list(), inverse=FALSE,
\code{\link{vglmff-class}} object.
Used only if \code{theta} is character, and gives the formula
for the link in character form.
- If \code{tag=TRUE} then the result contains a little more information.
+ If \code{tag = TRUE} then the result contains a little more information.
}
}
@@ -66,32 +67,35 @@ TypicalVGAMlinkFunction(theta, earg=list(), inverse=FALSE,
the inverse link or its first or second derivative,
or a character description of the link.
+
Here are the general details.
- If \code{inverse=FALSE} and \code{deriv=0} (default) then the
+ If \code{inverse = FALSE} and \code{deriv = 0} (default) then the
ordinary link
- function \eqn{\eta=g(\theta)}{eta=g(theta)} is returned.
- If \code{inverse=FALSE} and \code{deriv=1} then it is
+ function \eqn{\eta = g(\theta)}{eta = g(theta)} is returned.
+ If \code{inverse = FALSE} and \code{deriv = 1} then it is
\eqn{d\theta / d\eta}{d theta / d eta}
\emph{as a function of} \eqn{\theta}{theta}.
- If \code{inverse=FALSE} and \code{deriv=2} then it is
+ If \code{inverse = FALSE} and \code{deriv = 2} then it is
\eqn{d^2\theta / d\eta^2}{d^2 theta / d eta^2}
\emph{as a function of} \eqn{\theta}{theta}.
- If \code{inverse=TRUE} and \code{deriv=0} then the inverse
+
+ If \code{inverse = TRUE} and \code{deriv = 0} then the inverse
link function is returned, hence \code{theta} is really
\eqn{\eta}{eta}.
- If \code{inverse=TRUE} and \code{deriv} is positive then the
+ If \code{inverse = TRUE} and \code{deriv} is positive then the
\emph{reciprocal} of the same link function with
- \code{(theta=theta, earg=earg, inverse=TRUE, deriv=deriv)}
+ \code{(theta = theta, earg = earg, inverse = TRUE, deriv = deriv)}
is returned.
+
}
\details{
Almost all \pkg{VGAM} link functions have something similar to
the argument list as given above.
That is, there is a matching \code{earg} for each \code{link} argument.
In this help file
- we have \eqn{\eta=g(\theta)}{eta=g(theta)}
+ we have \eqn{\eta = g(\theta)}{eta = g(theta)}
where \eqn{g} is the link function, \eqn{\theta}{theta} is the parameter
and \eqn{\eta}{eta} is the linear/additive predictor.
@@ -106,28 +110,32 @@ TypicalVGAMlinkFunction(theta, earg=list(), inverse=FALSE,
\code{\link{probit}},
\code{\link{cloglog}},
\code{\link{cauchit}},
- \code{\link{loglog}},
\code{\link{fsqrt}},
\code{\link{logc}},
\code{\link{golf}},
\code{\link{polf}},
\code{\link{nbolf}}.
+
For positive parameters (i.e., greater than 0):
\code{\link{loge}},
\code{\link{nloge}},
\code{\link{powl}}.
+
For parameters greater than 1:
\code{\link{loglog}}.
+
For parameters between \eqn{-1} and \eqn{1}:
\code{\link{fisherz}},
\code{\link{rhobit}}.
+
For parameters between \eqn{A} and \eqn{B}:
\code{\link{elogit}},
- \code{\link{logoff}} (\eqn{B=\infty}{B=Inf}).
+ \code{\link{logoff}} (\eqn{B = \infty}{B = Inf}).
+
For unrestricted parameters (i.e., any value):
\code{\link{identity}},
@@ -135,6 +143,7 @@ TypicalVGAMlinkFunction(theta, earg=list(), inverse=FALSE,
\code{\link{reciprocal}},
\code{\link{nreciprocal}}.
+
% Other links:
}
@@ -152,6 +161,7 @@ TypicalVGAMlinkFunction(theta, earg=list(), inverse=FALSE,
\code{\link{cao}},
\code{\link{uqo}}.
+
}
\author{T. W. Yee}
\note{
@@ -160,15 +170,16 @@ TypicalVGAMlinkFunction(theta, earg=list(), inverse=FALSE,
In particular, they won't work with
\code{\link[stats]{glm}} or any other package for fitting GAMs.
+
From October 2006 onwards,
all \pkg{VGAM} family functions will only
contain one default value for each link argument rather than giving a
vector of choices.
For example, rather than
- \code{binomialff(link=c("logit", "probit", "cloglog",
+ \code{binomialff(link = c("logit", "probit", "cloglog",
"cauchit", "identity"), ...)}
it is now
- \code{binomialff(link="logit", ...)}
+ \code{binomialff(link = "logit", ...)}
No checking will be done to see if the user's choice is reasonable.
This means that the user can write his/her own \pkg{VGAM} link function
and use it within any \pkg{VGAM} family function.
@@ -177,43 +188,44 @@ TypicalVGAMlinkFunction(theta, earg=list(), inverse=FALSE,
link function, by either assigning the link argument the full name as
a character string, or just the name itself. See the examples below.
+
}
\examples{
logit("a")
-logit("a", short=FALSE)
-logit("a", short=FALSE, tag=TRUE)
+logit("a", short = FALSE)
+logit("a", short = FALSE, tag = TRUE)
-logoff(1:5, earg=list(offset=1)) # Same as log(1:5 + 1)
-powl(1:5, earg=list(power=2)) # Same as (1:5)^2
+logoff(1:5, earg = list(offset = 1)) # Same as log(1:5 + 1)
+powl(1:5, earg = list(power = 2)) # Same as (1:5)^2
-fit1 = vgam(agaaus ~ altitude, binomialff(link=cloglog), hunua) # ok
-fit2 = vgam(agaaus ~ altitude, binomialff(link="cloglog"), hunua) # ok
+fit1 <- vgam(agaaus ~ altitude, binomialff(link = cloglog), hunua) # ok
+fit2 <- vgam(agaaus ~ altitude, binomialff(link = "cloglog"), hunua) # ok
\dontrun{
# This no longer works since "clog" is not a valid VGAM link function:
-fit3 = vgam(agaaus ~ altitude, binomialff(link="clog"), hunua) # not ok
+fit3 <- vgam(agaaus ~ altitude, binomialff(link = "clog"), hunua) # not ok
# No matter what the link, the estimated var-cov matrix is the same
-y = rbeta(n=1000, shape1=exp(0), shape2=exp(1))
-fit1 = vglm(y ~ 1, beta.ab(lshape1="identity", lshape2="identity"),
- trace = TRUE, crit="c")
-fit2 = vglm(y ~ 1, beta.ab(lshape1=logoff, eshape1=list(offset=1.1),
- lshape2=logoff, eshape2=list(offset=1.1)),
- trace = TRUE, crit="c")
-vcov(fit1, untran=TRUE)
-vcov(fit1, untran=TRUE)-vcov(fit2, untran=TRUE) # Should be all 0s
+y <- rbeta(n = 1000, shape1 = exp(0), shape2 = exp(1))
+fit1 <- vglm(y ~ 1, beta.ab(lshape1 = "identity", lshape2 = "identity"),
+ trace = TRUE, crit = "c")
+fit2 <- vglm(y ~ 1, beta.ab(lshape1 = logoff, eshape1 = list(offset = 1.1),
+ lshape2 = logoff, eshape2 = list(offset = 1.1)),
+ trace = TRUE, crit = "c")
+vcov(fit1, untran = TRUE)
+vcov(fit1, untran = TRUE) - vcov(fit2, untran = TRUE) # Should be all 0s
fit1 at misc$earg # No 'special' parameters
fit2 at misc$earg # Some 'special' parameters are here
-par(mfrow=c(2,2))
-p = seq(0.01, 0.99, len=200)
-x = seq(-4, 4, len=200)
-plot(p, logit(p), type="l", col="blue")
-plot(x, logit(x, inverse=TRUE), type="l", col="blue")
-plot(p, logit(p, deriv=1), type="l", col="blue") # reciprocal!
-plot(p, logit(p, deriv=2), type="l", col="blue") # reciprocal!
+par(mfrow = c(2,2))
+p <- seq(0.01, 0.99, len = 200)
+x <- seq(-4, 4, len = 200)
+plot(p, logit(p), type = "l", col = "blue")
+plot(x, logit(x, inverse = TRUE), type = "l", col = "blue")
+plot(p, logit(p, deriv = 1), type = "l", col = "blue") # reciprocal!
+plot(p, logit(p, deriv = 2), type = "l", col = "blue") # reciprocal!
}
}
\keyword{models}
diff --git a/man/RayleighUC.Rd b/man/RayleighUC.Rd
index 8e37836..3c543e8 100644
--- a/man/RayleighUC.Rd
+++ b/man/RayleighUC.Rd
@@ -11,20 +11,21 @@
\code{a}.
}
\usage{
-drayleigh(x, a, log=FALSE)
-prayleigh(q, a)
-qrayleigh(p, a)
-rrayleigh(n, a)
+drayleigh(x, scale = 1, log = FALSE)
+prayleigh(q, scale = 1)
+qrayleigh(p, scale = 1)
+rrayleigh(n, scale = 1)
}
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
\item{n}{number of observations.
- Must be a positive integer of length 1.}
- \item{a}{the parameter \eqn{a}.}
+ Fed into \code{\link[stats]{runif}}.
+ }
+ \item{scale}{the scale parameter \eqn{b}.}
\item{log}{
Logical.
- If \code{log=TRUE} then the logarithm of the density is returned.
+ If \code{log = TRUE} then the logarithm of the density is returned.
}
@@ -34,18 +35,22 @@ rrayleigh(n, a)
\code{prayleigh} gives the distribution function,
\code{qrayleigh} gives the quantile function, and
\code{rrayleigh} generates random deviates.
+
}
\references{
+
Evans, M., Hastings, N. and Peacock, B. (2000)
\emph{Statistical Distributions},
New York: Wiley-Interscience, Third edition.
+
}
\author{ T. W. Yee }
\details{
- See \code{\link{rayleigh}}, the \pkg{VGAM} family function
- for estimating the parameter \eqn{a} by maximum likelihood estimation,
- for the formula of the probability density function and range restrictions
- on the parameter \eqn{a}.
+ See \code{\link{rayleigh}}, the \pkg{VGAM} family
+ function for estimating the scale parameter \eqn{b} by
+ maximum likelihood estimation, for the formula of the
+ probability density function and range restrictions on
+ the parameter \eqn{b}.
}
\note{
@@ -58,13 +63,14 @@ New York: Wiley-Interscience, Third edition.
}
\examples{
-\dontrun{ a = 2; x = seq(-1, 8, by=0.1)
-plot(x, drayleigh(x, a=a), type="l", ylim=c(0,1), las=1, ylab="",
- main="Rayleigh density divided into 10 equal areas; red=cdf")
-abline(h=0, col="blue", lty=2)
-qq = qrayleigh(seq(0.1,0.9,by=0.1),a=a)
-lines(qq, drayleigh(qq, a=a), col="purple", lty=3, type="h")
-lines(x, prayleigh(x, a=a), col="red") }
+\dontrun{ Scale = 2; x = seq(-1, 8, by = 0.1)
+plot(x, drayleigh(x, scale = Scale), type = "l", ylim = c(0,1),
+ las = 1, ylab = "",
+ main = "Rayleigh density divided into 10 equal areas; orange = cdf")
+abline(h = 0, col = "blue", lty = 2)
+qq = qrayleigh(seq(0.1, 0.9, by = 0.1), scale = Scale)
+lines(qq, drayleigh(qq, scale = Scale), col = "purple", lty = 3, type = "h")
+lines(x, prayleigh(x, scale = Scale), col = "orange") }
}
\keyword{distribution}
diff --git a/man/Rcam.Rd b/man/Rcam.Rd
new file mode 100644
index 0000000..d324ff5
--- /dev/null
+++ b/man/Rcam.Rd
@@ -0,0 +1,72 @@
+\name{Rcam}
+\alias{Rcam}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{
+  Mark the Row and Column Baselines of a Matrix
+
+}
+\description{
+  This function sets the row and column baselines used by
+  the rank-zero row-column association models and by main
+  effects models.
+
+}
+\usage{
+ Rcam(mat, rbaseline = 1, cbaseline = 1)
+
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+\item{mat}{
+ Matrix of \eqn{r} by \eqn{c}.
+ The matrix is better labeled with row and column names.
+
+}
+\item{rbaseline}{
+  Numeric (a row number of the matrix \code{mat}) or
+  character (a row name of the matrix \code{mat}) giving
+  the row to be used as the baseline or reference.
+  Similarly, \code{cbaseline} is for the columns.
+
+}
+\item{cbaseline}{
+  Numeric (a column number of the matrix \code{mat}) or
+  character (a column name of the matrix \code{mat}) giving
+  the column to be used as the baseline or reference.
+
+}
+}
+\details{
+  In some regression analyses it is necessary to choose a baseline
+  (or reference) level of a factor; likewise, the rank-zero
+  row-column association model requires a baseline to be marked
+  on one row level and one column level. This is a data
+  preprocessing function for \code{\link{rcam}}.
+
+}
+
+\value{
+  Matrix of the same dimension as the input, rearranged so that
+  the chosen baseline row and column come first.
+
+}
+\author{
+Alfian F. Hadi.
+
+}
+\note{
+This is a data preprocessing function for \code{\link{rcam}}.
+
+}
+
+
+\seealso{
+ \code{\link{moffset}},
+ \code{\link{rcam}},
+ \code{\link{plotrcam0}}.
+
+}
+\examples{
+(alcoff.e <- moffset(alcoff, roffset = "6"))
+Rcam(alcoff.e, rbaseline = "11", cbaseline = "Monday")
+}
diff --git a/man/VGAM-package.Rd b/man/VGAM-package.Rd
index ef2b958..18c6a06 100644
--- a/man/VGAM-package.Rd
+++ b/man/VGAM-package.Rd
@@ -142,14 +142,14 @@ zipdat = transform(zipdat, phi = logit(-0.5 + 1*x, inverse=TRUE),
lambda = loge( 0.5 + 2*x, inverse=TRUE))
zipdat = transform(zipdat, y = rzipois(nn, lambda, phi))
with(zipdat, table(y))
-fit = vglm(y ~ x, zipoisson, zipdat, trace=TRUE)
-coef(fit, matrix=TRUE) # These should agree with the above values
+fit = vglm(y ~ x, zipoisson, zipdat, trace = TRUE)
+coef(fit, matrix = TRUE) # These should agree with the above values
# Example 3; fit a two species GAM simultaneously
fit2 = vgam(cbind(agaaus, kniexc) ~ s(altitude, df = c(2,3)),
binomialff(mv = TRUE), hunua)
-coef(fit2, mat=TRUE) # Not really interpretable
+coef(fit2, matrix = TRUE) # Not really interpretable
\dontrun{
plot(fit2, se = TRUE, overlay = TRUE, lcol = 1:2, scol = 1:2)
diff --git a/man/acat.Rd b/man/acat.Rd
index 4b89a36..0fbfa50 100644
--- a/man/acat.Rd
+++ b/man/acat.Rd
@@ -21,7 +21,7 @@ acat(link = "loge", earg = list(),
}
\item{earg}{
List. Extra argument for the link function.
- See \code{earg} in \code{\link{Links}} for general information.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
\item{parallel}{
@@ -56,6 +56,7 @@ acat(link = "loge", earg = list(),
By default, the log link is used because the ratio of two probabilities
is positive.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -93,9 +94,11 @@ contains further information and examples.
all positive), or a factor. In both cases, the \code{y} slot returned
by \code{vglm}/\code{vgam}/\code{rrvglm} is the matrix of counts.
+
For a nominal (unordered) factor response, the multinomial logit model
(\code{\link{multinomial}}) is more appropriate.
+
Here is an example of the usage of the \code{parallel} argument.
If there are covariates \code{x1}, \code{x2} and \code{x3}, then
\code{parallel = TRUE ~ x1 + x2 -1} and \code{parallel = FALSE ~
@@ -103,10 +106,12 @@ contains further information and examples.
for \code{x1} and \code{x2} to be equal; those of the intercepts and
\code{x3} would be different.
+
}
\section{Warning }{
No check is made to verify that the response is ordinal;
see \code{\link[base:factor]{ordered}}.
+
}
\seealso{
@@ -115,11 +120,12 @@ contains further information and examples.
\code{\link{sratio}},
\code{\link{multinomial}},
\code{\link{pneumo}}.
+
}
\examples{
-pneumo <- transform(pneumo, let=log(exposure.time))
+pneumo <- transform(pneumo, let = log(exposure.time))
(fit <- vglm(cbind(normal,mild,severe) ~ let, acat, pneumo))
-coef(fit, matrix=TRUE)
+coef(fit, matrix = TRUE)
constraints(fit)
model.matrix(fit)
}
diff --git a/man/alaplace3.Rd b/man/alaplace3.Rd
index bdc3226..18bdfd6 100644
--- a/man/alaplace3.Rd
+++ b/man/alaplace3.Rd
@@ -14,14 +14,15 @@
alaplace1(tau = NULL, llocation = "identity", elocation = list(),
ilocation = NULL, kappa = sqrt(tau/(1 - tau)), Scale.arg = 1,
shrinkage.init = 0.95, parallelLocation = FALSE, digt = 4,
- dfmu.init = 3, method.init = 1, zero = NULL)
+ dfmu.init = 3, intparloc = FALSE, method.init = 1)
alaplace2(tau = NULL, llocation = "identity", lscale = "loge",
elocation = list(), escale = list(),
ilocation = NULL, iscale = NULL, kappa = sqrt(tau/(1 - tau)),
shrinkage.init = 0.95,
parallelLocation = FALSE, digt = 4, sameScale = TRUE,
- dfmu.init = 3, method.init = 1, zero = "(1 + M/2):M")
+ dfmu.init = 3, intparloc = FALSE,
+ method.init = 1, zero = -2)
alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
elocation = list(), escale = list(), ekappa = list(),
@@ -46,7 +47,7 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
See \code{\link{Links}} for more choices.
For example, the argument \code{llocation} can help handle
count data by restricting the quantiles to be positive
- (use \code{llocation="loge"}).
+ (use \code{llocation = "loge"}).
However, \code{llocation} is best left alone since the theory
only works properly with the identity link.
@@ -61,17 +62,20 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
If given, it must be numeric and values are recycled to the
appropriate length.
The default is to choose the value internally.
+
}
- \item{parallelLocation}{ Logical.
+ \item{parallelLocation, intparloc}{ Logical.
Should the quantiles be parallel on the transformed scale
(argument \code{llocation})?
Assigning this argument to \code{TRUE} circumvents the
seriously embarrassing quantile crossing problem.
+    The argument \code{intparloc} applies to the intercept term;
+ the argument \code{parallelLocation} applies to other terms.
}
\item{sameScale}{ Logical.
Should the scale parameters be equal? It is advised to keep
- \code{sameScale=TRUE} unchanged because it does not make sense to
+ \code{sameScale = TRUE} unchanged because it does not make sense to
have different values for each \code{tau} value.
@@ -85,7 +89,7 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
Degrees of freedom for the cubic smoothing spline fit applied to
get an initial estimate of the location parameter.
See \code{\link{vsmooth.spline}}.
- Used only when \code{method.init=3}.
+ Used only when \code{method.init = 3}.
}
\item{shrinkage.init}{
@@ -93,7 +97,8 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
The value must be between 0 and 1 inclusive, and
a value of 0 means the individual response values are used,
and a value of 1 means the median or mean is used.
- This argument is used only when \code{method.init=4}.
+ This argument is used only when \code{method.init = 4}.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
\item{Scale.arg}{
@@ -134,7 +139,7 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
\right) }{%
f(y;xi,sigma,kappa) = (sqrt(2)/sigma) * (kappa/(1+ \kappa^2)) *
exp( -(sqrt(2) / (sigma * kappa)) * |y-xi| ) }
- for \eqn{y \leq \xi}{y <= xi}, and
+ for \eqn{y \leq \xi}{y <= xi}, and
\deqn{f(y;\xi,\sigma,\kappa) = \frac{\sqrt{2}}{\sigma} \,
\frac{\kappa}{1 + \kappa^2} \,
\exp \left( - \frac{\sqrt{2} \, \kappa}{\sigma} |y - \xi |
@@ -145,33 +150,42 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
Here, the ranges are
for all real \eqn{y} and \eqn{\xi}{xi}, positive \eqn{\sigma}{sigma} and
positive \eqn{\kappa}{kappa}.
- The special case \eqn{\kappa=1}{kappa=1} corresponds to the
+ The special case \eqn{\kappa = 1}{kappa = 1} corresponds to the
(symmetric) Laplace distribution of Kotz et al. (2001).
The mean is \eqn{\xi + \sigma (1/\kappa - \kappa) / \sqrt{2}}{xi +
sigma * (1/kappa - kappa) / sqrt(2)}
and the variance is
\eqn{\sigma^2 (1 + \kappa^4) / (2 \kappa^2)}{sigma^2 * (1 +
kappa^4) / (2 * kappa^2)}.
- The enumeration of the linear/additive predictors used here is to
- first have all the location parameters, followed by all the
- scale parameters. Finally, for \code{alaplace3()}, the last one
- is the asymmetry parameter.
+ The enumeration of the linear/additive predictors used for
+ \code{alaplace2()} is
+ the first location parameter followed by the first scale parameter,
+ then the second location parameter followed by the
+ second scale parameter, etc.
+ For \code{alaplace3()}, only a vector response is handled
+ and the last (third) linear/additive predictor is for
+ the asymmetry parameter.
+
It is known that the maximum likelihood estimate of the
location parameter \eqn{\xi}{xi} corresponds to the regression
quantile estimate of the classical quantile regression approach of
Koenker and Bassett (1978). An important property of the ALD is that
- \eqn{P(Y \leq \xi) = \tau}{P(Y <= xi) = tau} where
+ \eqn{P(Y \leq \xi) = \tau}{P(Y <= xi) = tau} where
\eqn{\tau = \kappa^2 / (1 + \kappa^2)}{tau = kappa^2 / (1 + kappa^2)}
so that
\eqn{\kappa = \sqrt{\tau / (1-\tau)}}{kappa = sqrt(tau / (1-tau))}.
- Thus \code{alaplace1()} may be used as an alternative to \code{rq}
+ Thus \code{alaplace1()} might be used as an alternative to \code{rq}
in the \pkg{quantreg} package.
- In general the response must be a vector or a 1-column matrix.
- For \code{alaplace1()} and \code{alaplace2()}
- the number of linear/additive predictors is dictated by the
+
+ Both \code{alaplace1()} and \code{alaplace2()}
+ can handle multiple responses,
+ and the number of linear/additive predictors is dictated by the
length of \code{tau} or \code{kappa}.
+ The function \code{alaplace2()} can also handle a matrix
+ response with a single-valued \code{tau} or \code{kappa}.
+
}
\value{
@@ -179,10 +193,12 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
In the \code{extra} slot of the fitted object are some list
components which are useful, e.g., the sample proportion of
values which are less than the fitted quantile curves.
+
}
\references{
Koenker, R. and Bassett, G. (1978)
@@ -190,16 +206,19 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
\emph{Econometrica},
\bold{46}, 33--50.
+
Kotz, S., Kozubowski, T. J. and Podgorski, K. (2001)
\emph{The Laplace distribution and generalizations:
a revisit with applications to communications,
economics, engineering, and finance},
Boston: Birkhauser.
- Yee, T. W. (2009)
+
+ Yee, T. W. (2011)
Quantile regression for counts and proportions.
In preparation.
+
}
\author{ Thomas W. Yee }
\section{Warning}{
@@ -207,10 +226,12 @@ Boston: Birkhauser.
so that misleading inferences may result,
e.g., in the \code{summary} and \code{vcov} of the object.
+
Care is needed with \code{tau} values which are too small, e.g.,
- for count data with \code{llocation="loge"} and if the sample
+ for count data with \code{llocation = "loge"} and if the sample
proportion of zeros is greater than \code{tau}.
+
}
\note{
% Commented out 20090326
@@ -220,37 +241,45 @@ Boston: Birkhauser.
% i.e., linear transformation of the response produces the
% same linear transformation of the fitted quantiles.
+
These \pkg{VGAM} family functions use Fisher scoring.
- Convergence may be slow and
- half-stepping is usual (although one can use \code{trace=TRUE} to see
- which is the best model and then use \code{maxit} to fit that model).
+ Convergence may be slow and half-stepping is usual
+ (although one can use \code{trace = TRUE} to see which
+ is the best model and then use \code{maxit} to choose
+ that model)
+ due to the regularity conditions not holding.
+
For large data sets it is a very good idea to keep the length of
\code{tau}/\code{kappa} low to avoid large memory requirements.
Then
- for \code{parallelLoc=FALSE} one can repeatedly fit a model with
+ for \code{parallelLoc = FALSE} one can repeatedly fit a model with
\code{alaplace1()} with one \eqn{\tau}{tau} at a time;
and
- for \code{parallelLoc=TRUE} one can refit a model with
+ for \code{parallelLoc = TRUE} one can refit a model with
\code{alaplace1()} with one \eqn{\tau}{tau} at a time but
using offsets and an intercept-only model.
+
A second method for solving the noncrossing quantile problem is
illustrated below in Example 3.
This is called the \emph{accumulative quantile method} (AQM)
- and details are in Yee (2009).
+ and details are in Yee (2011).
It does not make the strong parallelism assumption.
+
The functions \code{alaplace2()} and \code{\link{laplace}}
differ slightly in terms of the parameterizations.
+
}
\seealso{
\code{\link{ralap}},
\code{\link{laplace}},
\code{\link{lms.bcn}},
- \code{\link{amlnormal}}.
+ \code{\link{amlnormal}},
+ \code{\link{koenker}}.
}
@@ -258,48 +287,43 @@ Boston: Birkhauser.
# Example 1: quantile regression with smoothing splines
adata = data.frame(x = sort(runif(n <- 500)))
mymu = function(x) exp(-2 + 6*sin(2*x-0.2) / (x+0.5)^2)
-adata = transform(adata, y = rpois(n, lambda=mymu(x)))
+adata = transform(adata, y = rpois(n, lambda = mymu(x)))
mytau = c(0.25, 0.75); mydof = 4
-fit = vgam(y ~ s(x, df=mydof), alaplace1(tau=mytau, llocation="loge",
- parallelLoc=FALSE), adata, trace=TRUE)
-fitp = vgam(y ~ s(x, df=mydof), alaplace1(tau=mytau, llocation="loge",
- parallelLoc=TRUE), adata, trace=TRUE)
+fit = vgam(y ~ s(x, df = mydof), alaplace1(tau = mytau, llocation = "loge",
+ parallelLoc = FALSE), adata, trace = TRUE)
+fitp = vgam(y ~ s(x, df = mydof), alaplace1(tau = mytau, llocation = "loge",
+ parallelLoc = TRUE), adata, trace = TRUE)
-\dontrun{
-par(las=1)
-mylwd = 1.5
-with(adata, plot(x, jitter(y, factor=0.5), col="red",
- main="Example 1; green: parallelLoc=TRUE",
- ylab="y", pch="o", cex=0.75))
-with(adata, matlines(x, fitted(fit), col="blue", lty="solid", lwd=mylwd))
-with(adata, matlines(x, fitted(fitp), col="green", lty="solid", lwd=mylwd))
-finexgrid = seq(0, 1, len=1001)
+\dontrun{ par(las = 1); mylwd = 1.5
+with(adata, plot(x, jitter(y, factor = 0.5), col = "red",
+ main = "Example 1; green: parallelLoc = TRUE",
+ ylab = "y", pch = "o", cex = 0.75))
+with(adata, matlines(x, fitted(fit ), col = "blue", lty = "solid", lwd = mylwd))
+with(adata, matlines(x, fitted(fitp), col = "green", lty = "solid", lwd = mylwd))
+finexgrid = seq(0, 1, len = 1001)
for(ii in 1:length(mytau))
- lines(finexgrid, qpois(p=mytau[ii], lambda=mymu(finexgrid)),
- col="blue", lwd=mylwd)
-}
+ lines(finexgrid, qpois(p = mytau[ii], lambda = mymu(finexgrid)),
+ col = "blue", lwd = mylwd) }
fit at extra # Contains useful information
# Example 2: regression quantile at a new tau value from an existing fit
# Nb. regression splines are used here since it is easier.
-fitp2 = vglm(y ~ bs(x, df=mydof),
- family = alaplace1(tau=mytau, llocation="loge",
- parallelLoc=TRUE),
- adata, trace=TRUE)
+fitp2 = vglm(y ~ bs(x, df = mydof),
+ family = alaplace1(tau = mytau, llocation = "loge",
+ parallelLoc = TRUE),
+ adata, trace = TRUE)
newtau = 0.5 # Want to refit the model with this tau value
fitp3 = vglm(y ~ 1 + offset(predict(fitp2)[,1]),
- family = alaplace1(tau=newtau, llocation="loge"),
+ family = alaplace1(tau = newtau, llocation = "loge"),
adata)
-\dontrun{
-with(adata, plot(x, jitter(y, factor=0.5), col="red", ylab="y",
- pch="o", cex=0.75,
- main="Example 2; parallelLoc=TRUE"))
-with(adata, matlines(x, fitted(fitp2), col="blue", lty="solid", lwd=mylwd))
-with(adata, matlines(x, fitted(fitp3), col="black", lty="solid", lwd=mylwd))
-}
+\dontrun{ with(adata, plot(x, jitter(y, factor = 0.5), col = "red", ylab = "y",
+ pch = "o", cex = 0.75,
+ main = "Example 2; parallelLoc = TRUE"))
+with(adata, matlines(x, fitted(fitp2), col = "blue", lty = 1, lwd = mylwd))
+with(adata, matlines(x, fitted(fitp3), col = "black", lty = 1, lwd = mylwd)) }
@@ -307,35 +331,33 @@ with(adata, matlines(x, fitted(fitp3), col="black", lty="solid", lwd=mylwd))
# successive solutions which are added to previous solutions; use a log
# link to ensure an increasing quantiles at any value of x.
-mytau = seq(0.2, 0.9, by=0.1)
+mytau = seq(0.2, 0.9, by = 0.1)
answer = matrix(0, nrow(adata), length(mytau)) # Stores the quantiles
-adata = transform(adata, offsety=y*0)
+adata = transform(adata, offsety = y*0)
usetau = mytau
for(ii in 1:length(mytau)) {
-# cat("\n\nii =", ii, "\n")
- adata = transform(adata, usey=y-offsety)
- iloc = ifelse(ii==1, with(adata, median(y)), 1.0) # Well-chosen!
- mydf = ifelse(ii==1, 5, 3) # Maybe less smoothing will help
- lloc = ifelse(ii==1, "identity", "loge") # 2nd value must be "loge"
- fit3 = vglm(usey ~ ns(x, df=mydf), adata, trace=TRUE,
- fam=alaplace1(tau=usetau[ii], lloc=lloc, iloc=iloc))
- answer[,ii] = (if(ii==1) 0 else answer[,ii-1]) + fitted(fit3)
- adata = transform(adata, offsety=answer[,ii])
+# cat("\n\nii = ", ii, "\n")
+ adata = transform(adata, usey = y-offsety)
+ iloc = ifelse(ii == 1, with(adata, median(y)), 1.0) # Well-chosen!
+ mydf = ifelse(ii == 1, 5, 3) # Maybe less smoothing will help
+ lloc = ifelse(ii == 1, "identity", "loge") # 2nd value must be "loge"
+ fit3 = vglm(usey ~ ns(x, df = mydf), adata, trace = TRUE,
+ fam = alaplace1(tau = usetau[ii], lloc = lloc, iloc = iloc))
+ answer[,ii] = (if(ii == 1) 0 else answer[,ii-1]) + fitted(fit3)
+ adata = transform(adata, offsety = answer[,ii])
}
# Plot the results.
-\dontrun{
-with(adata, plot(x, y, col="blue",
- main=paste("Noncrossing and nonparallel; tau =",
- paste(mytau, collapse=", "))))
-with(adata, matlines(x, answer, col="red", lty=1))
+\dontrun{ with(adata, plot(x, y, col = "blue",
+ main = paste("Noncrossing and nonparallel; tau = ",
+ paste(mytau, collapse = ", "))))
+with(adata, matlines(x, answer, col = "orange", lty = 1))
# Zoom in near the origin.
-with(adata, plot(x, y, col="blue", xlim=c(0, 0.2), ylim=0:1,
- main=paste("Noncrossing and nonparallel; tau =",
- paste(mytau, collapse=", "))))
-with(adata, matlines(x, answer, col="red", lty=1))
-}
+with(adata, plot(x, y, col = "blue", xlim = c(0, 0.2), ylim = 0:1,
+ main = paste("Noncrossing and nonparallel; tau = ",
+ paste(mytau, collapse = ", "))))
+with(adata, matlines(x, answer, col = "orange", lty = 1)) }
}
\keyword{models}
\keyword{regression}
diff --git a/man/alaplaceUC.Rd b/man/alaplaceUC.Rd
index f9e221c..f646c41 100644
--- a/man/alaplaceUC.Rd
+++ b/man/alaplaceUC.Rd
@@ -13,11 +13,11 @@
}
\usage{
-dalap(x, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau)),
- log=FALSE)
-palap(q, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau)))
-qalap(p, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau)))
-ralap(n, location=0, scale=1, tau=0.5, kappa=sqrt(tau/(1-tau)))
+dalap(x, location = 0, scale = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)),
+ log = FALSE)
+palap(q, location = 0, scale = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)))
+qalap(p, location = 0, scale = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)))
+ralap(n, location = 0, scale = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -84,22 +84,20 @@ Boston: Birkhauser.
}
\examples{
-x <- seq(-5, 5, by=0.01)
+x <- seq(-5, 5, by = 0.01)
loc <- 0; sigma <- 1.5; kappa <- 2
-\dontrun{
-plot(x, dalap(x, loc, sigma, kappa=kappa), type="l", col="blue",
- main="Blue is density, red is cumulative distribution function",
- ylim=c(0,1), sub="Purple are 5,10,...,95 percentiles",
- las=1, ylab="", cex.main=0.5)
-abline(h=0, col="blue", lty=2)
-lines(qalap(seq(0.05,0.95,by=0.05), loc, sigma, kappa=kappa),
- dalap(qalap(seq(0.05,0.95,by=0.05), loc, sigma, kappa=kappa),
- loc, sigma, kappa=kappa), col="purple", lty=3, type="h")
-lines(x, palap(x, loc, sigma, kappa=kappa), type="l", col="red")
-abline(h=0, lty=2)
-}
-palap(qalap(seq(0.05,0.95,by=0.05), loc, sigma, kappa=kappa),
- loc, sigma, kappa=kappa)
+\dontrun{ plot(x, dalap(x, loc, sigma, kappa = kappa), type = "l", col = "blue",
+ main = "Blue is density, red is cumulative distribution function",
+ ylim = c(0,1), sub = "Purple are 5, 10, ..., 95 percentiles",
+ las = 1, ylab = "", cex.main = 0.5)
+abline(h = 0, col = "blue", lty = 2)
+lines(qalap(seq(0.05, 0.95, by = 0.05), loc, sigma, kappa = kappa),
+ dalap(qalap(seq(0.05, 0.95, by = 0.05), loc, sigma, kappa = kappa),
+ loc, sigma, kappa = kappa), col = "purple", lty = 3, type = "h")
+lines(x, palap(x, loc, sigma, kappa = kappa), type = "l", col = "red")
+abline(h = 0, lty = 2) }
+palap(qalap(seq(0.05, 0.95, by = 0.05), loc, sigma, kappa = kappa),
+ loc, sigma, kappa = kappa)
}
\keyword{distribution}
diff --git a/man/amh.Rd b/man/amh.Rd
index aac83c3..b4f5cb5 100644
--- a/man/amh.Rd
+++ b/man/amh.Rd
@@ -9,7 +9,8 @@
}
\usage{
-amh(lalpha="rhobit", ealpha=list(), ialpha=NULL, method.init=1, nsimEIM=250)
+amh(lalpha = "rhobit", ealpha = list(), ialpha = NULL,
+ method.init = 1, nsimEIM = 250)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -47,17 +48,17 @@ amh(lalpha="rhobit", ealpha=list(), ialpha=NULL, method.init=1, nsimEIM=250)
The cumulative distribution function is
\deqn{P(Y_1 \leq y_1, Y_2 \leq y_2) = y_1 y_2
/ ( 1 - \alpha (1 - y_1) (1 - y_2) ) }{%
- P(Y1 <= y1, Y2 <= y2) =
+ P(Y1 <= y1, Y2 <= y2) =
y1 * y2 / ( 1 - alpha * (1 - y1) * (1 - y2) ) }
for \eqn{-1 < \alpha < 1}{-1 < alpha < 1}.
The support of the function is the unit square.
The marginal distributions are the standard uniform distributions.
- When \eqn{\alpha = 0}{alpha=0} the random variables are
+ When \eqn{\alpha = 0}{alpha = 0} the random variables are
independent.
% A variant of Newton-Raphson is used, which only seems to work for an
% intercept model.
-% It is a very good idea to set \code{trace=TRUE}.
+% It is a very good idea to set \code{trace = TRUE}.
% This \pkg{VGAM} family function is prone to numerical difficulties.
}
@@ -69,9 +70,15 @@ amh(lalpha="rhobit", ealpha=list(), ialpha=NULL, method.init=1, nsimEIM=250)
\references{
-Hutchinson, T. P. and Lai, C. D. (1990)
-\emph{Continuous Bivariate Distributions, Emphasising Applications},
-Adelaide, South Australia: Rumsby Scientific Publishing.
+%Hutchinson, T. P. and Lai, C. D. (1990)
+%\emph{Continuous Bivariate Distributions, Emphasising Applications},
+%Adelaide, South Australia: Rumsby Scientific Publishing.
+
+Balakrishnan, N. and Lai, C.-D. (2009)
+\emph{Continuous Bivariate Distributions},
+2nd ed.
+New York: Springer.
+
}
\author{ T. W. Yee and C. S. Chee }
@@ -91,9 +98,9 @@ Adelaide, South Australia: Rumsby Scientific Publishing.
}
\examples{
-ymat <- ramh(1000, alpha=rhobit(2, inverse=TRUE))
+ymat <- ramh(1000, alpha = rhobit(2, inverse = TRUE))
fit <- vglm(ymat ~ 1, amh, trace = TRUE)
-coef(fit, mat=TRUE)
+coef(fit, matrix = TRUE)
Coef(fit)
}
\keyword{models}
diff --git a/man/amlbinomial.Rd b/man/amlbinomial.Rd
index 766544c..ba5849c 100644
--- a/man/amlbinomial.Rd
+++ b/man/amlbinomial.Rd
@@ -8,7 +8,8 @@
}
\usage{
-amlbinomial(w.aml = 1, parallel = FALSE, digw = 4, link = "logit", earg = list())
+amlbinomial(w.aml = 1, parallel = FALSE, digw = 4,
+ link = "logit", earg = list())
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -115,11 +116,11 @@ amlbinomial(w.aml = 1, parallel = FALSE, digw = 4, link = "logit", earg = list()
set.seed(1234)
sizevec = rep(100, length=(nn <- 200))
mydat = data.frame(x = sort(runif(nn)))
-mydat = transform(mydat, prob=logit(-0+2.5*x+x^2, inverse=TRUE))
-mydat = transform(mydat, y = rbinom(nn, size=sizevec, prob=prob))
-mydat = transform(mydat, y = y / sizevec) # Convert to proportions
-(fit = vgam(y ~ s(x, df=3), fam=amlbinomial(w=c(0.01,0.2,1,5,60)),
- data=mydat, weight=sizevec, trace=TRUE))
+mydat = transform(mydat, prob = logit(-0+2.5*x+x^2, inverse = TRUE))
+mydat = transform(mydat, y = rbinom(nn, size = sizevec, prob = prob))
+(fit = vgam(cbind(y, sizevec - y) ~ s(x, df = 3),
+ amlbinomial(w = c(0.01, 0.2, 1, 5, 60)),
+ mydat, trace = TRUE))
fit@extra
\dontrun{
@@ -128,17 +129,18 @@ par(mfrow=c(1,2))
with(mydat, plot(x, jitter(y), col="blue", las=1, main=
 paste(paste(round(fit@extra$percentile, dig=1), collapse=", "),
"percentile-expectile curves")))
-with(mydat, matlines(x, fitted(fit), lwd=2, col="blue", lty=1))
+with(mydat, matlines(x, 100 * fitted(fit), lwd=2, col="blue", lty=1))
# Compare the fitted expectiles with the quantiles
with(mydat, plot(x, jitter(y), col="blue", las=1, main=
 paste(paste(round(fit@extra$percentile, dig=1), collapse=", "),
"percentile curves are red")))
-with(mydat, matlines(x, fitted(fit), lwd=2, col="blue", lty=1))
+with(mydat, matlines(x, 100 * fitted(fit), lwd=2, col="blue", lty=1))
for(ii in fit@extra$percentile)
- with(mydat, matlines(x, qbinom(p=ii/100, size=sizevec, prob=prob)/sizevec,
+ with(mydat, matlines(x, 100 *
+ qbinom(p=ii/100, size=sizevec, prob=prob) / sizevec,
col="red", lwd=2, lty=1))
}
}
diff --git a/man/auuc.Rd b/man/auuc.Rd
index 9af6923..4aa502b 100644
--- a/man/auuc.Rd
+++ b/man/auuc.Rd
@@ -18,10 +18,10 @@ at the University of Auckland in 1990.
}
}
\details{
-Each student is cross-classified by their
-colleges (Science and Engineering have been combined) and
-the socio-economic status (SES) of their fathers (1 = highest,
-down to 4 = lowest).
+Each student is cross-classified by their colleges (Science
+and Engineering have been combined) and the socio-economic
+status (SES) of their fathers
+(1 = highest, down to 4 = lowest).
}
\source{
@@ -35,6 +35,6 @@ down to 4 = lowest).
}
\examples{
round(fitted(grc(auuc)))
-round(fitted(grc(auuc, Rank=2)))
+round(fitted(grc(auuc, Rank = 2)))
}
\keyword{datasets}
diff --git a/man/betabin.ab.Rd b/man/betabin.ab.Rd
index ed51062..096d30d 100644
--- a/man/betabin.ab.Rd
+++ b/man/betabin.ab.Rd
@@ -68,6 +68,7 @@ betabin.ab(lshape12 = "loge", earg = list(), i1 = 1, i2 = NULL,
and the prior weights \eqn{N} are attached separately on the
object in a slot.
+
The probability function is
\deqn{P(T=t) = {N \choose t} \frac{B(\alpha+t, \beta+N-t)}
{B(\alpha, \beta)}}{%
@@ -75,6 +76,7 @@ betabin.ab(lshape12 = "loge", earg = list(), i1 = 1, i2 = NULL,
where \eqn{t=0,1,\ldots,N}, and \eqn{B} is the beta function
with shape parameters \eqn{\alpha}{alpha} and \eqn{\beta}{beta}.
Recall \eqn{Y = T/N} is the real response being modelled.
+
The default model is \eqn{\eta_1 = \log(\alpha)}{eta1 = log(alpha)}
and \eqn{\eta_2 = \log(\beta)}{eta2 = log(beta)} because both
@@ -90,6 +92,7 @@ betabin.ab(lshape12 = "loge", earg = list(), i1 = 1, i2 = NULL,
positive value of \eqn{\rho}{rho}. It is known as the
\emph{over-dispersion parameter}.
+
This family function uses Fisher scoring. The two diagonal
elements of the second-order expected
derivatives with respect to \eqn{\alpha}{alpha} and
@@ -97,17 +100,20 @@ betabin.ab(lshape12 = "loge", earg = list(), i1 = 1, i2 = NULL,
fail for large \eqn{\alpha}{alpha}, \eqn{\beta}{beta},
\eqn{N} or else take a long time.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}.
+
Suppose \code{fit} is a fitted beta-binomial model. Then
\code{fit@y} contains the sample proportions \eqn{y},
\code{fitted(fit)} returns estimates of \eqn{E(Y)}, and
\code{weights(fit, type="prior")} returns the number
of trials \eqn{N}.
+
}
\references{
Moore, D. F. and Tsiatis, A. (1991)
@@ -116,12 +122,15 @@ betabin.ab(lshape12 = "loge", earg = list(), i1 = 1, i2 = NULL,
\emph{Biometrics},
\bold{47}, 383--401.
+
Prentice, R. L. (1986)
Binary regression using an extended beta-binomial distribution,
with discussion of correlation induced by
covariate measurement errors.
\emph{Journal of the American Statistical Association},
\bold{81}, 321--327.
+
+
}
\author{ T. W. Yee }
@@ -133,6 +142,7 @@ betabin.ab(lshape12 = "loge", earg = list(), i1 = 1, i2 = NULL,
Cases where \eqn{N=1} can be omitted via the
\code{subset} argument of \code{\link{vglm}}.
+
Although the two linear/additive predictors given
above are in terms of \eqn{\alpha}{alpha} and \eqn{\beta}{beta},
basic algebra shows that the default amounts to
@@ -142,11 +152,13 @@ betabin.ab(lshape12 = "loge", earg = list(), i1 = 1, i2 = NULL,
That is, \eqn{logit(p) = \eta_1 - \eta_2}{logit(p) = eta1 - eta2}.
This is illustrated in one of the examples below.
+
The \emph{extended} beta-binomial distribution of Prentice (1986)
is currently not implemented in the \pkg{VGAM} package as it has
range-restrictions for the correlation parameter that are currently
too difficult to handle in this package.
+
}
\section{Warning }{
This family function is prone to numerical difficulties
@@ -155,9 +167,11 @@ betabin.ab(lshape12 = "loge", earg = list(), i1 = 1, i2 = NULL,
If problems occur try setting \code{i1} to be some other
positive value, using \code{i2} and/or setting \code{zero=2}.
+
This family function may be renamed in the future.
See the warnings in \code{\link{betabinomial}}.
+
}
\seealso{
\code{\link{betabinomial}},
@@ -195,18 +209,16 @@ all.equal(c(fitted(fit)),
# Example 3, which is more complicated
lirat = transform(lirat, fgrp = factor(grp))
summary(lirat) # Only 5 litters in group 3
-fit2 = vglm(cbind(R,N-R) ~ fgrp + hb, betabin.ab(zero=2),
- data=lirat, trace=TRUE, subset=N>1)
+fit2 = vglm(cbind(R,N-R) ~ fgrp + hb, betabin.ab(zero = 2),
+ data = lirat, trace = TRUE, subset = N>1)
coef(fit2, matrix=TRUE)
Coef(fit2)
coef(fit2, matrix=TRUE)[,1] - coef(fit2, matrix=TRUE)[,2] # logit(p)
-\dontrun{
-with(lirat, plot(hb[N>1], fit2@misc$rho,
- xlab="Hemoglobin", ylab="Estimated rho",
- pch=as.character(grp[N>1]), col=grp[N>1]))
+\dontrun{ with(lirat, plot(hb[N>1], fit2@misc$rho,
+ xlab = "Hemoglobin", ylab="Estimated rho",
+ pch = as.character(grp[N>1]), col = grp[N>1]))
}
-\dontrun{
-# cf. Figure 3 of Moore and Tsiatis (1991)
+\dontrun{ # cf. Figure 3 of Moore and Tsiatis (1991)
with(lirat, plot(hb, R/N, pch=as.character(grp), col=grp, las=1,
xlab="Hemoglobin level", ylab="Proportion Dead",
main="Fitted values (lines)"))
@@ -216,9 +228,7 @@ for(gp in 1:4) {
xx = with(smalldf, hb[grp==gp])
yy = with(smalldf, fitted(fit2)[grp==gp])
ooo = order(xx)
- lines(xx[ooo], yy[ooo], col=gp)
-}
-}
+ lines(xx[ooo], yy[ooo], col=gp) } }
}
\keyword{models}
\keyword{regression}
diff --git a/man/betabinomial.Rd b/man/betabinomial.Rd
index b2ac5f9..37a88d2 100644
--- a/man/betabinomial.Rd
+++ b/man/betabinomial.Rd
@@ -77,6 +77,7 @@ betabinomial(lmu="logit", lrho="logit", emu=list(), erho=list(), irho=NULL,
and the prior weights \eqn{N} are attached separately on the
object in a slot.
+
The probability function is
\deqn{P(T=t) = {N \choose t} \frac{B(\alpha+t, \beta+N-t)}
{B(\alpha, \beta)}}{%
@@ -86,6 +87,7 @@ betabinomial(lmu="logit", lrho="logit", emu=list(), erho=list(), irho=NULL,
with shape parameters \eqn{\alpha}{alpha} and \eqn{\beta}{beta}.
Recall \eqn{Y = T/N} is the real response being modelled.
+
The default model is \eqn{\eta_1 = logit(\mu)}{eta1 =logit(mu)}
and \eqn{\eta_2 = logit(\rho)}{eta2 = logit(rho)} because both
parameters lie between 0 and 1.
@@ -100,6 +102,7 @@ betabinomial(lmu="logit", lrho="logit", emu=list(), erho=list(), irho=NULL,
positive value of \eqn{\rho}{rho}. It is known as the
\emph{over-dispersion parameter}.
+
This family function uses Fisher scoring.
Elements of the second-order expected
derivatives with respect to \eqn{\alpha}{alpha} and
@@ -107,11 +110,13 @@ betabinomial(lmu="logit", lrho="logit", emu=list(), erho=list(), irho=NULL,
fail for large \eqn{\alpha}{alpha}, \eqn{\beta}{beta},
\eqn{N} or else take a long time.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}.
+
Suppose \code{fit} is a fitted beta-binomial model. Then
\code{fit@y} contains the sample proportions \eqn{y},
\code{fitted(fit)} returns estimates of \eqn{E(Y)}, and
@@ -126,6 +131,7 @@ betabinomial(lmu="logit", lrho="logit", emu=list(), erho=list(), irho=NULL,
\emph{Biometrics},
\bold{47}, 383--401.
+
Prentice, R. L. (1986)
Binary regression using an extended beta-binomial distribution,
with discussion of correlation induced by
@@ -133,6 +139,7 @@ betabinomial(lmu="logit", lrho="logit", emu=list(), erho=list(), irho=NULL,
\emph{Journal of the American Statistical Association},
\bold{81}, 321--327.
+
}
\author{ T. W. Yee }
@@ -144,18 +151,21 @@ betabinomial(lmu="logit", lrho="logit", emu=list(), erho=list(), irho=NULL,
Cases where \eqn{N=1} can be omitted via the
\code{subset} argument of \code{\link{vglm}}.
+
The \emph{extended} beta-binomial distribution of Prentice (1986)
is currently not implemented in the \pkg{VGAM} package as it has
range-restrictions for the correlation parameter that are currently
too difficult to handle in this package.
However, try \code{lrho="rhobit"}.
+
}
\section{Warning }{
If the estimated rho parameter is close to zero then it pays to try
\code{lrho="rhobit"}. One day this may become the default link function.
+
This family function is prone to numerical difficulties
due to the expected information matrices not being positive-definite
or ill-conditioned over some regions of the parameter space.
@@ -173,40 +183,39 @@ betabinomial(lmu="logit", lrho="logit", emu=list(), erho=list(), irho=NULL,
\code{\link{dirmultinomial}},
\code{\link{lirat}}.
+
}
\examples{
# Example 1
-betabindat = data.frame(N = 10, mu = 0.5, rho = 0.8)
-betabindat = transform(betabindat,
- y = rbetabin(n=100, size=N, prob=mu, rho=rho))
-fit = vglm(cbind(y,N-y) ~ 1, betabinomial, betabindat, trace=TRUE)
+bbdat = data.frame(N = 10, mu = 0.5, rho = 0.8)
+bbdat = transform(bbdat,
+ y = rbetabin(n=100, size=N, prob=mu, rho=rho))
+fit = vglm(cbind(y,N-y) ~ 1, betabinomial, bbdat, trace=TRUE)
coef(fit, matrix=TRUE)
Coef(fit)
head(cbind(fit@y, weights(fit, type="prior")))
# Example 2
-fit = vglm(cbind(R,N-R)~1, betabinomial, lirat, trace=TRUE, subset=N>1)
-coef(fit, matrix=TRUE)
+fit = vglm(cbind(R, N-R) ~ 1, betabinomial, lirat,
+ trace = TRUE, subset = N > 1)
+coef(fit, matrix = TRUE)
Coef(fit)
t(fitted(fit))
t(fit@y)
-t(weights(fit, type="prior"))
+t(weights(fit, type = "prior"))
# Example 3, which is more complicated
lirat = transform(lirat, fgrp = factor(grp))
summary(lirat) # Only 5 litters in group 3
-fit2 = vglm(cbind(R,N-R) ~ fgrp + hb, betabinomial(zero=2),
- data=lirat, trace=TRUE, subset=N>1)
+fit2 = vglm(cbind(R, N-R) ~ fgrp + hb, betabinomial(zero = 2),
+ data = lirat, trace = TRUE, subset = N > 1)
coef(fit2, matrix=TRUE)
-\dontrun{
-with(lirat, plot(hb[N>1], fit2@misc$rho,
+\dontrun{ with(lirat, plot(hb[N>1], fit2@misc$rho,
xlab="Hemoglobin", ylab="Estimated rho",
- pch=as.character(grp[N>1]), col=grp[N>1]))
-}
-\dontrun{
-# cf. Figure 3 of Moore and Tsiatis (1991)
+ pch=as.character(grp[N>1]), col=grp[N>1])) }
+\dontrun{ # cf. Figure 3 of Moore and Tsiatis (1991)
with(lirat, plot(hb, R/N, pch=as.character(grp), col=grp, las=1,
xlab="Hemoglobin level", ylab="Proportion Dead",
main="Fitted values (lines)"))
@@ -215,9 +224,7 @@ for(gp in 1:4) {
xx = with(smalldf, hb[grp==gp])
yy = with(smalldf, fitted(fit2)[grp==gp])
ooo = order(xx)
- lines(xx[ooo], yy[ooo], col=gp)
-}
-}
+ lines(xx[ooo], yy[ooo], col=gp) } }
}
\keyword{models}
\keyword{regression}
diff --git a/man/binom2.or.Rd b/man/binom2.or.Rd
index 29b9c3e..78157ff 100644
--- a/man/binom2.or.Rd
+++ b/man/binom2.or.Rd
@@ -88,9 +88,10 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
is unity, or equivalently, the log odds ratio is zero. Fisher scoring
is implemented.
+
The default models \eqn{\eta_3}{eta3} as a single parameter only,
i.e., an intercept-only model, but this can be circumvented by setting
- \code{zero=NULL} in order to model the odds ratio as a function of all the
+ \code{zero = NULL} in order to model the odds ratio as a function of all the
explanatory variables.
The function \code{binom2.or} can handle other probability link
functions such as \code{\link{probit}},
@@ -101,61 +102,72 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
bivariate normal distribution, but the bivariate probit model is less
interpretable and flexible.
+
The \code{exchangeable} argument should be used when the error structure
is exchangeable, e.g., with eyes or ears data.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
When fitted, the \code{fitted.values} slot of the object contains the
four joint probabilities, labelled as \eqn{(Y_1,Y_2)}{(Y1,Y2)} = (0,0),
(0,1), (1,0), (1,1), respectively.
These estimated probabilities should be extracted with the \code{fitted}
generic function.
+
}
\references{
McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
le Cessie, S. and van Houwelingen, J. C. (1994)
Logistic regression for correlated binary data.
\emph{Applied Statistics},
\bold{43}, 95--108.
+
Palmgren, J. (1989)
\emph{Regression Models for Bivariate Binary Responses}.
Technical Report no. 101, Department of Biostatistics,
University of Washington, Seattle.
+
Yee, T. W. and Dirnbock, T. (2009)
Models for analysing species' presence/absence data
at two time points.
Journal of Theoretical Biology, \bold{259}. In press.
+
Documentation accompanying the \pkg{VGAM} package at
\url{http://www.stat.auckland.ac.nz/~yee}
contains further information and examples.
+
}
\author{ Thomas W. Yee }
\note{
The response should be either a 4-column matrix of counts
(whose columns correspond to \eqn{(Y_1,Y_2)}{(Y1,Y2)} = (0,0), (0,1), (1,0),
(1,1) respectively), or a two-column matrix where each column has two
- distinct values.
+ distinct values, or a factor with four levels.
The function \code{\link{rbinom2.or}} may be used to generate such data.
Successful convergence requires at least one case of each of the four
possible outcomes.
+
By default, a constant odds ratio is fitted because \code{zero=3}.
Set \code{zero=NULL} if you want the odds ratio to be modelled as a
function of the explanatory variables; however, numerical problems
are more likely to occur.
+
The argument \code{lmu}, which is actually redundant, is used for
convenience and for upward compatibility: specifying \code{lmu} only
means the link function will be applied to \code{lmu1} and \code{lmu2}.
@@ -165,6 +177,7 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
to specify \code{exchangeable=TRUE} and have different link functions
for the two marginal probabilities.
+
Regarding Yee and Dirnbock (2009),
the \code{xij} (see \code{\link{vglm.control}}) argument enables
environmental variables with different values at the two time points
@@ -183,6 +196,7 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
\code{\link{probit}},
\code{\link{cloglog}},
\code{\link{cauchit}}.
+
}
\examples{
# Fit the model in Table 6.7 in McCullagh and Nelder (1989)
diff --git a/man/binom2.rho.Rd b/man/binom2.rho.Rd
index 50efcb4..7273249 100644
--- a/man/binom2.rho.Rd
+++ b/man/binom2.rho.Rd
@@ -1,5 +1,6 @@
\name{binom2.rho}
\alias{binom2.rho}
+\alias{binom2.Rho}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Bivariate Probit Model (Family Function) }
\description{
@@ -9,7 +10,10 @@
}
\usage{
binom2.rho(lrho = "rhobit", erho=list(), imu1 = NULL, imu2 = NULL,
- init.rho = NULL, zero = 3, exchangeable = FALSE, nsimEIM=NULL)
+ irho = NULL, method.init = 1,
+ zero = 3, exchangeable = FALSE, nsimEIM = NULL)
+binom2.Rho(rho = 0, imu1 = NULL, imu2 = NULL,
+ exchangeable = FALSE, nsimEIM = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -23,7 +27,7 @@ binom2.rho(lrho = "rhobit", erho=list(), imu1 = NULL, imu2 = NULL,
See \code{earg} in \code{\link{Links}} for general information.
}
- \item{init.rho}{
+ \item{irho}{
Optional initial value for \eqn{\rho}{rho}.
If given, this should lie between \eqn{-1} and \eqn{1}.
See below for more comments.
@@ -47,12 +51,18 @@ binom2.rho(lrho = "rhobit", erho=list(), imu1 = NULL, imu2 = NULL,
be equal.
}
- \item{nsimEIM}{
+ \item{method.init, nsimEIM}{
See \code{\link{CommonVGAMffArguments}} for more information.
A value of at least 100 is recommended;
the larger the value the better.
}
+ \item{rho}{
+ Numeric vector.
+ Values are recycled to the needed length,
+ and ought to be in range.
+
+ }
}
\details{
@@ -64,8 +74,28 @@ binom2.rho(lrho = "rhobit", erho=list(), imu1 = NULL, imu2 = NULL,
unit variances). One can think of the joint probabilities being
\eqn{\Phi(\eta_1,\eta_2;\rho)}{Phi(eta1,eta2;rho)} where \eqn{\Phi}{Phi}
is the cumulative distribution function of a standard bivariate normal
- distribution (i.e., \code{\link[stats:Normal]{pnorm}})
- with correlation parameter \eqn{\rho}{rho}.
+ distribution.
+
+
+ Explicitly, the default model is
+ \deqn{probit[P(Y_j=1)] = \eta_j,\ \ \ j=1,2}{%
+ probit[P(Y_j=1)] = eta_j,\ \ \ j=1,2}
+ for the marginals, and
+ \deqn{rhobit[rho] = \eta_3.}{%
+ rhobit[rho] = eta_3.}
+ The joint probability
+ \eqn{P(Y_1=1,Y_2=1)=\Phi(\eta_1,\eta_2;\rho)}{P(Y_1=1,Y_2=1)=Phi(eta1,eta2;rho)},
+ and from these the other three joint probabilities are easily computed.
+ The model is fitted by maximum likelihood estimation since the full
+ likelihood is specified.
+ Fisher scoring is implemented.
+
+
+ The default models \eqn{\eta_3}{eta3} as a single parameter only,
+ i.e., an intercept-only model for rho, but this can be circumvented by setting
+ \code{zero = NULL} in order to model rho as a function of all the
+ explanatory variables.
+
The bivariate probit model should not be confused with a \emph{bivariate
logit model} with a probit link (see \code{\link{binom2.or}}).
@@ -74,16 +104,19 @@ binom2.rho(lrho = "rhobit", erho=list(), imu1 = NULL, imu2 = NULL,
model because the odds ratio is a more natural way of measuring the
association between two binary responses.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
When fitted, the \code{fitted.values} slot of the object contains the
four joint probabilities, labelled as
\eqn{(Y_1,Y_2)}{(Y1,Y2)} = (0,0), (0,1), (1,0), (1,1), respectively.
+
}
\references{
@@ -91,35 +124,48 @@ Ashford, J. R. and Sowden, R. R. (1970)
Multi-variate probit analysis.
\emph{Biometrics}, \bold{26}, 535--546.
-Documentation accompanying the \pkg{VGAM} package at
-\url{http://www.stat.auckland.ac.nz/~yee}
-contains further information and examples.
+
+Freedman, D. A. (2010)
+\emph{Statistical Models and Causal Inference: a Dialogue with
+ the Social Sciences}, Cambridge: Cambridge University Press.
+
}
\author{ Thomas W. Yee }
\note{
- The response should be either a 4-column matrix of counts (whose
- columns correspond to \eqn{(Y_1,Y_2)}{(Y1,Y2)} = (0,0), (0,1), (1,0),
- (1,1) respectively), or a two-column matrix where each column has two
- distinct values.
+ See \code{\link{binom2.or}} about the form of input the response
+ should have.
+
- By default, a constant \eqn{\rho}{rho} is fitted because \code{zero=3}.
- Set \code{zero=NULL} if you want the \eqn{\rho}{rho} parameter to
+ By default, a constant \eqn{\rho}{rho} is fitted because \code{zero = 3}.
+ Set \code{zero = NULL} if you want the \eqn{\rho}{rho} parameter to
be modelled as a function of the explanatory variables. The value
\eqn{\rho}{rho} lies in the interval \eqn{(-1,1)}{(-1,1)}, therefore
a \code{\link{rhobit}} link is default.
+
  Convergence problems can occur.
- If so, assign \code{init.rho} a range of
- values and monitor convergence (e.g., set \code{trace=TRUE}).
+ If so, assign \code{irho} a range of
+ values and monitor convergence (e.g., set \code{trace = TRUE}).
+ Else try \code{method.init}.
Practical experience shows that local solutions can occur,
- and that \code{init.rho} needs to be quite close to the (global)
+ and that \code{irho} needs to be quite close to the (global)
solution.
Also, \code{imu1} and \code{imu2} may be used.
+
+
+ This help file is mainly about \code{binom2.rho()}.
+ \code{binom2.Rho()} fits a bivariate probit model with
+ \emph{known} \eqn{\rho}{rho}.
+ The inputted \code{rho} is saved in the \code{misc} slot of
+ the fitted object, with \code{rho} as the component name.
+
+
}
\seealso{
\code{\link{rbinom2.rho}},
+ \code{\link{rhobit}},
\code{\link{binom2.or}},
\code{\link{loglinb2}},
\code{\link{coalminers}},
@@ -130,9 +176,13 @@ contains further information and examples.
}
\examples{
coalminers = transform(coalminers, Age = (age - 42) / 5)
-fit = vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.rho, coalminers, trace=TRUE)
+fit = vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.rho, coalminers, trace = TRUE)
summary(fit)
-coef(fit, matrix=TRUE)
+coef(fit, matrix = TRUE)
}
\keyword{models}
\keyword{regression}
+
+
+% (i.e., \code{\link[stats:Normal]{pnorm}})
+% with correlation parameter \eqn{\rho}{rho}.
diff --git a/man/binom2.rhoUC.Rd b/man/binom2.rhoUC.Rd
index 451bedb..1074114 100644
--- a/man/binom2.rhoUC.Rd
+++ b/man/binom2.rhoUC.Rd
@@ -92,28 +92,26 @@ dbinom2.rho(mu1,
}
\examples{
# Example 1
-nn = 2000
-(myrho <- rhobit(2, inverse=TRUE))
-ymat = rbinom2.rho(n=nn, mu1=0.8, rho=myrho, exch=TRUE)
-(mytab = table(ymat[,1], ymat[,2], dnn=c("Y1","Y2")))
-fit = vglm(ymat ~ 1, binom2.rho(exch=TRUE))
-coef(fit, matrix=TRUE)
+(myrho <- rhobit(2, inverse = TRUE))
+ymat = rbinom2.rho(nn <- 2000, mu1 = 0.8, rho = myrho, exch = TRUE)
+(mytab = table(ymat[,1], ymat[,2], dnn = c("Y1","Y2")))
+fit = vglm(ymat ~ 1, binom2.rho(exch = TRUE))
+coef(fit, matrix = TRUE)
# Example 2
-x = sort(runif(nn))
-mu1 = probit(-2+4*x, inverse=TRUE)
-mu2 = probit(-1+3*x, inverse=TRUE)
-dmat = dbinom2.rho(mu1=mu1, mu2=mu2, rho=myrho)
-ymat = rbinom2.rho(n=nn, mu1=mu1, mu2=mu2, rho=myrho)
-fit2 = vglm(ymat ~ x, binom2.rho)
-coef(fit2, matrix=TRUE)
-\dontrun{
-matplot(x, dmat, lty=1:4, col=1:4, type="l", main="Joint probabilities",
- ylim=0:1, lwd=2, ylab="Probability")
-legend(x=0.25, y=0.9, lty=1:4, col=1:4, lwd=2,
- legend=c("1 = (y1=0, y2=0)", "2 = (y1=0, y2=1)",
- "3 = (y1=1, y2=0)", "4 = (y1=1, y2=1)"))
-}
+bdata = data.frame(x = sort(runif(nn)))
+bdata = transform(bdata, mu1 = probit(-2+4*x, inverse = TRUE),
+ mu2 = probit(-1+3*x, inverse = TRUE))
+dmat = with(bdata, dbinom2.rho(mu1, mu2, myrho))
+ymat = with(bdata, rbinom2.rho(nn, mu1, mu2, myrho))
+fit2 = vglm(ymat ~ x, binom2.rho, bdata)
+coef(fit2, matrix = TRUE)
+\dontrun{ matplot(with(bdata, x), dmat, lty = 1:4, col = 1:4,
+ type = "l", main = "Joint probabilities",
+ ylim = 0:1, lwd = 2, ylab = "Probability")
+legend(x = 0.25, y = 0.9, lty = 1:4, col = 1:4, lwd = 2,
+ legend = c("1 = (y1=0, y2=0)", "2 = (y1=0, y2=1)",
+ "3 = (y1=1, y2=0)", "4 = (y1=1, y2=1)")) }
}
\keyword{distribution}
diff --git a/man/binomialff.Rd b/man/binomialff.Rd
index 1f9394b..8526951 100644
--- a/man/binomialff.Rd
+++ b/man/binomialff.Rd
@@ -16,13 +16,10 @@ binomialff(link = "logit", earg = list(), dispersion = 1, mv = FALSE,
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link}{
- Link function. See \code{\link{Links}} for more choices.
-
- }
- \item{earg}{
- Extra argument optionally used by the link function.
- See \code{\link{Links}} for more information.
+ \item{link, earg}{
+ Link function and extra argument optionally used by the link function.
+ See \code{\link{Links}} for more choices, and also
+ \code{\link{CommonVGAMffArguments}} for more information.
}
\item{dispersion}{
@@ -66,6 +63,10 @@ binomialff(link = "logit", earg = list(), dispersion = 1, mv = FALSE,
}
}
\details{
+ This function is largely to mimic \code{\link[stats:Binomial]{binomial}},
+ however there are some differences.
+
+
If the dispersion parameter is unknown, then the resulting estimate
is not fully a maximum likelihood estimate (see pp.124--8 of McCullagh
and Nelder, 1989).
@@ -114,17 +115,15 @@ binomialff(link = "logit", earg = list(), dispersion = 1, mv = FALSE,
\note{
If \code{mv} is \code{FALSE} (default) then the response can be of one
- of three formats: a factor (first level taken as success), a vector of
- proportions of success, or a 2-column matrix (first column = successes)
- of counts. The argument \code{weights} in the modelling function can
- also be specified. In particular, for a general vector of proportions,
- you will need to specify \code{weights} because the number of trials
- is needed.
+ of two formats:
+ a factor (first level taken as failure),
+ or a 2-column matrix (first column = successes) of counts.
+ The argument \code{weights} in the modelling function can
+ also be specified as any vector of positive values.
In general, 1 means success and 0 means failure
(to check, see the \code{y} slot of the fitted object).
- To input general positive values into the \code{weights} argument of
- \code{\link{vglm}}/\code{\link{vgam}} one needs to input a 2-column
- response.
+ Note that a general vector of proportions of success is no
+ longer accepted.
The notation \eqn{M} is used to denote the number of linear/additive
@@ -135,7 +134,7 @@ binomialff(link = "logit", earg = list(), dispersion = 1, mv = FALSE,
one format: a matrix of 1's and 0's (1 = success).
- The call \code{binomialff(dispersion=0, ...)} is equivalent to
+ The call \code{binomialff(dispersion = 0, ...)} is equivalent to
\code{quasibinomialff(...)}. The latter was written so that R users
of \code{quasibinomial()} would only need to add a ``\code{ff}''
to the end of the family function name.
@@ -150,8 +149,9 @@ binomialff(link = "logit", earg = list(), dispersion = 1, mv = FALSE,
% \code{"ff"} can be dropped for this family function.
- Fisher scoring is used. This can sometimes fail to converge by oscillating between
- successive iterations (Ridout, 1990). See the example below.
+ Fisher scoring is used. This can sometimes fail to converge by
+ oscillating between successive iterations (Ridout, 1990).
+ See the example below.
}
@@ -177,6 +177,7 @@ binomialff(link = "logit", earg = list(), dispersion = 1, mv = FALSE,
for \emph{each} response is not handled well yet.
Currently, only a single known dispersion parameter is handled well.
+
The maximum likelihood estimate will not exist if the data is
\emph{completely separable} or \emph{quasi-completely separable}.
See Chapter 10 of Altman et al. (2004) for more details,
@@ -184,18 +185,19 @@ binomialff(link = "logit", earg = list(), dispersion = 1, mv = FALSE,
Yet to do: add a \code{sepcheck=TRUE}, say, argument to detect this
problem and give an appropriate warning.
+
}
\examples{
quasibinomialff()
-quasibinomialff(link="probit")
+quasibinomialff(link = "probit")
-fit = vgam(agaaus ~ poly(altitude, 2), binomialff(link=cloglog), hunua)
+fit = vgam(agaaus ~ poly(altitude, 2), binomialff(link = cloglog), hunua)
\dontrun{
with(hunua, plot(altitude, agaaus, col="blue", ylab="P(agaaus=1)",
- main="Presence/absence of Agathis australis", las=1))
+ main = "Presence/absence of Agathis australis", las = 1))
ooo = with(hunua, order(altitude))
-with(hunua, lines(altitude[ooo], fitted(fit)[ooo], col="red", lwd=2)) }
+with(hunua, lines(altitude[ooo], fitted(fit)[ooo], col="red", lwd = 2)) }
# Shows that Fisher scoring can sometime fail. See Ridout (1990).
@@ -203,14 +205,24 @@ ridout = data.frame(v = c(1000, 100, 10), r = c(4, 3, 3), n = c(5, 5, 5))
(ridout = transform(ridout, logv = log(v)))
# The iterations oscillates between two local solutions:
glm.fail = glm(r/n ~ offset(logv) + 1, weight=n,
- binomial(link=cloglog), ridout, trace=TRUE)
+ binomial(link = cloglog), ridout, trace = TRUE)
coef(glm.fail)
# vglm()'s half-stepping ensures the MLE of -5.4007 is obtained:
-vglm.ok = vglm(r/n ~ offset(logv) + 1, weight=n,
- binomialff(link=cloglog), ridout, trace=TRUE)
+vglm.ok = vglm(cbind(r, n-r) ~ offset(logv) + 1,
+ binomialff(link = cloglog), ridout, trace = TRUE)
coef(vglm.ok)
}
\keyword{models}
\keyword{regression}
+% a vector of proportions of success,
+% In particular, for a general vector of proportions,
+% you will need to specify \code{weights} because the number of trials
+% is needed.
+% To input general positive values into the \code{weights} argument of
+% \code{\link{vglm}}/\code{\link{vgam}} one needs to input a 2-column
+% response.
+
+
+
diff --git a/man/bisa.Rd b/man/bisa.Rd
index c6b15b1..7d4aaa5 100644
--- a/man/bisa.Rd
+++ b/man/bisa.Rd
@@ -63,47 +63,56 @@ bisa(lshape = "loge", lscale = "loge",
\eqn{b(1 + a^2/2)}{b*(1 + a*a/2)}.
and the variance is
\eqn{a^2 b^2 (1 + \frac{5}{4}a^2)}{a^2 b^2 (1 + (5/4)*a^2)}.
- By default, \eqn{\eta_1=\log(a)}{eta1=log(a)} and
- \eqn{\eta_2=\log(b)}{eta2=log(b)} for this family function.
+ By default, \eqn{\eta_1 = \log(a)}{eta1 = log(a)} and
+ \eqn{\eta_2 = \log(b)}{eta2 = log(b)} for this family function.
+
Note that \eqn{a} and \eqn{b} are orthogonal,
i.e., the Fisher information matrix is diagonal.
This family function implements Fisher scoring, and
it is unnecessary to compute any integrals numerically.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
+
Lemonte, A. J. and Cribari-Neto, F. and Vasconcellos, K. L. P. (2007)
Improved statistical inference for the two-parameter
Birnbaum-Saunders distribution.
\emph{Computational Statistics \& Data Analysis}, \bold{51}, 4656--4681.
+
Birnbaum, Z. W. and Saunders, S. C. (1969)
A new family of life distributions.
\emph{Journal of Applied Probability}, \bold{6}, 319--327.
+
Birnbaum, Z. W. and Saunders, S. C. (1969)
Estimation for a family of life distributions with applications to fatigue.
\emph{Journal of Applied Probability}, \bold{6}, 328--347.
+
Engelhardt, M. and Bain, L. J. and Wright, F. T. (1981)
Inferences on the parameters of the Birnbaum-Saunders fatigue
life distribution based on maximum likelihood estimation.
\emph{Technometrics}, \bold{23}, 251--256.
+
Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1995)
\emph{Continuous Univariate Distributions},
2nd edition,
Volume 2,
New York: Wiley.
+
}
\author{ T. W. Yee }
%\note{
@@ -118,24 +127,22 @@ New York: Wiley.
}
\examples{
-nn <- 1000
-bisa1dat = data.frame(x = runif(nn))
-bisa1dat = transform(bisa1dat, shape=exp(-0.5+x), scale=exp(1.5))
-bisa1dat = transform(bisa1dat, y = rbisa(nn, shape, scale))
-fit = vglm(y ~ x, bisa(zero=2), bisa1dat, trace=TRUE)
-coef(fit, matrix=TRUE)
+bdat1 <- data.frame(x2 = runif(nn <- 1000))
+bdat1 <- transform(bdat1, shape = exp(-0.5 + x2), scale = exp(1.5))
+bdat1 <- transform(bdat1, y = rbisa(nn, shape, scale))
+fit1 <- vglm(y ~ x2, bisa(zero = 2), bdat1, trace = TRUE)
+coef(fit1, matrix = TRUE)
\dontrun{
-bisa2dat = data.frame(shape=exp(-0.5), scale=exp(0.5))
-bisa2dat = transform(bisa2dat, y = rbisa(nn, shape, scale))
-fit = vglm(y ~ 1, bisa, bisa2dat, trace=TRUE)
-with(bisa2dat, hist(y, prob=TRUE, ylim=c(0,0.5), col="lightblue"))
-coef(fit, matrix=TRUE)
-with(bisa2dat, mean(y))
+bdat2 <- data.frame(shape = exp(-0.5), scale = exp(0.5))
+bdat2 <- transform(bdat2, y = rbisa(nn, shape, scale))
+fit <- vglm(y ~ 1, bisa, bdat2, trace = TRUE)
+with(bdat2, hist(y, prob = TRUE, ylim = c(0, 0.5), col = "lightblue"))
+coef(fit, matrix = TRUE)
+with(bdat2, mean(y))
head(fitted(fit))
-x = with(bisa2dat, seq(0, max(y), len=200))
-with(bisa2dat, lines(x, dbisa(x, Coef(fit)[1], Coef(fit)[2]), col="red", lwd=2))
-}
+x <- with(bdat2, seq(0, max(y), len = 200))
+lines(dbisa(x, Coef(fit)[1], Coef(fit)[2]) ~ x, bdat2, col = "orange", lwd = 2) }
}
\keyword{models}
\keyword{regression}
diff --git a/man/cauchit.Rd b/man/cauchit.Rd
index 1763547..8688166 100644
--- a/man/cauchit.Rd
+++ b/man/cauchit.Rd
@@ -110,6 +110,7 @@ cauchit(p) # Has no NAs
\dontrun{
par(mfrow=c(2,2))
y = seq(-4, 4, length=100)
+p = seq(0.01, 0.99, by=0.01)
for(d in 0:1) {
matplot(p, cbind(logit(p, deriv=d), probit(p, deriv=d)),
diff --git a/man/cauchy.Rd b/man/cauchy.Rd
index e3e0f44..dbe95c0 100644
--- a/man/cauchy.Rd
+++ b/man/cauchy.Rd
@@ -69,6 +69,7 @@ cauchy1(scale.arg=1, llocation="identity",
Fisher scoring is the default but if \code{nsimEIM} is specified then
Fisher scoring with simulation is used.
+
If the scale parameter is known (\code{cauchy1}) then there
may be multiple local maximum likelihood solutions for the location
parameter. However, if both location and scale parameters are to
@@ -76,12 +77,14 @@ cauchy1(scale.arg=1, llocation="identity",
likelihood solution provided \eqn{n > 2} and less than half the data
are located at any one point.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\section{Warning }{
It is well-known that the Cauchy distribution may have local
@@ -97,23 +100,27 @@ Evans, M., Hastings, N. and Peacock, B. (2000)
\emph{Statistical Distributions},
New York: Wiley-Interscience, Third edition.
+
Barnett, V. D. (1966)
Evaluation of the maximum-likehood estimator where the
likelihood equation has multiple roots.
\emph{Biometrika},
\bold{53}, 151--165.
+
Copas, J. B. (1975)
On the unimodality of the likelihood for the Cauchy distribution.
\emph{Biometrika},
\bold{62}, 701--704.
+
Efron, B. and Hinkley, D. V. (1978)
Assessing the accuracy of the maximum likelihood estimator:
Observed versus expected Fisher information.
\emph{Biometrika},
\bold{65}, 457--481.
+
}
\author{ T. W. Yee }
\note{
@@ -124,31 +131,32 @@ Observed versus expected Fisher information.
of initial values via the \code{ilocation} and/or \code{iscale}
and/or \code{method.init} arguments.
+
}
\seealso{
\code{\link[stats:Cauchy]{Cauchy}},
- \code{\link{cauchit}}.
+ \code{\link{cauchit}},
+ \code{\link{studentt}}.
}
\examples{
# Both location and scale parameters unknown
-nn <- 1000
-cdata1 = data.frame(x = runif(nn))
-cdata1 = transform(cdata1, loc=exp(1+0.5*x), scale=exp(1))
-cdata1 = transform(cdata1, y = rcauchy(nn, loc, scale))
-fit = vglm(y ~ x, cauchy(lloc="loge"), cdata1, trace=TRUE)
-coef(fit, matrix=TRUE)
+cdata1 <- data.frame(x = runif(nn <- 1000))
+cdata1 <- transform(cdata1, loc = exp(1+0.5*x), scale = exp(1))
+cdata1 <- transform(cdata1, y = rcauchy(nn, loc, scale))
+fit <- vglm(y ~ x, cauchy(lloc="loge"), cdata1, trace = TRUE)
+coef(fit, matrix = TRUE)
head(fitted(fit)) # Location estimates
summary(fit)
# Location parameter unknown
set.seed(123)
-cdata2 = data.frame(x = runif(nn <- 500))
-cdata2 = transform(cdata2, loc=1+0.5*x, scale=0.4)
-cdata2 = transform(cdata2, y = rcauchy(nn, loc, scale))
-fit = vglm(y ~ x, cauchy1(scale=0.4), cdata2, trace=TRUE, crit="c")
-coef(fit, matrix=TRUE)
+cdata2 <- data.frame(x = runif(nn <- 500))
+cdata2 <- transform(cdata2, loc = 1+0.5*x, scale = 0.4)
+cdata2 <- transform(cdata2, y = rcauchy(nn, loc, scale))
+fit <- vglm(y ~ x, cauchy1(scale = 0.4), cdata2, trace = TRUE, crit = "c")
+coef(fit, matrix = TRUE)
}
\keyword{models}
\keyword{regression}
diff --git a/man/chisq.Rd b/man/chisq.Rd
index dfebed4..7604823 100644
--- a/man/chisq.Rd
+++ b/man/chisq.Rd
@@ -23,8 +23,8 @@ chisq(link = "loge", earg=list())
}
}
\details{
- The degrees of freedom is treated as a parameter to be estimated.
- It is treated as real and not integer.
+ The degrees of freedom is treated as a parameter to be estimated,
+ and as real (not integer).
Being positive, a log link is used by default.
Fisher scoring is used.
@@ -43,6 +43,7 @@ New York: Wiley-Interscience, Third edition.
\author{ T. W. Yee }
\note{
+ Multiple responses are permitted.
There may be convergence problems if the degrees of freedom
is very large.
@@ -53,10 +54,12 @@ New York: Wiley-Interscience, Third edition.
\code{\link{normal1}}.
}
\examples{
-cdata = data.frame(x = runif(nn <- 1000))
-cdata = transform(cdata, y = rchisq(nn, df=exp(2 - x)))
-fit = vglm(y ~ x, chisq, cdata, trace=TRUE)
-coef(fit, matrix=TRUE)
+cdata <- data.frame(x2 = runif(nn <- 1000))
+cdata <- transform(cdata,
+ y1 = rchisq(nn, df = exp(1 - 1 * x2)),
+ y2 = rchisq(nn, df = exp(2 - 2 * x2)))
+fit <- vglm(cbind(y1, y2) ~ x2, chisq, cdata, trace = TRUE)
+coef(fit, matrix = TRUE)
}
\keyword{models}
\keyword{regression}
diff --git a/man/cloglog.Rd b/man/cloglog.Rd
index 1414308..c068323 100644
--- a/man/cloglog.Rd
+++ b/man/cloglog.Rd
@@ -116,6 +116,7 @@ cloglog(p) # Has NAs
cloglog(p, earg=list(bvalue= .Machine$double.eps)) # Has no NAs
\dontrun{
+p = seq(0.01, 0.99, by=0.01)
plot(p, logit(p), type="l", col="limegreen", ylab="transformation",
lwd=2, las=1, main="Some probability link functions")
lines(p, probit(p), col="purple", lwd=2)
diff --git a/man/cnormal1.Rd b/man/cnormal1.Rd
index 11ac633..1cff80e 100644
--- a/man/cnormal1.Rd
+++ b/man/cnormal1.Rd
@@ -7,7 +7,7 @@
left and right censoring.
}
\usage{
-cnormal1(lmu="identity", lsd="loge", imethod=1, zero=2)
+cnormal1(lmu = "identity", lsd = "loge", method.init = 1, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -19,7 +19,7 @@ cnormal1(lmu="identity", lsd="loge", imethod=1, zero=2)
is the default.
}
- \item{imethod}{
+ \item{method.init}{
Initialization method. Either 1 or 2, this specifies
two methods for obtaining initial values for the parameters.
@@ -28,7 +28,7 @@ cnormal1(lmu="identity", lsd="loge", imethod=1, zero=2)
An integer vector, containing the value 1 or 2. If so,
the mean or standard deviation respectively are modelled
as an intercept only.
- Setting \code{zero=NULL} means both linear/additive predictors
+ Setting \code{zero = NULL} means both linear/additive predictors
are modelled as functions of the explanatory variables.
}
@@ -70,11 +70,12 @@ cnormal1(lmu="identity", lsd="loge", imethod=1, zero=2)
\code{\link{tobit}},
\code{\link{normal1}},
\code{\link{dcnormal1}}.
+
}
\examples{
cdata = data.frame(x = runif(nn <- 1000)) # ystar are true values
-cdata = transform(cdata, ystar = rnorm(nn, mean=100 + 15 * x, sd=exp(3)))
+cdata = transform(cdata, ystar = rnorm(nn, m = 100 + 15 * x, sd = exp(3)))
\dontrun{hist(ystar)}
L = runif(nn, 80, 90) # Lower censoring points
U = runif(nn, 130, 140) # Upper censoring points
@@ -83,8 +84,8 @@ cdata = transform(cdata, y = pmin(U, y)) # Right censored
\dontrun{hist(y)}
extra = list(leftcensored = with(cdata, ystar < L),
rightcensored = with(cdata, ystar > U))
-fit = vglm(y ~ x, cnormal1(zero=2), cdata, trace=TRUE, extra=extra)
-coef(fit, matrix=TRUE)
+fit = vglm(y ~ x, cnormal1(zero = 2), cdata, trace = TRUE, extra = extra)
+coef(fit, matrix = TRUE)
Coef(fit)
names(fit at extra)
}
diff --git a/man/constraints.Rd b/man/constraints.Rd
index 59ccd34..f1f4da6 100644
--- a/man/constraints.Rd
+++ b/man/constraints.Rd
@@ -3,8 +3,8 @@
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Constraint Matrices }
\description{
- Returns the \emph{constraint matrices} of objects in the
- \pkg{VGAM} package.
+ Extractor function for the \emph{constraint matrices} of objects
+ in the \pkg{VGAM} package.
}
\usage{
@@ -15,8 +15,13 @@ constraints(object, ...)
\item{object}{
Some \pkg{VGAM} object, for example, having
class \code{\link{vglmff-class}}.
+
+ }
+ \item{\dots}{
+ Other possible arguments such as \code{type}.
+
}
- \item{\dots}{ Other possible arguments. }
+
}
\details{
Constraint matrices describe the relationship of
@@ -27,14 +32,17 @@ constraints(object, ...)
matrix is the identity matrix) or all the same (constraint
matrix has one column and has unit values).
+
VGLMs and VGAMs have constraint matrices which are \emph{known}.
The class of RR-VGLMs have constraint matrices which are
\emph{unknown} and are to be estimated.
+
+
}
\value{
This extractor function returns a list comprising of
- constraint matrices---one for each column of the
- LM model matrix, and in that order.
+ constraint matrices---usually one for each column of the
+ VLM model matrix, and in that order.
The list is labelled with the variable names.
Each constraint matrix has \eqn{M} rows, where
\eqn{M} is the number of linear/additive predictors,
@@ -42,25 +50,39 @@ constraints(object, ...)
A model with no constraints at all has an order
\eqn{M} identity matrix as each variable's
constraint matrix.
+
+
+ For \code{\link{vglm}} and \code{\link{vgam}} objects,
+ feeding in the \code{"lm"}-type constraint matrices back
+ into the same model should work and give an identical model.
+ The default are the \code{"vlm"}-type constraint matrices.
+ See the \code{constraints} argument of \code{\link{vglm}},
+ and the example below.
+
+
}
\author{T. W. Yee }
\note{
- In all \pkg{VGAM} family functions \code{zero=NULL} means
+ In all \pkg{VGAM} family functions \code{zero = NULL} means
none of the linear/additive predictors are modelled as
intercepts-only.
Other arguments found in certain \pkg{VGAM} family functions
which affect constraint matrices include
\code{parallel} and \code{exchangeable}.
+
The \code{constraints} argument in \code{\link{vglm}}
and \code{\link{vgam}} allows constraint matrices to
- be inputted. If so, then \code{constraints(fit)} should
- return the same as the input.
+ be inputted. If so, then \code{constraints(fit, type = "lm")} can
+ be fed into the \code{constraints} argument of the same object
+ to get the same model.
+
The \code{xij} argument does not affect constraint matrices; rather,
it allows each row of the constraint matrix to be multiplied by a
specified vector.
+
}
\references{
@@ -69,13 +91,17 @@ Vector generalized additive models.
\emph{Journal of the Royal Statistical Society, Series B, Methodological},
\bold{58}, 481--493.
+
Yee, T. W. and Hastie, T. J. (2003)
Reduced-rank vector generalized linear models.
\emph{Statistical Modelling},
\bold{3}, 15--41.
+
\url{http://www.stat.auckland.ac.nz/~yee} contains additional
information.
+
+
}
@@ -83,26 +109,35 @@ information.
VGLMs are described in \code{\link{vglm-class}};
RR-VGLMs are described in \code{\link{rrvglm-class}}.
+
Arguments such as \code{zero} and \code{parallel}
found in many \pkg{VGAM}
family functions are a way of creating/modifying constraint
matrices conveniently, e.g., see \code{\link{zero}}.
See \code{\link{CommonVGAMffArguments}} for more information.
+
}
\examples{
-# Fit the proportional odds model
-pneumo = transform(pneumo, let=log(exposure.time))
-(fit = vglm(cbind(normal, mild, severe) ~ let,
- cumulative(parallel=TRUE, reverse=TRUE), pneumo))
-coef(fit, matrix=TRUE)
-constraints(fit) # Parallel assumption results in this
+# Fit the proportional odds model:
+pneumo <- transform(pneumo, let = log(exposure.time))
+(fit1 <- vglm(cbind(normal, mild, severe) ~ bs(let, 3),
+ cumulative(parallel = TRUE, reverse = TRUE), pneumo))
+coef(fit1, matrix = TRUE)
+constraints(fit1) # Parallel assumption results in this
+constraints(fit1, type = "vlm") # This is the same as the default ("vlm"-type)
+# An equivalent model to fit1 (needs the type "lm" constraints):
+clist.lm <- constraints(fit1, type = "lm") # The "lm"-type constraints
+(fit2 <- vglm(cbind(normal, mild, severe) ~ bs(let, 3),
+ cumulative(reverse = TRUE), pneumo, constraints = clist.lm))
+abs(max(coef(fit1, matrix = TRUE) -
+ coef(fit2, matrix = TRUE))) # Should be zero
-# Fit a rank-1 stereotype (RR-multinomial logit) model
+# Fit a rank-1 stereotype (RR-multinomial logit) model:
data(car.all)
-fit = rrvglm(Country ~ Width + Height + HP, multinomial, car.all, Rank=1)
-constraints(fit) # All except the first are the A matrix
+fit <- rrvglm(Country ~ Width + Height + HP, multinomial, car.all, Rank = 1)
+constraints(fit) # All except the first are the estimated A matrix
}
\keyword{models}
\keyword{regression}
diff --git a/man/crashes.Rd b/man/crashes.Rd
new file mode 100644
index 0000000..de47532
--- /dev/null
+++ b/man/crashes.Rd
@@ -0,0 +1,131 @@
+\name{crashes}
+\alias{crashi}
+\alias{crashf}
+\alias{crashtr}
+\alias{crashmc}
+\alias{crashbc}
+\alias{crashp}
+\alias{alcoff}
+\alias{alclevels}
+\docType{data}
+\title{Crashes on New Zealand Roads in 2009}
+\description{
+ A variety of reported crash data cross-classified by time (hour
+ of the day) and day of the week, accumulated over 2009. These
+ include fatalities and injuries (by car), trucks, motor cycles,
+ bicycles and pedestrians. There is some alcohol-related
+ data too.
+
+ }
+\usage{
+data(crashi)
+data(crashf)
+data(crashtr)
+data(crashmc)
+data(crashbc)
+data(crashp)
+data(alcoff)
+data(alclevels)
+}
+\format{
+ Data frames with hourly times as rows and days of the week as columns.
+ The \code{alclevels} dataset has hourly times and alcohol levels.
+
+ \describe{
+
+ \item{Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday}{
+ Day of the week.
+
+
+ }
+
+ \item{0-30, 31-50, 51-80, 81-100, 101-120, 121-150, 151-200, 201-250, 251-300, 301-350, 350+}{
+ Blood alcohol level (milligrams alcohol per 100 millilitres of blood).
+
+
+% Aggregate number of alcohol offenders or number of dead
+% drivers/passengers on NZ roads.
+
+ }
+ }
+}
+\details{
+ Each cell is the aggregate number of crashes reported at each
+ hour-day combination, over the 2009 calendar year.
+ The \code{rownames} of each data frame are the
+ start times (hourly from midnight onwards) on a 24 hour clock.
+ For example, 21 means 9.00pm to 9.59pm.
+
+
+ For crashes,
+ \code{crashi} are the number of injuries by car,
+ \code{crashf} are the number of fatalities by car (not included in \code{crashi}),
+ \code{crashtr} are the number of crashes involving trucks,
+ \code{crashmc} are the number of crashes involving motorcyclists,
+ \code{crashbc} are the number of crashes involving bicycles,
+ and
+ \code{crashp} are the number of crashes involving pedestrians.
+ For alcohol-related offences,
+ \code{alcoff} are the number of alcohol offenders from
+ breath screening drivers,
+ and
+ \code{alclevels} are the blood alcohol levels of fatally injured drivers.
+
+
+}
+\source{
+
+ \url{http://www.transport.govt.nz/research/Pages/Motor-Vehicle-Crashes-in-New-Zealand-2009.aspx}.
+ Thanks to Warwick Goold and Alfian F. Hadi for assistance.
+
+}
+\references{
+ Motor Vehicles Crashes in New Zealand 2009;
+ Statistical Statement Calendar Year 2009.
+ Ministry of Transport, NZ Government;
+ Yearly Report 2010.
+ ISSN: 1176-3949
+
+}
+\seealso{
+ \code{\link{rrvglm}},
+ \code{\link{rcam}},
+ \code{\link{grc}}.
+
+}
+\examples{
+\dontrun{ plot(unlist(alcoff), type = "l", frame.plot = TRUE,
+ axes = FALSE, col = "blue", bty = "o",
+ main = "Alcoholic offenders on NZ roads, aggregated over 2009",
+ sub = "Vertical lines at midnight (purple) and noon (orange)",
+ xlab = "Day/hour", ylab = "Number of offenders")
+axis(1, at = 1 + (0:6) * 24 + 12, labels = colnames(alcoff))
+axis(2, las = 1)
+axis(3:4, labels = FALSE, tick = FALSE)
+abline(v = sort(1 + c((0:7) * 24, (0:6) * 24 + 12)), lty = "dashed",
+ col = c("purple", "orange")) }
+
+
+# Goodman's RC model
+fitgrc1 <- grc(alcoff)
+
+fitgrc2 <- grc(alcoff, Rank = 2, Corner = FALSE, Uncor = TRUE)
+print(Coef(fitgrc2), dig = 2)
+\dontrun{ biplot(fitgrc2, scaleA = 2.3, Ccol = "blue", Acol = "red",
+ Clabels = as.character(1:23)) }
+}
+\keyword{datasets}
+
+
+%
+%
+%\alias{crashi} Table 18, p.39
+%\alias{crashf} Table 19, p.40
+%\alias{crashtr} Table 30, p.66
+%\alias{crashmc} Table 35, p.72
+%\alias{crashbc} Table 40, p.77
+%\alias{crashp} Table 45, p.84
+%\alias{alcoff} Table 3, p.121
+%\alias{alclevels} Table 2, p.132
+
+
diff --git a/man/cratio.Rd b/man/cratio.Rd
index 50b75dd..379b000 100644
--- a/man/cratio.Rd
+++ b/man/cratio.Rd
@@ -60,6 +60,7 @@ cratio(link = "logit", earg = list(),
Stopping ratios deal with quantities such as
\code{logit(P[Y=j|Y>=j])}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -67,6 +68,7 @@ cratio(link = "logit", earg = list(),
\code{\link{rrvglm}}
and \code{\link{vgam}}.
+
}
\references{
Agresti, A. (2002)
@@ -100,9 +102,11 @@ contains further information and examples.
returned by \code{vglm}/\code{vgam}/\code{rrvglm} is the matrix
of counts.
+
For a nominal (unordered) factor response, the multinomial
logit model (\code{\link{multinomial}}) is more appropriate.
+
Here is an example of the usage of the \code{parallel} argument.
If there are covariates \code{x1}, \code{x2} and \code{x3}, then
\code{parallel = TRUE ~ x1 + x2 -1} and
@@ -110,6 +114,7 @@ contains further information and examples.
the regression coefficients for \code{x1} and \code{x2} to be
equal; those of the intercepts and \code{x3} would be different.
+
}
\section{Warning }{
No check is made to verify that the response is ordinal;
@@ -131,12 +136,12 @@ contains further information and examples.
}
\examples{
-pneumo = transform(pneumo, let=log(exposure.time))
-(fit = vglm(cbind(normal,mild,severe) ~ let, cratio(parallel=TRUE), pneumo))
-coef(fit, matrix=TRUE)
+pneumo = transform(pneumo, let = log(exposure.time))
+(fit = vglm(cbind(normal,mild,severe) ~ let, cratio(parallel = TRUE), pneumo))
+coef(fit, matrix = TRUE)
constraints(fit)
predict(fit)
-predict(fit, untransform=TRUE)
+predict(fit, untransform = TRUE)
}
\keyword{models}
\keyword{regression}
diff --git a/man/cumulative.Rd b/man/cumulative.Rd
index 1a8d6f8..e4a3229 100644
--- a/man/cumulative.Rd
+++ b/man/cumulative.Rd
@@ -61,12 +61,14 @@ cumulative(link = "logit", earg = list(), parallel = FALSE,
\eqn{P(Y\geq 2)}{P(Y>=2)}, \eqn{P(Y\geq 3)}{P(Y>=3)}, \dots,
\eqn{P(Y\geq J+1)}{P(Y>=J+1)} are used.
+
This should be set to \code{TRUE} for \code{link=}
\code{\link{golf}},
\code{\link{polf}},
\code{\link{nbolf}}.
For these links the cutpoints must be an increasing sequence;
- if \code{reverse=FALSE} for then the cutpoints must be an decreasing sequence.
+ if \code{reverse = FALSE} then the cutpoints must be a
+ decreasing sequence.
}
\item{mv}{
@@ -113,9 +115,10 @@ cumulative(link = "logit", earg = list(), parallel = FALSE,
(\code{\link{cloglog}}) then
this is known as the \emph{proportional-hazards model}.
+
In almost all the literature, the constraint matrices associated
with this family of models are known. For example, setting
- \code{parallel=TRUE} will make all constraint matrices
+ \code{parallel = TRUE} will make all constraint matrices
(except for the intercept) equal to a vector of \eqn{M} 1's.
If the constraint matrices are equal, unknown and to be estimated, then
this can be achieved by fitting the model as a
@@ -142,6 +145,12 @@ Agresti, A. (2002)
\emph{Categorical Data Analysis},
2nd ed. New York: Wiley.
+
+Agresti, A. (2010)
+\emph{Analysis of Ordinal Categorical Data},
+2nd ed. New York: Wiley.
+
+
Dobson, A. J. and Barnett, A. (2008)
\emph{An Introduction to Generalized Linear Models},
3rd ed. Boca Raton: Chapman & Hall/CRC Press.
@@ -188,7 +197,8 @@ by the \pkg{VGAM} package can be found at
For a nominal (unordered) factor response, the multinomial
logit model (\code{\link{multinomial}}) is more appropriate.
- With the logit link, setting \code{parallel=TRUE} will fit a
+
+ With the logit link, setting \code{parallel = TRUE} will fit a
proportional odds model. Note that the \code{TRUE} here does
not apply to the intercept term.
In practice, the validity of the proportional odds assumption
@@ -200,6 +210,7 @@ by the \pkg{VGAM} package can be found at
outside of \eqn{(0,1)}; setting \code{parallel=TRUE} will help avoid
this problem.
+
Here is an example of the usage of the \code{parallel} argument.
If there are covariates \code{x2}, \code{x3} and \code{x4}, then
\code{parallel = TRUE ~ x2 + x3 -1} and
@@ -207,6 +218,7 @@ by the \pkg{VGAM} package can be found at
the regression coefficients for \code{x2} and \code{x3} to be
equal; those of the intercepts and \code{x4} would be different.
+
If the data is inputted in \emph{long} format
(not \emph{wide} format, as in \code{\link{pneumo}} below)
and the self-starting initial values are not good enough then
@@ -216,6 +228,7 @@ by the \pkg{VGAM} package can be found at
\code{etatstart}.
See the example below.
+
To fit the proportional odds model one can use the
\pkg{VGAM} family function \code{\link{propodds}}.
Note that \code{propodds(reverse)} is equivalent to
@@ -261,50 +274,49 @@ by the \pkg{VGAM} package can be found at
}
\examples{
# Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
-pneumo = transform(pneumo, let=log(exposure.time))
+pneumo = transform(pneumo, let = log(exposure.time))
(fit = vglm(cbind(normal, mild, severe) ~ let,
- cumulative(parallel=TRUE, reverse=TRUE), pneumo))
+ cumulative(parallel = TRUE, reverse = TRUE), pneumo))
fit at y # Sample proportions
-weights(fit, type="prior") # Number of observations
-coef(fit, matrix=TRUE)
+weights(fit, type = "prior") # Number of observations
+coef(fit, matrix = TRUE)
constraints(fit) # Constraint matrices
# Check that the model is linear in let ----------------------
-fit2 = vgam(cbind(normal, mild, severe) ~ s(let, df=2),
- cumulative(reverse=TRUE), pneumo)
-\dontrun{
-plot(fit2, se=TRUE, overlay=TRUE, lcol=1:2, scol=1:2) }
+fit2 = vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
+ cumulative(reverse = TRUE), pneumo)
+\dontrun{ plot(fit2, se = TRUE, overlay = TRUE, lcol = 1:2, scol = 1:2) }
# Check the proportional odds assumption with a LRT ----------
(fit3 = vglm(cbind(normal, mild, severe) ~ let,
cumulative(parallel=FALSE, reverse=TRUE), pneumo))
pchisq(2*(logLik(fit3)-logLik(fit)),
- df=length(coef(fit3))-length(coef(fit)), lower.tail=FALSE)
+ df = length(coef(fit3))-length(coef(fit)), lower.tail = FALSE)
# A factor() version of fit ----------------------------------
# This is in long format (cf. wide format above)
-nobs = round(fit at y * c(weights(fit, type="prior")))
+nobs = round(fit at y * c(weights(fit, type = "prior")))
sumnobs = colSums(nobs) # apply(nobs, 2, sum)
-pneumo.long = data.frame(symptoms=ordered(rep(rep(colnames(nobs),
+pneumo.long = data.frame(symptoms = ordered(rep(rep(colnames(nobs),
nrow(nobs)),
- times=c(t(nobs))),
+ times = c(t(nobs))),
levels = colnames(nobs)),
- let = rep(rep(with(pneumo, let), each=ncol(nobs)),
- times=c(t(nobs))))
+ let = rep(rep(with(pneumo, let), each = ncol(nobs)),
+ times = c(t(nobs))))
with(pneumo.long, table(let, symptoms)) # check it; should be same as pneumo
-(fit.long1 = vglm(symptoms ~ let, data=pneumo.long,
+(fit.long1 = vglm(symptoms ~ let, data = pneumo.long,
cumulative(parallel=TRUE, reverse=TRUE), trace=TRUE))
coef(fit.long1, matrix=TRUE) # Should be same as coef(fit, matrix=TRUE)
# Could try using mustart if fit.long1 failed to converge.
mymustart = matrix(sumnobs / sum(sumnobs),
- nrow(pneumo.long), ncol(nobs), byrow=TRUE)
+ nrow(pneumo.long), ncol(nobs), byrow = TRUE)
fit.long2 = vglm(symptoms ~ let,
- fam = cumulative(parallel=TRUE, reverse=TRUE),
- mustart=mymustart, data = pneumo.long, trace=TRUE)
-coef(fit.long2, matrix=TRUE) # Should be same as coef(fit, matrix=TRUE)
+ fam = cumulative(parallel = TRUE, reverse = TRUE),
+ mustart = mymustart, data = pneumo.long, trace = TRUE)
+coef(fit.long2, matrix = TRUE) # Should be same as coef(fit, matrix = TRUE)
}
\keyword{models}
\keyword{regression}
diff --git a/man/dexpbinomial.Rd b/man/dexpbinomial.Rd
index 35b931a..fa500c4 100644
--- a/man/dexpbinomial.Rd
+++ b/man/dexpbinomial.Rd
@@ -9,8 +9,8 @@
}
\usage{
-dexpbinomial(lmean="logit", ldispersion="logit", emean=list(),
- edispersion=list(), idispersion=0.25, zero=2)
+dexpbinomial(lmean = "logit", ldispersion = "logit", emean = list(),
+ edispersion = list(), idispersion = 0.25, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -38,7 +38,7 @@ dexpbinomial(lmean="logit", ldispersion="logit", emean=list(),
If assigned, the single value should be either \code{1} or \code{2}.
The default is to have a single dispersion parameter value.
To model both parameters as functions of the covariates assign
- \code{zero=NULL}.
+ \code{zero = NULL}.
}
}
@@ -98,7 +98,7 @@ dexpbinomial(lmean="logit", ldispersion="logit", emean=list(),
\note{
This function processes the input in the same way
as \code{\link{binomialff}}, however multivariate responses are
- not allowed (\code{binomialff(mv=FALSE)}).
+ not allowed (\code{binomialff(mv = FALSE)}).
}
\section{Warning }{
@@ -122,22 +122,22 @@ toxop = transform(toxop,
# A fit similar (should be identical) to Section 6 of Efron (1986).
# But does not use poly(), and M=1.25 here, as in (5.3)
-cmlist = list("(Intercept)"=diag(2),
- "I(srainfall)"=rbind(1,0),
- "I(srainfall^2)"=rbind(1,0),
- "I(srainfall^3)"=rbind(1,0),
- "I(sN)"=rbind(0,1),
- "I(sN^2)"=rbind(0,1))
-dlist = list(min=0, max=1.25)
-fit = vglm(phat ~ I(srainfall) + I(srainfall^2) + I(srainfall^3) +
- I(sN) + I(sN^2),
- fam = dexpbinomial(ldisp="elogit", idisp=0.2,
- edisp=dlist, zero=NULL),
- data=toxop, weight=ssize, trace=TRUE, constraints=cmlist)
+cmlist = list("(Intercept)" = diag(2),
+ "I(srainfall)" = rbind(1,0),
+ "I(srainfall^2)" = rbind(1,0),
+ "I(srainfall^3)" = rbind(1,0),
+ "I(sN)" = rbind(0,1),
+ "I(sN^2)" = rbind(0,1))
+dlist = list(min = 0, max = 1.25)
+fit = vglm(cbind(phat, 1 - phat) * ssize ~
+ I(srainfall) + I(srainfall^2) + I(srainfall^3) +
+ I(sN) + I(sN^2),
+ dexpbinomial(ldisp = "elogit", idisp = 0.2,
+ edisp = dlist, zero = NULL),
+ toxop, trace = TRUE, constraints = cmlist)
# Now look at the results
-coef(fit)
-coef(fit, matrix=TRUE)
+coef(fit, matrix = TRUE)
head(fitted(fit))
summary(fit)
vcov(fit)
@@ -145,41 +145,40 @@ sqrt(diag(vcov(fit))) # Standard errors
# Effective sample size (not quite the last column of Table 1)
head(predict(fit))
-Dispersion = elogit(predict(fit)[,2], earg=dlist, inverse=TRUE)
-c(round(weights(fit, type="prior") * Dispersion, dig=1))
+Dispersion = elogit(predict(fit)[,2], earg = dlist, inverse = TRUE)
+c(round(weights(fit, type = "prior") * Dispersion, dig = 1))
# Ordinary logistic regression (gives same results as (6.5))
-ofit = vglm(phat ~ I(srainfall) + I(srainfall^2) + I(srainfall^3),
- fam = binomialff, data=toxop, weight=ssize, trace=TRUE)
+ofit = vglm(cbind(phat, 1 - phat) * ssize ~
+ I(srainfall) + I(srainfall^2) + I(srainfall^3),
+ binomialff, toxop, trace = TRUE)
# Same as fit but it uses poly(), and can be plotted (cf. Figure 1)
-cmlist2 = list("(Intercept)"=diag(2),
- "poly(srainfall, 3)"=rbind(1,0),
- "poly(sN, 2)"=rbind(0,1))
-fit2 = vglm(phat ~ poly(srainfall, 3) + poly(sN, 2),
- fam = dexpbinomial(ldisp="elogit", idisp=0.2,
- edisp=dlist, zero=NULL),
- data=toxop, weight=ssize, trace=TRUE, constraints=cmlist2)
-\dontrun{
-par(mfrow=c(1,2))
-plotvgam(fit2, se=TRUE, lcol="blue", scol="red") # Cf. Figure 1
-
+cmlist2 = list("(Intercept)" = diag(2),
+ "poly(srainfall, degree = 3)" = rbind(1, 0),
+ "poly(sN, degree = 2)" = rbind(0, 1))
+fit2 = vglm(cbind(phat, 1 - phat) * ssize ~
+ poly(srainfall, degree = 3) + poly(sN, degree = 2),
+ dexpbinomial(ldisp = "elogit", idisp = 0.2,
+ edisp = dlist, zero = NULL),
+ toxop, trace = TRUE, constraints = cmlist2)
+\dontrun{ par(mfrow = c(1, 2))
+plotvgam(fit2, se = TRUE, lcol = "blue", scol = "red") # Cf. Figure 1
# Cf. Figure 1(a)
-par(mfrow=c(1,2))
-o = with(toxop, sort.list(rainfall))
-with(toxop, plot(rainfall[o], fitted(fit2)[o], type="l", col="blue",
- las=1, ylim=c(0.3, 0.65)))
-with(toxop, points(rainfall[o], fitted(ofit)[o], col="red", type="b",
- pch=19))
+par(mfrow = c(1,2))
+ooo = with(toxop, sort.list(rainfall))
+with(toxop, plot(rainfall[ooo], fitted(fit2)[ooo], type = "l",
+ col = "blue", las = 1, ylim = c(0.3, 0.65)))
+with(toxop, points(rainfall[ooo], fitted(ofit)[ooo], col = "red",
+ type = "b", pch = 19))
# Cf. Figure 1(b)
-o = with(toxop, sort.list(ssize))
-with(toxop, plot(ssize[o], Dispersion[o], type="l", col="blue", las=1,
- xlim=c(0, 100)))
-}
+ooo = with(toxop, sort.list(ssize))
+with(toxop, plot(ssize[ooo], Dispersion[ooo], type = "l", col = "blue",
+ las = 1, xlim = c(0, 100))) }
}
\keyword{models}
\keyword{regression}
diff --git a/man/dirmul.old.Rd b/man/dirmul.old.Rd
index e97e30b..dabd972 100644
--- a/man/dirmul.old.Rd
+++ b/man/dirmul.old.Rd
@@ -70,6 +70,7 @@ The (posterior) mean is
% allele i appears y_j times, then the maximum likelihood estimate of
% the ith allele frequency is y_j / (2y_*).
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -77,24 +78,35 @@ The (posterior) mean is
\code{\link{rrvglm}}
and \code{\link{vgam}}.
+
}
\references{
Lange, K. (2002)
\emph{Mathematical and Statistical Methods for Genetic Analysis},
2nd ed. New York: Springer-Verlag.
+
Evans, M., Hastings, N. and Peacock, B. (2000)
\emph{Statistical Distributions},
New York: Wiley-Interscience, Third edition.
+
Paul, S. R., Balasooriya, U. and Banerjee, T. (2005)
Fisher information matrix of the Dirichlet-multinomial distribution.
\emph{Biometrical Journal}, \bold{47}, 230--236.
+
+Tvedebrink, T. (2010)
+Overdispersion in allelic counts and \eqn{\theta}-correction in forensic genetics.
+\emph{Theoretical Population Biology}, \bold{78}, 200--210.
+
+
+
Documentation accompanying the \pkg{VGAM} package at
\url{http://www.stat.auckland.ac.nz/~yee}
contains further information and examples.
+
}
\author{ Thomas W. Yee }
@@ -103,9 +115,11 @@ contains further information and examples.
Convergence seems to slow down if there are zero values.
Currently, initial values can be improved upon.
+
This function is almost defunct and may be withdrawn soon.
Use \code{\link{dirmultinomial}} instead.
+
}
\seealso{
@@ -113,38 +127,40 @@ contains further information and examples.
\code{\link{dirichlet}},
\code{\link{betabin.ab}},
\code{\link{multinomial}}.
+
+
}
\examples{
# Data from p.50 of Lange (2002)
-alleleCounts = c(2, 84, 59, 41, 53, 131, 2, 0,
+alleleCounts <- c(2, 84, 59, 41, 53, 131, 2, 0,
0, 50, 137, 78, 54, 51, 0, 0,
0, 80, 128, 26, 55, 95, 0, 0,
0, 16, 40, 8, 68, 14, 7, 1)
-dim(alleleCounts) = c(8, 4)
-alleleCounts = data.frame(t(alleleCounts))
-dimnames(alleleCounts) = list(c("White","Black","Chicano","Asian"),
- paste("Allele", 5:12, sep=""))
+dim(alleleCounts) <- c(8, 4)
+alleleCounts <- data.frame(t(alleleCounts))
+dimnames(alleleCounts) <- list(c("White","Black","Chicano","Asian"),
+ paste("Allele", 5:12, sep = ""))
set.seed(123) # @initialize uses random numbers
-fit = vglm(cbind(Allele5,Allele6,Allele7,Allele8,Allele9,
- Allele10,Allele11,Allele12) ~ 1, dirmul.old,
- trace=TRUE, crit="c", data=alleleCounts)
+fit <- vglm(cbind(Allele5,Allele6,Allele7,Allele8,Allele9,
+ Allele10,Allele11,Allele12) ~ 1, dirmul.old,
+ trace = TRUE, crit = "c", data = alleleCounts)
-(sfit = summary(fit))
+(sfit <- summary(fit))
vcov(sfit)
-round(eta2theta(coef(fit), fit at misc$link), dig=2) # not preferred
-round(Coef(fit), dig=2) # preferred # preferred
-round(t(fitted(fit)), dig=4) # 2nd row of Table 3.5 of Lange (2002)
-coef(fit, matrix=TRUE)
-
-
-pfit = vglm(cbind(Allele5,Allele6,Allele7,Allele8,Allele9,
- Allele10,Allele11,Allele12) ~ 1,
- dirmul.old(parallel=TRUE), trace=TRUE,
- data=alleleCounts)
-round(eta2theta(coef(pfit), pfit at misc$link), dig=2) # not preferred
-round(Coef(pfit), dig=2) # preferred
+round(eta2theta(coef(fit), fit at misc$link), dig = 2) # not preferred
+round(Coef(fit), dig = 2) # preferred
+round(t(fitted(fit)), dig = 4) # 2nd row of Table 3.5 of Lange (2002)
+coef(fit, matrix = TRUE)
+
+
+pfit <- vglm(cbind(Allele5,Allele6,Allele7,Allele8,Allele9,
+ Allele10,Allele11,Allele12) ~ 1,
+ dirmul.old(parallel = TRUE), trace = TRUE,
+ data = alleleCounts)
+round(eta2theta(coef(pfit), pfit at misc$link), dig = 2) # not preferred
+round(Coef(pfit), dig = 2) # preferred
}
\keyword{models}
\keyword{regression}
diff --git a/man/dirmultinomial.Rd b/man/dirmultinomial.Rd
index bb1d073..5e70255 100644
--- a/man/dirmultinomial.Rd
+++ b/man/dirmultinomial.Rd
@@ -90,11 +90,13 @@ dirmultinomial(lphi="logit", ephi = list(), iphi = 0.10,
The \eqn{M}th linear/additive predictor corresponds to \code{lphi}
applied to \eqn{\phi}{phi}.
+
Note that \eqn{E(Y_j) = N_* \pi_j}{E(Y_j) = N_* pi_j} but
the probabilities (returned as the fitted values)
\eqn{\pi_j}{pi_j} are bundled together as a \eqn{M}-column matrix.
The quantities \eqn{N_*} are returned as the prior weights.
+
The beta-binomial distribution is a special case of
the Dirichlet-multinomial distribution when \eqn{M=2};
see \code{\link{betabinomial}}. It is easy to show that
@@ -104,6 +106,7 @@ dirmultinomial(lphi="logit", ephi = list(), iphi = 0.10,
Also, \eqn{\phi=1/(1+shape1+shape2)}{phi=1/(1+shape1+shape2)}, which
is known as the \emph{intra-cluster correlation} coefficient.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -111,20 +114,31 @@ dirmultinomial(lphi="logit", ephi = list(), iphi = 0.10,
\code{\link{rrvglm}}
and \code{\link{vgam}}.
+
If the model is an intercept-only model then \code{@misc} (which is a
list) has a component called \code{shape} which is a vector with the
\eqn{M} values \eqn{\pi_j(1/\phi-1)}{pi_j * (1/phi-1)}.
+
% zz not sure: These are the shape parameters of the underlying
% Dirichlet distribution.
+
}
\references{
+
Paul, S. R., Balasooriya, U. and Banerjee, T. (2005)
Fisher information matrix of the Dirichlet-multinomial distribution.
\emph{Biometrical Journal}, \bold{47}, 230--236.
+
+
+Tvedebrink, T. (2010)
+Overdispersion in allelic counts and \eqn{\theta}-correction in forensic genetics.
+\emph{Theoretical Population Biology}, \bold{78}, 200--210.
+
+
}
\author{ Thomas W. Yee }
@@ -132,6 +146,7 @@ Fisher information matrix of the Dirichlet-multinomial distribution.
This \pkg{VGAM} family function is prone to numerical problems,
especially when there are covariates.
+
}
\note{
The response can be a matrix of non-negative integers, or else
@@ -139,16 +154,19 @@ Fisher information matrix of the Dirichlet-multinomial distribution.
each row specified using the \code{weights} argument.
This dual input option is similar to \code{\link{multinomial}}.
+
To fit a `parallel' model with the \eqn{\phi}{phi} parameter
being an intercept-only you will need to use the \code{constraints}
argument.
+
Currently, Fisher scoring is implemented. To compute the expected
information matrix a \code{for} loop is used; this may be very slow
when the counts are large.
Additionally, convergence may be slower than usual due to round-off
error when computing the expected information matrices.
+
}
\seealso{
@@ -157,24 +175,24 @@ Fisher information matrix of the Dirichlet-multinomial distribution.
\code{\link{betabin.ab}},
\code{\link{dirichlet}},
\code{\link{multinomial}}.
+
}
\examples{
-n = 10
-M = 5
-y = round(matrix(runif(n*M)*10, n, M)) # Integer counts
-fit = vglm(y ~ 1, dirmultinomial, trace=TRUE)
+n <- 10; M <- 5
+y <- round(matrix(runif(n*M)*10, n, M)) # Integer counts
+fit <- vglm(y ~ 1, dirmultinomial, trace = TRUE)
head(fitted(fit))
fit at y # Sample proportions
-weights(fit, type="prior", matrix=FALSE) # Total counts per row
+weights(fit, type = "prior", matrix = FALSE) # Total counts per row
-x = runif(n)
-fit = vglm(y ~ x, dirmultinomial, trace=TRUE)
+x <- runif(n)
+fit <- vglm(y ~ x, dirmultinomial, trace = TRUE)
\dontrun{
Coef(fit) # This does not work
}
-coef(fit, matrix=TRUE)
-(sfit = summary(fit))
+coef(fit, matrix = TRUE)
+(sfit <- summary(fit))
vcov(sfit)
}
\keyword{models}
diff --git a/man/eunifUC.Rd b/man/eunifUC.Rd
index ddc23e6..5f39766 100644
--- a/man/eunifUC.Rd
+++ b/man/eunifUC.Rd
@@ -129,7 +129,8 @@ quantile and expectile regression.
\seealso{
\code{\link{deexp}},
\code{\link{denorm}},
- \code{\link{dunif}}.
+ \code{\link{dunif}},
+ \code{\link{dkoenker}}.
}
diff --git a/man/expgeometric.Rd b/man/expgeometric.Rd
new file mode 100644
index 0000000..b2c2da0
--- /dev/null
+++ b/man/expgeometric.Rd
@@ -0,0 +1,90 @@
+\name{expgeometric}
+\alias{expgeometric}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Exponential geometric Distribution Family Function}
+\description{
+ Estimates the two parameters of the exponential geometric distribution
+ by maximum likelihood estimation.
+
+}
+\usage{
+expgeometric(lscale = "loge", lshape = "logit",
+ escale = list(), eshape = list(),
+ iscale = NULL, ishape = NULL,
+ zero = 1, nsimEIM = 400)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lscale, lshape}{
+ Link function for the two parameters.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{escale, eshape}{
+ List. Extra argument for each of the links.
+ See \code{earg} in \code{\link{Links}} for general information.
+
+ }
+ \item{iscale, ishape}{
+ Numeric.
+ Optional initial values for the scale and shape parameters.
+
+ }
+ \item{zero, nsimEIM}{
+ See \code{\link{CommonVGAMffArguments}}.
+
+ }
+
+
+}
+\details{
+ The exponential geometric distribution has density function
+ \deqn{f(y; c = scale, s = shape) =
+ c (1 - s) e^{-c y} (1 - s e^{-c y})^{-2}}{%
+ c*(1-s)*e^(-c*y)*(1-s*e^(-c*y))^(-2)}
+ where \eqn{y > 0}, \eqn{c > 0} and \eqn{s \in (0, 1)}{0 < s < 1}.
+ The mean is \eqn{((s-1)/(sc)) \log(1-s)}{((s-1)/(sc)) * log(1-s)}
+ is returned as the fitted values.
+ Note the median is \eqn{(1/c) \log(2-s)}{(1/c) * log(2-s)}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+
+}
+\references{
+ Adamidis, K., Loukas, S. (1998).
+ A lifetime distribution with decreasing failure rate.
+ \emph{Statistics and Probability Letters},
+ \bold{39}, 35--42.
+
+}
+\author{ J. G. Lauder and T. W. Yee }
+\note{
+  Soon the scale term will be redefined as the reciprocal of
+ the scale parameter used by Adamidis and Loukas (1998).
+ That is, division by the scale parameter will be used later.
+
+
+}
+
+\seealso{
+ \code{dexpgeom},
+ \code{\link{exponential}},
+ \code{\link{geometric}}.
+
+}
+\examples{
+scale = exp(2); shape = logit(-1, inverse = TRUE);
+edata = data.frame(y = rexpgeom(n = 2000, scale=scale, shape=shape))
+fit = vglm(y ~ 1, expgeometric, edata, trace = TRUE)
+c(with(edata, mean(y)), head(fitted(fit), 1))
+coef(fit, matrix = TRUE)
+Coef(fit)
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/exponential.Rd b/man/exponential.Rd
index 107c2e2..c738c89 100644
--- a/man/exponential.Rd
+++ b/man/exponential.Rd
@@ -40,16 +40,22 @@ exponential(link = "loge", earg = list(), location = 0, expected = TRUE)
By default, \eqn{A=0}.
Then \eqn{E(Y) = A + 1/ \lambda}{E(Y) = A + 1/rate} and
\eqn{Var(Y) = 1/ \lambda^2}{Var(Y) = 1/rate^2}.
+
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
+
}
\references{
Evans, M., Hastings, N. and Peacock, B. (2000)
\emph{Statistical Distributions},
New York: Wiley-Interscience, Third edition.
+
+
}
\author{ T. W. Yee }
@@ -64,11 +70,13 @@ New York: Wiley-Interscience, Third edition.
The argument \code{lambda} in \code{\link{rpois}} is somewhat
the same as \code{rate} here.
+
}
\seealso{
% \code{\link{cexpon}},
\code{\link{amlexponential}},
\code{\link{laplace}},
+ \code{\link{expgeometric}},
\code{\link{poissonff}},
\code{\link{mix2exp}},
\code{\link{freund61}}.
@@ -76,15 +84,17 @@ New York: Wiley-Interscience, Third edition.
}
\examples{
-edata = data.frame(x2 = runif(nn <- 100) - 0.5)
-edata = transform(edata, x3 = runif(nn) - 0.5)
-edata = transform(edata, eta = 0.2 - 0.7 * x2 + 1.9 * x3)
-edata = transform(edata, rate = exp(eta))
-edata = transform(edata, y = rexp(nn, rate=rate))
+edata <- data.frame(x2 = runif(nn <- 100) - 0.5)
+edata <- transform(edata, x3 = runif(nn) - 0.5)
+edata <- transform(edata, eta = 0.2 - 0.7 * x2 + 1.9 * x3)
+edata <- transform(edata, rate = exp(eta))
+edata <- transform(edata, y = rexp(nn, rate = rate))
with(edata, stem(y))
-fit.slow = vglm(y ~ x2 + x3, exponential, edata, trace=TRUE, crit="c")
-fit.fast = vglm(y ~ x2 + x3, exponential(exp=FALSE), edata, trace=TRUE, crit="c")
-coef(fit.slow, mat=TRUE)
+
+fit.slow <- vglm(y ~ x2 + x3, exponential, edata, trace = TRUE, crit = "c")
+fit.fast <- vglm(y ~ x2 + x3, exponential(exp = FALSE), edata,
+ trace = TRUE, crit = "c")
+coef(fit.slow, mat = TRUE)
summary(fit.slow)
}
\keyword{models}
diff --git a/man/gamma2.Rd b/man/gamma2.Rd
index 7d125bd..cf06696 100644
--- a/man/gamma2.Rd
+++ b/man/gamma2.Rd
@@ -61,9 +61,10 @@ gamma2(lmu = "loge", lshape = "loge", emu = list(), eshape = list(),
are modelled as an intercept only. By default, the shape parameter
(after \code{lshape} is applied) is modelled as a single unknown
number that is estimated. It can be modelled as a function of
- the explanatory variables by setting \code{zero=NULL}. A negative
+ the explanatory variables by setting \code{zero = NULL}. A negative
value means that the value is recycled, so setting \eqn{-2} means
all shape parameters are intercept only.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
}
@@ -92,17 +93,20 @@ gamma2(lmu = "loge", lshape = "loge", emu = list(), eshape = list(),
origin and is unimodal with mode at \eqn{y = \mu - \mu / \lambda}{y =
mu - mu / shape}; this can be achieved with \code{lshape="loglog"}.
+
By default, the two linear/additive predictors are
\eqn{\eta_1=\log(\mu)}{eta1=log(mu)} and
\eqn{\eta_2=\log(\lambda)}{eta2=log(shape)}.
This family function implements Fisher scoring and the working
weight matrices are diagonal.
+
This \pkg{VGAM} family function handles \emph{multivariate} responses,
so that a matrix can be used as the response. The number of columns is
the number of species, say, and \code{zero=-2} means that \emph{all}
species have a shape parameter equalling a (different) intercept only.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -142,22 +146,23 @@ McCullagh, P. and Nelder, J. A. (1989)
\code{\link{bivgamma.mckay}} for \emph{a} bivariate gamma distribution,
\code{\link{expexp}},
\code{\link[stats]{GammaDist}},
- \code{\link{golf}}.
+ \code{\link{golf}},
+ \code{\link{CommonVGAMffArguments}}.
}
\examples{
# Essentially a 1-parameter gamma
-gdata = data.frame(y = rgamma(n=100, shape= exp(1)))
-fit1 = vglm(y ~ 1, gamma1, gdata)
-fit2 = vglm(y ~ 1, gamma2, gdata, trace=TRUE, crit="c")
-coef(fit2, matrix=TRUE)
+gdata <- data.frame(y = rgamma(n = 100, shape = exp(1)))
+fit1 <- vglm(y ~ 1, gamma1, gdata)
+fit2 <- vglm(y ~ 1, gamma2, gdata, trace = TRUE, crit = "coef")
+coef(fit2, matrix = TRUE)
Coef(fit2)
# Essentially a 2-parameter gamma
-gdata = data.frame(y = rgamma(n=500, rate=exp(1), shape=exp(2)))
-fit2 = vglm(y ~ 1, gamma2, gdata, trace=TRUE, crit="c")
-coef(fit2, matrix=TRUE)
+gdata <- data.frame(y = rgamma(n = 500, rate = exp(1), shape = exp(2)))
+fit2 <- vglm(y ~ 1, gamma2, gdata, trace = TRUE, crit = "coef")
+coef(fit2, matrix = TRUE)
Coef(fit2)
summary(fit2)
}
diff --git a/man/gamma2.ab.Rd b/man/gamma2.ab.Rd
index a06b22a..b1e7957 100644
--- a/man/gamma2.ab.Rd
+++ b/man/gamma2.ab.Rd
@@ -6,8 +6,9 @@
by maximum likelihood estimation.
}
\usage{
-gamma2.ab(lrate = "loge", lshape = "loge", erate=list(), eshape=list(),
- irate=NULL, ishape=NULL, expected = TRUE, zero = 2)
+gamma2.ab(lrate = "loge", lshape = "loge",
+ erate = list(), eshape = list(),
+ irate = NULL, ishape = NULL, expected = TRUE, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -44,19 +45,20 @@ gamma2.ab(lrate = "loge", lshape = "loge", erate=list(), eshape=list(),
}
\details{
The density function is given by
- \deqn{f(y) = \exp(-rate \times y) \times y^{shape-1} \times rate^{shape} /
- \Gamma(shape)}{%
+ \deqn{f(y) = \exp(-rate \times y) \times y^{shape-1} \times rate^{shape}
+ / \Gamma(shape)}{%
f(y) = exp(-rate * y) y^(shape-1) rate^(shape) / gamma(shape)}
for \eqn{shape > 0}, \eqn{rate > 0} and \eqn{y > 0}.
Here, \eqn{\Gamma(shape)}{gamma(shape)} is the gamma
function, as in \code{\link[base:Special]{gamma}}.
- The mean of \emph{Y} is \eqn{\mu=shape/rate}{mu=shape/rate}
+ The mean of \emph{Y} is \eqn{\mu = shape/rate}{mu = shape/rate}
(returned as the fitted values) with variance
- \eqn{\sigma^2 = \mu^2 /shape = shape/rate^2}{sigma^2 =
+ \eqn{\sigma^2 = \mu^2 /shape = shape/rate^2}{sigma^2 =
mu^2 /shape = shape/rate^2}.
By default, the two linear/additive predictors are
- \eqn{\eta_1=\log(rate)}{eta1=log(rate)} and
- \eqn{\eta_2=\log(shape)}{eta2=log(shape)}.
+ \eqn{\eta_1 = \log(rate)}{eta1 = log(rate)} and
+ \eqn{\eta_2 = \log(shape)}{eta2 = log(shape)}.
+
The argument \code{expected} refers to the type of information
matrix. The expected information matrix corresponds to Fisher scoring
@@ -65,31 +67,37 @@ gamma2.ab(lrate = "loge", lshape = "loge", erate=list(), eshape=list(),
from the family function in the future. If both algorithms work then
the differences in the results are often not huge.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
Most standard texts on statistical distributions describe
the 2-parameter gamma distribution, e.g.,
+
Evans, M., Hastings, N. and Peacock, B. (2000)
\emph{Statistical Distributions},
New York: Wiley-Interscience, Third edition.
+
}
\author{ T. W. Yee }
\note{
The parameters \eqn{rate} and \eqn{shape} match with the arguments
\code{rate} and \code{shape} of \code{\link[stats]{rgamma}}. Often,
- \eqn{scale=1/rate} is used.
+ \eqn{scale = 1/rate} is used.
+
- If \eqn{rate=1} use the family function \code{\link{gamma1}} to
+ If \eqn{rate = 1} use the family function \code{\link{gamma1}} to
estimate \eqn{shape}.
+
}
\seealso{
@@ -99,20 +107,21 @@ gamma2.ab(lrate = "loge", lshape = "loge", erate=list(), eshape=list(),
\code{\link{bivgamma.mckay}} for \emph{a} bivariate gamma distribution,
\code{\link{expexp}}.
+
}
\examples{
# Essentially a 1-parameter gamma
-gdata = data.frame(y = rgamma(n <- 100, shape= exp(1)))
-fit1 = vglm(y ~ 1, gamma1, gdata, trace=TRUE)
-fit2 = vglm(y ~ 1, gamma2.ab, gdata, trace=TRUE, crit="c")
-coef(fit2, matrix=TRUE)
+gdata <- data.frame(y = rgamma(n <- 100, shape = exp(1)))
+fit1 <- vglm(y ~ 1, gamma1, gdata, trace = TRUE)
+fit2 <- vglm(y ~ 1, gamma2.ab, gdata, trace = TRUE, crit = "c")
+coef(fit2, matrix = TRUE)
Coef(fit2)
# Essentially a 2-parameter gamma
-gdata = data.frame(y = rgamma(n=500, rate=exp(1), shape=exp(2)))
-fit2 = vglm(y ~ 1, gamma2.ab, gdata, trace=TRUE, crit="c")
-coef(fit2, matrix=TRUE)
+gdata <- data.frame(y = rgamma(n = 500, rate = exp(1), shape = exp(2)))
+fit2 <- vglm(y ~ 1, gamma2.ab, gdata, trace = TRUE, crit = "c")
+coef(fit2, matrix = TRUE)
Coef(fit2)
summary(fit2)
}
diff --git a/man/gaussianff.Rd b/man/gaussianff.Rd
index 866f49d..63a57e3 100644
--- a/man/gaussianff.Rd
+++ b/man/gaussianff.Rd
@@ -111,6 +111,7 @@ gaussianff(dispersion = 0, parallel = FALSE, zero = NULL)
\seealso{
\code{\link{normal1}},
+ \code{\link{huber}},
\code{\link{lqnorm}},
\code{vlm},
\code{\link{vglm}},
diff --git a/man/gengamma.Rd b/man/gengamma.Rd
index 16d4a6f..c002cd5 100644
--- a/man/gengamma.Rd
+++ b/man/gengamma.Rd
@@ -8,9 +8,9 @@
}
\usage{
-gengamma(lscale="loge", ld="loge", lk="loge",
- escale=list(), ed=list(), ek=list(),
- iscale=NULL, id=NULL, ik=NULL, zero=NULL)
+gengamma(lscale = "loge", ld = "loge", lk = "loge",
+ escale = list(), ed = list(), ek = list(),
+ iscale = NULL, id = NULL, ik = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -35,6 +35,7 @@ gengamma(lscale="loge", ld="loge", lk="loge",
linear/additive predictors are modelled as intercepts only.
The values must be from the set \{1,2,3\}.
The default value means none are modelled as intercept-only terms.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
}
@@ -107,7 +108,7 @@ Rayleigh \eqn{f(y;c\sqrt{2},2,1)}{f(y;c sqrt(2),2,1)} where \eqn{c>0}.
The notation used here differs from Stacy (1962) and Prentice (1974).
Poor initial values may result in failure to converge so
if there are covariates and there are convergence problems,
- try using the \code{zero} argument (e.g., \code{zero=2:3})
+ try using the \code{zero} argument (e.g., \code{zero = 2:3})
or the \code{ik} argument.
diff --git a/man/genpoisson.Rd b/man/genpoisson.Rd
index 3186a36..f61cb3c 100644
--- a/man/genpoisson.Rd
+++ b/man/genpoisson.Rd
@@ -6,10 +6,11 @@
Estimation of the two parameters of a generalized Poisson distribution.
}
\usage{
-genpoisson(llambda="elogit", ltheta="loge",
- elambda=if(llambda=="elogit") list(min=-1,max=1) else list(),
- etheta=list(), ilambda=NULL, itheta=NULL,
- use.approx=TRUE, method.init=1, zero=1)
+genpoisson(llambda = "elogit", ltheta = "loge",
+ elambda = if (llambda == "elogit") list(min = -1, max = 1)
+ else list(),
+ etheta = list(), ilambda = NULL, itheta = NULL,
+ use.approx = TRUE, method.init = 1, zero = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -55,53 +56,60 @@ genpoisson(llambda="elogit", ltheta="loge",
}
\details{
The generalized Poisson distribution has density
- \deqn{f(y) = \theta(\theta+\lambda y)^{y-1} \exp(-\theta-\lambda y) / y!}
-
-%{% f(y) = theta*(theta+lambda*y)^(y-1) exp(-theta-lambda*y) / y!}
+ \deqn{f(y) = \theta(\theta+\lambda y)^{y-1} \exp(-\theta-\lambda y) / y!}{
+ f(y) = \theta(\theta+\lambda * y)^(y-1) * exp(-\theta-\lambda * y) / y!}
for \eqn{\theta > 0} and \eqn{y = 0,1,2,\ldots}.
-Now \eqn{\max(-1,-\theta/m) \leq \lambda \leq 1}{\max(-1,-\theta/m) \le \lambda \le 1}
+Now \eqn{\max(-1,-\theta/m) \leq \lambda \leq 1}{
+ max(-1,-\theta/m) \le lambda \le 1}
where \eqn{m (\geq 4)}{m (\ge 4)} is the greatest positive
integer satisfying \eqn{\theta + m\lambda > 0}
when \eqn{\lambda < 0}
-[and then \eqn{P(Y=y)=0} for \eqn{y > m}].
+[and then \eqn{P(Y=y) = 0} for \eqn{y > m}].
Note the complicated support for this distribution means,
for some data sets,
the default link for \code{llambda} is not always appropriate.
+
An ordinary Poisson distribution corresponds to \eqn{\lambda=0}{lambda=0}.
The mean (returned as the fitted values) is
\eqn{E(Y) = \theta / (1 - \lambda)}
and the variance is \eqn{\theta / (1 - \lambda)^3}.
+
For more information see Consul and Famoye (2006) for a summary and
Consul (1989) for full details.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
Consul, P. C. and Famoye, F. (2006)
\emph{Lagrangian Probability Distributions},
Boston: Birkhauser.
+
Jorgensen, B. (1997)
\emph{The Theory of Dispersion Models}.
London: Chapman & Hall
+
Consul, P. C. (1989)
\emph{Generalized Poisson Distributions: Properties and Applications}.
New York: Marcel Dekker.
+
}
\author{ T. W. Yee }
\note{
This distribution is useful for dispersion modelling.
- Convergence problems may occur when \code{lambda} is very close to 0
- or 1.
+ Convergence problems may occur when \code{lambda} is very close
+ to 0 or 1.
}
\seealso{
@@ -111,8 +119,8 @@ New York: Marcel Dekker.
\examples{
gdata = data.frame(x = runif(nn <- 200))
gdata = transform(gdata, y = rpois(nn, exp(2-x))) # Ordinary Poisson data
-fit = vglm(y ~ x, genpoisson(zero=1), gdata, trace=TRUE)
-coef(fit, matrix=TRUE)
+fit = vglm(y ~ x, genpoisson(zero = 1), gdata, trace = TRUE)
+coef(fit, matrix = TRUE)
summary(fit)
}
\keyword{models}
@@ -120,3 +128,4 @@ summary(fit)
% yettodo: see csda 2009, 53(9): 3478--3489.
+%{% f(y) = theta*(theta+lambda*y)^(y-1) exp(-theta-lambda*y) / y!}
diff --git a/man/genrayleigh.Rd b/man/genrayleigh.Rd
new file mode 100644
index 0000000..8c5e535
--- /dev/null
+++ b/man/genrayleigh.Rd
@@ -0,0 +1,94 @@
+\name{genrayleigh}
+\alias{genrayleigh}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Generalized Rayleigh Distribution Family Function}
+\description{
+ Estimates the two parameters of the generalized Rayleigh distribution
+ by maximum likelihood estimation.
+
+}
+\usage{
+genrayleigh(lshape = "loge", lscale = "loge",
+ eshape = list(), escale = list(),
+ ishape = NULL, iscale = NULL,
+ tol12 = 1e-05, nsimEIM = 300, zero = 1)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lshape, lscale}{
+ Link function for the two positive parameters, shape and scale.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{eshape, escale}{
+ List. Extra argument for each of the links.
+ See \code{earg} in \code{\link{Links}} for general information.
+
+ }
+ \item{ishape, iscale}{
+ Numeric.
+ Optional initial values for the shape and scale parameters.
+
+ }
+ \item{nsimEIM, zero}{
+ See \code{\link{CommonVGAMffArguments}}.
+
+}
+ \item{tol12}{
+ Numeric and positive. Tolerance for testing whether the
+ second shape parameter is either 1 or 2. If so then the
+ working weights need to handle these singularities.
+
+}
+}
+\details{
+ The generalized Rayleigh distribution has density function
+ \deqn{f(y;a = shape,b = scale) =
+ (2 a y/b^{2}) e^{-(y/b)^{2}} (1 - e^{-(y/b)^{2}})^{a-1}}{%
+ (2*a*y/b^2) * e^(-(y/b)^2) * (1 - e^(-(y/b)^2))^(a-1)}
+ where \eqn{y > 0} and the two parameters,
+ \eqn{a} and \eqn{b}, are positive.
+ The mean cannot be expressed nicely so the median is returned as
+ the fitted values.
+ Applications of the generalized Rayleigh distribution include modeling
+ strength data and general lifetime data.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+
+}
+\references{
+ Kundu, D., Raqab, M. C. (2005).
+ Generalized Rayleigh distribution: different
+ methods of estimations.
+ \emph{Computational Statistics and Data Analysis},
+ \bold{49}, 187--200.
+
+}
+\author{ J. G. Lauder and T. W. Yee }
+\note{
+ We define \code{scale} as the reciprocal of the scale parameter
+ used by Kundu and Raqab (2005).
+
+}
+
+\seealso{
+ \code{\link{dgenray}},
+ \code{\link{rayleigh}}.
+
+}
+\examples{
+shape = exp(1); scale = exp(2);
+rdata = data.frame(y = rgenray(n = 1000, shape, scale))
+fit = vglm(y ~ 1, genrayleigh, rdata, trace = TRUE)
+c(with(rdata, mean(y)), head(fitted(fit),1))
+coef(fit, matrix = TRUE)
+Coef(fit)
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/genrayleighUC.Rd b/man/genrayleighUC.Rd
new file mode 100644
index 0000000..f3a2e8a
--- /dev/null
+++ b/man/genrayleighUC.Rd
@@ -0,0 +1,78 @@
+\name{genray}
+\alias{genray}
+\alias{dgenray}
+\alias{pgenray}
+\alias{qgenray}
+\alias{rgenray}
+\title{The Generalized Rayleigh Distribution}
+\description{
+ Density, distribution function, quantile function and random
+ generation for the generalized Rayleigh distribution.
+
+}
+\usage{
+dgenray(x, shape, scale = 1, log = FALSE)
+pgenray(q, shape, scale = 1)
+qgenray(p, shape, scale = 1)
+rgenray(n, shape, scale = 1)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations.
+ If \code{length(n) > 1} then the length is taken to be the number required. }
+ \item{shape, scale}{
+ positive shape and scale parameters. }
+ \item{log}{
+ Logical.
+ If \code{log=TRUE} then the logarithm of the density is returned.
+
+ }
+
+}
+\value{
+ \code{dgenray} gives the density,
+ \code{pgenray} gives the distribution function,
+ \code{qgenray} gives the quantile function, and
+ \code{rgenray} generates random deviates.
+
+}
+\author{ J. G. Lauder and T. W. Yee }
+\details{
+ See \code{\link{genrayleigh}}, the \pkg{VGAM} family function
+ for estimating the parameters,
+ for the formula of the probability density function and other details.
+
+}
+\note{
+ We define \code{scale} as the reciprocal of the scale parameter
+ used by Kundu and Raqab (2005).
+
+}
+\seealso{
+ \code{\link{genrayleigh}}.
+ \code{\link{rayleigh}}.
+
+}
+\examples{
+\dontrun{
+shape = 0.5; scale = 1; nn = 501
+#x = seq(0.0 , 1.00, len=nn)
+x = seq(-0.1, 3.0, len=nn)
+plot(x, dgenray(x, shape, scale), type="l", las=1, ylim=c(0,1.0),
+ ylab=paste("fgenray(shape=", shape, ", scale=", scale, ")"),
+ col="blue", cex.main=0.8,
+ main="Blue is density, red is cumulative distribution function",
+ sub="Purple lines are the 10,20,...,90 percentiles")
+lines(x, pgenray(x, shape, scale), col="red")
+probs = seq(0.1, 0.9, by=0.1)
+Q = qgenray(probs, shape, scale)
+lines(Q, dgenray(Q, shape, scale), col="purple", lty=3, type="h")
+lines(Q, pgenray(Q, shape, scale), col="purple", lty=3, type="h")
+abline(h=probs, col="purple", lty=3)
+max(abs(pgenray(Q, shape, scale) - probs)) # Should be 0
+}
+}
+\keyword{distribution}
+
+
diff --git a/man/geometric.Rd b/man/geometric.Rd
index ce0c55a..ccb6e6f 100644
--- a/man/geometric.Rd
+++ b/man/geometric.Rd
@@ -69,6 +69,7 @@ geometric(link = "logit", earg=list(), expected = TRUE, method.init = 1)
\code{\link{negbinomial}},
\code{\link[stats]{Geometric}},
\code{\link{betageometric}},
+ \code{\link{expgeometric}},
\code{\link{rbetageom}}.
}
diff --git a/man/get.smart.Rd b/man/get.smart.Rd
index 36a6106..59a6bd0 100644
--- a/man/get.smart.Rd
+++ b/man/get.smart.Rd
@@ -5,6 +5,7 @@
Retrieve one component of the list \code{.smart.prediction} from
\code{smartpredenv} (\R) or
frame 1 (S-PLUS).
+
}
\usage{
get.smart()
@@ -17,6 +18,7 @@ frame 1 (S-PLUS),
in fact, \code{.smart.prediction[[.smart.prediction.counter]]}.
The whole procedure mimics a first-in first-out stack (better known
as a \emph{queue}).
+
}
\section{Side Effects}{
The variable \code{.smart.prediction.counter} in
@@ -27,6 +29,7 @@ is incremented beforehand, and then written back to
\code{smartpredenv} (\R)
or
frame 1 (S-PLUS).
+
}
\details{
\code{get.smart} is used in \code{"read"} mode within a smart function:
@@ -39,19 +42,21 @@ frame 1 (S-PLUS).
The function
\code{\link{get.smart}} gets only a part of \code{.smart.prediction} whereas
\code{\link{get.smart.prediction}} gets the entire \code{.smart.prediction}.
+
+
}
\seealso{
-\code{\link{get.smart.prediction}}.
+ \code{\link{get.smart.prediction}}.
}
\examples{
-"my1" <- function(x, minx=min(x)) { # Here is a smart function
+"my1" <- function(x, minx = min(x)) { # Here is a smart function
x <- x # Needed for nested calls, e.g., bs(scale(x))
if(smart.mode.is("read")) {
smart <- get.smart()
minx <- smart$minx # Overwrite its value
} else
if(smart.mode.is("write"))
- put.smart(list(minx=minx))
+ put.smart(list(minx = minx))
sqrt(x-minx)
}
attr(my1, "smart") <- TRUE
diff --git a/man/grc.Rd b/man/grc.Rd
index 4a41306..a8789b7 100644
--- a/man/grc.Rd
+++ b/man/grc.Rd
@@ -1,26 +1,60 @@
\name{grc}
\alias{grc}
+\alias{rcam}
%- Also NEED an `\alias' for EACH other topic documented here.
-\title{ Fitting Goodman's RC Association Model }
+\title{ Row-Column Association Models including Goodman's RC Association Model }
\description{
- Fits a Goodman's RC Association Model to a matrix of counts
+ Fits a Goodman's RC association model to a matrix of counts,
+ and more generally, a sub-class of row-column association models.
}
\usage{
grc(y, Rank = 1, Index.corner = 2:(1 + Rank),
- Structural.zero = 1, summary.arg = FALSE, h.step = 1e-04, ...)
+ szero = 1, summary.arg = FALSE, h.step = 1e-04, ...)
+rcam(y, Rank = 0, family = poissonff, Musual = NULL,
+ Index.corner = if (!Rank) NULL else 1 + Musual * (1:Rank),
+ rprefix = "Row.", cprefix = "Col.",
+ szero = if (!Rank) NULL else {
+ if (Musual == 1) 1 else setdiff(1:(Musual * ncol(y)),
+ c(1 + (1:ncol(y)) * Musual, Index.corner))
+ },
+ summary.arg = FALSE, h.step = 0.0001,
+ rbaseline = 1, cbaseline = 1, ...)
}
%- maybe also `usage' for other objects documented here.
\arguments{
\item{y}{
- A matrix of counts. Output from \code{table()} is acceptable;
- it is converted into a matrix.
+ For \code{grc} a matrix of counts.
+ For \code{rcam} a general matrix response depending on \code{family}.
+ Output from \code{table()} is acceptable; it is converted into a matrix.
Note that \code{y} must be at least 3 by 3.
+
}
\item{Rank}{
An integer from the set
- \{1,\ldots,\code{min(nrow(y), ncol(y))}\}.
- This is the dimension of the fit.
+ \{0,\ldots,\code{min(nrow(y), ncol(y))}\}.
+ This is the dimension of the fit in terms of the interaction.
+ For \code{grc()} this argument must be positive.
+ A value of 0 means no interactions (i.e., main effects only);
+ each row and column is represented by an indicator variable.
+
+ }
+ \item{family}{
+ A \pkg{VGAM} family function.
+ The first linear/additive predictor is fitted using main effects plus
+ an optional rank-\code{Rank} interaction term.
+ Not all family functions are suitable or make sense.
+ All other linear/additive predictors are fitted using an intercept-only,
+ so it has a common value over all rows and columns.
+ For example,
+ \code{\link{zipoissonff}} may be suitable for counts but not
+ \code{\link{zipoisson}} because of the ordering of the
+ linear/additive predictors.
+ If the \pkg{VGAM} family function does not have an \code{infos}
+ slot then \code{Musual} needs to be inputted.
+ The \pkg{VGAM} family function also needs to be able to
+ handle multiple responses; and not all of them can do this.
+
}
\item{Index.corner}{
@@ -29,10 +63,17 @@ grc(y, Rank = 1, Index.corner = 2:(1 + Rank),
identity matrix in the
\code{A} matrix; corner constraints are used.
+
+ }
+ \item{rprefix, cprefix}{
+ Character, for rows and columns resp.
+ For labelling the indicator variables.
+
}
- \item{Structural.zero}{
+ \item{szero}{
An integer from the set \{1,\ldots,\code{min(nrow(y), ncol(y))}\},
specifying the row that is used as the structural zero.
+
}
\item{summary.arg}{
Logical. If \code{TRUE}, a summary is returned.
@@ -42,70 +83,157 @@ grc(y, Rank = 1, Index.corner = 2:(1 + Rank),
}
\item{h.step}{
A small positive value that is passed into
- \code{summary.rrvglm()}. Only used when \code{summary.arg=TRUE}. }
+ \code{summary.rrvglm()}. Only used when \code{summary.arg = TRUE}. }
\item{\dots}{ Arguments that are passed into \code{rrvglm.control()}.
}
+ \item{Musual}{
+ The number of linear predictors of the \pkg{VGAM} \code{family} function
+ for an ordinary (univariate) response.
+ Then the number of linear predictors of the \code{rcam()} fit is
+ usually the number of columns of \code{y} multiplied by \code{Musual}.
+ The default is to evaluate the \code{infos} slot of the
+ \pkg{VGAM} \code{family} function to try to evaluate it;
+ see \code{\link{vglmff-class}}.
+ If this information is not yet supplied by the family function then
+ the value needs to be inputted manually using this argument.
+
+
+ }
+ \item{rbaseline, cbaseline}{
+ Baseline reference levels for the rows and columns.
+ Currently stored on the object but not used.
+
+ }
}
\details{
- Goodman's RC association model can fit a reduced-rank approximation
+ Goodman's RC association model fits a reduced-rank approximation
to a table of counts. The log of each cell mean is decomposed as an
intercept plus a row effect plus a column effect plus a reduced-rank
- part. The latter can be collectively written \code{A \%*\% t(C)},
+ component. The latter can be collectively written \code{A \%*\% t(C)},
the product of two `thin' matrices.
Indeed, \code{A} and \code{C} have \code{Rank} columns.
By default, the first column and row of the interaction matrix
\code{A \%*\% t(C)} is chosen
-to be structural zeros, because \code{Structural.zero=1}.
+to be structural zeros, because \code{szero = 1}.
This means the first row of \code{A} are all zeros.
+
This function uses \code{options()$contrasts} to set up the row and
column indicator variables.
+In particular, Equation (4.5) of Yee and Hastie (2003) is used.
+These are called \code{Row.} and \code{Col.} (by default) followed
+by the row or column number.
+
+
+
+
+The function \code{rcam()} is more general than \code{grc()}.
+Its default is a no-interaction model of \code{grc()}, i.e.,
+rank-0 and a Poisson distribution. This means that each
+row and column has a dummy variable associated with it.
+The first row and column is baseline.
+The power of \code{rcam()} is that many \pkg{VGAM} family functions
+can be assigned to its \code{family} argument.
+For example,
+\code{\link{normal1}} fits something in between a 2-way
+ANOVA with and without interactions,
+\code{\link{alaplace2}} with \code{Rank = 0} is something like
+\code{\link[stats]{medpolish}}.
+Others include
+\code{\link{zipoissonff}},
+\code{\link{negbinomial}}.
+Hopefully one day \emph{all} \pkg{VGAM} family functions will work when
+assigned to the \code{family} argument
+although the result may not have meaning.
+
}
\value{
An object of class \code{"grc"}, which currently is the same as
an \code{"rrvglm"} object.
+ Currently,
+ a rank-0 \code{rcam()} object is of class \code{\link{vglm-class}},
+ but it may become of class \code{"rcam"} one day.
+
+
}
\references{
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+
+
Goodman, L. A. (1981)
Association models and canonical correlation in the analysis
of cross-classifications having ordered categories.
\emph{Journal of the American Statistical Association},
\bold{76}, 320--334.
-Yee, T. W. and Hastie, T. J. (2003)
-Reduced-rank vector generalized linear models.
-\emph{Statistical Modelling},
-\bold{3}, 15--41.
Documentation accompanying the \pkg{VGAM} package at
\url{http://www.stat.auckland.ac.nz/~yee}
contains further information about the setting up of the
indicator variables.
+
}
-\author{ Thomas W. Yee }
+\author{
+Thomas W. Yee, with some
+assistance from Alfian F. Hadi.
+
+
+}
\note{
- This function sets up variables etc. before calling \code{rrvglm()}.
- The \code{...} is passed into \code{rrvglm.control()}, meaning, e.g.,
- \code{Rank=1} is default. Seting \code{trace=TRUE} may be useful for
- monitoring convergence.
+ These functions set up the indicator variables etc. before calling
+ \code{\link{rrvglm}}
+ or
+ \code{\link{vglm}}.
+ The \code{...} is passed into \code{\link{rrvglm.control}} or
+ \code{\link{vglm.control}}.
+ This means, e.g., \code{Rank = 1} is default for \code{grc()}.
+
+
+ The data should be labelled with \code{\link[base]{rownames}} and
+ \code{\link[base]{colnames}}.
+ Setting \code{trace = TRUE} is recommended for monitoring
+ convergence.
+ Using \code{criterion = "coefficients"} can result in slow convergence.
- Using \code{criterion="coefficients"} can result in slow convergence.
- If \code{summary=TRUE}, then \code{y} can be a \code{"grc"} object,
- in which case a summary can be returned. That is,
- \code{grc(y, summary=TRUE)} is equivalent to
- \code{summary(grc(y))}.
+ If \code{summary = TRUE}, then \code{y} can be a
+ \code{"grc"} object, in which case a summary can be
+ returned. That is, \code{grc(y, summary = TRUE)} is
+ equivalent to \code{summary(grc(y))}.
+
}
\section{Warning}{
- This function temporarily creates a permanent data frame called
- \code{.grc.df}, which used to be needed by \code{summary.rrvglm()}.
- Then \code{.grc.df} is deleted before exiting the function. If an
- error occurs, then \code{.grc.df} may be present in the workspace.
+ The function \code{rcam()} is experimental at this stage and
+ may have some bugs.
+ Quite a lot of expertise is needed when fitting and in
+ its interpretation. For example, the constraint
+ matrices apply the reduced-rank regression to the first linear
+ predictor and the other linear predictors are intercept-only and
+ have a common value throughout the entire data set.
+ This means that \code{family =} \code{\link{zipoissonff}} is
+ appropriate but not
+ \code{family =} \code{\link{zipoisson}}.
+ To understand what is going on, do examine the constraint
+ matrices of the fitted object, and reconcile this with Equations
+ (4.3) to (4.5) of Yee and Hastie (2003).
+
+
+ The functions temporarily create a permanent data frame
+ called \code{.grc.df} or \code{.rcam.df}, which used
+ to be needed by \code{summary.rrvglm()}. Then these
+ data frames are deleted before exiting the function.
+ If an error occurs, then the data frames may be present
+ in the workspace.
+
+
}
@@ -114,36 +242,56 @@ indicator variables.
\code{\link{rrvglm.control}},
\code{\link{rrvglm-class}},
\code{summary.grc},
+ \code{\link{Rcam}},
+ \code{\link{plotrcam0}},
\code{\link{auuc}},
- \code{\link{olympic}}.
+ \code{\link{olympic}},
+ \code{\link{poissonff}}.
+
}
\examples{
# Some undergraduate student enrolments at the University of Auckland in 1990
-g1 = grc(auuc, Rank=1)
-fitted(g1)
-summary(g1)
+grc1 <- grc(auuc)
+fitted(grc1)
+summary(grc1)
-g2 = grc(auuc, Rank=2, Index.corner=c(2,5))
-fitted(g2)
-summary(g2)
+grc2 <- grc(auuc, Rank = 2, Index.corner = c(2, 5))
+fitted(grc2)
+summary(grc2)
# 2008 Summer Olympic Games in Beijing
-top10 = head(olympic, n=10)
-oly1 = with(top10, grc(cbind(gold,silver,bronze)))
+top10 <- head(olympic, n = 10)
+oly1 <- with(top10, grc(cbind(gold, silver, bronze)))
round(fitted(oly1))
-round(resid(oly1, type="response"), dig=1) # Response residuals
+round(resid(oly1, type = "response"), dig = 1) # Response residuals
summary(oly1)
Coef(oly1)
+
+
+# Roughly median polish
+rcam0 <- rcam(auuc, fam = alaplace2(tau = 0.5, intparloc = TRUE), trace = TRUE)
+round(fitted(rcam0), dig = 0)
+rcam0 at y
+round(coef(rcam0, matrix = TRUE), dig = 2)
+print(Coef(rcam0, matrix = TRUE), dig = 3)
+# constraints(rcam0)
+names(constraints(rcam0))
}
\keyword{models}
\keyword{regression}
% plot(oly1)
-% oly2 = with(top10, grc(cbind(gold,silver,bronze), Rank=2)) # Saturated model
+% oly2 <- with(top10, grc(cbind(gold,silver,bronze), Rank = 2)) # Saturated model
% round(fitted(oly2))
% round(fitted(oly2)) - with(top10, cbind(gold,silver,bronze))
% summary(oly2) # Saturated model
+% zz 20100927 unsure
+% Then \code{.grc.df} is deleted before exiting the function.
+
+
+
+
diff --git a/man/huber.Rd b/man/huber.Rd
new file mode 100644
index 0000000..7224db7
--- /dev/null
+++ b/man/huber.Rd
@@ -0,0 +1,109 @@
+\name{huber}
+\alias{huber}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Huber's least favourable distribution family function }
+\description{
+ M-estimation of the two parameters of
+ Huber's least favourable distribution.
+
+}
+\usage{
+huber(llocation = "identity", lscale = "loge", elocation = list(),
+ escale = list(), k = 0.862, method.init = 1, zero = 2)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{llocation, lscale}{
+ Link functions applied to the location and scale parameters.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{elocation, escale}{
+ List. Extra argument for the links.
+ See \code{earg} in \code{\link{Links}} for general information.
+
+ }
+ \item{k}{
+ Tuning constant.
+ See \code{\link{rhuber}} for more information.
+
+ }
+ \item{method.init, zero}{
+ See \code{\link{CommonVGAMffArguments}} for information.
+ The default value of \code{zero} means the scale parameter is
+ modelled as an intercept-only.
+
+ }
+
+}
+\details{
+ Huber's least favourable distribution family function is popular for
+ resistant/robust regression. The center of the distribution is normal
+ and its tails are double exponential.
+
+ By default, the mean is the first linear/additive predictor (returned
+ as the fitted values; this is the location parameter), and
+ the log of the scale parameter is the second linear/additive predictor.
+ The Fisher information matrix is diagonal; Fisher scoring is implemented.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+ Huber, P. J. and Ronchetti, E. (2009)
+ \emph{Robust Statistics}, 2nd ed. New York: Wiley.
+
+}
+
+\author{
+ T. W. Yee. Help was given by Arash Ardalan.
+
+}
+
+\note{
+ The response should be univariate.
+
+}
+\seealso{
+ \code{\link{rhuber}},
+ \code{\link{normal1}},
+ \code{\link{gaussianff}},
+ \code{\link{laplace}},
+ \code{\link{CommonVGAMffArguments}}.
+
+}
+\examples{
+set.seed(1231); NN = 30; coef1 = 1; coef2 = 10
+hdata = data.frame(x2 = sort(runif(NN)))
+hdata = transform(hdata, y = rhuber(NN, mu = coef1 + coef2 * x2))
+
+hdata$x2[1] = 0.0 # Add an outlier
+hdata$y[1] = 10
+
+fit.huber <- vglm(y ~ x2, huber(meth = 3), hdata, trace = TRUE)
+
+coef(fit.huber, matrix = TRUE)
+summary(fit.huber)
+
+
+\dontrun{ # Plot the results
+plot(y ~ x2, hdata, col = "blue", las = 1)
+lines(fitted(fit.huber) ~ x2, hdata, col = "darkgreen", lwd = 2)
+
+fit.lm <- lm(y ~ x2, hdata) # Compare to a LM:
+lines(fitted(fit.lm) ~ x2, hdata, col = "lavender", lwd = 3)
+
+# Compare to truth:
+lines(coef1 + coef2 * x2 ~ x2, hdata, col = "red", lwd = 2, lty = "dashed")
+
+legend("bottomright", legend = c("truth", "huber", "lm"),
+ col = c("red", "darkgreen", "lavender"),
+ lty = c("dashed", "solid", "solid"), lwd = c(2, 2, 3))
+}
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/huberUC.Rd b/man/huberUC.Rd
new file mode 100644
index 0000000..e1ee7af
--- /dev/null
+++ b/man/huberUC.Rd
@@ -0,0 +1,111 @@
+\name{dhuber}
+\alias{dhuber}
+\alias{edhuber}
+\alias{rhuber}
+\alias{qhuber}
+\alias{phuber}
+\title{Huber's least favourable distribution}
+\description{
+ Density, distribution function, quantile function and random generation
+ for Huber's least favourable distribution, see Huber and Ronchetti (2009).
+
+}
+\usage{
+ dhuber(x, k = 0.862, mu = 0, sigma = 1, log = FALSE)
+ edhuber(x, k = 0.862, mu = 0, sigma = 1, log = FALSE)
+ rhuber(n, k = 0.862, mu = 0, sigma = 1)
+ qhuber(p, k = 0.862, mu = 0, sigma = 1)
+ phuber(q, k = 0.862, mu = 0, sigma = 1)
+
+}
+\arguments{
+ \item{x, q}{numeric vector, vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of random values to be generated.
+ If \code{length(n) > 1} then the length is taken to be the number required. }
+ \item{k}{numeric. Borderline value of central Gaussian part of the
+ distribution.
+ This is known as the tuning constant, and should be positive.
+ For example, \code{k = 0.862} refers to a 20\% contamination
+ neighborhood of the Gaussian distribution.
+ If \code{k = 1.40} then this is 5\% contamination.
+
+ }
+ \item{mu}{numeric. Distribution mean.}
+ \item{sigma}{numeric. Distribution scale (\code{sigma = 1} defines the
+ distribution in standard form, with standard Gaussian centre).}
+ \item{log}{
+ Logical.
+ If \code{log = TRUE} then the logarithm of the result is returned.
+
+ }
+
+
+}
+\details{
+ Details are given in \code{\link{huber}}, the
+ \pkg{VGAM} family function for estimating the
+ parameters \code{mu} and \code{sigma}.
+
+}
+
+\value{
+ \code{dhuber} gives a vector of density values.
+
+ \code{edhuber} gives a list with components \code{val} (density
+ values) and \code{eps} (contamination proportion).
+
+ \code{rhuber} gives a vector of random numbers generated by
+ Huber's least favourable distribution.
+
+ \code{phuber} gives the distribution function,
+ \code{qhuber} gives the quantile function.
+
+
+}
+%\references{
+% Huber, P. J. and Ronchetti, E. (2009)
+% \emph{Robust Statistics}, 2nd ed. New York: Wiley.
+%
+% Huber, P. J. and Ronchetti, E. (2009) Robust Statistics
+% (2nd ed.). Wiley, New York.
+%
+%}
+
+\author{
+ Christian Hennig wrote \code{[d,ed,r]huber()}
+ (from \pkg{smoothmest}) and
+ slight modifications were made by T. W. Yee to
+ replace looping by vectorization and addition of the \code{log} argument.
+ Arash Ardalan wrote \code{[pq]huber()}.
+ This helpfile was adapted from \pkg{smoothmest}.
+
+}
+\seealso{
+ \code{\link{huber}}.
+
+}
+
+\examples{
+set.seed(123456)
+edhuber(1:5, k = 1.5)
+rhuber(5)
+
+# Plot cdf and pdf
+\dontrun{ mu = 3; xx = seq(-2, 7, len = 100)
+plot(xx, dhuber(xx, mu = mu), type = "l", col = "blue", las = 1, ylab = "",
+ main = "blue is density, red is cumulative distribution function",
+ sub = "Purple lines are the 10,20,...,90 percentiles",
+ ylim = 0:1)
+abline(h = 0, col = "blue", lty = 2)
+lines(xx, phuber(xx, mu = mu), type = "l", col = "red")
+probs = seq(0.1, 0.9, by = 0.1)
+Q = qhuber(probs, mu = mu)
+lines(Q, dhuber(Q, mu = mu), col = "purple", lty = 3, type = "h")
+lines(Q, phuber(Q, mu = mu), col = "purple", lty = 3, type = "h")
+abline(h = probs, col = "purple", lty = 3)
+phuber(Q, mu = mu) - probs # Should be all zero
+}
+}
+\keyword{distribution}
+
diff --git a/man/inv.gaussianff.Rd b/man/inv.gaussianff.Rd
index 0e51810..7c6678e 100644
--- a/man/inv.gaussianff.Rd
+++ b/man/inv.gaussianff.Rd
@@ -8,8 +8,10 @@
}
\usage{
-inv.gaussianff(lmu="loge", llambda="loge", emu=list(), elambda=list(),
- ilambda=1, zero=NULL)
+inv.gaussianff(lmu = "loge", llambda = "loge",
+ emu = list(), elambda = list(),
+ method.init = 1, ilambda = 1,
+ shrinkage.init = 0.99, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -28,16 +30,14 @@ inv.gaussianff(lmu="loge", llambda="loge", emu=list(), elambda=list(),
Initial value for the \eqn{\lambda}{lambda} parameter.
}
- \item{zero}{
- An integer-valued vector specifying which
- linear/additive predictors \eqn{\eta_j}{eta_j}
- are modelled as intercepts only.
- The values must be from the set \{1,2\}.
+ \item{method.init, shrinkage.init, zero}{
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
}
\details{
- The inverse Gaussian distribution has a density
+ The standard (``canonical'') form of the
+ inverse Gaussian distribution has a density
that can be written as
\deqn{f(y;\mu,\lambda) = \sqrt{\lambda/(2\pi y^3)}
\exp\left(-\lambda (y-\mu)^2/(2 \mu^2 y)\right)}{%
@@ -51,6 +51,7 @@ inv.gaussianff(lmu="loge", llambda="loge", emu=list(), elambda=list(),
\eqn{\mu^3/\lambda}{mu^3/lambda}.
By default, \eqn{\eta_1=\log(\mu)}{eta1=log(mu)} and
\eqn{\eta_2=\log(\lambda)}{eta2=log(lambda)}.
+ The mean is returned as the fitted values.
}
\value{
@@ -89,13 +90,16 @@ New York: Wiley-Interscience, Third edition.
}
\examples{
-n = 1000
-shape = exp(3)
-y = rinv.gaussian(n=n, mu=exp(2), lambda=shape)
-fit = vglm(y ~ 1, inv.gaussianff(ilam=shape), trace=TRUE)
-coef(fit, matrix=TRUE)
-Coef(fit)
-summary(fit)
+idat <- data.frame(x2 = runif(nn <- 1000))
+idat <- transform(idat, mymu = exp(2 + 1 * x2),
+ Lambda = exp(2 + 1 * x2))
+idat <- transform(idat, y = rinv.gaussian(nn, mu = mymu, lambda = Lambda))
+fit1 <- vglm(y ~ x2, inv.gaussianff, idat, trace = TRUE)
+rrig <- rrvglm(y ~ x2, inv.gaussianff, idat, trace = TRUE)
+coef(fit1, matrix = TRUE)
+coef(rrig, matrix = TRUE)
+Coef(rrig)
+summary(fit1)
}
\keyword{models}
\keyword{regression}
diff --git a/man/is.smart.Rd b/man/is.smart.Rd
index c97235e..95fd53a 100644
--- a/man/is.smart.Rd
+++ b/man/is.smart.Rd
@@ -24,6 +24,7 @@
\code{object} has the logical attribute \code{"smart"}. If so then
this is returned, else \code{FALSE}.
+
If \code{object} is a fitted model then this function looks to see whether
\code{object at smart.prediction} or
\code{object\$smart.prediction} exists.
@@ -32,24 +33,26 @@
The reason for this is because, e.g., \code{lm(...,smart=FALSE)}
and \code{vglm(...,smart=FALSE)}, will return such a specific list.
+
Writers of smart functions manually have to assign this attribute to
their smart function after it has been written.
+
}
\examples{
is.smart(my1) # TRUE
is.smart(poly) # TRUE
-if(is.R()) library(splines)
+library(splines)
is.smart(bs) # TRUE
is.smart(ns) # TRUE
is.smart(tan) # FALSE
if(!is.R()) is.smart(lm) # TRUE
\dontrun{
library(VGAM)
-x = rnorm(9)
-fit1 = vglm(rnorm(9) ~ x, normal1)
+x <- rnorm(9)
+fit1 <- vglm(rnorm(9) ~ x, normal1)
is.smart(fit1) # TRUE
-fit2 = vglm(rnorm(9) ~ x, normal1, smart=FALSE)
+fit2 <- vglm(rnorm(9) ~ x, normal1, smart = FALSE)
is.smart(fit2) # FALSE
fit2 at smart.prediction
}
diff --git a/man/koenker.Rd b/man/koenker.Rd
new file mode 100644
index 0000000..7a2e754
--- /dev/null
+++ b/man/koenker.Rd
@@ -0,0 +1,120 @@
+\name{koenker}
+\alias{koenker}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Koenker's Distribution Family Function }
+\description{
+ Estimates the location and scale parameters of Koenker's
+ distribution by maximum likelihood estimation.
+
+}
+\usage{
+koenker(percentile = 50, llocation = "identity", lscale = "loge",
+ elocation = list(), escale = list(), ilocation = NULL,
+ iscale = NULL, method.init = 1, zero = 2)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{percentile}{
+ A numerical vector containing values between 0 and 100,
+ which are the quantiles and expectiles.
+ They will be returned as `fitted values'.
+
+ }
+ \item{llocation, lscale, elocation, escale}{
+ See \code{\link{Links}} for more choices,
+ and \code{\link{CommonVGAMffArguments}}.
+
+ }
+ \item{ilocation, iscale, method.init, zero}{
+ See \code{\link{CommonVGAMffArguments}} for details.
+
+ }
+}
+\details{
+ Koenker (1993) solved for the distribution whose quantiles are
+ equal to its expectiles. This is called Koenker's distribution
+ here. Its canonical form has mean and mode at 0 and has a heavy
+ tail (in fact, its variance is infinite).
+
+ The standard (``canonical'') form of Koenker's
+ distribution can be endowed with a location and scale parameter.
+ The standard form has a density
+ that can be written as
+ \deqn{f(z) = 2 / (4 + z^2)^{3/2}}{%
+ f(z) = 2 / (4 + z^2)^(3/2)
+ }
+ for real \eqn{z}.
+ Then \eqn{z = (y-a)/b} for location and scale parameters
+ \eqn{a} and \eqn{b > 0}.
+ The mean of \eqn{Y} is \eqn{a}{a}.
+ By default, \eqn{\eta_1=a}{eta1=a} and
+ \eqn{\eta_2=\log(b)}{eta2=log(b)}.
+ The expectiles/quantiles corresponding to \code{percentile}
+ are returned as the fitted values;
+ in particular, \code{percentile = 50} corresponds to the mean
+ (0.5 expectile) and median (0.5 quantile).
+
+
+ Note that if \eqn{Y} has a standard Koenker distribution
+ then \eqn{Y = \sqrt{2} T_2}{Y = sqrt(2) * T_2} where \eqn{T_2}
+ has a Student-t distribution with 2 degrees of freedom.
+ The two parameters here can also be estimated using
+ \code{\link{studentt2}} by specifying \code{df = 2} and making
+ an adjustment for the scale parameter, however, this \pkg{VGAM}
+ family function is more efficient since the EIM is known
+ (Fisher scoring is implemented.)
+
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+
+}
+\references{
+
+Koenker, R. (1993)
+When are expectiles percentiles? (solution)
+\emph{Econometric Theory},
+\bold{9}, 526--527.
+
+}
+\author{ T. W. Yee }
+%\note{
+%
+%}
+
+\seealso{
+ \code{\link{dkoenker}},
+ \code{\link{studentt2}}.
+
+
+}
+\examples{
+set.seed(123); nn <- 1000
+kdat <- data.frame(x2 = sort(runif(nn)))
+kdat <- transform(kdat, mylocat = 1 + 3 * x2,
+ myscale = 1)
+kdat <- transform(kdat, y = rkoenker(nn, loc = mylocat, scale = myscale))
+fit <- vglm(y ~ x2, koenker(perc = c(1, 50, 99)), kdat, trace = TRUE)
+fit2 <- vglm(y ~ x2, studentt2(df = 2), kdat, trace = TRUE) # 'same' as fit
+
+coef(fit, matrix = TRUE)
+head(fitted(fit))
+head(predict(fit))
+
+# Nice plot of the results
+\dontrun{ plot(y ~ x2, kdat, col = "blue", las = 1,
+ sub = paste("n =", nn),
+ main = "Fitted quantiles/expectiles using Koenker's distribution")
+matplot(with(kdat, x2), fitted(fit), add = TRUE, type = "l", lwd = 3)
+legend("bottomright", lty = 1:3, lwd = 3, legend = colnames(fitted(fit)),
+ col = 1:3) }
+
+fit at extra$percentile # Sample quantiles
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/koenkerUC.Rd b/man/koenkerUC.Rd
new file mode 100644
index 0000000..a2a3336
--- /dev/null
+++ b/man/koenkerUC.Rd
@@ -0,0 +1,93 @@
+\name{Expectiles-Koenker}
+\alias{Expectiles-Koenker}
+\alias{dkoenker}
+\alias{pkoenker}
+\alias{qkoenker}
+\alias{rkoenker}
+\title{ Expectiles/Quantiles of the Koenker Distribution }
+\description{
+ Density function, distribution function, and
+ quantile/expectile function and random generation for the
+ Koenker distribution.
+
+}
+\usage{
+dkoenker(x, location = 0, scale = 1, log = FALSE)
+pkoenker(q, location = 0, scale = 1, log = FALSE)
+qkoenker(p, location = 0, scale = 1)
+rkoenker(n, location = 0, scale = 1)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x, q}{
+ Vector of expectiles/quantiles.
+ See the terminology note below.
+ }
+ \item{p}{
+ Vector of probabilities. % (tau or \eqn{\tau}).
+ These should lie in \eqn{(0,1)}.
+ }
+ \item{n, log}{See \code{\link[stats:Uniform]{runif}}.}
+ \item{location, scale}{
+ Location and scale parameters.
+ The latter should have positive values.
+ Values of these vectors are recycled.
+ }
+}
+\details{
+ A Student-t distribution with 2 degrees of freedom and
+ a scale parameter of \code{sqrt(2)} is equivalent to the
+ standard Koenker distribution.
+ Further details about this distribution are given in
+ \code{\link{koenker}}.
+
+}
+\value{
+ \code{dkoenker(x)} gives the density function.
+ \code{pkoenker(q)} gives the distribution function.
+ \code{qkoenker(p)} gives the expectile and quantile function.
+ \code{rkoenker(n)} gives \eqn{n} random variates.
+
+}
+\author{ T. W. Yee }
+
+%\note{
+%}
+
+\seealso{
+ \code{\link[stats:TDist]{dt}},
+ \code{\link{koenker}}.
+
+}
+
+\examples{
+my_p = 0.25; y = rkoenker(nn <- 5000)
+(myexp = qkoenker(my_p))
+sum(myexp - y[y <= myexp]) / sum(abs(myexp - y)) # Should be my_p
+# Equivalently:
+I1 = mean(y <= myexp) * mean( myexp - y[y <= myexp])
+I2 = mean(y > myexp) * mean(-myexp + y[y > myexp])
+I1 / (I1 + I2) # Should be my_p
+# Or:
+I1 = sum( myexp - y[y <= myexp])
+I2 = sum(-myexp + y[y > myexp])
+
+# Non-standard Koenker distribution
+myloc = 1; myscale = 2
+yy = rkoenker(nn, myloc, myscale)
+(myexp = qkoenker(my_p, myloc, myscale))
+sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy)) # Should be my_p
+pkoenker(mean(yy), myloc, myscale) # Should be 0.5
+abs(qkoenker(0.5, myloc, myscale) - mean(yy)) # Should be 0
+abs(pkoenker(myexp, myloc, myscale) - my_p) # Should be 0
+integrate(f = dkoenker, lower = -Inf, upper = Inf,
+ locat = myloc, scale = myscale) # Should be 1
+
+y <- seq(-7, 7, len = 201)
+max(abs(dkoenker(y) - dt(y / sqrt(2), df = 2) / sqrt(2))) # Should be 0
+\dontrun{ plot(y, dkoenker(y), type = "l", col = "blue", las = 1,
+ ylim = c(0, 0.4), main = "Blue = Koenker; orange = N(0, 1)")
+lines(y, dnorm(y), type = "l", col = "orange")
+abline(h = 0, v = 0, lty = 2) }
+}
+\keyword{distribution}
diff --git a/man/kumar.Rd b/man/kumar.Rd
index 826e48a..6c0ece5 100644
--- a/man/kumar.Rd
+++ b/man/kumar.Rd
@@ -8,13 +8,17 @@
}
\usage{
-kumar(lshape1 = "loge", lshape2 = "loge", eshape1 = list(), eshape2 = list(),
- ishape1 = NULL, ishape2 = NULL, nsimEIM = 500, zero = NULL)
+kumar(lshape1 = "loge", lshape2 = "loge",
+ eshape1 = list(), eshape2 = list(),
+ ishape1 = NULL, ishape2 = NULL, grid.shape1 = c(0.4, 6.0),
+ tol12 = 1.0e-4, zero = NULL)
+
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{lshape1, lshape2}{
- Link function for the two positive shape parameters.
+ Link function for the two positive shape parameters,
+ respectively, called \eqn{a} and \eqn{b} below.
See \code{\link{Links}} for more choices.
}
@@ -28,7 +32,17 @@ kumar(lshape1 = "loge", lshape2 = "loge", eshape1 = list(), eshape2 = list(),
Optional initial values for the two positive shape parameters.
}
- \item{nsimEIM, zero}{
+ \item{tol12}{
+ Numeric and positive.
+ Tolerance for testing whether the second shape parameter is either 1 or 2.
+ If so then the working weights need to handle these singularities.
+
+ }
+ \item{grid.shape1}{
+ Lower and upper limits for a grid search for the first shape parameter.
+
+ }
+ \item{zero}{
See \code{\link{CommonVGAMffArguments}}.
}
@@ -40,11 +54,14 @@ kumar(lshape1 = "loge", lshape2 = "loge", eshape1 = list(), eshape2 = list(),
a*b*y^(a-1)*(1-y^a)^(b-1)}
where \eqn{0 < y < 1} and the two shape parameters,
\eqn{a} and \eqn{b}, are positive.
- The mean is \eqn{b Beta(1+1/a,b)}
+ The mean is \eqn{b \times Beta(1+1/a,b)}{b * Beta(1+1/a,b)}
(returned as the fitted values) and the variance is
- \eqn{b Beta(1+2/a,b) - (b Beta(1+1/a,b))^2}.
+ \eqn{b \times Beta(1+2/a,b) -
+ (b \times Beta(1+1/a,b))^2}{b * Beta(1+2/a,b) - (b * Beta(1+1/a,b))^2}.
Applications of the Kumaraswamy distribution include the storage
volume of a water reservoir.
+ Fisher scoring is implemented.
+
}
\value{
@@ -52,6 +69,7 @@ kumar(lshape1 = "loge", lshape2 = "loge", eshape1 = list(), eshape2 = list(),
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
Kumaraswamy, P. (1980).
@@ -60,6 +78,14 @@ kumar(lshape1 = "loge", lshape2 = "loge", eshape1 = list(), eshape2 = list(),
\emph{Journal of Hydrology},
\bold{46}, 79--88.
+
+ Jones, M. C. (2009).
+ Kumaraswamy's distribution: A beta-type distribution with some
+ tractability advantages.
+ \emph{Statistical Methodology},
+ \bold{6}, 70--81.
+
+
}
\author{ T. W. Yee }
%\note{
@@ -72,10 +98,10 @@ kumar(lshape1 = "loge", lshape2 = "loge", eshape1 = list(), eshape2 = list(),
}
\examples{
-shape1 = exp(1); shape2 = exp(2);
-kdata = data.frame(y = rkumar(n = 1000, shape1, shape2))
-fit = vglm(y ~ 1, kumar, kdata, trace = TRUE)
-c(with(kdata, mean(y)), head(fitted(fit),1))
+shape1 <- exp(1); shape2 <- exp(2);
+kdata <- data.frame(y = rkumar(n = 1000, shape1, shape2))
+fit <- vglm(y ~ 1, kumar, kdata, trace = TRUE)
+c(with(kdata, mean(y)), head(fitted(fit), 1))
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/kumarUC.Rd b/man/kumarUC.Rd
index d3abf91..5b5caf9 100644
--- a/man/kumarUC.Rd
+++ b/man/kumarUC.Rd
@@ -11,7 +11,7 @@
}
\usage{
-dkumar(x, shape1, shape2, log=FALSE)
+dkumar(x, shape1, shape2, log = FALSE)
pkumar(q, shape1, shape2)
qkumar(p, shape1, shape2)
rkumar(n, shape1, shape2)
@@ -20,11 +20,13 @@ rkumar(n, shape1, shape2)
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
\item{n}{number of observations.
- If \code{length(n) > 1} then the length is taken to be the number required. }
+ If \code{length(n) > 1} then the length is taken to be the number required.
+
+ }
\item{shape1, shape2}{ positive shape parameters. }
\item{log}{
Logical.
- If \code{log=TRUE} then the logarithm of the density is returned.
+ If \code{log = TRUE} then the logarithm of the density is returned.
}
@@ -34,6 +36,7 @@ rkumar(n, shape1, shape2)
\code{pkumar} gives the distribution function,
\code{qkumar} gives the quantile function, and
\code{rkumar} generates random deviates.
+
}
\author{ T. W. Yee }
\details{
@@ -41,6 +44,7 @@ rkumar(n, shape1, shape2)
for estimating the parameters,
for the formula of the probability density function and other details.
+
}
%\note{
%}
@@ -50,20 +54,19 @@ rkumar(n, shape1, shape2)
}
\examples{
\dontrun{
-shape1 = 2; shape2 = 2; nn = 201; # shape1 = shape2 = 0.5;
-x = seq(0.0 , 1.00, len=nn)
-x = seq(-0.05, 1.05, len=nn)
-plot(x, dkumar(x, shape1, shape2), type="l", las=1, ylim=c(0,1.5),
- ylab=paste("fkumar(shape1=", shape1, ", shape2=", shape2, ")"),
- col="blue", cex.main=0.8,
- main="Blue is density, red is cumulative distribution function",
- sub="Purple lines are the 10,20,...,90 percentiles")
-lines(x, pkumar(x, shape1, shape2), col="red")
-probs = seq(0.1, 0.9, by=0.1)
-Q = qkumar(probs, shape1, shape2)
-lines(Q, dkumar(Q, shape1, shape2), col="purple", lty=3, type="h")
-lines(Q, pkumar(Q, shape1, shape2), col="purple", lty=3, type="h")
-abline(h=probs, col="purple", lty=3)
+shape1 <- 2; shape2 <- 2; nn <- 201; # shape1 = shape2 = 0.5;
+x <- seq(-0.05, 1.05, len = nn)
+plot(x, dkumar(x, shape1, shape2), type = "l", las = 1, ylim = c(0,1.5),
+ ylab = paste("fkumar(shape1 = ", shape1, ", shape2 = ", shape2, ")"),
+ col = "blue", cex.main = 0.8,
+ main = "Blue is density, orange is cumulative distribution function",
+ sub = "Purple lines are the 10,20,...,90 percentiles")
+lines(x, pkumar(x, shape1, shape2), col = "orange")
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qkumar(probs, shape1, shape2)
+lines(Q, dkumar(Q, shape1, shape2), col = "purple", lty = 3, type = "h")
+lines(Q, pkumar(Q, shape1, shape2), col = "purple", lty = 3, type = "h")
+abline(h = probs, col = "purple", lty = 3)
max(abs(pkumar(Q, shape1, shape2) - probs)) # Should be 0
}
}
diff --git a/man/lambertW.Rd b/man/lambertW.Rd
new file mode 100644
index 0000000..a0ce990
--- /dev/null
+++ b/man/lambertW.Rd
@@ -0,0 +1,80 @@
+\name{lambertW}
+\alias{lambertW}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{
+The Lambert W function
+}
+\description{
+Computes the Lambert \emph{W} function for real values.
+}
+\usage{
+lambertW(x, tolerance = 1e-10, maxit = 50)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x}{
+A vector of reals.
+}
+ \item{tolerance}{
+Accuracy desired.
+}
+ \item{maxit}{
+Maximum number of iterations of third-order Halley's method.
+}
+}
+\details{
+ The Lambert \eqn{W} function is the root of the equation
+ \eqn{W(z) \exp(W(z)) = z}{W(z) * exp(W(z)) = z}
+ for complex \eqn{z}.
+ It is multi-valued if \eqn{z} is real and \eqn{z < -1/e}.
+ For real \eqn{-1/e \leq z < 0}{-1/e <= z < 0} it has two
+ possible real values, and currently only the upper branch
+ is computed.
+
+}
+\value{
+ This function returns the principal branch of the \eqn{W} function
+ for \emph{real} \eqn{z}.
+ It returns \eqn{W(z) \geq -1}{W(z) >= -1},
+ and \code{NA} for \eqn{z < -1/e}.
+
+}
+\references{
+Corless, R. M. and Gonnet, G. H. and
+Hare, D. E. G. and Jeffrey, D. J. and Knuth, D. E. (1996)
+On the Lambert \eqn{W} function.
+\emph{Advances in Computational Mathematics},
+\bold{5}(4), 329--359.
+
+}
+\author{
+T. W. Yee
+
+}
+\note{
+If convergence does not occur then increase the value of
+\code{maxit} and/or \code{tolerance}.
+
+
+Yet to do: add an argument \code{lbranch = TRUE} to return
+the lower branch for
+real \eqn{-1/e \leq z < 0}{-1/e <= z < 0};
+this would give \eqn{W(z) \leq -1}{W(z) <= -1}.
+
+}
+
+%% ~Make other sections like Warning with \section{Warning }{....} ~
+
+\seealso{
+ \code{\link[base:log]{log}},
+ \code{\link[base:log]{exp}}.
+
+}
+\examples{ \dontrun{
+curve(lambertW, -exp(-1), 3, xlim = c(-1, 3), ylim = c(-2, 1), col = "red")
+abline(v = -exp(-1), h = -1, lty = "dotted")
+abline(h = 0, v = 0, lty = "dashed", lwd = 2) }
+}
+% Add one or more standard keywords, see file 'KEYWORDS' in the
+% R documentation directory.
+\keyword{math}
diff --git a/man/laplace.Rd b/man/laplace.Rd
index 22b2cd1..e748aa7 100644
--- a/man/laplace.Rd
+++ b/man/laplace.Rd
@@ -8,8 +8,9 @@
}
\usage{
-laplace(llocation="identity", lscale="loge", elocation=list(),
- escale=list(), ilocation=NULL, iscale=NULL, method.init=1, zero=2)
+laplace(llocation = "identity", lscale = "loge", elocation = list(),
+ escale = list(), ilocation = NULL, iscale = NULL,
+ method.init = 1, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -100,16 +101,16 @@ Boston: Birkhauser.
}
\examples{
-lddat = data.frame(y = rlaplace(nn <- 100, loc=2, scale=exp(1)))
-fit = vglm(y ~ 1, laplace, lddat, trace=TRUE, crit="l")
-coef(fit, matrix=TRUE)
+lddat = data.frame(y = rlaplace(nn <- 100, loc = 2, scale = exp(1)))
+fit = vglm(y ~ 1, laplace, lddat, trace = TRUE, crit = "l")
+coef(fit, matrix = TRUE)
Coef(fit)
with(lddat, median(y))
lddat = data.frame(x = runif(nn <- 1001))
-lddat = transform(lddat, y = rlaplace(nn, loc=2, scale=exp(-1+1*x)))
-fit = vglm(y ~ x, laplace(iloc=0.2, meth=2, zero=1), lddat, trace=TRUE)
-coef(fit, matrix=TRUE)
+lddat = transform(lddat, y = rlaplace(nn, loc = 2, scale = exp(-1+1*x)))
+coef(vglm(y ~ x, laplace(iloc = .2, meth = 2, zero = 1), lddat,
+ trace = TRUE), matrix = TRUE)
}
\keyword{models}
\keyword{regression}
diff --git a/man/laplaceUC.Rd b/man/laplaceUC.Rd
index d55dc35..4cce574 100644
--- a/man/laplaceUC.Rd
+++ b/man/laplaceUC.Rd
@@ -12,10 +12,10 @@
}
\usage{
-dlaplace(x, location=0, scale=1, log=FALSE)
-plaplace(q, location=0, scale=1)
-qlaplace(p, location=0, scale=1)
-rlaplace(n, location=0, scale=1)
+dlaplace(x, location = 0, scale = 1, log = FALSE)
+plaplace(q, location = 0, scale = 1)
+qlaplace(p, location = 0, scale = 1)
+rlaplace(n, location = 0, scale = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -31,7 +31,7 @@ rlaplace(n, location=0, scale=1)
}
\item{log}{
Logical.
- If \code{log=TRUE} then the logarithm of the density is returned.
+ If \code{log = TRUE} then the logarithm of the density is returned.
}
@@ -78,25 +78,24 @@ New York: Wiley-Interscience, Third edition.
}
\examples{
loc = 1; b = 2
-y = rlaplace(n=100, loc=loc, scale=b)
-mean(y)
+y = rlaplace(n = 100, loc = loc, scale = b)
+mean(y) # sample mean
loc # population mean
-var(y)
+var(y) # sample variance
2 * b^2 # population variance
-\dontrun{
-loc = 0; b = 1.5; x = seq(-5, 5, by=0.01)
-plot(x, dlaplace(x, loc, b), type="l", col="blue", ylim=c(0,1),
- main="Blue is density, red is cumulative distribution function",
- sub="Purple are 5,10,...,95 percentiles", las=1, ylab="")
-abline(h=0, col="blue", lty=2)
-lines(qlaplace(seq(0.05,0.95,by=0.05), loc, b),
- dlaplace(qlaplace(seq(0.05,0.95,by=0.05), loc, b), loc, b),
- col="purple", lty=3, type="h")
-lines(x, plaplace(x, loc, b), type="l", col="red")
-abline(h=0, lty=2) }
+\dontrun{ loc = 0; b = 1.5; x = seq(-5, 5, by = 0.01)
+plot(x, dlaplace(x, loc, b), type = "l", col = "blue", ylim = c(0,1),
+ main = "Blue is density, red is cumulative distribution function",
+ sub = "Purple are 5,10,...,95 percentiles", las = 1, ylab = "")
+abline(h = 0, col = "blue", lty = 2)
+lines(qlaplace(seq(0.05,0.95,by = 0.05), loc, b),
+ dlaplace(qlaplace(seq(0.05,0.95,by = 0.05), loc, b), loc, b),
+ col = "purple", lty = 3, type = "h")
+lines(x, plaplace(x, loc, b), type = "l", col = "red")
+abline(h = 0, lty = 2) }
-plaplace(qlaplace(seq(0.05,0.95,by=0.05), loc, b), loc, b)
+plaplace(qlaplace(seq(0.05,0.95,by = 0.05), loc, b), loc, b)
}
\keyword{distribution}
diff --git a/man/loge.Rd b/man/loge.Rd
index ee6a969..f6971f6 100644
--- a/man/loge.Rd
+++ b/man/loge.Rd
@@ -92,14 +92,16 @@ nloge(theta, earg = list(), inverse = FALSE, deriv = 0,
\code{\link{logc}},
\code{\link{loglog}},
\code{\link[base:Log]{log}},
- \code{\link{logoff}}.
+ \code{\link{logoff}},
+ \code{\link{lambertW}}.
+
}
\examples{
\dontrun{
-loge(seq(-0.2, 0.5, by=0.1))
-loge(seq(-0.2, 0.5, by=0.1), earg=list(bvalue= .Machine$double.xmin))
-nloge(seq(-0.2, 0.5, by=0.1))
-nloge(seq(-0.2, 0.5, by=0.1), earg=list(bvalue= .Machine$double.xmin))
+loge(seq(-0.2, 0.5, by = 0.1))
+loge(seq(-0.2, 0.5, by = 0.1), earg = list(bvalue = .Machine$double.xmin))
+nloge(seq(-0.2, 0.5, by = 0.1))
+nloge(seq(-0.2, 0.5, by = 0.1), earg = list(bvalue = .Machine$double.xmin))
}
}
\keyword{math}
diff --git a/man/logistic.Rd b/man/logistic.Rd
index f836c51..9fadbe9 100644
--- a/man/logistic.Rd
+++ b/man/logistic.Rd
@@ -5,22 +5,24 @@
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Logistic Distribution Family Function }
\description{
- Estimates the location and scale parameters of the logistic distribution
- by maximum likelihood estimation.
+ Estimates the location and scale parameters of the logistic
+ distribution by maximum likelihood estimation.
}
\usage{
-logistic1(llocation="identity", elocation=list(),
- scale.arg=1, method.init=1)
-logistic2(llocation="identity", lscale="loge",
- elocation=list(), escale=list(),
- ilocation=NULL, iscale=NULL, method.init=1, zero=NULL)
+logistic1(llocation = "identity", elocation = list(),
+ scale.arg = 1, method.init = 1)
+logistic2(llocation = "identity", lscale = "loge",
+ elocation = list(), escale = list(),
+ ilocation = NULL, iscale = NULL, method.init = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{llocation}{
- Link function applied to the location parameter \eqn{l}.
- See \code{\link{Links}} for more choices.
+ \item{llocation, lscale}{
+ Parameter link functions applied to the location parameter \eqn{l}
+ and scale parameter \eqn{s}.
+ See \code{\link{Links}} for more choices, and
+ \code{\link{CommonVGAMffArguments}} for more information.
}
\item{elocation, escale}{
@@ -32,56 +34,43 @@ logistic2(llocation="identity", lscale="loge",
Known positive scale parameter (called \eqn{s} below).
}
- \item{lscale}{
- Parameter link function applied to the
- scale parameter \eqn{s}.
- See \code{\link{Links}} for more choices.
+ \item{ilocation, iscale}{
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
- \item{ilocation}{
- Initial value for the location \eqn{l} parameter.
- By default, an initial value is chosen internally using
- \code{method.init}. Assigning a value will override
- the argument \code{method.init}.
-
- }
- \item{iscale}{
- Initial value for the scale \eqn{s} parameter.
- By default, an initial value is chosen internally using
- \code{method.init}. Assigning a value will override
- the argument \code{method.init}.
-
- }
- \item{method.init}{
- An integer with value \code{1} or \code{2} which
- specifies the initialization method. If failure to converge occurs
- try the other value.
-
- }
- \item{zero}{
- An integer-valued vector specifying which linear/additive predictors
- are modelled as intercepts only. The default is none of them. If used,
- choose one value from the set \{1,2\}.
+ \item{method.init, zero}{
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
}
\details{
- The two-parameter logistic distribution
- has a density that can be written as
+ The two-parameter logistic distribution has a density that can
+ be written as
\deqn{f(y;l,s) = \frac{\exp[-(y-l)/s]}{
s\left( 1 + \exp[-(y-l)/s] \right)^2}}{%
f(y;l,s) = exp[-(y-l)/s] /
[s * ( 1 + exp[-(y-l)/s] )^2]
- }
- where \eqn{s>0} is the scale parameter, and \eqn{l} is the location
- parameter. The response \eqn{-\infty<y<\infty}{-Inf<y<Inf}. The mean
- of \eqn{Y} (which is the fitted value) is \eqn{l} and its variance is
- \eqn{\pi^2 s^2 / 3}{pi^2 s^2 / 3}.
+ }
+ where \eqn{s > 0} is the scale parameter, and \eqn{l} is the location
+ parameter. The response \eqn{-\infty<y<\infty}{-Inf<y<Inf}. The mean
+ of \eqn{Y} (which is the fitted value) is \eqn{l} and its variance is
+ \eqn{\pi^2 s^2 / 3}{pi^2 s^2 / 3}.
+
+
+ A logistic distribution with \code{scale = 0.65}
+ (see \code{\link[stats:Logistic]{dlogis}})
+ resembles
+ \code{\link[stats]{dt}}
+ with \code{df = 7};
+ see \code{\link{logistic1}} and \code{\link{studentt}}.
+
+
+ \code{logistic1} estimates the location parameter only while
+ \code{logistic2} estimates both parameters.
+ By default,
+ \eqn{\eta_1 = l}{eta1 = l} and \eqn{\eta_2 = \log(s)}{eta2 = log(s)} for
+ \code{logistic2}.
- \code{logistic1} estimates the location parameter only while
- \code{logistic2} estimates both parameters. By default,
- \eqn{\eta_1=l}{eta1=l} and \eqn{\eta_2=\log(s)}{eta2=log(s)} for
- \code{logistic2}.
}
\value{
@@ -89,26 +78,31 @@ logistic2(llocation="identity", lscale="loge",
The object is used by modelling functions such as \code{\link{vglm}},
\code{\link{rrvglm}} and \code{\link{vgam}}.
+
}
\references{
Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
\emph{Continuous Univariate Distributions},
2nd edition, Volume 1, New York: Wiley. Chapter 15.
+
Evans, M., Hastings, N. and Peacock, B. (2000)
\emph{Statistical Distributions},
New York: Wiley-Interscience, Third edition.
+
Castillo, E., Hadi, A. S., Balakrishnan, N. Sarabia, J. S. (2005)
\emph{Extreme Value and Related Models with Applications in
Engineering and Science},
Hoboken, N.J.: Wiley-Interscience, p.130.
+
deCani, J. S. and Stine, R. A. (1986)
A note on Deriving the Information Matrix for a Logistic Distribution,
\emph{The American Statistician},
\bold{40}, 220--222.
+
}
\author{ T. W. Yee }
\note{
@@ -127,16 +121,16 @@ A note on Deriving the Information Matrix for a Logistic Distribution,
}
\examples{
# location unknown, scale known
-lodat1 = data.frame(x = runif(nn <- 500))
-lodat1 = transform(lodat1, y = rlogis(nn, loc=1+5*x, scale=4))
-fit = vglm(y ~ x, logistic1(scale=4), lodat1, trace=TRUE, crit="c")
-coef(fit, matrix=TRUE)
+ldat1 = data.frame(x = runif(nn <- 500))
+ldat1 = transform(ldat1, y = rlogis(nn, loc = 1+5*x, scale = 4))
+fit = vglm(y ~ x, logistic1(scale = 4), ldat1, trace = TRUE, crit = "c")
+coef(fit, matrix = TRUE)
# Both location and scale unknown
-lodat2 = data.frame(x = runif(nn <- 2000))
-lodat2 = transform(lodat2, y = rlogis(nn, loc=1+5*x, scale=exp(0+1*x)))
-fit = vglm(y ~ x, logistic2, lodat2)
-coef(fit, matrix=TRUE)
+ldat2 = data.frame(x = runif(nn <- 2000))
+ldat2 = transform(ldat2, y = rlogis(nn, loc = 1+5*x, scale = exp(0+1*x)))
+fit = vglm(y ~ x, logistic2, ldat2)
+coef(fit, matrix = TRUE)
vcov(fit)
summary(fit)
}
diff --git a/man/logit.Rd b/man/logit.Rd
index d3fe51f..b58ecca 100644
--- a/man/logit.Rd
+++ b/man/logit.Rd
@@ -152,6 +152,7 @@ elogit(p, earg=list(min=1, max=2,
\dontrun{
par(mfrow=c(2,2))
y = seq(-4, 4, length=100)
+p = seq(0.01, 0.99, by=0.01)
for(d in 0:1) {
matplot(p, cbind(logit(p, deriv=d), probit(p, deriv=d)),
type="n", col="purple", ylab="transformation",
diff --git a/man/loglaplace.Rd b/man/loglaplace.Rd
index dbf4617..d612327 100644
--- a/man/loglaplace.Rd
+++ b/man/loglaplace.Rd
@@ -146,7 +146,7 @@ Log-Laplace distributions.
\emph{International Mathematical Journal},
\bold{3}, 467--495.
- Yee, T. W. (2009)
+ Yee, T. W. (2011)
Quantile regression for counts and proportions.
In preparation.
diff --git a/man/lognormal.Rd b/man/lognormal.Rd
index d543496..361d173 100644
--- a/man/lognormal.Rd
+++ b/man/lognormal.Rd
@@ -10,10 +10,10 @@
}
\usage{
lognormal(lmeanlog = "identity", lsdlog = "loge",
- emeanlog=list(), esdlog=list(), zero = NULL)
+ emeanlog = list(), esdlog = list(), zero = 2)
lognormal3(lmeanlog = "identity", lsdlog = "loge",
- emeanlog=list(), esdlog=list(),
- powers.try = (-3):3, delta = NULL, zero = NULL)
+ emeanlog = list(), esdlog = list(),
+ powers.try = (-3):3, delta = NULL, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -38,6 +38,8 @@ lognormal3(lmeanlog = "identity", lsdlog = "loge",
For \code{lognormal3()},
the values must be from the set \{1,2,3\} where 3 is for
\eqn{\lambda}{\lambda}.
+ See \code{\link{CommonVGAMffArguments}} for more information.
+
}
\item{powers.try}{
@@ -63,6 +65,7 @@ lognormal3(lmeanlog = "identity", lsdlog = "loge",
E(Y) = exp(mu + 0.5 sigma^2)}
and not \eqn{\mu}{mu}, make up the fitted values.
+
A random variable \eqn{Y} has a 3-parameter lognormal distribution
if \eqn{\log(Y-\lambda)}{log(Y-lambda)}
is distributed \eqn{N(\mu, \sigma^2)}{N(mu, sigma^2)}. Here,
@@ -72,16 +75,19 @@ lognormal3(lmeanlog = "identity", lsdlog = "loge",
E(Y) = lambda + exp(mu + 0.5 sigma^2)}
and not \eqn{\mu}{mu}, make up the fitted values.
+
\code{lognormal()} and \code{lognormal3()} fit the 2- and 3-parameter
lognormal distribution respectively. Clearly, if the location
parameter \eqn{\lambda=0}{lambda=0} then both distributions coincide.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
Kleiber, C. and Kotz, S. (2003)
@@ -89,6 +95,7 @@ Kleiber, C. and Kotz, S. (2003)
Actuarial Sciences},
Hoboken, NJ: Wiley-Interscience.
+
}
\author{ T. W. Yee }
%\note{
@@ -99,27 +106,30 @@ Hoboken, NJ: Wiley-Interscience.
%}
\seealso{
-% \code{\link{lognormal3}},
- \code{\link[stats]{rlnorm}},
- \code{\link{normal1}}.
+% \code{\link{lognormal3}},
+ \code{\link[stats]{rlnorm}},
+ \code{\link{normal1}},
+ \code{\link{CommonVGAMffArguments}}.
+
+
}
\examples{
-lndat = data.frame(y = rlnorm(nn <- 1000, meanlog=1.5, sdlog=exp(-0.8)))
-fit = vglm(y ~ 1, lognormal, lndat, trace=TRUE)
-coef(fit, mat=TRUE)
+ldat <- data.frame(y = rlnorm(nn <- 1000, meanlog = 1.5, sdlog = exp(-0.8)))
+fit <- vglm(y ~ 1, lognormal, ldat, trace = TRUE)
+coef(fit, mat = TRUE)
Coef(fit)
-lndat2 = data.frame(x = runif(nn <- 1000))
-lndat2 = transform(lndat2, y = rlnorm(nn, mean=0.5, sd=exp(x)))
-fit = vglm(y ~ x, lognormal(zero=1), lndat2, trace=TRUE, crit="c")
-coef(fit, mat=TRUE)
+ldat2 <- data.frame(x2 = runif(nn <- 1000))
+ldat2 <- transform(ldat2, y = rlnorm(nn, mean = 0.5, sd = exp(x2)))
+fit <- vglm(y ~ x2, lognormal(zero = 1), ldat2, trace = TRUE, crit = "c")
+coef(fit, mat = TRUE)
Coef(fit)
-lambda = 4
-lndat3 = data.frame(y = lambda + rlnorm(n=1000, mean=1.5, sd=exp(-0.8)))
-fit = vglm(y ~ 1, lognormal3, lndat3, trace=TRUE, crit="c")
-coef(fit, mat=TRUE)
+lambda <- 4
+ldat3 <- data.frame(y = lambda + rlnorm(n = 1000, mean = 1.5, sd = exp(-0.8)))
+fit <- vglm(y ~ 1, lognormal3, ldat3, trace = TRUE, crit = "c")
+coef(fit, mat = TRUE)
summary(fit)
}
\keyword{models}
diff --git a/man/maxwellUC.Rd b/man/maxwellUC.Rd
index ea3fe97..3942255 100644
--- a/man/maxwellUC.Rd
+++ b/man/maxwellUC.Rd
@@ -6,7 +6,9 @@
\alias{rmaxwell}
\title{The Maxwell Distribution}
\description{
- Density, and distribution function
+ Density,
+ distribution function,
+ quantile function and random generation
for the Maxwell distribution.
}
@@ -19,11 +21,14 @@ rmaxwell(n, a)
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
- \item{n}{number of observations. A single positive integer. }
+ \item{n}{number of observations.
+ A single positive integer.
+
+ }
\item{a}{the parameter.}
\item{log}{
Logical.
- If \code{log=TRUE} then the logarithm of the density is returned.
+ If \code{log = TRUE} then the logarithm of the density is returned.
}
@@ -33,12 +38,14 @@ rmaxwell(n, a)
\code{pmaxwell} gives the distribution function,
\code{qmaxwell} gives the quantile function, and
\code{rmaxwell} generates random deviates.
+
}
\references{
Balakrishnan, N. and Nevzorov, V. B. (2003)
\emph{A Primer on Statistical Distributions}.
Hoboken, New Jersey: Wiley.
+
}
\author{ T. W. Yee }
\details{
@@ -46,6 +53,7 @@ rmaxwell(n, a)
for estimating the parameter \eqn{a} by maximum likelihood estimation,
for the formula of the probability density function.
+
}
\note{
The Maxwell distribution is related to the Rayleigh distribution.
@@ -56,21 +64,21 @@ rmaxwell(n, a)
\code{\link{Rayleigh}},
\code{\link{rayleigh}}.
+
}
\examples{
-\dontrun{
-a = 3; x = seq(-0.5, 3, len = 100)
-plot(x, dmaxwell(x, a=a), type="l", col="blue", las=1, ylab="",
- main="blue is density, red is cumulative distribution function",
- sub="Purple lines are the 10,20,...,90 percentiles")
-abline(h=0, col="blue", lty=2)
-lines(x, pmaxwell(x, a=a), type="l", col="red")
-probs = seq(0.1, 0.9, by=0.1)
-Q = qmaxwell(probs, a=a)
-lines(Q, dmaxwell(Q, a), col="purple", lty=3, type="h")
-lines(Q, pmaxwell(Q, a), col="purple", lty=3, type="h")
-abline(h=probs, col="purple", lty=3)
-pmaxwell(Q, a) - probs # Should be all zero
+\dontrun{ a <- 3; x <- seq(-0.5, 3, len = 100)
+plot(x, dmaxwell(x, a = a), type = "l", col = "blue", las = 1, ylab = "",
+ main = "Blue is density, orange is cumulative distribution function",
+ sub = "Purple lines are the 10,20,...,90 percentiles")
+abline(h = 0, col = "blue", lty = 2)
+lines(x, pmaxwell(x, a = a), type = "l", col = "orange")
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qmaxwell(probs, a = a)
+lines(Q, dmaxwell(Q, a), col = "purple", lty = 3, type = "h")
+lines(Q, pmaxwell(Q, a), col = "purple", lty = 3, type = "h")
+abline(h = probs, col = "purple", lty = 3)
+max(abs(pmaxwell(Q, a) - probs)) # Should be zero
}
}
\keyword{distribution}
diff --git a/man/micmen.Rd b/man/micmen.Rd
index 17852ad..865a1ab 100644
--- a/man/micmen.Rd
+++ b/man/micmen.Rd
@@ -8,8 +8,12 @@
}
\usage{
micmen(rpar = 0.001, divisor = 10, init1 = NULL, init2 = NULL,
- link1 = "identity", link2 = "identity", earg1 = list(),
- earg2 = list(), dispersion = 0, zero = NULL)
+ method.init = 1, oim = TRUE,
+ link1 = "identity", link2 = "identity",
+ firstDeriv = c("nsimEIM", "rpar"),
+ earg1 = list(), earg2 = list(), prob.x = c(0.15, 0.85),
+ nsimEIM = 500,
+ dispersion = 0, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -25,7 +29,8 @@ micmen(rpar = 0.001, divisor = 10, init1 = NULL, init2 = NULL,
}
\item{init1, init2}{
- Numerical. Initial value for the first and second parameters,
+ Numerical.
+ Optional initial value for the first and second parameters,
respectively. The default is to use a self-starting value.
}
@@ -44,6 +49,25 @@ micmen(rpar = 0.001, divisor = 10, init1 = NULL, init2 = NULL,
Numerical. Dispersion parameter.
}
+ \item{firstDeriv}{
+ Character. Algorithm for computing the first derivatives and
+ working weights.
+ The first is the default.
+
+ }
+ \item{method.init, prob.x}{
+ See \code{\link{CommonVGAMffArguments}} for more information.
+
+ }
+ \item{nsimEIM}{
+ See \code{\link{CommonVGAMffArguments}} for more information.
+
+ }
+ \item{oim}{
+ Use the OIM?
+ See \code{\link{CommonVGAMffArguments}} for more information.
+
+ }
\item{zero}{
An integer-valued vector specifying which
linear/additive predictors are modelled as intercepts only.
@@ -78,20 +102,24 @@ micmen(rpar = 0.001, divisor = 10, init1 = NULL, init2 = NULL,
\emph{Nonlinear Regression},
New York: Wiley.
+
Wedderburn, R. W. M. (1974)
Quasi-likelihood functions, generalized linear models,
and the Gauss-Newton method.
\emph{Biometrika},
\bold{61}, 439--447.
+
Bates, D. M. and Watts, D. G. (1988)
\emph{Nonlinear Regression Analysis and Its Applications},
New York: Wiley.
+
Documentation accompanying the \pkg{VGAM} package at
\url{http://www.stat.auckland.ac.nz/~yee}
contains further information and examples.
+
}
\author{ T. W. Yee }
\note{
@@ -101,14 +129,17 @@ micmen(rpar = 0.001, divisor = 10, init1 = NULL, init2 = NULL,
It should just a single vector, therefore omit the intercept term.
The LHS of the formula \code{form2} is ignored.
+
To predict the response at new values of \eqn{u_i}{u_i} one must assign
the \code{@extra$Xm2} slot in the fitted object these values, e.g.,
see the example below.
+
Numerical problems may occur. If so, try setting some initial values
for the parameters. In the future, several self-starting initial values
will be implemented.
+
}
\seealso{
@@ -116,6 +147,12 @@ micmen(rpar = 0.001, divisor = 10, init1 = NULL, init2 = NULL,
% \code{skira}.
}
+\section{Warning }{
+ This function is not (nor could ever be) entirely reliable.
+ Plotting the fitted function and monitoring convergence is recommended.
+
+
+}
\examples{
fit = vglm(velocity ~ 1, micmen, enzyme, trace = TRUE, crit = "coef",
form2 = ~ conc - 1)
diff --git a/man/moffset.Rd b/man/moffset.Rd
new file mode 100644
index 0000000..eaa507d
--- /dev/null
+++ b/man/moffset.Rd
@@ -0,0 +1,92 @@
+\name{moffset}
+\alias{moffset}
+
+\title{
+Makes an effective day data set
+
+}
+\description{
+ Modify a matrix by shifting successive elements.
+
+}
+\usage{
+moffset(mat, roffset = 1, coffset = 1)
+
+}
+\arguments{
+\item{mat}{
+ Data frame or matrix, which is considered to 24 hours of rows by
+ 7 days a week of columns, or can be any matrix.
+
+}
+\item{roffset, coffset}{
+  Numeric or character. The number or name of the row/column
+  of the input matrix that becomes the first cell of the
+  output matrix. For example, for \code{\link{alcoff}},
+  setting \code{roffset = "6"} means that the effective days
+  data set starts from 6:00 am of each day, and midnight to
+  05.59 am is counted as part of the previous day. Similarly,
+  \code{coffset} applies to the columns.
+}
+}
+
+\details{
+ In \pkg{VGAM} there are crash data (e.g., see
+  \code{\link{crashi}}) and alcohol-related
+  datasets, and also function of \code{\link{rcam}} and
+  \code{\link{plotrcam0}} available. In a regression analysis of a
+  main effects model, or a rank-zero row-column association model,
+  it is more realistic to use the effective days data set since,
+  for example, partying on Friday night spills over into Saturday
+  morning.
+
+
+}
+
+\value{
+  A matrix of the same dimension as its input.
+
+}
+
+
+\author{
+ Alfian F. Hadi,
+ T. W. Yee.
+
+}
+\note{
+  This function was designed specifically for a 24 x 7 dimensional
+  matrix (24 hours of the day as rows by 7 days a week as columns),
+  such as \code{\link{alcoff}}, to change it into a matrix of
+  "effective days" with a flexible start for the first hour of the
+  effective day. But it can now be applied more generally to any matrix.
+
+
+  This is a data preprocessing function for \code{\link{rcam}}
+  and \code{\link{plotrcam0}}. The difference between
+  \code{\link{Rcam}} and \code{\link{moffset}} is that
+  \code{\link{Rcam}} only reorders the levels of the rows and
+  columns, and marks the row/column baselines; the entries of the
+  matrix are unchanged. In contrast, \code{\link{moffset}} moves
+  the values in the rows/columns before the rstar/cstar to the
+  previous row/column. So if there is a matrix very similar to
+  \code{\link{alcoff}} but with the other dimension, 7 by 24 say,
+  then the user can apply this function very easily.
+
+
+}
+\seealso{
+ \code{\link{Rcam}},
+ \code{\link{rcam}},
+ \code{\link{plotrcam0}}.
+
+}
+\examples{
+alcoff.e <- moffset(alcoff, roffset = "6")
+fit0.orig <- rcam(alcoff) #default baselines are first row & col
+fit0 <- rcam(alcoff.e) #default baselines are first row & col
+
+#customise rowbaseline
+fit0.based <- rcam(Rcam(alcoff.e, rbaseline="11", cbaseline="Monday"))
+}
diff --git a/man/multinomial.Rd b/man/multinomial.Rd
index 7f60cdc..39ab318 100644
--- a/man/multinomial.Rd
+++ b/man/multinomial.Rd
@@ -38,8 +38,8 @@ multinomial(zero = NULL, parallel = FALSE, nointercept = NULL,
If used, this argument will be often assigned the value \code{1}.
If inputted as a value of a factor then beware of missing values
of certain levels of the factor
- (\code{drop.unused.levels=TRUE} or
- \code{drop.unused.levels=FALSE}).
+ (\code{drop.unused.levels = TRUE} or
+ \code{drop.unused.levels = FALSE}).
See the example below.
}
@@ -61,9 +61,10 @@ multinomial(zero = NULL, parallel = FALSE, nointercept = NULL,
The reference or baseline level can be changed with the
\code{refLevel} argument.
+
In almost all the literature, the constraint matrices associated
with this family of models are known. For example, setting
- \code{parallel=TRUE} will make all constraint matrices (except for
+ \code{parallel = TRUE} will make all constraint matrices (except for
the intercept) equal to a vector of \eqn{M} 1's. If the constraint
matrices are unknown and to be estimated, then this can be achieved
by fitting the model as a reduced-rank vector generalized linear model
@@ -71,6 +72,7 @@ multinomial(zero = NULL, parallel = FALSE, nointercept = NULL,
model with unknown constraint matrices is known as a \emph{stereotype} model
(Anderson, 1984), and can be fitted with \code{\link{rrvglm}}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -81,10 +83,10 @@ multinomial(zero = NULL, parallel = FALSE, nointercept = NULL,
}
\references{
-Yee, T. W. (2009)
+Yee, T. W. (2010)
The \pkg{VGAM} package for categorical data analysis.
\emph{Journal of Statistical Software},
-\bold{32}, % zzz--zzz.
+\bold{32}, 1--34.
\url{http://www.jstatsoft.org/v32/i10/}.
@@ -135,18 +137,21 @@ by the \pkg{VGAM} package can be found at
by \code{\link{vglm}}/\code{\link{vgam}}/\code{\link{rrvglm}} is the
matrix of sample proportions.
+
The multinomial logit model is more appropriate for a nominal
(unordered) factor response than for an ordinal (ordered) factor
response.
Models more suited for the latter include those based on cumulative
probabilities, e.g., \code{\link{cumulative}}.
+
\code{multinomial} is prone to numerical difficulties if the groups
are separable and/or the fitted probabilities are close to 0 or 1.
The fitted values returned are estimates of the probabilities
\eqn{P[Y=j]} for \eqn{j=1,\ldots,M+1}.
See \pkg{safeBinaryRegression} for the logistic regression case.
+
Here is an example of the usage of the \code{parallel} argument.
If there are covariates \code{x2}, \code{x3} and \code{x4}, then
\code{parallel = TRUE ~ x2 + x3 - 1} and
@@ -154,6 +159,7 @@ by the \pkg{VGAM} package can be found at
the regression coefficients for \code{x2} and \code{x3} to be equal;
those of the intercepts and \code{x4} would be different.
+
In Example 4 below, a conditional logit model is fitted to an artificial
data set that explores how cost and travel time affect people's
decision about how to travel to work. Walking is the baseline group.
@@ -163,11 +169,13 @@ by the \pkg{VGAM} package can be found at
walking, etc. For other details about the \code{xij} argument see
\code{\link{vglm.control}} and \code{\link{fill}}.
+
The \code{\link[nnet]{multinom}} function in the \pkg{nnet} package
uses the first level of the factor as baseline, whereas the last
level of the factor is used here. Consequently the estimated
regression coefficients differ.
+
}
% In the future, this family function may be renamed to
@@ -203,33 +211,24 @@ by the \pkg{VGAM} package can be found at
\examples{
# Example 1: fit a multinomial logit model to Edgar Anderson's iris data
data(iris)
-\dontrun{
-fit = vglm(Species ~ ., multinomial, iris)
-coef(fit, matrix=TRUE)
-}
+\dontrun{ fit = vglm(Species ~ ., multinomial, iris)
+coef(fit, matrix = TRUE) }
# Example 2a: a simple example
-ymat = t(rmultinom(10, size = 20, prob=c(0.1,0.2,0.8))) # Counts
-fit = vglm(ymat ~ 1, multinomial)
+ycounts = t(rmultinom(10, size = 20, prob = c(0.1, 0.2, 0.8))) # Counts
+fit = vglm(ycounts ~ 1, multinomial)
head(fitted(fit)) # Proportions
fit at prior.weights # Not recommended for extraction of prior weights
-weights(fit, type="prior", matrix=FALSE) # The better method
+weights(fit, type = "prior", matrix = FALSE) # The better method
fit at y # Sample proportions
constraints(fit) # Constraint matrices
# Example 2b: Different reference level used as the baseline
-fit2 = vglm(ymat ~ 1, multinomial(refLevel=2))
-coef(fit2, matrix=TRUE)
-coef(fit , matrix=TRUE) # Easy to reconcile this output with fit2
+fit2 = vglm(ycounts ~ 1, multinomial(refLevel = 2))
+coef(fit2, matrix = TRUE)
+coef(fit , matrix = TRUE) # Easy to reconcile this output with fit2
-# Example 2c: Different input to Example 2a but same result
-w = apply(ymat, 1, sum) # Prior weights
-yprop = ymat / w # Sample proportions
-fitprop = vglm(yprop ~ 1, multinomial, weights=w)
-head(fitted(fitprop)) # Proportions
-weights(fitprop, type="prior", matrix=FALSE)
-fitprop at y # Same as the input
# Example 3: The response is a factor.
@@ -237,37 +236,37 @@ nn = 10
dframe3 = data.frame(yfactor = gl(3, nn, labels=c("Control","Trt1","Trt2")),
x = runif(3 * nn))
myrefLevel = with(dframe3, yfactor[12])
-fit3a = vglm(yfactor ~ x, multinomial(refLevel=myrefLevel), data=dframe3)
-fit3b = vglm(yfactor ~ x, multinomial(refLevel=2), data=dframe3)
-coef(fit3a, matrix=TRUE) # "Treatment1" is the reference level
-coef(fit3b, matrix=TRUE) # "Treatment1" is the reference level
+fit3a = vglm(yfactor ~ x, multinomial(refLevel = myrefLevel), dframe3)
+fit3b = vglm(yfactor ~ x, multinomial(refLevel = 2), dframe3)
+coef(fit3a, matrix = TRUE) # "Treatment1" is the reference level
+coef(fit3b, matrix = TRUE) # "Treatment1" is the reference level
margeff(fit3b)
# Example 4: Fit a rank-1 stereotype model
data(car.all)
-fit4 = rrvglm(Country ~ Width + Height + HP, multinomial, car.all, Rank=1)
+fit4 = rrvglm(Country ~ Width + Height + HP, multinomial, car.all)
coef(fit4) # Contains the C matrix
-constraints(fit4)$HP # The A matrix
-coef(fit4, matrix=TRUE) # The B matrix
-Coef(fit4)@C # The C matrix
-ccoef(fit4) # Better to get the C matrix this way
-Coef(fit4)@A # The A matrix
-svd(coef(fit4, matrix=TRUE)[-1,])$d # This has rank 1; = C %*% t(A)
+constraints(fit4)$HP # The A matrix
+coef(fit4, matrix = TRUE) # The B matrix
+Coef(fit4)@C # The C matrix
+ccoef(fit4) # Better to get the C matrix this way
+Coef(fit4)@A # The A matrix
+svd(coef(fit4, matrix = TRUE)[-1, ])$d # This has rank 1; = C %*% t(A)
# Example 5: The use of the xij argument (aka conditional logit model)
set.seed(111)
nn = 100 # Number of people who travel to work
M = 3 # There are M+1 models of transport to go to work
-ymat = matrix(0, nn, M+1)
-ymat[cbind(1:nn, sample(x=M+1, size=nn, replace=TRUE))] = 1
-dimnames(ymat) = list(NULL, c("bus","train","car","walk"))
+ycounts = matrix(0, nn, M+1)
+ycounts[cbind(1:nn, sample(x = M+1, size = nn, replace = TRUE))] = 1
+dimnames(ycounts) = list(NULL, c("bus","train","car","walk"))
gotowork = data.frame(cost.bus = runif(nn), time.bus = runif(nn),
cost.train= runif(nn), time.train= runif(nn),
cost.car = runif(nn), time.car = runif(nn),
cost.walk = runif(nn), time.walk = runif(nn))
-gotowork = round(gotowork, dig=2) # For convenience
+gotowork = round(gotowork, dig = 2) # For convenience
gotowork = transform(gotowork,
Cost.bus = cost.bus - cost.walk,
Cost.car = cost.car - cost.walk,
@@ -277,21 +276,34 @@ gotowork = transform(gotowork,
Time.car = time.car - time.walk,
Time.train = time.train - time.walk,
Time = time.train - time.walk) # for labelling
-fit = vglm(ymat ~ Cost + Time,
- multinomial(parall=TRUE ~ Cost + Time - 1),
+fit = vglm(ycounts ~ Cost + Time,
+ multinomial(parall = TRUE ~ Cost + Time - 1),
xij = list(Cost ~ Cost.bus + Cost.train + Cost.car,
Time ~ Time.bus + Time.train + Time.car),
form2 = ~ Cost + Cost.bus + Cost.train + Cost.car +
Time + Time.bus + Time.train + Time.car,
- data=gotowork, trace=TRUE)
-head(model.matrix(fit, type="lm")) # LM model matrix
-head(model.matrix(fit, type="vlm")) # Big VLM model matrix
+ data=gotowork, trace = TRUE)
+head(model.matrix(fit, type = "lm")) # LM model matrix
+head(model.matrix(fit, type = "vlm")) # Big VLM model matrix
coef(fit)
-coef(fit, matrix=TRUE)
+coef(fit, matrix = TRUE)
constraints(fit)
summary(fit)
-max(abs(predict(fit)-predict(fit, new=gotowork))) # Should be 0
+max(abs(predict(fit) - predict(fit, new = gotowork))) # Should be 0
}
\keyword{models}
\keyword{regression}
+
+
+% 20100915; this no longer works:
+% # Example 2c: Different input to Example 2a but same result
+% w = apply(ycounts, 1, sum) # Prior weights
+% yprop = ycounts / w # Sample proportions
+% fitprop = vglm(yprop ~ 1, multinomial, weights=w)
+% head(fitted(fitprop)) # Proportions
+% weights(fitprop, type="prior", matrix=FALSE)
+% fitprop at y # Same as the input
+
+
+
diff --git a/man/negbinomial.Rd b/man/negbinomial.Rd
index b778db0..9001fab 100644
--- a/man/negbinomial.Rd
+++ b/man/negbinomial.Rd
@@ -11,7 +11,7 @@ negbinomial(lmu = "loge", lk = "loge", emu = list(), ek = list(),
imu = NULL, ik = NULL, quantile.probs = 0.75,
nsimEIM = 100, cutoff = 0.995,
Maxiter = 5000, deviance.arg = FALSE, method.init = 1,
- shrinkage.init = 0.95, zero = -2)
+ parallel = FALSE, shrinkage.init = 0.95, zero = -2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -19,7 +19,10 @@ negbinomial(lmu = "loge", lk = "loge", emu = list(), ek = list(),
Link functions applied to the \eqn{\mu}{mu} and \eqn{k} parameters.
See \code{\link{Links}} for more choices.
Note that the \eqn{k} parameter is the \code{size} argument of
- \code{\link[stats:NegBinomial]{rnbinom}} etc.
+ \code{\link[stats:NegBinomial]{rnbinom}}.
+ Common alternatives for \code{lk} are
+ \code{\link{nloge}} and
+ \code{\link{reciprocal}}.
}
\item{emu, ek}{
@@ -91,6 +94,16 @@ negbinomial(lmu = "loge", lk = "loge", emu = list(), ek = list(),
and/or else specify a value for \code{ik}.
}
+ \item{parallel}{
+ See \code{\link{CommonVGAMffArguments}} for more information.
+ Setting \code{parallel = TRUE} is useful in order to get
+ something similar to \code{\link{quasipoissonff}} or
+ what is known as NB-1.
+ The parallelism constraint does not apply to any intercept term.
+ You should set \code{zero = NULL} too if \code{parallel = TRUE} to
+ avoid a conflict.
+
+ }
\item{shrinkage.init}{
How much shrinkage is used when initializing \eqn{\mu}{mu}.
The value must be between 0 and 1 inclusive, and
@@ -109,6 +122,7 @@ negbinomial(lmu = "loge", lk = "loge", emu = list(), ek = list(),
explanatory variables by setting \code{zero=NULL}. A negative value
means that the value is recycled, so setting \eqn{-2} means all \eqn{k}
are intercept-only.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
@@ -129,23 +143,28 @@ negbinomial(lmu = "loge", lk = "loge", emu = list(), ek = list(),
[mu/(mu+k)]^y [k/(k+mu)]^k}
where \eqn{y=0,1,2,\ldots},
and \eqn{\mu > 0}{mu > 0} and \eqn{k > 0}.
- Note that the dispersion parameter is
+ Note that the \emph{dispersion} parameter is
\eqn{1/k}, so that as \eqn{k} approaches infinity the negative
binomial distribution approaches a Poisson distribution.
The response has variance \eqn{Var(Y)=\mu+\mu^2/k}{Var(Y)=mu*(1+mu/k)}.
When fitted, the \code{fitted.values} slot of the object contains
the estimated value of the \eqn{\mu}{mu} parameter, i.e., of the mean
\eqn{E(Y)}.
+ It is common for some to use \eqn{\alpha=1/k}{alpha=1/k} as the
+ ancillary or heterogeneity parameter;
+ so common alternatives for \code{lk} are
+ \code{\link{nloge}} and
+ \code{\link{reciprocal}}.
- The negative binomial distribution can be coerced into the classical
- GLM framework, with one of the parameters being of interest and the
- other treated as a nuisance/scale parameter (and implemented in the
- MASS library). This \pkg{VGAM} family function \code{negbinomial} treats
- both parameters on the same footing, and estimates them both by full
- maximum likelihood estimation.
- Simulated Fisher scoring is employed as the default
- (see the \code{nsimEIM} argument).
+ The negative binomial distribution can be coerced into the
+ classical GLM framework with one of the parameters being of
+ interest and the other treated as a nuisance/scale parameter
+ (this is implemented in the MASS library). This \pkg{VGAM}
+ family function \code{negbinomial} treats both parameters
+ on the same footing, and estimates them both by full maximum
+ likelihood estimation. Simulated Fisher scoring is employed
+ as the default (see the \code{nsimEIM} argument).
The parameters \eqn{\mu}{mu} and \eqn{k} are independent (diagonal EIM),
@@ -163,14 +182,16 @@ negbinomial(lmu = "loge", lk = "loge", emu = list(), ek = list(),
}
\section{Warning}{
The Poisson model corresponds to \eqn{k} equalling infinity.
- If the data is Poisson or close to Poisson, numerical problems will
- occur. Possibly choosing a log-log link may help in such cases,
- otherwise use \code{\link{poissonff}}.
+ If the data is Poisson or close to Poisson, numerical
+ problems will occur. Possibly choosing a log-log link may
+ help in such cases, otherwise use \code{\link{poissonff}}
+ or \code{\link{quasipoissonff}}.
This function is fragile; the maximum likelihood estimate of the
index parameter is fraught (see Lawless, 1987). In general, the
\code{\link{quasipoissonff}} is more robust than this function.
+ Other alternatives are to fit a NB-1 or RR-NB model; see Yee (2010).
Assigning values to the \code{ik} argument may lead to a local solution,
and smaller values are preferred over large values when using this argument.
@@ -182,7 +203,8 @@ negbinomial(lmu = "loge", lk = "loge", emu = list(), ek = list(),
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
- The object is used by modelling functions such as \code{\link{vglm}}
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
and \code{\link{vgam}}.
}
@@ -204,6 +226,12 @@ Fitting the negative binomial distribution to biological data.
\bold{9}, 174--200.
+ Yee, T. W. (2010)
+ Two-parameter reduced-rank vector generalized linear models.
+ \emph{In preparation}.
+
+
+
}
\author{ Thomas W. Yee }
\note{
@@ -261,41 +289,106 @@ Fitting the negative binomial distribution to biological data.
female European red mites were counted.
+
+ There are two special uses of this \pkg{VGAM} family function
+ for handling count data.
+ Firstly,
+ when used by \code{\link{rrvglm}} this
+ results in a continuum of models in between and
+ inclusive of quasi-Poisson and negative binomial regression.
+ This is known as a reduced-rank negative binomial model \emph{(RR-NB)}.
+ It fits a negative binomial log-linear regression with variance function
+ \eqn{Var(Y) = \mu + \delta_1 \mu^{\delta_2}}{Var(Y) = mu + delta1 * mu^delta2}
+ where \eqn{\delta_1}{delta1}
+ and \eqn{\delta_2}{delta2}
+ are parameters to be estimated by MLE.
+ Confidence intervals are available for \eqn{\delta_2}{delta2},
+ therefore it can be decided upon whether the
+ data are quasi-Poisson or negative binomial, if any.
+
+
+ Secondly,
+ the use of this \pkg{VGAM} family function with \code{parallel = TRUE}
+ and by \code{\link{vglm}}
+ can result in a model similar to \code{\link{quasipoissonff}}.
+ This is named the \emph{NB-1} model.
+ The dispersion parameter is estimated by MLE whereas
+ \code{\link[stats:glm]{glm}} uses the method of moments.
+ In particular, it fits a negative binomial log-linear regression
+ with variance function
+ \eqn{Var(Y) = \phi_0 \mu}{Var(Y) = phi0 * mu}
+ where \eqn{\phi_0}{phi0}
+ is a parameter to be estimated by MLE.
+ Confidence intervals are available for \eqn{\phi_0}{phi0}.
+
+
}
\seealso{
\code{\link{quasipoissonff}},
\code{\link{poissonff}},
- \code{\link{cao}},
- \code{\link{cqo}},
\code{\link{zinegbinomial}},
\code{\link{posnegbinomial}},
\code{\link{invbinomial}},
% \code{\link[MASS]{rnegbin}}.
\code{\link[stats:NegBinomial]{rnbinom}},
- \code{\link{nbolf}}.
+ \code{\link{nbolf}},
+ \code{\link{rrvglm}},
+ \code{\link{cao}},
+ \code{\link{cqo}},
+ \code{\link{CommonVGAMffArguments}}.
+
}
\examples{
# Example 1: apple tree data
-appletree = data.frame(y = 0:7, w = c(70, 38, 17, 10, 9, 3, 2, 1))
-fit = vglm(y ~ 1, negbinomial, appletree, weights = w)
+appletree <- data.frame(y = 0:7, w = c(70, 38, 17, 10, 9, 3, 2, 1))
+fit <- vglm(y ~ 1, negbinomial, appletree, weights = w)
summary(fit)
coef(fit, matrix = TRUE)
Coef(fit)
# Example 2: simulated data with multivariate response
-ndata = data.frame(x = runif(nn <- 500))
-ndata = transform(ndata, y1 = rnbinom(nn, mu=exp(3+x), size = exp(1)),
- y2 = rnbinom(nn, mu=exp(2-x), size = exp(0)))
-fit1 = vglm(cbind(y1,y2) ~ x, negbinomial, ndata, trace = TRUE)
+ndata <- data.frame(x = runif(nn <- 500))
+ndata <- transform(ndata, y1 = rnbinom(nn, mu = exp(3+x), size = exp(1)),
+ y2 = rnbinom(nn, mu = exp(2-x), size = exp(0)))
+fit1 <- vglm(cbind(y1, y2) ~ x, negbinomial, ndata, trace = TRUE)
coef(fit1, matrix = TRUE)
# Example 3: large counts so definitely use the nsimEIM argument
-ndata = transform(ndata, y3 = rnbinom(nn, mu=exp(12+x), size = exp(1)))
+ndata <- transform(ndata, y3 = rnbinom(nn, mu = exp(12+x), size = exp(1)))
with(ndata, range(y3)) # Large counts
-fit2 = vglm(y3 ~ x, negbinomial(nsimEIM=100), ndata, trace = TRUE)
+fit2 <- vglm(y3 ~ x, negbinomial(nsimEIM = 100), ndata, trace = TRUE)
coef(fit2, matrix = TRUE)
+
+# Example 4: a NB-1 to estimate a negative binomial with Var(Y) = phi0 * mu
+nn <- 1000 # Number of observations
+phi0 <- 10 # Specify this; should be greater than unity
+delta0 <- 1 / (phi0 - 1)
+mydata <- data.frame(x2 = runif(nn), x3 = runif(nn))
+mydata <- transform(mydata, mu = exp(2 + 3 * x2 + 0 * x3))
+mydata <- transform(mydata, y3 = rnbinom(nn, mu = mu, size = delta0 * mu))
+\dontrun{
+plot(y3 ~ x2, data = mydata, pch = "+", col = 'blue',
+ main = paste("Var(Y) = ", phi0, " * mu", sep = ""), las = 1) }
+nb1 <- vglm(y3 ~ x2 + x3, negbinomial(parallel = TRUE, zero = NULL),
+ mydata, trace = TRUE)
+# Extracting out some quantities:
+cnb1 <- coef(nb1, matrix = TRUE)
+mydiff <- (cnb1["(Intercept)", "log(k)"] - cnb1["(Intercept)", "log(mu)"])
+delta0.hat <- exp(mydiff)
+(phi.hat <- 1 + 1 / delta0.hat) # MLE of phi
+summary(nb1)
+# Obtain a 95 percent confidence interval for phi0:
+myvec <- rbind(-1, 1, 0, 0)
+(se.mydiff <- sqrt(t(myvec) \%*\% vcov(nb1) \%*\% myvec))
+ci.mydiff <- mydiff + c(-1.96, 1.96) * se.mydiff
+ci.delta0 <- ci.exp.mydiff <- exp(ci.mydiff)
+(ci.phi0 <- 1 + 1 / rev(ci.delta0)) # The 95 percent conf. interval for phi0
+
+confint_nb1(nb1) # Quick way to get it
+
+summary(glm(y3 ~ x2 + x3, quasipoisson, mydata))$disper # cf. moment estimator
}
\keyword{models}
\keyword{regression}
diff --git a/man/normal1.Rd b/man/normal1.Rd
index 71fcb19..94701aa 100644
--- a/man/normal1.Rd
+++ b/man/normal1.Rd
@@ -8,19 +8,16 @@
}
\usage{
-normal1(lmean="identity", lsd="loge", emean=list(), esd=list(), zero=NULL)
+normal1(lmean = "identity", lsd = "loge",
+ emean = list(), esd = list(), method.init = 1, zero = -2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lmean}{
- Link function applied to the mean.
+ \item{lmean, lsd}{
+ Link functions applied to the mean and standard deviation.
See \code{\link{Links}} for more choices.
-
- }
- \item{lsd}{
- Parameter link function applied to the standard deviation.
- See \code{\link{Links}} for more choices.
- Being a positive quantity, a log link is the default.
+ Being a positive quantity, a log link is the default for the
+ standard deviation.
}
\item{emean, esd}{
@@ -28,20 +25,20 @@ normal1(lmean="identity", lsd="loge", emean=list(), esd=list(), zero=NULL)
See \code{earg} in \code{\link{Links}} for general information.
}
- \item{zero}{
- An integer vector, containing the value 1 or 2. If so, the mean or
- standard deviation respectively are modelled as an intercept only.
- Usually, setting \code{zero=2} will be used, if used at all.
- The default value \code{NULL} means both linear/additive predictors
- are modelled as functions of the explanatory variables.
+ \item{method.init, zero}{
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
}
\details{
- By default, the mean is the first linear/additive predictor and
+ This fits a linear model (LM) as the first linear/additive predictor.
+ So, by default, this is just the mean.
+ By default,
the log of the standard deviation is the second linear/additive predictor.
The Fisher information matrix is diagonal.
+ This \pkg{VGAM} family function can handle multiple responses.
+
}
\value{
@@ -49,6 +46,7 @@ normal1(lmean="identity", lsd="loge", emean=list(), esd=list(), zero=NULL)
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
Evans, M., Hastings, N. and Peacock, B. (2000)
@@ -59,11 +57,9 @@ New York: Wiley-Interscience, Third edition.
\author{ T. W. Yee }
\note{
- The response should be univariate. Multivariate
- responses are more generally handled using
- \code{\link{gaussianff}}, however this only handles
- the mean and the variance-covariance matrices are
- assumed known.
+ Yet to do: allow an argument such as \code{sameSD} that enables the
+ standard deviations to be the same.
+ And a \code{parallel} argument.
}
\seealso{
@@ -75,21 +71,22 @@ New York: Wiley-Interscience, Third edition.
\code{\link{fnormal1}},
\code{\link{skewnormal1}},
\code{\link{dcnormal1}},
+ \code{\link{huber}},
\code{\link{studentt}},
\code{\link[stats:Normal]{dnorm}}.
}
\examples{
-ndata = data.frame(x = rnorm(nn <- 200))
-ndata = transform(ndata, y = rnorm(nn, mean = 1-3*x, sd = exp(1+0.2*x)))
-fit = vglm(y ~ x, normal1, ndata, trace = TRUE)
+ndata <- data.frame(x2 = rnorm(nn <- 200))
+ndata <- transform(ndata, y = rnorm(nn, mean = 1-3*x2, sd = exp(1+0.2*x2)))
+fit <- vglm(y ~ x2, normal1(zero = NULL), ndata, trace = TRUE)
coef(fit, matrix = TRUE)
# Generate data from N(mu = theta = 10, sigma = theta) and estimate theta.
-theta = 10
-ndata = data.frame(y = rnorm(100, m = theta, sd = theta))
-fit = vglm(y ~ 1, normal1(lsd = "identity"), ndata,
- constraints = list("(Intercept)" = rbind(1, 1)))
+theta <- 10
+ndata <- data.frame(y = rnorm(100, m = theta, sd = theta))
+fit <- vglm(y ~ 1, normal1(lsd = "identity"), ndata,
+ constraints = list("(Intercept)" = rbind(1, 1)))
coef(fit, matrix = TRUE)
}
\keyword{models}
diff --git a/man/notdocumentedyet.Rd b/man/notdocumentedyet.Rd
index bb57d42..c702bb6 100644
--- a/man/notdocumentedyet.Rd
+++ b/man/notdocumentedyet.Rd
@@ -2,6 +2,34 @@
\alias{notdocumentedyet}
%
%
+%
+%
+%
+% 20110202; James Lauder work
+\alias{dexpgeom}
+\alias{pexpgeom}
+\alias{qexpgeom}
+\alias{rexpgeom}
+%
+%
+%
+% 20110202; for Melbourne; these include datasets.
+\alias{azprocedure}
+\alias{confint_rrnb}
+\alias{confint_nb1}
+\alias{gala}
+\alias{mmt}
+%
+%
+%
+%20101222; Alfian work
+%\alias{Rcam} % Has been written
+%\alias{plotrcam0} % Has been written
+%\alias{moffset} % Has been written
+%
+%
+%
+%
%20090330
\alias{dclogloglap}
\alias{dlogitlap}
@@ -54,6 +82,8 @@
\alias{add1.vglm}
% \alias{adjust.Dmat.expression}
\alias{alaplace1.control}
+\alias{alaplace2.control}
+\alias{alaplace3.control}
% \alias{alias.vgam}
% \alias{alias.vglm}
\alias{anova.vgam}
@@ -180,6 +210,7 @@
\alias{is.bell.qrrvglm}
\alias{is.bell.rrvglm}
\alias{is.bell.vlm}
+\alias{Kayfun.studentt}
% \alias{is.linear.term}
% \alias{jitteruqo}
\alias{lm}
@@ -275,6 +306,7 @@
% \alias{printuqo}
% \alias{printvsmooth.spline}
\alias{procVec}
+\alias{negzero.expression}
\alias{process.binomial2.data.vgam}
\alias{process.categorical.data.vgam}
% \alias{process.constraints}
@@ -341,6 +373,7 @@
\alias{summary.lms}
\alias{summary.qrrvglm}
\alias{summary.rc.exponential}
+\alias{summaryrcam}
\alias{summary.rrvglm}
\alias{summary.uqo}
\alias{summaryvgam}
@@ -366,7 +399,7 @@
\alias{vcontrol.expression}
% \alias{vcovdefault}
% \alias{vcovqrrvglm}
-% \alias{vcovrrvglm}
+\alias{vcovrrvglm}
% \alias{vcovvlm}
% \alias{veigen}
% \alias{vellipse}
@@ -406,6 +439,7 @@
%
\alias{Coef.uqo-class}
\alias{cao-class}
+\alias{rcam-class}
\alias{grc-class}
\alias{qrrvglm-class}
\alias{summary.qrrvglm-class}
@@ -413,6 +447,9 @@
\alias{summary.vgam-class}
\alias{summary.vglm-class}
\alias{summary.vlm-class}
+%%% 20101216 \alias{summary.rcam-class}
+%\alias{summary.rcam-class}
+%\alias{summaryrcam-class}
\alias{uqo-class}
\alias{vcov.qrrvglm-class}
\alias{vlm-class}
diff --git a/man/plotdeplot.lmscreg.Rd b/man/plotdeplot.lmscreg.Rd
index 483f2c7..8d5c36c 100644
--- a/man/plotdeplot.lmscreg.Rd
+++ b/man/plotdeplot.lmscreg.Rd
@@ -14,26 +14,38 @@ plotdeplot.lmscreg(answer, y.arg, add.arg = FALSE,
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{answer}{ Output from functions of the form
+ \item{answer}{
+ Output from functions of the form
\code{deplot.???} where \code{???} is the name of the
\pkg{VGAM} LMS family function, e.g., \code{lms.yjn}.
See below for details.
+
}
- \item{y.arg}{ Numerical vector. The values of the response variable
+ \item{y.arg}{
+ Numerical vector. The values of the response variable
at which to evaluate the density. This should be a grid that is fine
- enough to ensure the plotted curves are smooth. }
+ enough to ensure the plotted curves are smooth.
+
+ }
- \item{add.arg}{ Logical. Add the density to an existing plot? }
+ \item{add.arg}{
+ Logical. Add the density to an existing plot?
+
+ }
\item{xlab, ylab}{
Caption for the x- and y-axes. See \code{\link[graphics]{par}}.
+
}
\item{xlim, ylim}{
Limits of the x- and y-axes. See \code{\link[graphics]{par}}.
+
}
- \item{llty.arg}{ Line type.
+ \item{llty.arg}{
+ Line type.
See the \code{lty} argument of \code{\link[graphics]{par}}. }
- \item{col.arg}{ Line color.
+ \item{col.arg}{
+ Line color.
See the \code{col} argument of \code{\link[graphics]{par}}. }
\item{llwd.arg}{ Line width.
@@ -43,56 +55,66 @@ plotdeplot.lmscreg(answer, y.arg, add.arg = FALSE,
when setting up the entire plot. Useful arguments here include
\code{main} and \code{las}.
}
+
}
\details{
The above graphical parameters offer some flexibility when
plotting the quantiles.
+
}
\value{
The list \code{answer}, which has components
- \item{newdata}{ The argument \code{newdata} above from
+ \item{newdata}{
+ The argument \code{newdata} above from
the argument list of \code{\link{deplot.lmscreg}},
or a one-row
- data frame constructed out of the \code{x0} argument. }
+ data frame constructed out of the \code{x0} argument.
+
+ }
\item{y}{ The argument \code{y.arg} above. }
- \item{density}{ Vector of the density function values evaluated at \code{y.arg}. }
+ \item{density}{
+ Vector of the density function values evaluated at \code{y.arg}.
+
+ }
}
\references{
+
Yee, T. W. (2004)
Quantile regression via vector generalized additive models.
\emph{Statistics in Medicine}, \bold{23}, 2295--2315.
+
Documentation accompanying the \pkg{VGAM} package at
\url{http://www.stat.auckland.ac.nz/~yee}
contains further information and examples.
+
}
\author{ Thomas W. Yee }
\note{
-While the graphical arguments of this function are useful to the user,
-this function should not be called directly.
+ While the graphical arguments of this function are useful to
+ the user, this function should not be called directly.
}
\seealso{
-\code{\link{deplot.lmscreg}}.
+ \code{\link{deplot.lmscreg}}.
+
}
\examples{
-fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bminz)
-\dontrun{
-y = seq(15, 43, by=0.25)
-deplot(fit, x0=20, y=y, xlab="BMI", col="green", llwd=2,
- main="BMI distribution at ages 20 (green), 40 (blue), 60 (red)")
-deplot(fit, x0=40, y=y, add=TRUE, col="blue", llwd=2)
-deplot(fit, x0=60, y=y, add=TRUE, col="red", llwd=2) -> a
-
-names(a at post$deplot)
-a at post$deplot$newdata
-head(a at post$deplot$y)
-head(a at post$deplot$density)
-}
+fit = vgam(BMI ~ s(age, df = c(4,2)), fam = lms.bcn(zero = 1), data = bminz)
+\dontrun{ y = seq(15, 43, by = 0.25)
+deplot(fit, x0 = 20, y = y, xlab = "BMI", col = "green", llwd = 2,
+ main = "BMI distribution at ages 20 (green), 40 (blue), 60 (red)")
+deplot(fit, x0 = 40, y = y, add = TRUE, col = "blue", llwd = 2)
+deplot(fit, x0 = 60, y = y, add = TRUE, col = "red", llwd = 2) -> aa
+
+names(aa at post$deplot)
+aa at post$deplot$newdata
+head(aa at post$deplot$y)
+head(aa at post$deplot$density) }
}
\keyword{dplot}
\keyword{models}
diff --git a/man/plotqrrvglm.Rd b/man/plotqrrvglm.Rd
index a0b2c5e..3c1c870 100644
--- a/man/plotqrrvglm.Rd
+++ b/man/plotqrrvglm.Rd
@@ -30,29 +30,35 @@ plotqrrvglm(object,
\details{
Plotting the residuals can be potentially very useful for checking
that the model fit is adequate.
+
}
\value{
The original object.
+
}
\references{
+
Yee, T. W. (2004)
A new technique for maximum-likelihood
canonical Gaussian ordination.
\emph{Ecological Monographs},
\bold{74}, 685--701.
+
}
\author{Thomas W. Yee}
\note{
-An ordination plot of a QRR-VGLM can be obtained
-by \code{\link{lvplot.qrrvglm}}.
+ An ordination plot of a QRR-VGLM can be obtained
+ by \code{\link{lvplot.qrrvglm}}.
+
}
\seealso{
\code{\link{lvplot.qrrvglm}},
\code{\link{cqo}}.
+
}
\examples{\dontrun{
@@ -72,3 +78,5 @@ plot(p1, rtype="d", col="blue", pch=4, las=1)
\keyword{dplot}
\keyword{models}
\keyword{regression}
+
+
diff --git a/man/plotrcam0.Rd b/man/plotrcam0.Rd
new file mode 100644
index 0000000..53bcfce
--- /dev/null
+++ b/man/plotrcam0.Rd
@@ -0,0 +1,173 @@
+\name{plotrcam0}
+\alias{plotrcam0}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{
+ Main effects plot for a Row-Column Association Model (RCAM)
+
+}
+\description{
+ Produces the main effects plot for Row-Column Association
+ Models (RCAMs) in \code{\link{rcam}} with rank zero for some
+ family functions. The main result of the rank-zero \bold{RCAM}
+ models is the plot of main effects (row and column).
+
+}
+\usage{
+ plotrcam0(object, rfirst = 1, cfirst = 1,
+ rtype = "h", ctype = "h",
+ rlas = 1, rcex.lab = 1,
+ rcex.axis = 1, rlabels = FALSE,
+ rtick = FALSE, clas = 1, ccex.lab = 1,
+ ccex.axis = 1, clabels = FALSE, ctick = FALSE,
+ rmain = "Row effects", rsub = "",
+ rxlabel = "", rylabel = "Row effects",
+ cmain = "Column effects", csub = "", cxlabel= "",
+ cylabel = "Column effects",
+ rcol = par()$col, ccol = par()$col,
+ ...)
+
+}
+\arguments{
+\item{object}{
+ An \code{\link{rcam}} object.
+
+}
+\item{rfirst, cfirst}{
+ \code{rfirst} is the level of row that is placed first in the
+ row effect plot, and \code{cfirst} for the column effect.
+
+}
+\item{rmain, cmain}{
+  \code{rmain} is a character string giving the main title of the
+  row effect plot, and \code{cmain} for the column one.
+
+}
+\item{rtype, ctype}{
+ See \code{type} argument of
+ \code{\link[graphics:plot]{plot}}.
+
+}
+\item{rlabels, clabels}{
+ Currently not functioning properly.
+ zz.
+ See \code{labels} argument of
+ \code{\link[graphics:plot]{plot}}.
+
+}
+\item{rsub, csub}{
+ See \code{sub} argument of
+ \code{\link[graphics:plot]{plot}}.
+
+}
+\item{rxlabel, rylabel, cxlabel, cylabel}{
+ \code{rxlabel} is a character of names for x-axis label and
+ \code{rylabel} is for the y-axis in the row effect plot.
+  Similarly, \code{cxlabel} and \code{cylabel} respectively name the
+  x- and y-axis labels of the column effect plot.
+
+}
+\item{rcex.lab, ccex.lab}{
+ Both are numeric.
+  \code{rcex.lab} is the character expansion factor for the row effect
+  labels, and \code{ccex.lab} for the column ones.
+
+}
+\item{rcex.axis, ccex.axis}{
+ Both are numeric.
+  \code{rcex.axis} is the character expansion factor for the row
+  axis labels, and \code{ccex.axis} for the column ones.
+
+}
+
+\item{rtick, ctick}{
+ Logical. \code{rtick = TRUE} means give ticks in row effect
+ plot, and \code{ctick} for the column one.
+
+}
+\item{rcol, ccol}{
+  \code{rcol} gives a colour for the row effects, and \code{ccol}
+  for the column ones.
+
+}
+%\item{llwd}{
+% Fed into \code{lwd} of \code{\link[graphics:par]{par}}.
+%
+%}
+\item{rlas, clas}{
+ Fed into \code{las} of \code{\link[graphics:par]{par}}.
+
+}
+%\item{type}{
+% Fed into \code{type} of \code{\link[graphics:plot]{plot}}.
+%
+%}
+\item{...}{
+ Arguments fed into both \code{\link[graphics:plot]{plot}} calls.
+
+}
+
+}
+
+\note{
+  This function plots the row and column effects of a rank-0 RCAM.
+  As the result is a main effects plot from a regression analysis,
+  it should be interpreted relative to the baseline (reference)
+  levels of the rows and columns, and the link function that was
+  used should be taken into account.
+
+
+  This function should only be used to plot rank-0 RCAM objects.
+  If it is used to plot a non-rank-0 RCAM object then it will issue
+  a warning.
+
+
+}
+
+
+\author{
+ T. W. Yee,
+ A. F. Hadi.
+}
+
+\section{Warning}{
+ This function is not finished yet.
+ There are many bugs!
+
+}
+
+
+\seealso{
+  \code{\link{moffset}},
+ \code{\link{Rcam}},
+ \code{\link{rcam}}.
+
+}
+\examples{
+# poissonff example
+fit0 <- rcam(Rcam(moffset(alcoff), rbaseline = "11", cbaseline = "Monday"),
+ fam = poissonff)
+\dontrun{par(oma = c(0,0,4,0), mfrow = c(1, 2))
+plotrcam0(fit0) }
+
+# negbinomial example
+fit1 <- rcam(Rcam(moffset(alcoff), rbaseline = "11", cbaseline = "Monday"),
+ fam = negbinomial)
+\dontrun{par(oma = c(0,0,4,0), mfrow = c(1, 2))
+plotrcam0(fit1) }
+
+# normal1 example
+fit2 <- rcam(Rcam(moffset(alcoff), rbaseline = "11", cbaseline = "Monday"),
+ fam = normal1)
+\dontrun{par(oma = c(0,0,4,0), mfrow = c(1, 2))
+plotrcam0(fit2) }
+
+# medpolish example
+fit3 <- rcam(Rcam(moffset(alcoff), rbaseline = "11", cbaseline = "Monday"),
+ fam = alaplace2(tau = 0.5, intparloc = TRUE))
+\dontrun{par(oma = c(0,0,4,0), mfrow = c(1, 2))
+plotrcam0(fit3) }
+
+# zipoissonff example
+fit4 <- rcam(Rcam(moffset(crashp), rbaseline = "11", cbaseline = "Monday"),
+ fam = zipoissonff, Rank = 0, trace = TRUE, crit = "l")
+\dontrun{par(oma = c(0,0,4,0), mfrow = c(1, 2))
+plotrcam0(fit4) }
+}
diff --git a/man/posbinomUC.Rd b/man/posbinomUC.Rd
index 9f2b0ac..0681db3 100644
--- a/man/posbinomUC.Rd
+++ b/man/posbinomUC.Rd
@@ -87,13 +87,12 @@ table(rposbinom(100, size, prob))
table(qposbinom(runif(1000), size, prob))
round(dposbinom(1:10, size, prob) * 1000) # Should be similar
-\dontrun{
-barplot(rbind(dposbinom(x, size, prob),
- dbinom(x, size, prob)),
+\dontrun{ barplot(rbind(dposbinom(x = 0:size, size, prob),
+ dbinom(x = 0:size, size, prob)),
beside = TRUE, col = c("blue","green"),
main=paste("Positive-binomial(", size, ",", prob, ") (blue) vs",
" Binomial(", size, ",", prob, ") (green)", sep=""),
- names.arg = as.character(x), las=1) }
+ names.arg = as.character(0:size), las=1) }
}
\keyword{distribution}
diff --git a/man/posbinomial.Rd b/man/posbinomial.Rd
index eb929c9..7c5ebee 100644
--- a/man/posbinomial.Rd
+++ b/man/posbinomial.Rd
@@ -68,12 +68,13 @@ parameter.
\examples{
# Number of albinotic children in families with 5 kids (from Patil, 1962)
akids = data.frame(y = c(rep(1,25), rep(2,23), rep(3,10), 4,5), n=rep(5,60))
-akids = transform(akids, yprop = y / 5)
-fit1 = vglm(yprop~1, posbinomial, akids, trace=TRUE, weights=n)
-fit2 = vglm(cbind(y,n-y)~1, posbinomial, akids, trace=TRUE) # Same model
+fit1 = vglm(cbind(y, n-y) ~ 1, posbinomial, akids, trace = TRUE)
summary(fit1)
-Coef(fit2) # = MLE of p = 0.3088
-head(fitted(fit2))
+Coef(fit1) # = MLE of p = 0.3088
+head(fitted(fit1))
}
\keyword{models}
\keyword{regression}
+
+% akids = transform(akids, yprop = y / 5)
+
diff --git a/man/posnegbinUC.Rd b/man/posnegbinUC.Rd
index 98e1bea..c7df7b8 100644
--- a/man/posnegbinUC.Rd
+++ b/man/posnegbinUC.Rd
@@ -12,10 +12,10 @@
}
\usage{
-dposnegbin(x, size, prob=NULL, munb=NULL, log=FALSE)
-pposnegbin(q, size, prob=NULL, munb=NULL)
-qposnegbin(p, size, prob=NULL, munb=NULL)
-rposnegbin(n, size, prob=NULL, munb=NULL)
+dposnegbin(x, size, prob = NULL, munb = NULL, log = FALSE)
+pposnegbin(q, size, prob = NULL, munb = NULL)
+qposnegbin(p, size, prob = NULL, munb = NULL)
+rposnegbin(n, size, prob = NULL, munb = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -72,6 +72,7 @@ for counts with extra zeros.
\bold{88},
297--308.
+
}
\author{ T. W. Yee }
\note{
@@ -90,18 +91,18 @@ for counts with extra zeros.
}
\examples{
-munb = 5; size = 4; n = 1000
+munb <- 5; size <- 4; n <- 1000
table(y <- rposnegbin(n, munb=munb, size=size))
mean(y) # sample mean
munb / (1 - (size/(size+munb))^size) # population mean
-munb / pnbinom(0, mu=munb, size=size, lower.tail=FALSE) # same as before
+munb / pnbinom(0, mu=munb, size=size, lower.tail = FALSE) # same as before
x <- (-1):17
-(ii = dposnegbin(x, munb=munb, size=size))
+(ii <- dposnegbin(x, munb=munb, size=size))
max(abs(cumsum(ii) - pposnegbin(x, munb=munb, size=size))) # Should be 0
\dontrun{
-x = 0:10
+x <- 0:10
barplot(rbind(dposnegbin(x, munb=munb, size=size), dnbinom(x, mu=munb, size=size)),
beside = TRUE, col = c("blue","green"),
main=paste("dposnegbin(munb=", munb, ", size=", size, ") (blue) vs",
@@ -109,9 +110,9 @@ barplot(rbind(dposnegbin(x, munb=munb, size=size), dnbinom(x, mu=munb, size=size
names.arg = as.character(x)) }
# Another test for pposnegbin()
-nn = 5000
-mytab = cumsum(table(rposnegbin(nn, munb=munb, size=size))) / nn
-myans = pposnegbin(sort(as.numeric(names(mytab))), munb=munb, size=size)
+nn <- 5000
+mytab <- cumsum(table(rposnegbin(nn, munb=munb, size=size))) / nn
+myans <- pposnegbin(sort(as.numeric(names(mytab))), munb=munb, size=size)
max(abs(mytab - myans)) # Should be 0
}
\keyword{distribution}
diff --git a/man/posnegbinomial.Rd b/man/posnegbinomial.Rd
index e9e11f3..edfcee4 100644
--- a/man/posnegbinomial.Rd
+++ b/man/posnegbinomial.Rd
@@ -8,9 +8,9 @@
}
\usage{
-posnegbinomial(lmunb = "loge", lk = "loge", emunb =list(), ek = list(),
- ik = NULL, zero = -2, cutoff = 0.995, shrinkage.init=0.95,
- method.init=1)
+posnegbinomial(lmunb = "loge", lk = "loge", emunb = list(), ek = list(),
+ ik = NULL, zero = -2, cutoff = 0.995, shrinkage.init = 0.95,
+ method.init = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -48,9 +48,10 @@ posnegbinomial(lmunb = "loge", lk = "loge", emunb =list(), ek = list(),
modelled as an intercept only. By default, the \code{k} parameter
(after \code{lk} is applied) is modelled as a single unknown
number that is estimated. It can be modelled as a function of
- the explanatory variables by setting \code{zero=NULL}. A negative
+ the explanatory variables by setting \code{zero = NULL}. A negative
value means that the value is recycled, so setting \eqn{-2} means
all \code{k} are intercept only.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
\item{cutoff}{
@@ -71,6 +72,7 @@ posnegbinomial(lmunb = "loge", lk = "loge", emunb =list(), ek = list(),
binomial distribution but with the probability of a zero response
being zero. The other probabilities are scaled to sum to unity.
+
This family function is based on \code{\link{negbinomial}} and most
details can be found there. To avoid confusion, the parameter
\code{munb} here corresponds to the mean of an ordinary negative
@@ -81,15 +83,18 @@ posnegbinomial(lmunb = "loge", lk = "loge", emunb =list(), ek = list(),
where \eqn{p(0) = (k/(k + \mu_{nb}))^k}{p(0) = (k/(k + munb))^k} is the
probability an ordinary negative binomial distribution has a zero value.
+
The parameters \code{munb} and \code{k} are not independent in the
positive negative binomial distribution, whereas they are in the
ordinary negative binomial distribution.
+
This function handles \emph{multivariate} responses, so that a matrix
can be used as the response. The number of columns is the number
- of species, say, and setting \code{zero=-2} means that \emph{all}
+ of species, say, and setting \code{zero = -2} means that \emph{all}
species have a \code{k} equalling a (different) intercept only.
+
}
\section{Warning}{
The Poisson model corresponds to \code{k} equalling infinity.
@@ -111,12 +116,14 @@ Generalized additive modelling and zero inflated count data.
\bold{157},
179--188.
+
Williamson, E. and Bretherton, M. H. (1964)
Tables of the logarithmic series distribution.
\emph{Annals of Mathematical Statistics},
\bold{35},
284--297.
+
}
\author{ Thomas W. Yee }
\note{
@@ -130,24 +137,26 @@ Tables of the logarithmic series distribution.
\code{\link{negbinomial}},
\code{\link{zanegbinomial}},
% \code{\link[MASS]{rnegbin}}.
- \code{\link[stats:NegBinomial]{rnbinom}}.
+ \code{\link[stats:NegBinomial]{rnbinom}},
+ \code{\link{CommonVGAMffArguments}}.
+
}
\examples{
-pndat = data.frame(x = runif(nn <- 2000))
-pndat = transform(pndat, y1 = rposnegbin(nn, munb=exp(0+2*x), size=exp(1)),
- y2 = rposnegbin(nn, munb=exp(1+2*x), size=exp(3)))
-fit = vglm(cbind(y1,y2) ~ x, posnegbinomial, pndat, trace=TRUE)
-coef(fit, matrix=TRUE)
+pndat <- data.frame(x = runif(nn <- 2000))
+pndat <- transform(pndat, y1 = rposnegbin(nn, munb = exp(0+2*x), size = exp(1)),
+ y2 = rposnegbin(nn, munb = exp(1+2*x), size = exp(3)))
+fit <- vglm(cbind(y1, y2) ~ x, posnegbinomial, pndat, trace = TRUE)
+coef(fit, matrix = TRUE)
dim(fit@y)
# Another artificial data example
-pndat2 = data.frame(munb = exp(2), size = exp(3)); nn = 1000
-pndat2 = transform(pndat2, y = rposnegbin(nn, munb=munb, size=size))
+pndat2 <- data.frame(munb = exp(2), size = exp(3)); nn <- 1000
+pndat2 <- transform(pndat2, y = rposnegbin(nn, munb = munb, size = size))
with(pndat2, table(y))
-fit = vglm(y ~ 1, posnegbinomial, pndat2, trace=TRUE)
-coef(fit, matrix=TRUE)
+fit <- vglm(y ~ 1, posnegbinomial, pndat2, trace = TRUE)
+coef(fit, matrix = TRUE)
with(pndat2, mean(y)) # Sample mean
head(with(pndat2, munb/(1-(size/(size+munb))^size)), 1) # Population mean
head(fitted(fit), 3)
@@ -155,15 +164,15 @@ head(predict(fit), 3)
# Example: Corbet (1943) butterfly Malaya data
-corbet = data.frame(nindiv = 1:24,
- ofreq = c(118, 74, 44, 24, 29, 22, 20, 19, 20, 15, 12,
- 14, 6, 12, 6, 9, 9, 6, 10, 10, 11, 5, 3, 3))
-fit = vglm(nindiv ~ 1, posnegbinomial, weights=ofreq, data=corbet)
-coef(fit, matrix=TRUE)
+corbet <- data.frame(nindiv = 1:24,
+ ofreq = c(118, 74, 44, 24, 29, 22, 20, 19, 20, 15, 12,
+ 14, 6, 12, 6, 9, 9, 6, 10, 10, 11, 5, 3, 3))
+fit <- vglm(nindiv ~ 1, posnegbinomial, weights = ofreq, data = corbet)
+coef(fit, matrix = TRUE)
Coef(fit)
-(khat = Coef(fit)['k'])
-pdf2 = dposnegbin(x=with(corbet, nindiv), mu=fitted(fit), size=khat)
-print( with(corbet, cbind(nindiv, ofreq, fitted=pdf2*sum(ofreq))), dig=1)
+(khat <- Coef(fit)["k"])
+pdf2 <- dposnegbin(x = with(corbet, nindiv), mu = fitted(fit), size = khat)
+print( with(corbet, cbind(nindiv, ofreq, fitted = pdf2*sum(ofreq))), dig = 1)
}
\keyword{models}
\keyword{regression}
diff --git a/man/pospoisUC.Rd b/man/pospoisUC.Rd
index 9fca271..3c2265d 100644
--- a/man/pospoisUC.Rd
+++ b/man/pospoisUC.Rd
@@ -22,10 +22,17 @@ rpospois(n, lambda)
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
\item{n}{number of observations.
- If \code{length(n) > 1} then the length is taken to be the number required. }
- \item{lambda}{ vector of positive means (of an ordinary Poisson distribution).
- Short vectors are recycled. }
- \item{log}{ logical.
+ If \code{length(n) > 1} then the length is taken to be the number required.
+
+ }
+ \item{lambda}{
+ vector of positive means (of an ordinary Poisson distribution).
+ Short vectors are recycled.
+
+ }
+ \item{log}{
+ logical.
+
}
}
\details{
@@ -40,12 +47,15 @@ rpospois(n, lambda)
Unlike similar functions for the Poisson distribution, a zero value
of \code{lambda} is not permitted here.
+
}
\value{
\code{dpospois} gives the density,
\code{ppospois} gives the distribution function,
\code{qpospois} gives the quantile function, and
\code{rpospois} generates random deviates.
+
+
}
%\references{
%None.
@@ -53,28 +63,32 @@ rpospois(n, lambda)
\author{ T. W. Yee }
\note{
- For \code{rpospois}, the arguments of the function are fed into
- \code{\link[stats:Poisson]{rpois}} until \eqn{n} positive values
- are obtained. This may take a long time if \code{lambda} has values
- close to 0.
+ For \code{rpospois}, the arguments of the function are fed
+ into \code{\link[stats:Poisson]{rpois}} until \eqn{n} positive
+ values are obtained. This may take a long time if \code{lambda}
+ has values close to 0.
+
The family function \code{\link{pospoisson}} estimates
\eqn{\lambda}{lambda} by maximum likelihood estimation.
+
}
\seealso{
\code{\link{pospoisson}},
\code{\link{zapoisson}},
\code{\link[stats:Poisson]{rpois}}.
+
+
}
\examples{
-lambda = 2; y = rpospois(n=1000, lambda)
+lambda <- 2; y = rpospois(n = 1000, lambda)
table(y)
mean(y) # Sample mean
lambda / (1-exp(-lambda)) # Population mean
-(ii = dpospois(0:7, lambda))
+(ii <- dpospois(0:7, lambda))
cumsum(ii) - ppospois(0:7, lambda) # Should be 0s
table(rpospois(100, lambda))
@@ -82,12 +96,12 @@ table(qpospois(runif(1000), lambda))
round(dpospois(1:10, lambda) * 1000) # Should be similar
\dontrun{
-x = 0:7
+x <- 0:7
barplot(rbind(dpospois(x, lambda), dpois(x, lambda)),
- beside = TRUE, col = c("blue","green"),
- main=paste("Positive Poisson(", lambda, ") (blue) vs",
- " Poisson(", lambda, ") (green)", sep=""),
- names.arg = as.character(x), las=1, lwd=2) }
+ beside = TRUE, col = c("blue", "orange"),
+ main = paste("Positive Poisson(", lambda, ") (blue) vs",
+ " Poisson(", lambda, ") (orange)", sep = ""),
+ names.arg = as.character(x), las = 1, lwd = 2) }
}
\keyword{distribution}
diff --git a/man/pospoisson.Rd b/man/pospoisson.Rd
index 66929ed..0399f89 100644
--- a/man/pospoisson.Rd
+++ b/man/pospoisson.Rd
@@ -6,8 +6,8 @@
Fits a positive Poisson distribution.
}
\usage{
-pospoisson(link = "loge", earg=list(),
- expected=TRUE, ilambda=NULL, method.init=1)
+pospoisson(link = "loge", earg = list(),
+ expected = TRUE, ilambda = NULL, method.init = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
diff --git a/man/prentice74.Rd b/man/prentice74.Rd
index c6ea4ad..3f55c14 100644
--- a/man/prentice74.Rd
+++ b/man/prentice74.Rd
@@ -81,6 +81,13 @@ else \eqn{q < 0} is right skew.
A log gamma model and its maximum likelihood estimation.
\emph{Biometrika}, \bold{61}, 539--544.
+
+%On Maximisation of the Likelihood for the Generalised Gamma Distribution.
+%Angela Noufaily & M.C. Jones,
+%29-Oct-2009,
+%\url{http://stats-www.open.ac.uk/TechnicalReports/}
+
+
}
\section{Warning }{
The special case \eqn{q = 0} is not handled, therefore
@@ -97,7 +104,8 @@ else \eqn{q < 0} is right skew.
}
\seealso{
\code{\link{lgamma3ff}},
- \code{\link[base:Special]{lgamma}}.
+ \code{\link[base:Special]{lgamma}},
+ \code{\link{gengamma}}.
}
\examples{
diff --git a/man/probit.Rd b/man/probit.Rd
index 4504993..f8f8d14 100644
--- a/man/probit.Rd
+++ b/man/probit.Rd
@@ -101,6 +101,7 @@ probit(p) # Has NAs
probit(p, earg=list(bvalue= .Machine$double.eps)) # Has no NAs
\dontrun{
+p = seq(0.01, 0.99, by=0.01)
plot(p, logit(p), type="l", col="limegreen", ylab="transformation",
lwd=2, las=1, main="Some probability link functions")
lines(p, probit(p), col="purple", lwd=2)
diff --git a/man/propodds.Rd b/man/propodds.Rd
index 96df79f..5518c10 100644
--- a/man/propodds.Rd
+++ b/man/propodds.Rd
@@ -44,6 +44,11 @@ Agresti, A. (2002)
2nd ed. New York: Wiley.
+Agresti, A. (2010)
+\emph{Analysis of Ordinal Categorical Data},
+2nd ed. New York: Wiley.
+
+
Yee, T. W. (2010)
The \pkg{VGAM} package for categorical data analysis.
\emph{Journal of Statistical Software},
@@ -75,24 +80,23 @@ contains further information and examples.
}
\examples{
# Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
-pneumo = transform(pneumo, let=log(exposure.time))
+pneumo = transform(pneumo, let = log(exposure.time))
(fit = vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
fit@y # Sample proportions
-weights(fit, type="prior") # Number of observations
-coef(fit, matrix=TRUE)
+weights(fit, type = "prior") # Number of observations
+coef(fit, matrix = TRUE)
constraints(fit) # Constraint matrices
summary(fit)
# Check that the model is linear in let ----------------------
-fit2 = vgam(cbind(normal, mild, severe) ~ s(let, df=2), propodds, pneumo)
-\dontrun{
-plot(fit2, se=TRUE, lcol=2, scol=2) }
+fit2 = vgam(cbind(normal, mild, severe) ~ s(let, df = 2), propodds, pneumo)
+\dontrun{ plot(fit2, se = TRUE, lcol = 2, scol = 2) }
# Check the proportional odds assumption with a LRT ----------
(fit3 = vglm(cbind(normal, mild, severe) ~ let,
- cumulative(parallel=FALSE, reverse=TRUE), pneumo))
+ cumulative(parallel = FALSE, reverse = TRUE), pneumo))
pchisq(2*(logLik(fit3)-logLik(fit)),
- df=length(coef(fit3))-length(coef(fit)), lower.tail=FALSE)
+ df = length(coef(fit3))-length(coef(fit)), lower.tail = FALSE)
}
\keyword{models}
\keyword{regression}
diff --git a/man/put.smart.Rd b/man/put.smart.Rd
index 5922438..e356778 100644
--- a/man/put.smart.Rd
+++ b/man/put.smart.Rd
@@ -7,6 +7,7 @@ in
\code{smartpredenv} (\R)
or
frame 1 (S-PLUS).
+
}
\usage{
put.smart(smart)
@@ -14,10 +15,13 @@ put.smart(smart)
\arguments{
\item{smart}{
a list containing parameters needed later for smart prediction.
+
}
}
\value{
Nothing is returned.
+
+
}
\section{Side Effects}{
The variable \code{.smart.prediction.counter} in
@@ -38,6 +42,8 @@ Nothing is returned.
or
frame 1 (S-PLUS)
is adjusted accordingly.
+
+
}
\details{
\code{put.smart} is used in \code{"write"} mode within a smart function.
@@ -46,9 +52,12 @@ Nothing is returned.
The function \code{put.smart} is the opposite of
\code{\link{get.smart}}, and both deal with the same contents.
+
}
\seealso{
\code{\link{get.smart}}.
+
+
}
\examples{
"my1" <- function(x, minx=min(x)) { # Here is a smart function
diff --git a/man/quasipoissonff.Rd b/man/quasipoissonff.Rd
index 7f67c37..4120e59 100644
--- a/man/quasipoissonff.Rd
+++ b/man/quasipoissonff.Rd
@@ -41,17 +41,21 @@ quasipoissonff(link = "loge", onedpar = FALSE,
\details{
\eqn{M} defined above is the number of linear/additive predictors.
+
If the dispersion parameter is unknown, then the resulting estimate
is not fully a maximum likelihood estimate.
+
A dispersion parameter that is less/greater than unity corresponds to
under-/over-dispersion relative to the Poisson model. Over-dispersion
is more common in practice.
+
When fitting a Quadratic RR-VGLM, the response is a matrix of \eqn{M},
say, columns (e.g., one column per species). Then there will be \eqn{M}
dispersion parameters (one per column of the response matrix).
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -73,15 +77,18 @@ quasipoissonff(link = "loge", onedpar = FALSE,
\note{
This function will handle a matrix response automatically.
- The call \code{poissonff(dispersion=0, ...)} is equivalent to
+
+ The call \code{poissonff(dispersion = 0, ...)} is equivalent to
\code{quasipoissonff(...)}. The latter was written so that R users
of \code{quasipoisson()} would only need to add a ``\code{ff}''
to the end of the family function name.
+
Regardless of whether the dispersion parameter is to be estimated or
not, its value can be seen from the output from the \code{summary()}
of the object.
+
% With the introduction of name spaces for the \pkg{VGAM} package,
% \code{"ff"} can be dropped for this family function.
@@ -89,6 +96,7 @@ quasipoissonff(link = "loge", onedpar = FALSE,
\seealso{
\code{\link{poissonff}},
+ \code{\link{negbinomial}},
\code{\link{loge}},
\code{\link{rrvglm}},
\code{\link{cqo}},
@@ -96,6 +104,7 @@ quasipoissonff(link = "loge", onedpar = FALSE,
\code{\link{binomialff}},
\code{\link{quasibinomialff}},
\code{\link[stats]{quasipoisson}}.
+
}
\examples{
quasipoissonff()
diff --git a/man/rayleigh.Rd b/man/rayleigh.Rd
index f8b6374..365d825 100644
--- a/man/rayleigh.Rd
+++ b/man/rayleigh.Rd
@@ -9,20 +9,21 @@
}
\usage{
-rayleigh(link = "loge", earg=list(), nrfs=1/3+0.01)
-crayleigh(link ="loge", earg=list(), expected=FALSE)
+ rayleigh(lscale = "loge", escale = list(), nrfs = 1/3 + 0.01)
+crayleigh(lscale = "loge", escale = list(), oim = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link}{
- Parameter link function applied to the parameter \eqn{a}.
+ \item{lscale}{
+ Parameter link function applied to the scale parameter \eqn{b}.
See \code{\link{Links}} for more choices.
- A log link is the default because \eqn{a} is positive.
+ A log link is the default because \eqn{b} is positive.
}
- \item{earg}{
+ \item{escale}{
List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
+ See \code{earg} in \code{\link{Links}} for general information, as well
+ as \code{\link{CommonVGAMffArguments}}.
}
\item{nrfs}{
@@ -33,22 +34,25 @@ crayleigh(link ="loge", earg=list(), expected=FALSE)
positive-definite working weights.
}
- \item{expected}{
- Logical. For censored data only, \code{FALSE}
- means the Newton-Raphson algorithm, and \code{TRUE} means Fisher scoring.
+ \item{oim}{
+ Logical.
+ For censored data only,
+ \code{TRUE} means the Newton-Raphson algorithm, and
+ \code{FALSE} means Fisher scoring.
}
}
\details{
The Rayleigh distribution, which is used in physics,
has a probability density function that can be written
- \deqn{f(y) = y \exp(-0.5 (y/a)^2)/a^2}{%
- f(y) = y*exp(-0.5*(y/a)^2)/a^2}
- for \eqn{y>0} and \eqn{a>0}.
+ \deqn{f(y) = y \exp(-0.5 (y/b)^2)/b^2}{%
+ f(y) = y*exp(-0.5*(y/b)^2)/b^2}
+ for \eqn{y > 0} and \eqn{b > 0}.
The mean of \eqn{Y} is
- \eqn{a \sqrt{\pi / 2}}{a * sqrt(pi / 2)}
+ \eqn{b \sqrt{\pi / 2}}{b * sqrt(pi / 2)}
and its variance is
- \eqn{a^2 (4-\pi)/2}{a^2 (4-pi)/2}.
+ \eqn{b^2 (4-\pi)/2}{b^2 (4-pi)/2}.
+
The \pkg{VGAM} family function \code{crayleigh} handles right-censored
data (the true value is greater than the observed value). To indicate
@@ -58,9 +62,10 @@ crayleigh(link ="loge", earg=list(), expected=FALSE)
taken to be \code{FALSE}. The fitted object has this component stored
in the \code{extra} slot.
+
}
\section{Warning}{
- The theory behind the argument \code{expected} is not fully complete.
+ The theory behind the argument \code{oim} is not fully complete.
}
\value{
@@ -68,41 +73,45 @@ crayleigh(link ="loge", earg=list(), expected=FALSE)
The object is used by modelling functions such as \code{\link{vglm}},
\code{\link{rrvglm}}
and \code{\link{vgam}}.
+
+
}
\references{
Evans, M., Hastings, N. and Peacock, B. (2000)
\emph{Statistical Distributions},
New York: Wiley-Interscience, Third edition.
+
+
}
\author{ T. W. Yee }
\note{
A related distribution is the Maxwell distribution.
+
}
\seealso{
\code{\link{Rayleigh}},
+ \code{\link{genrayleigh}},
\code{\link{riceff}},
\code{\link{maxwell}}.
}
\examples{
-n = 1000; a = exp(2)
-ystar = rrayleigh(n, a=a)
-fit = vglm(ystar ~ 1, rayleigh, trace=TRUE, crit="c")
+nn <- 1000; Scale <- exp(2)
+rdata <- data.frame(ystar = rrayleigh(nn, scale = Scale))
+fit <- vglm(ystar ~ 1, rayleigh, rdata, trace = TRUE, crit = "c")
head(fitted(fit))
-mean(ystar)
-coef(fit, matrix=TRUE)
+with(rdata, mean(ystar))
+coef(fit, matrix = TRUE)
Coef(fit)
# Censored data
-U = runif(n, 5, 15)
-y = pmin(U, ystar)
-\dontrun{
-par(mfrow=c(1,2)); hist(ystar); hist(y)
-}
-extra = list(rightcensored = ystar > U)
-fit = vglm(y ~ 1, crayleigh, trace=TRUE, extra=extra)
+rdata <- transform(rdata, U = runif(nn, 5, 15))
+rdata <- transform(rdata, y = pmin(U, ystar))
+\dontrun{ par(mfrow = c(1,2)); hist(with(rdata, ystar)); hist(with(rdata, y)) }
+extra <- with(rdata, list(rightcensored = ystar > U))
+fit <- vglm(y ~ 1, crayleigh, rdata, trace = TRUE, extra = extra)
table(fit@extra$rightcen)
-coef(fit, matrix=TRUE)
+coef(fit, matrix = TRUE)
head(fitted(fit))
}
\keyword{models}
diff --git a/man/rrvglm-class.Rd b/man/rrvglm-class.Rd
index 7eb0699..83eef97 100644
--- a/man/rrvglm-class.Rd
+++ b/man/rrvglm-class.Rd
@@ -225,19 +225,24 @@ Reduced-rank vector generalized linear models.
\emph{Statistical Modelling},
\bold{3}, 15--41.
+
Yee, T. W. and Wild, C. J. (1996)
Vector generalized additive models.
\emph{Journal of the Royal Statistical Society, Series B, Methodological},
\bold{58}, 481--493.
+
\url{http://www.stat.auckland.ac.nz/~yee}
+
}
\author{ Thomas W. Yee }
\note{
The slots of \code{"rrvglm"} objects are currently identical to \code{"vglm"}
objects.
+
+
}
% ~Make other sections like Warning with \section{Warning }{....} ~
@@ -248,15 +253,17 @@ Vector generalized additive models.
\code{\link{rrvglm}},
\code{\link{lvplot.rrvglm}},
\code{\link{vglmff-class}}.
+
}
\examples{
# Rank-1 stereotype model of Anderson (1984)
-set.seed(111)
-pneumo = transform(pneumo, let=log(exposure.time),
- x1=runif(nrow(pneumo))) # x1 is some unrelated covariate
-fit = rrvglm(cbind(normal, mild, severe) ~ let + x1,
- multinomial, pneumo, Rank=1)
+pneumo <- transform(pneumo,
+ let = log(exposure.time),
+ x3 = runif(nrow(pneumo))) # x3 is some unrelated covariate
+fit <- rrvglm(cbind(normal, mild, severe) ~ let + x3,
+ multinomial, pneumo, Rank = 1)
Coef(fit)
}
\keyword{classes}
+% set.seed(111)
diff --git a/man/rrvglm.Rd b/man/rrvglm.Rd
index 10e6724..cef717f 100644
--- a/man/rrvglm.Rd
+++ b/man/rrvglm.Rd
@@ -19,65 +19,23 @@ rrvglm(formula, family, data = list(), weights = NULL, subset = NULL,
%- maybe also `usage' for other objects documented here.
\arguments{
- \item{formula}{
- a symbolic description of the model to be fit.
- The RHS of the formula is applied to each linear predictor. Different
- variables in each linear predictor can be chosen by specifying
- constraint matrices.
+ \item{formula, family, weights}{
+ See \code{\link{vglm}}.
}
- \item{family}{
- a function of class \code{"vglmff"} (see \code{\link{vglmff-class}})
- describing what statistical model is to be fitted. This is called a
- ``\pkg{VGAM} family function''. See \code{\link{CommonVGAMffArguments}}
- for general information about many types of arguments found in this
- type of function.
- }
\item{data}{
an optional data frame containing the variables in the model.
By default the variables are taken from \code{environment(formula)},
typically the environment from which \code{rrvglm} is called.
}
- \item{weights}{
- an optional vector or matrix of (prior) weights
- to be used in the fitting process.
- If \code{weights} is a matrix, then it must be in
- \emph{matrix-band} form, whereby the first \eqn{M}
- columns of the matrix are the
- diagonals, followed by the upper-diagonal band, followed by the
- band above that, etc. In this case, there can be up to \eqn{M(M+1)}
- columns, with the last column corresponding to the
- (1,\eqn{M}) elements of the weight matrices.
-
- }
- \item{subset}{
- an optional logical vector specifying a subset of observations to be
- used in the fitting process.
-
- }
- \item{na.action}{
- a function which indicates what should happen when the data contain
- \code{NA}s.
- The default is set by the \code{na.action} setting
- of \code{\link[base]{options}}, and is \code{na.fail} if that is unset.
- The ``factory-fresh'' default is \code{na.omit}.
-
- }
- \item{etastart}{
- starting values for the linear predictors.
- It is a \eqn{M}-column matrix.
- If \eqn{M=1} then it may be a vector.
-
- }
- \item{mustart}{
- starting values for the fitted values. It can be a vector or a matrix.
- Some family functions do not make use of this argument.
+ \item{subset, na.action}{
+ See \code{\link{vglm}}.
}
- \item{coefstart}{
- starting values for the coefficient vector.
+ \item{etastart, mustart, coefstart}{
+ See \code{\link{vglm}}.
}
\item{control}{
@@ -85,10 +43,8 @@ rrvglm(formula, family, data = list(), weights = NULL, subset = NULL,
See \code{\link{rrvglm.control}} for details.
}
- \item{offset}{
- a vector or \eqn{M}-column matrix of offset values.
- These are \emph{a priori} known and are
- added to the linear predictors during fitting.
+ \item{offset, model, contrasts}{
+ See \code{\link{vglm}}.
}
\item{method}{
@@ -97,11 +53,6 @@ rrvglm(formula, family, data = list(), weights = NULL, subset = NULL,
uses iteratively reweighted least squares (IRLS).
}
- \item{model}{
- a logical value indicating whether the \emph{model frame}
- should be assigned in the \code{model} slot.
-
- }
\item{x.arg, y.arg}{
logical values indicating whether
the model matrix and response vector/matrix used in the fitting
@@ -111,39 +62,12 @@ rrvglm(formula, family, data = list(), weights = NULL, subset = NULL,
\code{vglmfit} is a \code{vglm} object.
}
- \item{contrasts}{
- an optional list. See the \code{contrasts.arg}
- of \code{\link{model.matrix.default}}.
-
- }
\item{constraints}{
- an optional list of constraint matrices.
- The components of the list must be named with the term it
- corresponds to (and it must match in character format).
- Each constraint matrix must have \eqn{M} rows, and be of
- full-column rank.
- By default, constraint matrices are the \eqn{M} by \eqn{M}
- identity
- matrix unless arguments in the family function itself override
- these values.
- If \code{constraints} is used it must contain \emph{all} the
- terms; an incomplete list is not accepted.
-
- }
- \item{extra}{
- an optional list with any extra information that might be needed
- by the family function.
+ See \code{\link{vglm}}.
}
- \item{qr.arg}{
- logical value indicating whether
- the slot \code{qr}, which returns the QR decomposition of the
- VLM model matrix, is returned on the object.
-
- }
- \item{smart}{
- logical value indicating whether smart prediction
- (\code{\link{smartpred}}) will be used.
+ \item{extra, smart, qr.arg}{
+ See \code{\link{vglm}}.
}
\item{\dots}{
@@ -170,6 +94,12 @@ rrvglm(formula, family, data = list(), weights = NULL, subset = NULL,
means that the response is a monotonically increasing or
decreasing function of the latent variables.
+ For identifiability
+ it is common to enforce \emph{corner constraints} on \eqn{A}:
+ by default, the top \eqn{R} by \eqn{R} submatrix is fixed to
+ be the order-\eqn{R} identity matrix and the remainder of \eqn{A}
+ is estimated.
+
The underlying algorithm of RR-VGLMs is iteratively
reweighted least squares (IRLS) with an optimizing
@@ -180,10 +110,8 @@ rrvglm(formula, family, data = list(), weights = NULL, subset = NULL,
In theory, any \pkg{VGAM} family function that works for
\code{\link{vglm}} and \code{\link{vgam}} should work
for \code{rrvglm} too.
-
-
- \code{rrvglm.fit} is the function that actually does the work.
- It is \code{vglm.fit} with some extra code.
+ The function that actually does the work is \code{rrvglm.fit};
+ it is \code{vglm.fit} with some extra code.
}
@@ -217,6 +145,12 @@ Regression and ordered categorical variables.
\bold{46}, 1--30.
+ Yee, T. W. (2010)
+ Two-parameter reduced-rank vector generalized linear models.
+ \emph{In preparation}.
+
+
+
Documentation accompanying the \pkg{VGAM} package at
\url{http://www.stat.auckland.ac.nz/~yee}
contains further information and examples.
@@ -225,16 +159,16 @@ Regression and ordered categorical variables.
\author{ Thomas W. Yee }
\note{
- The smart prediction (\code{\link{smartpred}}) library
- is packed with the \pkg{VGAM} library.
-
-
- The arguments of \code{rrvglm} are the same as
+ The arguments of \code{rrvglm} are in general the same as
those of \code{\link{vglm}} but with some extras in
\code{\link{rrvglm.control}}.
- In the example below, a rank-1 \emph{stereotype}
+ The smart prediction (\code{\link{smartpred}}) library
+ is packed with the \pkg{VGAM} library.
+
+
+ In an example below, a rank-1 \emph{stereotype}
model of Anderson (1984) is fitted to some car data.
The reduced-rank regression is performed, adjusting for
two covariates. Setting a trivial constraint matrix for
@@ -245,7 +179,7 @@ Regression and ordered categorical variables.
car of fixed weight and width.
- If \code{fit <- rrvglm(..., data=mydata)} then
+ If \code{fit <- rrvglm(..., data = mydata)} then
\code{summary(fit)} requires corner constraints and no
missing values in \code{mydata}. Often the estimated
variance-covariance matrix of the parameters is not
@@ -258,7 +192,7 @@ Regression and ordered categorical variables.
With multivariate binary responses, one must use
- \code{binomialff(mv=TRUE)} to indicate that the response
+ \code{binomialff(mv = TRUE)} to indicate that the response
(matrix) is multivariate. Otherwise, it is interpreted
as a single binary response variable.
@@ -281,45 +215,71 @@ Regression and ordered categorical variables.
\code{\link{vglm-class}},
\code{\link{smartpred}},
\code{rrvglm.fit}.
+ Special family functions include
+ \code{\link{negbinomial}}
+ \code{\link{zipoisson}}
+ and \code{\link{zinegbinomial}}.
+ (see Yee (2010) and \pkg{COZIGAM}).
Methods functions include
\code{\link{Coef.rrvglm}},
\code{summary.rrvglm},
etc.
+ Data include
+ \code{\link{crashi}}.
}
\examples{
-# Example 1: negative binomial with Var(Y) = mu + mu^s2, s2 unknown
-nn = 500
-s2 = 1.5 # Specify this
-c2 = 2 - s2
-ndata = data.frame(x2 = runif(nn), x3 = runif(nn))
-ndata = transform(ndata, mu = exp(2 + 1 * x2 + 0 * x3))
-ndata = transform(ndata, y2 = rnbinom(nn, mu=mu, size=mu^c2))
-\dontrun{plot(y2 ~ x2, data = ndata, pch = "+", col = 'blue',
- main=paste("Var(Y) = mu + mu^", s2, sep="")) }
-Fit2 = rrvglm(y2 ~ x2 + x3, negbinomial(zero = NULL),
- data = ndata, Norrr = NULL)
-c2hat = (Coef(Fit2)@A)["log(k)", 1]
-s2hat = 2 - c2hat
-s2hat # Estimate of s2
-
-
-# Example 2
+# Example 1: RR negative binomial (RR-NB) with Var(Y) = mu + delta1 * mu^delta2
+nn <- 1000 # Number of observations
+delta1 <- 3.0 # Specify this
+delta2 <- 1.5 # Specify this; should be greater than unity
+a21 <- 2 - delta2
+mydata <- data.frame(x2 = runif(nn), x3 = runif(nn))
+mydata <- transform(mydata, mu = exp(2 + 3 * x2 + 0 * x3))
+mydata <- transform(mydata, y2 = rnbinom(nn, mu=mu, size=(1/delta1)*mu^a21))
+\dontrun{
+plot(y2 ~ x2, data = mydata, pch = "+", col = 'blue', las = 1,
+ main = paste("Var(Y) = mu + ", delta1, " * mu^", delta2, sep = "")) }
+rrnb2 <- rrvglm(y2 ~ x2 + x3, negbinomial(zero = NULL), mydata, trace = TRUE)
+
+a21.hat <- (Coef(rrnb2)@A)["log(k)", 1]
+beta11.hat <- Coef(rrnb2)@B1["(Intercept)", "log(mu)"]
+beta21.hat <- Coef(rrnb2)@B1["(Intercept)", "log(k)"]
+(delta1.hat <- exp(a21.hat * beta11.hat - beta21.hat))
+(delta2.hat <- 2 - a21.hat)
+# exp(a21.hat * predict(rrnb2)[1,1] - predict(rrnb2)[1,2]) # delta1.hat
+summary(rrnb2)
+
+# Obtain a 95 percent confidence interval for delta2:
+se.a21.hat <- sqrt(vcov(rrnb2)["I(lv.mat)", "I(lv.mat)"])
+ci.a21 <- a21.hat + c(-1, 1) * 1.96 * se.a21.hat
+(ci.delta2 <- 2 - rev(ci.a21)) # The 95 percent confidence interval
+
+confint_rrnb(rrnb2) # Quick way to get it
+
+# Plot the abundances and fitted values against the latent variable
+\dontrun{
+plot(y2 ~ lv(rrnb2), data = mydata, col = "blue",
+ xlab = "Latent variable", las = 1)
+ooo <- order(lv(rrnb2))
+lines(fitted(rrnb2)[ooo] ~ lv(rrnb2)[ooo], col = "red") }
+
+# Example 2: stereotype model (reduced-rank multinomial logit model)
data(car.all)
index = with(car.all, Country == "Germany" | Country == "USA" |
Country == "Japan" | Country == "Korea")
scar = car.all[index, ] # standardized car data
fcols = c(13,14,18:20,22:26,29:31,33,34,36) # These are factors
-scar[,-fcols] = scale(scar[,-fcols]) # Standardize all numerical vars
+scar[,-fcols] = scale(scar[, -fcols]) # Standardize all numerical vars
ones = matrix(1, 3, 1)
-cms = list("(Intercept)" = diag(3), Width = ones, Weight = ones,
- Disp. = diag(3), Tank = diag(3), Price = diag(3),
- Frt.Leg.Room = diag(3))
+clist = list("(Intercept)" = diag(3), Width = ones, Weight = ones,
+ Disp. = diag(3), Tank = diag(3), Price = diag(3),
+ Frt.Leg.Room = diag(3))
set.seed(111)
fit = rrvglm(Country ~ Width + Weight + Disp. + Tank + Price + Frt.Leg.Room,
multinomial, data = scar, Rank = 2, trace = TRUE,
- constraints = cms, Norrr = ~ 1 + Width + Weight,
+ constraints = clist, Norrr = ~ 1 + Width + Weight,
Uncor = TRUE, Corner = FALSE, Bestof = 2)
fit@misc$deviance # A history of the fits
Coef(fit)
diff --git a/man/rrvglm.control.Rd b/man/rrvglm.control.Rd
index 2cf2a56..813020b 100644
--- a/man/rrvglm.control.Rd
+++ b/man/rrvglm.control.Rd
@@ -9,11 +9,11 @@
\usage{
rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
Corner = TRUE, Uncorrelated.lv = FALSE, Wmat = NULL, Svd.arg = FALSE,
- Index.corner = if (length(Structural.zero))
- head((1:1000)[-Structural.zero], Rank) else 1:Rank,
+ Index.corner = if (length(szero))
+ head((1:1000)[-szero], Rank) else 1:Rank,
Ainit = NULL, Alpha = 0.5, Bestof = 1, Cinit = NULL,
Etamat.colmax = 10,
- SD.Ainit = 0.02, SD.Cinit = 0.02, Structural.zero = NULL,
+ SD.Ainit = 0.02, SD.Cinit = 0.02, szero = NULL,
Norrr = ~1, trace = FALSE, Use.Init.Poisson.QO = FALSE,
checkwz = TRUE, wzepsilon = .Machine$double.eps^0.75, ...)
}
@@ -102,9 +102,11 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
% RR-VGLM is to be fitted. If \code{TRUE}, an object of class
% \code{"qrrvglm"} will be returned, otherwise \code{"rrvglm"}.
% }
- \item{Structural.zero}{
+ \item{szero}{
Integer vector specifying which rows
of the constraint matrices are to be all zeros.
+ These are called structural zeros.
+
}
\item{SD.Ainit, SD.Cinit}{
Standard deviation of the initial values for the elements
@@ -116,14 +118,19 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
% \item{ppar}{ Ignore this. }
\item{Norrr}{
Formula giving terms that are \emph{not} to be included
- in the reduced-rank regression. These variables constitute
- the \eqn{\bold{B}_1}{\bold{B}1} matrix in the Yee and Hastie
- paper. Those variables which are subject to the reduced-rank
+ in the reduced-rank regression.
+ That is, \code{Norrr} specifies which explanatory variables
+ are in the \eqn{x_1}{x1} vector of \code{\link{rrvglm}},
+ and the rest go into \eqn{x_2}{x2}.
+ The \eqn{x_1}{x1} variables constitute
+ the \eqn{\bold{B}_1}{\bold{B}1} matrix in Yee and Hastie (2003).
+ Those \eqn{x_2}{x2} variables which are subject to the reduced-rank
regression correspond to the \eqn{\bold{B}_2}{\bold{B}2}
matrix.
Set \code{Norrr = NULL} for the reduced-rank regression to
be applied to every explanatory variable including the intercept.
+
}
\item{trace}{
Logical indicating if output should be produced for
@@ -215,14 +222,15 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
\code{\link{vglm}},
\code{\link{vglm.control}},
\code{\link{cqo}}.
+
}
\examples{
set.seed(111)
-pneumo = transform(pneumo, let=log(exposure.time),
- x1 = runif(nrow(pneumo))) # x1 is some unrelated covariate
-fit = rrvglm(cbind(normal, mild, severe) ~ let + x1,
- multinomial, pneumo, Rank=1, Index.corner=2)
+pneumo <- transform(pneumo, let = log(exposure.time),
+ x3 = runif(nrow(pneumo))) # x3 is random noise
+fit <- rrvglm(cbind(normal, mild, severe) ~ let + x3,
+ multinomial, pneumo, Rank = 1, Index.corner = 2)
constraints(fit)
vcov(fit)
summary(fit)
diff --git a/man/setup.smart.Rd b/man/setup.smart.Rd
index 1ee0d94..60faab4 100644
--- a/man/setup.smart.Rd
+++ b/man/setup.smart.Rd
@@ -4,9 +4,10 @@
\description{
Sets up smart prediction in one of two modes:
\code{"write"} and \code{"read"}.
+
}
\usage{
-setup.smart(mode.arg, smart.prediction=NULL, max.smart=30)
+setup.smart(mode.arg, smart.prediction = NULL, max.smart = 30)
}
\arguments{
\item{mode.arg}{
@@ -16,53 +17,62 @@ setup.smart(mode.arg, smart.prediction=NULL, max.smart=30)
fitting. This is stored in \code{object at smart.prediction} or
\code{object$smart.prediction} where
\code{object} is the name of the fitted object.
+
}
\item{smart.prediction}{
-If in \code{"read"} mode then \code{smart.prediction} must be assigned
-the list of data dependent parameters, which is stored
-on the fitted object.
-Otherwise, \code{smart.prediction} is ignored.
+ If in \code{"read"} mode then \code{smart.prediction} must be assigned
+ the list of data dependent parameters, which is stored
+ on the fitted object.
+ Otherwise, \code{smart.prediction} is ignored.
+
+
}
\item{max.smart}{
-\code{max.smart} is the initial length of the list \code{.smart.prediction}.
-It is not important because \code{.smart.prediction} is made larger if
-needed.
+ \code{max.smart} is the initial length of the list
+ \code{.smart.prediction}. It is not important because
+ \code{.smart.prediction} is made larger if needed.
+
}}
\value{
Nothing is returned.
}
\section{Side Effects}{
-In \code{"write"} mode
-\code{.smart.prediction} in
-\code{smartpredenv} (\R) or frame 1 (S-PLUS)
-is assigned an empty list with \code{max.smart} components.
-In \code{"read"} mode
-\code{.smart.prediction} in
-\code{smartpredenv} (\R) or frame 1 (S-PLUS)
-is assigned \code{smart.prediction}.
-In both cases,
-\code{.smart.prediction.counter} in
-\code{smartpredenv} (\R) or
-frame 1 (S-PLUS)
-is assigned the value 0, and
-\code{.smart.prediction.mode} and \code{.max.smart} are written to
-\code{smartpredenv} (\R) or frame 1 (S-PLUS) too.
+ In \code{"write"} mode
+ \code{.smart.prediction} in
+ \code{smartpredenv} (\R) or frame 1 (S-PLUS)
+ is assigned an empty list with \code{max.smart} components.
+ In \code{"read"} mode
+ \code{.smart.prediction} in
+ \code{smartpredenv} (\R) or frame 1 (S-PLUS)
+ is assigned \code{smart.prediction}.
+ In both cases,
+ \code{.smart.prediction.counter} in
+ \code{smartpredenv} (\R) or
+ frame 1 (S-PLUS)
+ is assigned the value 0, and
+ \code{.smart.prediction.mode} and \code{.max.smart} are written to
+ \code{smartpredenv} (\R) or frame 1 (S-PLUS) too.
+
+
}
\details{
-This function is only required by programmers writing a modelling
-function such as \code{\link[stats]{lm}}
-and \code{\link[stats]{glm}}, or a prediction functions of such,
-e.g., \code{\link[stats]{predict.lm}}.
-The function
-\code{setup.smart} operates by mimicking the operations of a
-first-in first-out stack (better known as a \emph{queue}).
+ This function is only required by programmers writing a modelling
+ function such as \code{\link[stats]{lm}}
+ and \code{\link[stats]{glm}}, or a prediction functions of such,
+ e.g., \code{\link[stats]{predict.lm}}.
+ The function
+ \code{setup.smart} operates by mimicking the operations of a
+ first-in first-out stack (better known as a \emph{queue}).
+
+
}
\seealso{
\code{\link[stats]{lm}},
\code{\link[stats]{predict.lm}}.
+
}
\examples{
\dontrun{# Put at the beginning of lm
@@ -72,8 +82,6 @@ setup.smart("write")
\dontrun{# Put at the beginning of predict.lm
setup.smart("read", smart.prediction=object$smart.prediction)
}
-
-
}
%\keyword{smart}
\keyword{models}
diff --git a/man/simplex.Rd b/man/simplex.Rd
index 49f1e05..1b4f473 100644
--- a/man/simplex.Rd
+++ b/man/simplex.Rd
@@ -74,22 +74,26 @@ simplex(lmu = "logit", lsigma = "loge", emu=list(), esigma=list(),
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
Jorgensen, B. (1997)
\emph{The Theory of Dispersion Models}.
London: Chapman & Hall
+
Song, P. X.-K. (2007)
\emph{Correlated Data Analysis: Modeling, Analytics, and Applications}.
Springer.
+
}
\author{ T. W. Yee }
\note{
This distribution is potentially useful for dispersion modelling.
Numerical problems may occur when \code{mu} is very close to 0 or 1.
+
}
\seealso{
@@ -98,13 +102,15 @@ simplex(lmu = "logit", lsigma = "loge", emu=list(), esigma=list(),
\code{\link{rig}},
\code{\link{binomialff}}.
+
}
\examples{
-nn = 1000
-sdata = data.frame(x = runif(nn))
-sdata = transform(sdata, y = rsimplex(nn, mu = logit(1+2*x, inverse = TRUE),
- dispersion = exp(1 - 2*x)))
-(fit = vglm(y ~ x, simplex(zero = NULL), sdata, trace = TRUE))
+sdata <- data.frame(x2 = runif(nn <- 1000))
+sdata <- transform(sdata, eta1 = 1 + 2 * x2,
+ eta2 = 1 - 2 * x2)
+sdata <- transform(sdata, y = rsimplex(nn, mu = logit(eta1, inverse = TRUE),
+ dispersion = exp(eta2)))
+(fit <- vglm(y ~ x2, simplex(zero = NULL), sdata, trace = TRUE))
coef(fit, matrix = TRUE)
summary(fit)
}
diff --git a/man/skewnormal1.Rd b/man/skewnormal1.Rd
index 6069482..603acf6 100644
--- a/man/skewnormal1.Rd
+++ b/man/skewnormal1.Rd
@@ -8,28 +8,14 @@
}
\usage{
-skewnormal1(lshape = "identity", earg = list(), ishape = NULL, nsimEIM=NULL)
+skewnormal1(lshape = "identity", earg = list(), ishape = NULL,
+ nsimEIM = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lshape}{
- Link function applied to the shape parameter.
- See \code{\link{Links}} for more choices.
-
- }
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
- \item{ishape}{
- Optional inital value for the shape parameter.
- The default is to choose one internally.
- See the note below.
-
- }
- \item{nsimEIM}{
- See \code{\link{CommonVGAMffArguments}}.
+ \item{lshape, earg, ishape, nsimEIM}{
+ See \code{\link{Links}} and
+ \code{\link{CommonVGAMffArguments}}.
}
}
@@ -51,6 +37,7 @@ skewnormal1(lshape = "identity", earg = list(), ishape = NULL, nsimEIM=NULL)
When the shape parameter changes its sign, the density is reflected
about \eqn{y=0}.
+
The mean of the distribution is
\eqn{\mu=\alpha \sqrt{2/(\pi (1+\alpha^2))}}{mu=alpha*sqrt(2/(pi*(1+alpha^2)))}
and these are returned as the fitted values.
@@ -58,6 +45,7 @@ skewnormal1(lshape = "identity", earg = list(), ishape = NULL, nsimEIM=NULL)
The Newton-Raphson algorithm is used unless the \code{nsimEIM}
argument is used.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -72,12 +60,14 @@ A class of distributions which include the normal.
\emph{Scandinavian Journal of Statistics},
\bold{12}, 171--178.
+
Azzalini, A. and Capitanio, A. (1999)
Statistical applications of the multivariate skew-normal
distribution.
\emph{Journal of the Royal Statistical Society, Series B, Methodological},
\bold{61}, 579--602.
+
}
\author{ Thomas W. Yee }
@@ -85,13 +75,19 @@ distribution.
It is a good idea to use several different initial values to ensure
that the global solution is obtained.
+
This family function will be modified (hopefully soon) to handle a
location and scale parameter too.
+
}
-%\section{Warning }{
-% Numerical problems may occur.
-%}
+\section{Warning }{
+ It is well known that the EIM of Azzalini's skew-normal distribution
+  is singular as the skewness parameter tends to zero, and thus
+  causes inferential problems.
+
+
+}
\seealso{
\code{\link{snorm}},
\code{\link{normal1}},
@@ -100,19 +96,18 @@ distribution.
}
\examples{
-sdata = data.frame(y = rsnorm(nn <- 1000, shape=5))
-fit = vglm(y ~ 1, skewnormal1, sdata, trace=TRUE)
-coef(fit, matrix=TRUE)
+sdata <- data.frame(y = rsnorm(nn <- 1000, shape = 5))
+fit <- vglm(y ~ 1, skewnormal1, sdata, trace = TRUE)
+coef(fit, matrix = TRUE)
head(fitted(fit), 1)
with(sdata, mean(y))
-\dontrun{
-with(sdata, hist(y, prob=TRUE))
-x = with(sdata, seq(min(y), max(y), len=200))
-with(sdata, lines(x, dsnorm(x, shape=Coef(fit)), col="blue")) }
-
-sdata = data.frame(x = runif(nn))
-sdata = transform(sdata, y = rsnorm(nn, shape=1 + 2*x))
-fit = vglm(y ~ x, skewnormal1, sdata, trace=TRUE, crit="coef")
+\dontrun{ with(sdata, hist(y, prob = TRUE))
+x <- with(sdata, seq(min(y), max(y), len = 200))
+with(sdata, lines(x, dsnorm(x, shape = Coef(fit)), col = "blue")) }
+
+sdata <- data.frame(x = runif(nn))
+sdata <- transform(sdata, y = rsnorm(nn, shape = 1 + 2*x))
+fit <- vglm(y ~ x, skewnormal1, sdata, trace = TRUE, crit = "coef")
summary(fit)
}
\keyword{models}
diff --git a/man/smart.expression.Rd b/man/smart.expression.Rd
index c811152..c3cfa46 100644
--- a/man/smart.expression.Rd
+++ b/man/smart.expression.Rd
@@ -8,12 +8,17 @@
It requires the primary argument of the smart function to be called
\code{"x"}.
+
The list component \code{match.call} must be assigned the
value of \code{match.call()} in the smart function; this is so
that the smart function can call itself later.
+
+
}
\seealso{
\code{\link[base]{match.call}}.
+
+
}
\examples{
"my2" <- function(x, minx=min(x)) { # Here is a smart function
diff --git a/man/smart.mode.is.Rd b/man/smart.mode.is.Rd
index 28a4c6a..3585310 100644
--- a/man/smart.mode.is.Rd
+++ b/man/smart.mode.is.Rd
@@ -12,15 +12,19 @@ smart.mode.is(mode.arg=NULL)
a character string, either \code{"read"}, \code{"write"} or \code{"neutral"}.
}}
\value{
-If \code{mode.arg} is given, then either \code{TRUE} or \code{FALSE} is returned.
-If \code{mode.arg} is not given, then the mode (\code{"neutral"},
+ If \code{mode.arg} is given, then either \code{TRUE} or \code{FALSE} is returned.
+ If \code{mode.arg} is not given, then the mode (\code{"neutral"},
\code{"read"} or \code{"write"})
-is returned. Usually, the mode is \code{"neutral"}.
+ is returned. Usually, the mode is \code{"neutral"}.
+
+
}
\seealso{
\code{\link{put.smart}},
\code{\link[splines]{bs}},
\code{\link[stats]{poly}}.
+
+
}
\details{
Smart functions such as
diff --git a/man/smartpred.Rd b/man/smartpred.Rd
index 29dbf6f..4c5979f 100644
--- a/man/smartpred.Rd
+++ b/man/smartpred.Rd
@@ -28,6 +28,8 @@
\value{
Returns the usual object, but with one list/slot component called
\code{smart.prediction} containing any data-dependent parameters.
+
+
}
\section{Side Effects}{
The variables
@@ -44,6 +46,7 @@
be left in \code{smartpredenv}. At the beginning of model fitting,
these variables are deleted if present in \code{smartpredenv}.
+
During prediction, the variables
\code{.smart.prediction} and
\code{.smart.prediction.counter}
@@ -51,11 +54,13 @@
frame is re-evaluated.
After prediction, these variables are deleted.
- If the modelling function is used with argument \code{smart=FALSE}
- (e.g., \code{vglm(..., smart=FALSE)}) then smart prediction will not
+
+ If the modelling function is used with argument \code{smart = FALSE}
+ (e.g., \code{vglm(..., smart = FALSE)}) then smart prediction will not
be used, and the results should match with the original \R or S-PLUS
functions.
+
}
\details{
\R version 1.6.0 introduced a partial fix for the prediction
@@ -80,8 +85,8 @@
\author{T. W. Yee and T. J. Hastie}
\note{
In S-PLUS you will need to load in the \pkg{smartpred} library with
- the argument \code{first=T}, e.g.,
- \code{library(smartpred, lib="./mys8libs", first=T)}.
+ the argument \code{first = T}, e.g.,
+ \code{library(smartpred, lib = "./mys8libs", first = T)}.
Here, \code{mys8libs} is the name of a directory of installed packages.
To install the smartpred package in Linux/Unix, type something like
\code{Splus8 INSTALL -l ./mys8libs ./smartpred_0.8-2.tar.gz}.
@@ -153,39 +158,39 @@
\examples{
# Create some data first
-n = 20
+n <- 20
set.seed(86) # For reproducibility of the random numbers
-x = sort(runif(n))
-y = sort(runif(n))
+x <- sort(runif(n))
+y <- sort(runif(n))
\dontrun{if(is.R()) library(splines) # To get ns() in R
}
# This will work for R 1.6.0 and later, but fail for S-PLUS
-fit = lm(y ~ ns(x, df=5))
-\dontrun{plot(x, y)
+fit <- lm(y ~ ns(x, df = 5))
+\dontrun{ plot(x, y)
lines(x, fitted(fit))
-newx = seq(0, 1, len=n)
-points(newx, predict(fit, data.frame(x=newx)), type="b", col=2, err=-1)
-}
+newx <- seq(0, 1, len = n)
+points(newx, predict(fit, data.frame(x = newx)), type = "b",
+ col = 2, err = -1) }
# The following fails for R 1.6.x and later but works with smart prediction
-\dontrun{fit = lm(y ~ ns(scale(x), df=5))
-fit$smart.prediction
+fit <- lm(y ~ ns(scale(x), df = 5))
+\dontrun{ fit$smart.prediction
plot(x, y)
lines(x, fitted(fit))
-newx = seq(0, 1, len=n)
-points(newx, predict(fit, data.frame(x=newx)), type="b", col=2, err=-1)
-}
+newx <- seq(0, 1, len = n)
+points(newx, predict(fit, data.frame(x = newx)), type = "b",
+ col = 2, err = -1) }
# The following requires the VGAM package to be loaded
-\dontrun{library(VGAM)
-fit = vlm(y ~ ns(scale(x), df=5))
+\dontrun{ library(VGAM)
+fit <- vlm(y ~ ns(scale(x), df = 5))
fit at smart.prediction
plot(x, y)
lines(x, fitted(fit))
-newx = seq(0, 1, len=n)
-points(newx, predict(fit, data.frame(x=newx)), type="b", col=2, err=-1)
-}
+newx <- seq(0, 1, len = n)
+points(newx, predict(fit, data.frame(x = newx)), type = "b",
+ col = 2, err = -1) }
}
%\keyword{smart}
\keyword{models}
@@ -194,20 +199,20 @@ points(newx, predict(fit, data.frame(x=newx)), type="b", col=2, err=-1)
-%lm(..., smart=TRUE)
-%glm(..., smart=TRUE)
+%lm(..., smart = TRUE)
+%glm(..., smart = TRUE)
%ns()
%bs()
%poly()
%scale()
-%vglm(..., smart=TRUE)
-%rrvglm(..., smart=TRUE)
-%vgam(..., smart=TRUE)
-%cao(..., smart=TRUE)
-%cqo(..., smart=TRUE)
-%uqo(..., smart=TRUE)
-
-%library(smartpred, lib="./mys8libs", first=T)
+%vglm(..., smart = TRUE)
+%rrvglm(..., smart = TRUE)
+%vgam(..., smart = TRUE)
+%cao(..., smart = TRUE)
+%cqo(..., smart = TRUE)
+%uqo(..., smart = TRUE)
+
+%library(smartpred, lib = "./mys8libs", first = T)
diff --git a/man/snormUC.Rd b/man/snormUC.Rd
index 6e515e6..b2f1182 100644
--- a/man/snormUC.Rd
+++ b/man/snormUC.Rd
@@ -25,15 +25,19 @@ rsnorm(n, location = 0, scale = 1, shape = 0)
% \item{x, q}{vector of quantiles.}
% \item{p}{vector of probabilities.}
\item{n}{number of observations. Must be a single positive integer. }
+
\item{location}{
The location parameter \eqn{\xi}{xi}. A vector.
+
}
\item{scale}{
The scale parameter \eqn{\omega}{w}. A positive vector.
+
}
\item{shape}{
The shape parameter. It is called \eqn{\alpha}{alpha} in
\code{\link{skewnormal1}}.
+
}
\item{log}{
Logical.
@@ -46,7 +50,8 @@ rsnorm(n, location = 0, scale = 1, shape = 0)
See \code{\link{skewnormal1}}, which currently only estimates the shape
parameter.
More generally here, \eqn{Z = \xi + \omega Y}{Z = xi + w * Y} where
- \eqn{Y} has a standard skew-normal distribution (see \code{\link{skewnormal1}}),
+ \eqn{Y} has a standard skew-normal distribution
+ (see \code{\link{skewnormal1}}),
\eqn{\xi}{xi} is the location parameter and
\eqn{\omega}{w} is the scale parameter.
@@ -56,32 +61,35 @@ rsnorm(n, location = 0, scale = 1, shape = 0)
% \code{psnorm} gives the distribution function,
% \code{qsnorm} gives the quantile function, and
\code{rsnorm} generates random deviates.
+
}
\references{
\url{http://tango.stat.unipd.it/SN}.
+
}
\author{ T. W. Yee }
\note{
- The default values of all three parameters corresponds to the skew-normal
- being the standard normal distribution.
+  The default values of all three parameters correspond to the
+ skew-normal being the standard normal distribution.
}
\seealso{
\code{\link{skewnormal1}}.
+
}
\examples{
-\dontrun{
-n = 200 # grid resolution
-shape = 7
-x = seq(-4, 4, len=n)
-plot(x, dsnorm(x, shape=shape), type="l", col="blue", las=1, ylab="")
-abline(v=0, h=0, lty="dashed", col="darkgreen")
-lines(x, dnorm(x), col="red")
-legend(-3.5, 0.6, leg=c(paste("Blue = dsnorm(x, ", shape,")", sep=""),
- "standard normal density"), lty=1, col=c("blue","red"))
-}
+\dontrun{ N <- 200 # grid resolution
+shape <- 7
+x <- seq(-4, 4, len = N)
+plot(x, dsnorm(x, shape = shape), type = "l", col = "blue", las = 1,
+ ylab = "", lty = 1, lwd = 2)
+abline(v = 0, h = 0)
+lines(x, dnorm(x), col = "orange", lty = 2, lwd = 2)
+legend("topleft", leg = c(paste("Blue = dsnorm(x, ", shape,")", sep = ""),
+ "Orange = standard normal density"), lty = 1:2, lwd = 2,
+ col = c("blue", "orange")) }
}
\keyword{distribution}
diff --git a/man/sratio.Rd b/man/sratio.Rd
index 477a28d..c1f61ec 100644
--- a/man/sratio.Rd
+++ b/man/sratio.Rd
@@ -61,12 +61,14 @@ sratio(link = "logit", earg = list(),
Continuation ratios deal with quantities such as
\code{logit(P[Y>j|Y>=j])}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
\code{\link{rrvglm}}
and \code{\link{vgam}}.
+
}
\references{
Agresti, A. (2002)
@@ -100,15 +102,19 @@ contains further information and examples.
returned by \code{vglm}/\code{vgam}/\code{rrvglm} is the matrix
of counts.
+
For a nominal (unordered) factor response, the multinomial
logit model (\code{\link{multinomial}}) is more appropriate.
+
Here is an example of the usage of the \code{parallel} argument.
If there are covariates \code{x1}, \code{x2} and \code{x3}, then
\code{parallel = TRUE ~ x1 + x2 -1} and
\code{parallel = FALSE ~ x3} are equivalent. This would constrain
the regression coefficients for \code{x1} and \code{x2} to be
equal; those of the intercepts and \code{x3} would be different.
+
+
}
\section{Warning }{
No check is made to verify that the response is ordinal;
@@ -128,12 +134,12 @@ contains further information and examples.
}
\examples{
-pneumo = transform(pneumo, let=log(exposure.time))
-(fit = vglm(cbind(normal,mild,severe) ~ let, sratio(parallel=TRUE), pneumo))
-coef(fit, matrix=TRUE)
+pneumo = transform(pneumo, let = log(exposure.time))
+(fit = vglm(cbind(normal,mild,severe) ~ let, sratio(parallel = TRUE), pneumo))
+coef(fit, matrix = TRUE)
constraints(fit)
predict(fit)
-predict(fit, untransform=TRUE)
+predict(fit, untransform = TRUE)
}
\keyword{models}
\keyword{regression}
diff --git a/man/studentt.Rd b/man/studentt.Rd
index c84c904..104823a 100644
--- a/man/studentt.Rd
+++ b/man/studentt.Rd
@@ -1,58 +1,105 @@
\name{studentt}
\alias{studentt}
+\alias{studentt2}
+\alias{studentt3}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Student t Distribution }
\description{
- Estimation of the degrees of freedom for a Student t distribution.
+ Estimation of parameters in a Student t distribution.
+
}
\usage{
-studentt(link.df = "loglog", earg=list(), idf=NULL, nsimEIM=300)
+studentt(ldf = "loglog", edf = list(), idf = NULL, tol1 = 0.1,
+ method.init = 1)
+studentt2(df = Inf,
+ llocation = "identity", elocation = list(),
+ lscale = "loge", escale = list(),
+ ilocation = NULL, iscale = NULL,
+ method.init = 1, zero = -2)
+studentt3(llocation = "identity", elocation = list(),
+ lscale = "loge", escale = list(),
+ ldf = "loglog", edf = list(),
+ ilocation = NULL, iscale = NULL, idf = NULL,
+ method.init = 1, zero = -(2:3))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link.df}{
- Parameter link function for the degrees of freedom \eqn{\nu}{nu}.
+ \item{llocation, lscale, ldf}{
+ Parameter link functions for each parameter,
+ e.g., for degrees of freedom \eqn{\nu}{nu}.
See \code{\link{Links}} for more choices.
- The default ensures the parameter is greater than unity.
+   The defaults ensure the parameters are in range.
+ A \code{\link{loglog}} link keeps the degrees of freedom greater
+ than unity; see below.
}
- \item{earg}{
- List. Extra argument for the link.
+ \item{elocation, escale, edf}{
+ List. Extra arguments for the links.
See \code{earg} in \code{\link{Links}} for general information.
}
- \item{idf}{
- Optional initial value.
- If given, its value must be greater than 1.
+ \item{ilocation, iscale, idf}{
+ Optional initial values.
+ If given, the values must be in range.
The default is to compute an initial value internally.
}
- \item{nsimEIM}{
+ \item{tol1}{
+ A positive value, the tolerance for testing whether an
+ initial value is 1.
+ Best to leave this argument alone.
+
+ }
+
+ \item{df}{
+ Numeric, user-specified degrees of freedom.
+ It may be of length equal to the number of columns of a response
+ matrix.
+
+ }
+ \item{method.init, zero}{
See \code{\link{CommonVGAMffArguments}}.
}
}
\details{
- The density function is
- \deqn{f(y) = \frac{\Gamma((\nu+1)/2)}{\sqrt{\nu \pi} \Gamma(\nu/2)}
+ The Student t density function is
+ \deqn{f(y;\nu) = \frac{\Gamma((\nu+1)/2)}{\sqrt{\nu \pi} \Gamma(\nu/2)}
\left(1 + \frac{y^2}{\nu} \right)^{-(\nu+1)/2}}{%
- f(y) = (gamma((nu+1)/2) / (sqrt(nu*pi) gamma(nu/2))) *
- (1 + y^2 / nu)^{-(nu+1)/2} }
+ f(y;nu) = (gamma((nu+1)/2) / (sqrt(nu*pi) gamma(nu/2))) *
+ (1 + y^2 / nu)^{-(nu+1)/2}}
for all real \eqn{y}.
Then \eqn{E(Y)=0} if \eqn{\nu>1}{nu>1} (returned as the fitted values),
and \eqn{Var(Y)= \nu/(\nu-2)}{Var(Y)= nu/(nu-2)}
for \eqn{\nu > 2}{nu > 2}.
When \eqn{\nu=1}{nu=1} then the Student \eqn{t}-distribution
- corresponds to the standard Cauchy distribution.
- The degrees of freedom is treated as a parameter to be estimated,
- and as real and not integer.
-
- Simulation is used to estimate the EIM.
- Consequently the results will be reproducible only if
- a function such as \code{\link[base:Random]{set.seed}} is used.
- Increasing the value of \code{nsimEIM} will give more accurate results.
- In general convergence will be slow, especially when there are
+ corresponds to the standard Cauchy distribution,
+ \code{\link{cauchy1}}.
+ When \eqn{\nu=2}{nu=2} with a scale parameter of \code{sqrt(2)} then
+ the Student \eqn{t}-distribution
+ corresponds to the standard Koenker distribution,
+ \code{\link{koenker}}.
+ The degrees of freedom can be treated as a parameter to be estimated,
+ and as a real and not an integer.
+ The Student t distribution is used for a variety of reasons
+ in statistics, including robust regression.
+
+
+ Let \eqn{Y = (T - \mu) / \sigma}{Y = (T - mu) / sigma} where
+ \eqn{\mu}{mu} and \eqn{\sigma}{sigma} are the location
+ and scale parameters respectively.
+ Then \code{studentt3} estimates the location, scale and degrees of freedom parameters.
+  And \code{studentt2} estimates the location and scale parameters for a user-specified
+ degrees of freedom, \code{df}.
+ And \code{studentt} estimates the degrees of freedom parameter only.
+ The fitted values are the location parameters.
+ By default the linear/additive predictors are
+ \eqn{(\mu, \log(\sigma), \log\log(\nu))^T}{(mu, log(sigma), log log(nu))^T}
+ or subsets thereof.
+
+
+ In general convergence can be slow, especially when there are
covariates.
}
@@ -61,40 +108,66 @@ studentt(link.df = "loglog", earg=list(), idf=NULL, nsimEIM=300)
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
-Evans, M., Hastings, N. and Peacock, B. (2000)
-\emph{Statistical Distributions},
-New York: Wiley-Interscience, Third edition.
+
Student (1908)
The probable error of a mean.
\emph{Biometrika}, \bold{6}, 1--25.
+
+Zhu, D. and Galbraith, J. W. (2010)
+A generalized asymmetric Student-\emph{t} distribution with
+application to financial econometrics.
+\emph{Journal of Econometrics}, \bold{157}, 297--305.
+
+
}
\author{ T. W. Yee }
\note{
+ \code{studentt3} and \code{studentt2} can handle multiple responses.
+
+
Practical experience has shown reasonably good initial values are
- required. If convergence failure occurs try using \code{idf}.
- Local solutions are also possible.
+ required. If convergence failure occurs try using arguments
+ such as \code{idf}.
+ Local solutions are also possible, especially when
+ the degrees of freedom is close to unity or
+ the scale parameter is close to zero.
+
A standard normal distribution corresponds to a \emph{t} distribution
- with infinite degrees of freedom. Consequently, if the data is close
- to normal, there may be convergence problems.
+ with infinite degrees of freedom. Consequently, if the data is close
+ to normal, there may be convergence problems; best to use
+ \code{\link{normal1}} instead.
+
}
\seealso{
\code{\link{normal1}},
- \code{\link{loglog}},
+ \code{\link{cauchy1}},
+ \code{\link{logistic}},
+ \code{\link{huber}},
+ \code{\link{koenker}},
\code{\link[stats]{TDist}}.
}
\examples{
-sdata = data.frame(x = runif(nn <- 1000))
-sdata = transform(sdata, y = rt(nn, df=exp(exp(0.5 - x))))
-fit = vglm(y ~ x, studentt, sdata, trace=TRUE)
-coef(fit, matrix=TRUE)
+tdata <- data.frame(x2 = runif(nn <- 1000))
+tdata <- transform(tdata, y1 = rt(nn, df = exp(exp(0.5 - x2))),
+ y2 = rt(nn, df = exp(exp(0.5 - x2))))
+fit1 <- vglm(y1 ~ x2, studentt, tdata, trace = TRUE)
+coef(fit1, matrix = TRUE)
+
+fit2 <- vglm(cbind(y1, y2) ~ x2, studentt3, tdata, trace = TRUE)
+coef(fit2, matrix = TRUE)
}
\keyword{models}
\keyword{regression}
+
+%Evans, M., Hastings, N. and Peacock, B. (2000)
+%\emph{Statistical Distributions},
+%New York: Wiley-Interscience, Third edition.
diff --git a/man/tikuv.Rd b/man/tikuv.Rd
index 1eb2716..892da29 100644
--- a/man/tikuv.Rd
+++ b/man/tikuv.Rd
@@ -73,9 +73,10 @@ tikuv(d, lmean="identity", lsigma="loge", emean=list(), esigma=list(),
}
\references{
- Akkaya, A. D. and Tiku, M. L. (2006)
+ Akkaya, A. D. and Tiku, M. L. (2008)
Short-tailed distributions and inliers.
- \emph{Test}, \bold{15}(2), in press.
+ \emph{Test}, \bold{17}, 282--296.
+
Tiku, M. L. and Vaughan, D. C. (1999)
A family of short-tailed symmetric distributions.
diff --git a/man/tobit.Rd b/man/tobit.Rd
index f8bdbdb..95dddc7 100644
--- a/man/tobit.Rd
+++ b/man/tobit.Rd
@@ -6,8 +6,8 @@
Fits a Tobit model to a univariate response.
}
\usage{
-tobit(Lower = 0, Upper = Inf, lmu="identity", lsd="loge",
- emu=list(), esd=list(), imethod=1, zero=2)
+tobit(Lower = 0, Upper = Inf, lmu = "identity", lsd = "loge",
+ emu = list(), esd = list(), method.init = 1, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -40,7 +40,7 @@ tobit(Lower = 0, Upper = Inf, lmu="identity", lsd="loge",
See \code{earg} in \code{\link{Links}} for general information.
}
- \item{imethod}{
+ \item{method.init}{
Initialization method. Either 1 or 2, this specifies
two methods for obtaining initial values for the parameters.
@@ -48,7 +48,7 @@ tobit(Lower = 0, Upper = Inf, lmu="identity", lsd="loge",
\item{zero}{
An integer vector, containing the value 1 or 2. If so,
the mean or standard deviation respectively are modelled as an intercept only.
- Setting \code{zero=NULL} means both linear/additive predictors
+ Setting \code{zero = NULL} means both linear/additive predictors
are modelled as functions of the explanatory variables.
}
@@ -108,21 +108,21 @@ Estimation of relationships for limited dependent variables.
\code{\link{dcnormal1}}.
}
\examples{
-tdata = data.frame(x = seq(-1, 1, len=(nn <- 1000)))
+tdata = data.frame(x = seq(-1, 1, len = (nn <- 1000)))
foo = function(x) 1 + 4*x
tdata = transform(tdata, ystar = foo(x) + rnorm(nn))
Lower = 1; Upper = 4
-tdata = transform(tdata, y = pmax(ystar, Lower))
-tdata = transform(tdata, y = pmin(y, Upper))
-with(tdata, table(y==Lower | y==Upper)) # How many censored values?
+tdata = transform(tdata, y = pmax(ystar, Lower))
+tdata = transform(tdata, y = pmin(y, Upper))
+with(tdata, table(y == Lower | y == Upper)) # How many censored values?
-fit = vglm(y ~ x, tobit(Lower=Lower, Upper=Upper), tdata, trace=TRUE)
+fit = vglm(y ~ x, tobit(Lower = Lower, Upper = Upper), tdata, trace = TRUE)
table(fit at extra$censoredL)
table(fit at extra$censoredU)
-coef(fit, matrix=TRUE)
+coef(fit, matrix = TRUE)
summary(fit)
\dontrun{
-with(tdata, plot(x, y, main="Tobit model", las=1))
+with(tdata, plot(x, y, main = "Tobit model", las=1))
legend(-0.9, 3, c("Truth", "Estimate"), col=c("blue", "red"), lwd=2)
with(tdata, lines(x, foo(x), col="blue", lwd=2))
with(tdata, lines(x, fitted(fit), col="red", lwd=2, lty="dashed")) }
diff --git a/man/undocumented-methods.Rd b/man/undocumented-methods.Rd
index e43504c..cf1c936 100644
--- a/man/undocumented-methods.Rd
+++ b/man/undocumented-methods.Rd
@@ -209,6 +209,7 @@
\alias{summary,grc-method}
\alias{summary,cao-method}
\alias{summary,qrrvglm-method}
+\alias{summary,rcam-method}
\alias{summary,rrvglm-method}
\alias{summary,vgam-method}
\alias{summary,vglm-method}
diff --git a/man/vgam.Rd b/man/vgam.Rd
index ba85130..fbcfcbe 100644
--- a/man/vgam.Rd
+++ b/man/vgam.Rd
@@ -251,30 +251,31 @@ The \code{VGAM} Package.
\code{\link{cao}}.
}
-\examples{
-# Nonparametric proportional odds model
-pneumo = transform(pneumo, let=log(exposure.time))
-vgam(cbind(normal,mild,severe) ~ s(let), cumulative(par=TRUE), pneumo)
+\examples{ # Nonparametric proportional odds model
+pneumo = transform(pneumo, let = log(exposure.time))
+vgam(cbind(normal, mild, severe) ~ s(let), cumulative(par = TRUE), pneumo)
# Nonparametric logistic regression
-fit = vgam(agaaus ~ s(altitude, df=2), binomialff, hunua)
-\dontrun{
-plot(fit, se=TRUE)
-}
+fit = vgam(agaaus ~ s(altitude, df = 2), binomialff, hunua)
+\dontrun{ plot(fit, se = TRUE) }
+pfit = predict(fit, type = "terms", raw = TRUE, se = TRUE)
+names(pfit)
+head(pfit$fitted)
+head(pfit$se.fit)
+pfit$df
+pfit$sigma
# Fit two species simultaneously
-fit2 = vgam(cbind(agaaus, kniexc) ~ s(altitude, df=c(2,3)),
- binomialff(mv=TRUE), hunua)
-coef(fit2, mat=TRUE) # Not really interpretable
-\dontrun{
-plot(fit2, se=TRUE, overlay=TRUE, lcol=1:2, scol=1:2)
+fit2 = vgam(cbind(agaaus, kniexc) ~ s(altitude, df = c(2, 3)),
+ binomialff(mv = TRUE), hunua)
+coef(fit2, mat = TRUE) # Not really interpretable
+\dontrun{ plot(fit2, se = TRUE, overlay = TRUE, lcol = 1:2, scol = 1:2)
ooo = with(hunua, order(altitude))
-with(hunua, matplot(altitude[ooo], fitted(fit2)[ooo,], type="l", lwd=2,
- xlab="Altitude (m)", ylab="Probability of presence", las=1,
- main="Two plant species' response curves", ylim=c(0,.8)))
-with(hunua, rug(altitude))
-}
+with(hunua, matplot(altitude[ooo], fitted(fit2)[ooo,], ylim = c(0, .8),
+ xlab = "Altitude (m)", ylab = "Probability of presence", las = 1,
+ main = "Two plant species' response curves", type = "l", lwd = 2))
+with(hunua, rug(altitude)) }
}
\keyword{models}
\keyword{regression}
diff --git a/man/vglm.Rd b/man/vglm.Rd
index 5f5ea29..4b20816 100644
--- a/man/vglm.Rd
+++ b/man/vglm.Rd
@@ -4,7 +4,7 @@
\title{Fitting Vector Generalized Linear Models }
\description{
\code{vglm} is used to fit vector generalized linear models (VGLMs).
- This is a large class of models that includes
+ This is a very large class of models that includes
generalized linear models (GLMs) as a special case.
}
@@ -128,16 +128,28 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
}
\item{constraints}{
an optional list of constraint matrices.
- The components of the list must be named with the term it corresponds
- to (and it must match in character format exactly).
- Each constraint matrix must have \eqn{M} rows, and be of full-column
- rank. By default, constraint matrices are the \eqn{M} by \eqn{M}
- identity
+ The components of the list must be named with the term it corresponds to
+ (and it must match in character format exactly).
+ There are two types of input: \code{"lm"}-type and \code{"vlm"}-type.
+ The former is a subset of the latter.
+ The former has a matrix for each term of the LM matrix.
+ The latter has a matrix for each column of the VLM matrix.
+ After fitting, the \code{\link{constraints}} extractor function may be applied;
+ it returns
+ the \code{"vlm"}-type list of constraint matrices by default.
+ If \code{"lm"}-type are returned by \code{\link{constraints}} then
+ these can be fed into this argument and it should give the
+ same model as before.
+
+
+ Each constraint matrix must have \eqn{M} rows, and be of full-column rank.
+ By default, constraint matrices are the \eqn{M} by \eqn{M} identity
matrix unless arguments in the family function itself override
- these values.
+ these values, e.g., \code{parallel} (see \code{\link{CommonVGAMffArguments}}).
If \code{constraints} is used it must contain \emph{all} the
terms; an incomplete list is not accepted.
+
}
\item{extra}{
an optional list with any extra information that might be needed by
@@ -364,8 +376,8 @@ vglm(cbind(normal, mild, severe) ~ let, multinomial, pneumo)
# Example 3. Proportional odds model
-fit3 = vglm(cbind(normal,mild,severe) ~ let, propodds, pneumo, trace=TRUE)
-coef(fit3, matrix=TRUE)
+fit3 = vglm(cbind(normal,mild,severe) ~ let, propodds, pneumo, trace = TRUE)
+coef(fit3, matrix = TRUE)
constraints(fit3)
model.matrix(fit3, type="lm") # LM model matrix
model.matrix(fit3) # Larger VGLM (or VLM) model matrix
@@ -373,7 +385,7 @@ model.matrix(fit3) # Larger VGLM (or VLM) model matrix
# Example 4. Bivariate logistic model
fit4 = vglm(cbind(nBnW, nBW, BnW, BW) ~ age, binom2.or, coalminers)
-coef(fit4, matrix=TRUE)
+coef(fit4, matrix = TRUE)
fit4 at y # Response are proportions
weights(fit4, type="prior")
@@ -387,16 +399,16 @@ eyesdat = round(data.frame(lop = runif(nn),
eyesdat = transform(eyesdat, eta1 = -1+2*lop,
eta2 = -1+2*lop)
eyesdat = transform(eyesdat,
- leye = rbinom(nn, size=1, prob=logit(eta1, inv=TRUE)),
- reye = rbinom(nn, size=1, prob=logit(eta2, inv=TRUE)))
+ leye = rbinom(nn, size=1, prob=logit(eta1, inv = TRUE)),
+ reye = rbinom(nn, size=1, prob=logit(eta2, inv = TRUE)))
head(eyesdat)
fit5 = vglm(cbind(leye,reye) ~ op,
- binom2.or(exchangeable=TRUE, zero=3),
- data=eyesdat, trace=TRUE,
+ binom2.or(exchangeable = TRUE, zero=3),
+ data=eyesdat, trace = TRUE,
xij = list(op ~ lop + rop + fill(lop)),
form2 = ~ op + lop + rop + fill(lop))
coef(fit5)
-coef(fit5, matrix=TRUE)
+coef(fit5, matrix = TRUE)
constraints(fit5)
}
\keyword{models}
@@ -404,7 +416,7 @@ constraints(fit5)
%eyesdat$leye = ifelse(runif(n) < 1/(1+exp(-1+2*eyesdat$lop)), 1, 0)
%eyesdat$reye = ifelse(runif(n) < 1/(1+exp(-1+2*eyesdat$rop)), 1, 0)
-%coef(fit, matrix=TRUE, compress=FALSE)
+%coef(fit, matrix = TRUE, compress = FALSE)
@@ -417,20 +429,20 @@ constraints(fit5)
%POLY3 = function(x, ...) {
% # A cubic; ensures that the basis functions are the same.
% poly(c(x,...), 3)[1:length(x),]
-% head(poly(c(x,...), 3), length(x), drop=FALSE)
+% head(poly(c(x,...), 3), length(x), drop = FALSE)
%}
%
-%fit6 = vglm(cbind(leye,reye) ~ POLY3(op), trace=TRUE,
-% fam = binom2.or(exchangeable=TRUE, zero=3), data=eyesdat,
+%fit6 = vglm(cbind(leye,reye) ~ POLY3(op), trace = TRUE,
+% fam = binom2.or(exchangeable = TRUE, zero=3), data=eyesdat,
% xij = list(POLY3(op) ~ POLY3(lop,rop) + POLY3(rop,lop) +
% fill(POLY3(lop,rop))),
% form2 = ~ POLY3(op) + POLY3(lop,rop) + POLY3(rop,lop) +
% fill(POLY3(lop,rop)))
%coef(fit6)
-%coef(fit6, matrix=TRUE)
+%coef(fit6, matrix = TRUE)
%head(predict(fit6))
%\dontrun{
-%plotvgam(fit6, se=TRUE) # Wrong since it plots against op, not lop.
+%plotvgam(fit6, se = TRUE) # Wrong since it plots against op, not lop.
%}
%
%
@@ -442,7 +454,7 @@ constraints(fit5)
% X2=runif(n), Z2=runif(n))
%mydat = round(mydat, dig=2)
%fit7 = vglm(ymat ~ X2 + Z2, data=mydat, crit="c",
-% fam = dirichlet(parallel=TRUE), # Intercept is also parallel.
+% fam = dirichlet(parallel = TRUE), # Intercept is also parallel.
% xij = list(Z2 ~ z1 + z2 + z3 + z4,
% X2 ~ x1 + x2 + x3 + x4),
% form2 = ~ Z2 + z1 + z2 + z3 + z4 +
@@ -450,7 +462,7 @@ constraints(fit5)
%head(model.matrix(fit7, type="lm")) # LM model matrix
%head(model.matrix(fit7, type="vlm")) # Big VLM model matrix
%coef(fit7)
-%coef(fit7, matrix=TRUE)
+%coef(fit7, matrix = TRUE)
%max(abs(predict(fit7)-predict(fit7, new=mydat))) # Predicts correctly
%summary(fit7)
diff --git a/man/vglmff-class.Rd b/man/vglmff-class.Rd
index ed64716..045a8cc 100644
--- a/man/vglmff-class.Rd
+++ b/man/vglmff-class.Rd
@@ -30,7 +30,7 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
Object of class \code{"function"}
returning the deviance of the model. This slot is optional.
If present, the function must have arguments
- \code{function(mu, y, w, residuals = FALSE, eta, extra=NULL)}.
+ \code{function(mu, y, w, residuals = FALSE, eta, extra = NULL)}.
Deviance residuals are returned if \code{residuals = TRUE}.
}
@@ -47,6 +47,16 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
\code{vgam}.
}
+ \item{\code{infos}:}{
+ Object of class \code{"function"} which
+ returns a list with components such as \code{Musual}.
+ At present only a very few \pkg{VGAM} family functions have this
+ feature implemented.
+  Those that do, do not require specifying the \code{Musual}
+ argument when used with \code{\link{rcam}}.
+
+
+ }
\item{\code{initialize}:}{
Object of class \code{"expression"} used
to perform error checking (especially for the variable \code{y})
@@ -60,7 +70,7 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
Object of class \code{"function"} which
returns the fitted values, given the linear/additive predictors.
The function must have arguments
- \code{function(eta, extra=NULL)}.
+ \code{function(eta, extra = NULL)}.
}
\item{\code{last}:}{
@@ -76,17 +86,18 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
Object of class \code{"function"} which,
given the fitted values, returns the linear/additive predictors.
If present, the function must have arguments
- \code{function(mu, extra=NULL)}.
+ \code{function(mu, extra = NULL)}.
}
\item{\code{loglikelihood}:}{
Object of class \code{"function"}
returning the log-likelihood of the model. This slot is optional.
If present, the function must have arguments
- \code{function(mu, y, w, residuals = FALSE, eta, extra=NULL)}.
+ \code{function(mu, y, w, residuals = FALSE, eta, extra = NULL)}.
The argument \code{residuals} can be ignored because
log-likelihood residuals aren't defined.
+
}
\item{\code{middle}:}{
Object of class \code{"expression"} to insert
@@ -99,6 +110,7 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
code at a special position in \code{vglm.fit} or
\code{vgam.fit}.
+
}
\item{\code{summary.dispersion}:}{
Object of class \code{"logical"}
@@ -107,6 +119,7 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
parameter. It is \code{TRUE} for most models except for nonlinear
regression models.
+
}
\item{\code{vfamily}:}{
Object of class \code{"character"}
@@ -120,6 +133,7 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
out the \code{vglm} coefficients plus \code{"VGAMcat"}
information as well.
+
}
\item{\code{deriv}:}{
Object of class \code{"expression"} which
@@ -130,6 +144,7 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
\eqn{\bold{d}_i}{\bold{d}i} vector. Thus each row of the
matrix returned by this slot is such a vector.
+
}
\item{\code{weight}:}{
Object of class \code{"expression"} which
@@ -166,11 +181,13 @@ Yee, T. W. and Wild, C. J. (1996)
Vector generalized additive models.
\emph{Journal of the Royal Statistical Society, Series B, Methodological},
\bold{58}, 481--493.
-
+
+
\url{http://www.stat.auckland.ac.nz/~yee} contains further
information on how to write \pkg{VGAM} family functions.
The file is amongst other \pkg{VGAM} PDF documentation.
+
}
\author{ Thomas W. Yee }
\note{
@@ -181,6 +198,7 @@ The file is amongst other \pkg{VGAM} PDF documentation.
\code{eval(substitute( function(...) { ... }, list(...) )) }
for functions.
+
A unified method of handling arguments is to use
\code{match.arg}. This allows, for example,
\code{vglm(..., family = cratio(link=logit))}
@@ -188,16 +206,20 @@ The file is amongst other \pkg{VGAM} PDF documentation.
\code{vglm(..., family = cratio(link="logi"))}
to be equivalent (Nb. there is a \code{logit} function).
+
The \code{extra} argument in
\code{inverse}, \code{link}, \code{deviance}, \code{loglikelihood}, etc.
matches with the argument \code{extra}
in \code{vglm}, \code{vgam} and \code{rrvglm}. This allows input
to be fed into all slots of a \pkg{VGAM} family function.
- The expression \code{derivative} is evaluated immediately prior to
- \code{weight}, so there is provision for re-use of variables etc.
- Programmers must be careful to choose variable names that do not
- interfere with \code{vglm.fit}, \code{vgam.fit} etc.
+
+ The expression \code{derivative} is evaluated immediately
+ prior to \code{weight}, so there is provision for re-use
+ of variables etc. Programmers must be careful to choose
+ variable names that do not interfere with \code{vglm.fit},
+ \code{vgam.fit} etc.
+
Programmers of \pkg{VGAM} family functions are encouraged
to keep to previous conventions regarding the naming of arguments,
@@ -206,11 +228,13 @@ The file is amongst other \pkg{VGAM} PDF documentation.
\code{zero} for allowing some of the
linear/additive predictors to be an intercept term only, etc.
- In general, Fisher-scoring is recommended over Newton-Raphson where
- tractable. Although usually slightly slower in convergence,
- the weight matrices from
- using the expected information are positive-definite over a larger
+
+ In general, Fisher-scoring is recommended over
+ Newton-Raphson where tractable. Although usually slightly
+ slower in convergence, the weight matrices from using the
+ expected information are positive-definite over a larger
parameter space.
+
}
@@ -221,13 +245,15 @@ The file is amongst other \pkg{VGAM} PDF documentation.
}
\seealso{
-\code{\link{vglm}},
-\code{\link{vgam}},
-\code{\link{rrvglm}}.
+ \code{\link{vglm}},
+ \code{\link{vgam}},
+ \code{\link{rrvglm}},
+ \code{\link{rcam}}.
+
}
\examples{
cratio()
-cratio(link="cloglog")
-cratio(link=cloglog, reverse=TRUE)
+cratio(link = "cloglog")
+cratio(link = cloglog, reverse = TRUE)
}
\keyword{classes}
diff --git a/man/vonmises.Rd b/man/vonmises.Rd
index 6f0be1d..d351b87 100644
--- a/man/vonmises.Rd
+++ b/man/vonmises.Rd
@@ -107,7 +107,7 @@ New York: Wiley-Interscience, Third edition.
}
\section{Warning }{
- Numerically, the von~Mises can be difficult to fit because of a
+ Numerically, the von Mises can be difficult to fit because of a
log-likelihood having multiple maxima.
The user is therefore encouraged to try different starting values,
i.e., make use of \code{ilocation} and \code{iscale}.
diff --git a/man/wald.Rd b/man/wald.Rd
index fc9f93a..08d24ba 100644
--- a/man/wald.Rd
+++ b/man/wald.Rd
@@ -7,7 +7,7 @@ Estimates the parameter of the standard Wald distribution
by maximum likelihood estimation.
}
\usage{
-wald(link.lambda="loge", earg=list(), init.lambda=NULL)
+wald(link.lambda = "loge", earg = list(), init.lambda = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -37,16 +37,19 @@ wald(link.lambda="loge", earg=list(), init.lambda=NULL)
exp(-lambda*(y-1)^2/(2*y))
}
where \eqn{y>0} and \eqn{\lambda>0}{lambda>0}.
- The mean of \eqn{Y} is \eqn{1} (returned as the fitted values) and its variance is
+ The mean of \eqn{Y} is \eqn{1}
+ (returned as the fitted values) and its variance is
\eqn{1/\lambda}{1/lambda}.
By default, \eqn{\eta=\log(\lambda)}{eta=log(lambda)}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
@@ -55,23 +58,26 @@ Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
Volume 1,
New York: Wiley.
+
}
\author{ T. W. Yee }
\note{
The \pkg{VGAM} family function \code{\link{inv.gaussianff}}
estimates the location parameter \eqn{\mu}{mu} too.
+
}
\seealso{
\code{\link{inv.gaussianff}}.
+
}
\examples{
-wdata = data.frame(y = rgamma(n=1000, shape=1)) # Not inverse Gaussian!!
-fit = vglm(y ~ 1, wald(init=0.2), wdata, trace=TRUE)
-coef(fit, matrix=TRUE)
+wdata <- data.frame(y = rgamma(n = 1000, shape = 1)) # Not inverse Gaussian!!
+fit <- vglm(y ~ 1, wald(init = 0.2), wdata, trace = TRUE)
+coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
}
diff --git a/man/weibull.Rd b/man/weibull.Rd
index 1d69162..9afa1a2 100644
--- a/man/weibull.Rd
+++ b/man/weibull.Rd
@@ -1,5 +1,7 @@
\name{weibull}
\alias{weibull}
+%\alias{weibullff}
+%\alias{weibull.lsh}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Weibull Distribution Family Function }
\description{
@@ -10,7 +12,7 @@
\usage{
weibull(lshape = "loge", lscale = "loge",
eshape = list(), escale = list(),
- ishape = NULL, iscale = NULL, nrfs = 1, imethod=1, zero = 2)
+ ishape = NULL, iscale = NULL, nrfs = 1, method.init = 1, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -40,7 +42,7 @@ weibull(lshape = "loge", lscale = "loge",
positive-definite working weights.
}
- \item{imethod}{
+ \item{method.init}{
Initialization method used if there are censored observations.
Currently only the values 1 and 2 are allowed.
}
@@ -48,7 +50,7 @@ weibull(lshape = "loge", lscale = "loge",
An integer specifying which linear/additive predictor is to be modelled
as an intercept only. The value must be from the set \{1,2\},
which correspond to the shape and scale parameters respectively.
- Setting \code{zero=NULL} means none of them.
+ Setting \code{zero = NULL} means none of them.
}
}
@@ -80,7 +82,7 @@ weibull(lshape = "loge", lscale = "loge",
(according to Kleiber and Kotz (2003)).
If this is violated then a warning message is issued.
One can enforce \eqn{a>2} by choosing \code{lshape = "logoff"}
- and \code{eshape=list(offset=-2)}.
+ and \code{eshape = list(offset = -2)}.
}
diff --git a/man/weightsvglm.Rd b/man/weightsvglm.Rd
index fec9f50..52d9b8e 100644
--- a/man/weightsvglm.Rd
+++ b/man/weightsvglm.Rd
@@ -107,26 +107,25 @@ weightsvglm(object, type = c("prior", "working"),
\code{\link{vglm}}.
}
\examples{
-pneumo = transform(pneumo, let=log(exposure.time))
+pneumo = transform(pneumo, let = log(exposure.time))
(fit = vglm(cbind(normal, mild, severe) ~ let,
- cumulative(parallel=TRUE, reverse=TRUE), pneumo))
+ cumulative(parallel = TRUE, reverse = TRUE), pneumo))
fit at y # These are sample proportions
-weights(fit, type="prior", matrix=FALSE) # Number of observations
+weights(fit, type = "prior", matrix = FALSE) # Number of observations
# Look at the working residuals
-n = nrow(model.matrix(fit, type="lm"))
+nn = nrow(model.matrix(fit, type = "lm"))
M = ncol(predict(fit))
temp = weights(fit, type="working", deriv=TRUE)
wz = m2adefault(temp$weights, M=M) # In array format
-wzinv = array(apply(wz, 3, solve), c(M,M,n))
-wresid = matrix(NA, n, M) # working residuals
-for(i in 1:n)
- wresid[i,] = wzinv[,,i,drop=TRUE] \%*\% temp$deriv[i,]
-max(abs(c(resid(fit, type="w")) - c(wresid))) # Should be 0
-
-z = predict(fit) + wresid # Adjusted dependent vector
-z
+wzinv = array(apply(wz, 3, solve), c(M, M, nn))
+wresid = matrix(NA, nn, M) # Working residuals
+for(ii in 1:nn)
+ wresid[ii,] = wzinv[,,ii, drop = TRUE] \%*\% temp$deriv[ii,]
+max(abs(c(resid(fit, type = "w")) - c(wresid))) # Should be 0
+
+(z <- predict(fit) + wresid) # Adjusted dependent vector
}
\keyword{models}
\keyword{regression}
diff --git a/man/wffc.P2star.Rd b/man/wffc.P2star.Rd
index 13aecf3..8e0d732 100644
--- a/man/wffc.P2star.Rd
+++ b/man/wffc.P2star.Rd
@@ -3,34 +3,54 @@
\alias{wffc.P1star}
\alias{wffc.P2}
\alias{wffc.P2star}
+\alias{wffc.P3}
+\alias{wffc.P3star}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Point System for the 2008 World Fly Fishing Championships }
\description{
Point system for the 2008 World Fly Fishing Championships:
- current and proposed.
+ current and some proposals.
}
\usage{
-wffc.P1(length, min.eligible = 0.18)
-wffc.P1star(length, min.eligible = 0.18)
-wffc.P2(length, min.eligible = 0.18)
-wffc.P2star(length, min.eligible = 0.18)
+wffc.P1(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
+wffc.P2(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
+wffc.P3(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
+wffc.P1star(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
+wffc.P2star(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
+wffc.P3star(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{length}{ Length of the fish, in metres. Numeric vector. }
- \item{min.eligible}{ Currently the regulations stipulate that the
- smallest eligible fish is 180 mm. }
+ \item{length}{ Length of the fish, in meters. Numeric vector. }
+ \item{c1}{ Points added to each eligible fish. }
+ \item{min.eligible}{ The 2008 WFFC regulations stipulated that the
+ smallest eligible fish was 0.180 m, which is 180 mm. }
+ \item{ppm}{ Points per meter of length of the fish. }
+
}
\details{
The official website contains a document with the official rules and
regulations of the competition.
- The function \code{wffc.P1()} implements the current WFFC point system.
+ The function \code{wffc.P1()} implements the current WFFC point system,
+ and is `discrete' in that fish lengths are rounded up to the nearest
+  centimeter (provided it is greater than or equal to \code{min.eligible} m).
\code{wffc.P1star()} is a `continuous' version of it.
- The function \code{wffc.P2()} is a new proposal which rewards catching bigger
- fish.
+
+ The function \code{wffc.P2()} is a new proposal which
+ rewards catching bigger fish.
+ It is based on a quadratic polynomial.
\code{wffc.P2star()} is a `continuous' version of it.
+
+
+ The function \code{wffc.P3()} is another new proposal which
+ rewards catching bigger fish.
+ Named a \emph{cumulative linear proposal},
+ it adds \code{ppm} to each multiple of \code{min.eligible} of length.
+ One adds one lot of \code{c1} to each eligible fish.
+ \code{wffc.P3star()} is a `continuous' version of \code{wffc.P3()}.
+
}
\value{
A vector with the number of points.
@@ -39,7 +59,9 @@ wffc.P2star(length, min.eligible = 0.18)
\references{
% \url{http://www.2008worldflyfishingchamps.com}
-% is the official website.
+% was the official 2008 website.
+% \url{http://san2010.pl}
+% was the official 2010 website.
Yee, T. W. (2010)
On strategies and issues raised by an analysis of
@@ -50,21 +72,27 @@ wffc.P2star(length, min.eligible = 0.18)
\author{ T. W. Yee. }
\note{
- \code{wffc.P2} and \code{wffc.P2star} may change in the future.
+ \code{wffc.P2} and \code{wffc.P2star} may change in the future,
+ as well as possibly
+ \code{wffc.P3} and \code{wffc.P3star}.
}
\seealso{ \code{\link{wffc}}. }
\examples{
-\dontrun{
-fishlength = seq(0.0, 0.72, by=0.001)
-plot(fishlength, wffc.P2star(fishlength), type="l", col="blue",
- las=1, lty="dashed", lwd=2, las=1, cex.main=0.8,
- xlab="Fish length (m)", ylab="Competition points",
- main="Current (red) and proposed (blue) WFFC point system")
-lines(fishlength, wffc.P1star(fishlength), type="l", col="red", lwd=2)
-abline(v=(1:4)*0.18, lty="dotted")
-abline(h=(1:9)*wffc.P1star(0.18), lty="dotted")
-}
+\dontrun{ fishlength = seq(0.0, 0.72, by = 0.001)
+plot(fishlength, wffc.P2star(fishlength), type = "l", col = "blue",
+ las = 1, lty = "dashed", lwd = 2, las = 1, cex.main = 0.8,
+ xlab = "Fish length (m)", ylab = "Competition points",
+ main = "Current (red) and proposed (blue and green) WFFC point system")
+lines(fishlength, wffc.P1star(fishlength), type = "l", col = "red", lwd = 2)
+lines(fishlength, wffc.P3star(fishlength), type = "l", col = "darkgreen",
+ lwd = 2, lty = "dashed")
+abline(v = (1:4) * 0.18, lty = "dotted")
+abline(h = (1:9) * wffc.P1star(0.18), lty = "dotted") }
+
+# Successive slopes:
+(wffc.P1star((2:8)*0.18) - wffc.P1star((1:7)*0.18)) / (0.18 * 2000)
+(wffc.P3star((2:8)*0.18) - wffc.P3star((1:7)*0.18)) / (0.18 * 2000)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
diff --git a/man/wrapup.smart.Rd b/man/wrapup.smart.Rd
index 9830957..a2d4c48 100644
--- a/man/wrapup.smart.Rd
+++ b/man/wrapup.smart.Rd
@@ -2,8 +2,10 @@
\alias{wrapup.smart}
\title{ Cleans Up After Smart Prediction }
\description{
-\code{wrapup.smart} deletes any variables used by smart prediction.
-Needed by both the modelling function and the prediction function.
+ \code{wrapup.smart} deletes any variables used by smart prediction.
+ Needed by both the modelling function and the prediction function.
+
+
}
\usage{
wrapup.smart()
@@ -12,10 +14,12 @@ wrapup.smart()
The variables to be deleted are \code{.smart.prediction},
\code{.smart.prediction.counter}, and \code{.smart.prediction.mode}.
The function \code{wrapup.smart} is useful in \R because
- these variables are held in \code{smartpredenv}. In S-PLUS,
+ these variables are held in \code{smartpredenv}.
+ In S-PLUS,
\code{wrapup.smart} is not really necessary because the variables are
placed in frame 1, which disappears when finished anyway.
+
}
\references{
See the technical help file at \url{http://www.stat.auckland.ac.nz/~yee}
@@ -23,7 +27,7 @@ wrapup.smart()
}
\seealso{
-\code{\link{setup.smart}}.
+ \code{\link{setup.smart}}.
}
\examples{
\dontrun{# Place this inside modelling functions such as lm, glm, vglm.
diff --git a/man/zanegbinUC.Rd b/man/zanegbinUC.Rd
index 5db561c..3afbb52 100644
--- a/man/zanegbinUC.Rd
+++ b/man/zanegbinUC.Rd
@@ -12,10 +12,10 @@
}
\usage{
-dzanegbin(x, p0, size, prob=NULL, munb=NULL, log = FALSE)
-pzanegbin(q, p0, size, prob=NULL, munb=NULL)
-qzanegbin(p, p0, size, prob=NULL, munb=NULL)
-rzanegbin(n, p0, size, prob=NULL, munb=NULL)
+dzanegbin(x, p0, size, prob = NULL, munb = NULL, log = FALSE)
+pzanegbin(q, p0, size, prob = NULL, munb = NULL)
+qzanegbin(p, p0, size, prob = NULL, munb = NULL)
+rzanegbin(n, p0, size, prob = NULL, munb = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -48,6 +48,7 @@ rzanegbin(n, p0, size, prob=NULL, munb=NULL)
\code{pzanegbin} gives the distribution function,
\code{qzanegbin} gives the quantile function, and
\code{rzanegbin} generates random deviates.
+
}
%\references{ }
\author{ Thomas W. Yee }
@@ -63,18 +64,18 @@ rzanegbin(n, p0, size, prob=NULL, munb=NULL)
}
\examples{
-munb = 3; size = 4; p0 = 0.3; x = (-1):7
-(ii = dzanegbin(x, p0=p0, munb=munb, size=size))
-table(rzanegbin(100, p0=p0, munb=munb, size=size))
+munb <- 3; size <- 4; p0 <- 0.3; x <- (-1):7
+(ii <- dzanegbin(x, p0 = p0, munb = munb, size = size))
+ table(rzanegbin(100, p0 = p0, munb = munb, size = size))
\dontrun{
x = 0:10
-barplot(rbind(dzanegbin(x, p0=p0, munb=munb, size=size),
- dnbinom(x, mu=munb, size=size)),
+barplot(rbind(dzanegbin(x, p0 = p0, munb = munb, size = size),
+ dnbinom(x, mu = munb, size = size)),
beside = TRUE, col = c("blue","green"), cex.main=0.7, las=1,
ylab = "Probability",names.arg = as.character(x),
main=paste("ZANB(p0=", p0, ", munb=", munb, ", size=", size,
- ") [blue] vs", " NB(mu=", munb, ", size=", size,
+ ") [blue] vs", " NB(mu=", munb, ", size=", size,
") [green] densities", sep="")) }
}
\keyword{distribution}
diff --git a/man/zanegbinomial.Rd b/man/zanegbinomial.Rd
index a8ce3c0..cfa6be9 100644
--- a/man/zanegbinomial.Rd
+++ b/man/zanegbinomial.Rd
@@ -53,13 +53,14 @@ zanegbinomial(lp0 = "logit", lmunb = "loge", lk = "loge",
}
\item{zero}{
Integer valued vector, usually assigned \eqn{-3} or \eqn{3} if
- used at all. Specifies which of the three linear predictors are
+ used at all. Specifies which of the three linear predictors are
modelled as an intercept only. By default, the \code{k} parameter
(after \code{lk} is applied) for each response is modelled as
- a single unknown number that is estimated. It can be modelled as a
- function of the explanatory variables by setting \code{zero=NULL}.
+ a single unknown number that is estimated. It can be modelled as a
+ function of the explanatory variables by setting \code{zero = NULL}.
A negative value means that the value is recycled, so setting \eqn{-3}
- means all \code{k} are intercept only.
+ means all \code{k} are intercept-only.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
\item{cutoff}{
@@ -71,7 +72,8 @@ zanegbinomial(lp0 = "logit", lmunb = "loge", lk = "loge",
}
\item{method.init, shrinkage.init}{
- See \code{\link{negbinomial}}.
+ See \code{\link{negbinomial}}
+ and \code{\link{CommonVGAMffArguments}}.
}
}
@@ -88,16 +90,19 @@ zanegbinomial(lp0 = "logit", lmunb = "loge", lk = "loge",
is implemented in the \pkg{VGAM} package. Some people
call the zero-altered negative binomial a \emph{hurdle} model.
+
For one response/species, by default, the three linear/additive
predictors are \eqn{(logit(p_0), \log(\mu_{nb}), \log(k))^T}{(logit(p0),
log(munb), log(k))^T}. This vector is recycled for multiple species.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
The \code{fitted.values} slot of the fitted object,
which should be extracted by the generic function \code{fitted}, returns
the mean \eqn{\mu}{mu} which is given by
@@ -114,16 +119,19 @@ for counts with extra zeros.
\bold{88},
297--308.
+
}
\section{Warning }{
Convergence for this \pkg{VGAM} family function seems to depend quite
strongly on providing good initial values.
+
Inference obtained from \code{summary.vglm} and \code{summary.vgam}
may or may not be correct. In particular, the p-values, standard errors
and degrees of freedom may need adjustment. Use simulation on artificial
data to check that these are reasonable.
+
}
\author{ T. W. Yee }
@@ -133,12 +141,16 @@ for counts with extra zeros.
functions of the covariates. It is a conditional model, not a mixture
model.
+
This family function effectively combines
\code{\link{posnegbinomial}} and \code{\link{binomialff}} into
one family function.
+
This family function can handle a multivariate response, e.g., more
than one species.
+
+
}
\seealso{
@@ -149,21 +161,23 @@ for counts with extra zeros.
\code{\link{rposnegbin}},
\code{\link{zinegbinomial}},
\code{\link{zipoisson}},
- \code{\link[stats:NegBinomial]{dnbinom}}.
+ \code{\link[stats:NegBinomial]{dnbinom}},
+ \code{\link{CommonVGAMffArguments}}.
+
}
\examples{
zdata <- data.frame(x = runif(nn <- 2000))
-zdata <- transform(zdata, p0 = logit(-1 + 2*x, inverse=TRUE),
- y1 = rposnegbin(nn, munb=exp(0+2*x), size=exp(1)),
- y2 = rposnegbin(nn, munb=exp(1+2*x), size=exp(1)))
+zdata <- transform(zdata, p0 = logit(-1 + 2*x, inverse = TRUE),
+ y1 = rposnegbin(nn, munb = exp(0+2*x), size = exp(1)),
+ y2 = rposnegbin(nn, munb = exp(1+2*x), size = exp(1)))
zdata <- transform(zdata, y1 = ifelse(runif(nn) < p0, 0, y1),
y2 = ifelse(runif(nn) < p0, 0, y2))
with(zdata, table(y1))
with(zdata, table(y2))
-fit <- vglm(cbind(y1,y2) ~ x, zanegbinomial, zdata, trace=TRUE)
-coef(fit, matrix=TRUE)
+fit <- vglm(cbind(y1, y2) ~ x, zanegbinomial, zdata, trace = TRUE)
+coef(fit, matrix = TRUE)
head(fitted(fit))
head(predict(fit))
}
diff --git a/man/zapoisson.Rd b/man/zapoisson.Rd
index cff6601..74388af 100644
--- a/man/zapoisson.Rd
+++ b/man/zapoisson.Rd
@@ -9,8 +9,8 @@
}
\usage{
-zapoisson(lp0 = "logit", llambda = "loge", ep0=list(),
- elambda=list(), zero=NULL)
+zapoisson(lp0 = "logit", llambda = "loge", ep0 = list(),
+ elambda = list(), zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -35,12 +35,13 @@ zapoisson(lp0 = "logit", llambda = "loge", ep0=list(),
modelled as an intercept only.
By default, both linear/additive predictors are modelled using
the explanatory variables.
- If \code{zero=1} then the \eqn{p_0}{p0} parameter
+ If \code{zero = 1} then the \eqn{p_0}{p0} parameter
(after \code{lp0} is applied) is modelled as a single unknown
number that is estimated. It is modelled as a function of the
- explanatory variables by \code{zero=NULL}. A negative value
+ explanatory variables by \code{zero = NULL}. A negative value
means that the value is recycled, so setting \eqn{-1} means
all \eqn{p_0}{p0} are intercept-only (for multivariate responses).
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
}
@@ -54,21 +55,25 @@ zapoisson(lp0 = "logit", llambda = "loge", ep0=list(),
has zeros coming from the Poisson distribution too. Some people call
the zero-altered Poisson a \emph{hurdle} model.
+
For one response/species, by default, the two linear/additive predictors
are \eqn{(logit(p_0), \log(\lambda))^T}{(logit(p0), log(lambda))^T}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
The \code{fitted.values} slot of the fitted object,
which should be extracted by the generic function \code{fitted}, returns
the mean \eqn{\mu}{mu} which is given by
\deqn{\mu = (1-p_0) \lambda / [1 - \exp(-\lambda)].}{%
mu = (1-p0) * lambda / [1 - exp(-lambda)].}
+
}
\references{
Welsh, A. H., Cunningham, R. B., Donnelly, C. F. and Lindenmayer,
@@ -79,23 +84,27 @@ for counts with extra zeros.
\bold{88},
297--308.
+
Angers, J-F. and Biswas, A. (2003)
A Bayesian analysis of zero-inflated generalized Poisson
model.
\emph{Computational Statistics & Data Analysis},
\bold{42}, 37--46.
+
Documentation accompanying the \pkg{VGAM} package at
\url{http://www.stat.auckland.ac.nz/~yee}
contains further information and examples.
+
}
\section{Warning }{
-Inference obtained from \code{summary.vglm} and
-\code{summary.vgam} may or may not be correct.
-In particular, the p-values, standard errors and degrees of freedom
-may need adjustment. Use simulation on artificial data to check
-that these are reasonable.
+ Inference obtained from \code{summary.vglm}
+ and \code{summary.vgam} may or may not be correct.
+ In particular, the p-values, standard errors and degrees of
+ freedom may need adjustment. Use simulation on artificial
+ data to check that these are reasonable.
+
}
@@ -107,16 +116,20 @@ that these are reasonable.
\emph{mixture} model whereas \code{zapoisson} and \code{\link{yip88}}
are \emph{conditional} models.
+
Note this family function allows \eqn{p_0}{p0} to be modelled
as functions of the covariates. It can be thought of an extension
of \code{\link{yip88}}, which is also a conditional model but its
\eqn{\phi}{phi} parameter is a scalar only.
+
This family function effectively combines \code{\link{pospoisson}}
and \code{\link{binomialff}} into one family function.
- This family function can handle a multivariate response, e.g.,
- more than one species.
+
+ This family function can handle a multivariate response,
+ e.g., more than one species.
+
}
@@ -126,32 +139,33 @@ that these are reasonable.
\code{\link{pospoisson}},
\code{\link{posnegbinomial}},
\code{\link{binomialff}},
- \code{\link{rpospois}}.
+ \code{\link{rpospois}},
+ \code{\link{CommonVGAMffArguments}}.
}
\examples{
-zapdata = data.frame(x = runif(nn <- 1000))
-zapdata = transform(zapdata, p0 = logit(-1 + 1*x, inverse=TRUE),
- lambda = loge(-0.5 + 2*x, inverse=TRUE))
-zapdata = transform(zapdata, y = rzapois(nn, lambda, p0=p0))
+zapdata <- data.frame(x = runif(nn <- 1000))
+zapdata <- transform(zapdata, p0 = logit( -1 + 1*x, inverse = TRUE),
+ lambda = loge(-0.5 + 2*x, inverse = TRUE))
+zapdata <- transform(zapdata, y = rzapois(nn, lambda, p0 = p0))
with(zapdata, table(y))
-fit = vglm(y ~ x, zapoisson, zapdata, trace=TRUE)
-fit = vglm(y ~ x, zapoisson, zapdata, trace=TRUE, crit="c")
+fit <- vglm(y ~ x, zapoisson, zapdata, trace = TRUE)
+fit <- vglm(y ~ x, zapoisson, zapdata, trace = TRUE, crit = "c")
head(fitted(fit))
head(predict(fit))
-head(predict(fit, untransform=TRUE))
-coef(fit, matrix=TRUE)
+head(predict(fit, untransform = TRUE))
+coef(fit, matrix = TRUE)
# Another example ------------------------------
# Data from Angers and Biswas (2003)
-abdata = data.frame(y = 0:7, w = c(182, 41, 12, 2, 2, 0, 0, 1))
-abdata = subset(abdata, w>0)
-yy = with(abdata, rep(y, w))
-fit3 = vglm(yy ~ 1, zapoisson, trace=TRUE, crit="c")
-coef(fit3, matrix=TRUE)
+abdata <- data.frame(y = 0:7, w = c(182, 41, 12, 2, 2, 0, 0, 1))
+abdata <- subset(abdata, w > 0)
+yy <- with(abdata, rep(y, w))
+fit3 <- vglm(yy ~ 1, zapoisson, trace = TRUE, crit = "c")
+coef(fit3, matrix = TRUE)
Coef(fit3) # Estimate of lambda (they get 0.6997 with standard error 0.1520)
head(fitted(fit3), 1)
mean(yy) # compare this with fitted(fit3)
diff --git a/man/zibinomial.Rd b/man/zibinomial.Rd
index 4811180..a544b0d 100644
--- a/man/zibinomial.Rd
+++ b/man/zibinomial.Rd
@@ -105,18 +105,20 @@ zibinomial(lphi="logit", lmu="logit", ephi=list(), emu=list(),
\code{\link{binomialff}},
\code{\link{posbinomial}},
\code{\link[stats:Binomial]{rbinom}}.
+
}
\examples{
size = 10 # number of trials; N in the notation above
nn = 200
-zibdata = data.frame(phi = logit(0, inv=TRUE), # 0.50
- mubin = logit(-1, inv=TRUE), # Mean of usual binomial
- sv = rep(size, len=nn))
+zibdata = data.frame(phi = logit( 0, inv = TRUE), # 0.50
+ mubin = logit(-1, inv = TRUE), # Mean of usual binomial
+ sv = rep(size, len = nn))
zibdata = transform(zibdata,
- y = rzibinom(n=nn, size=sv, prob=mubin, phi=phi)/sv)
+ y = rzibinom(nn, size = sv, prob = mubin, phi = phi))
with(zibdata, table(y))
-fit = vglm(y ~ 1, zibinomial, weight=sv, data=zibdata, trace=TRUE)
-coef(fit, matrix=TRUE)
+fit = vglm(cbind(y, sv - y) ~ 1, zibinomial, zibdata, trace = TRUE)
+
+coef(fit, matrix = TRUE)
Coef(fit) # Useful for intercept-only models
fit at misc$p0 # Estimate of P(Y=0)
head(fitted(fit))
diff --git a/man/zinegbinomial.Rd b/man/zinegbinomial.Rd
index 011b18a..e99bc85 100644
--- a/man/zinegbinomial.Rd
+++ b/man/zinegbinomial.Rd
@@ -8,9 +8,9 @@
}
\usage{
-zinegbinomial(lphi="logit", lmunb = "loge", lk = "loge", ephi=list(),
- emunb =list(), ek = list(), iphi = NULL, ik = NULL,
- zero = -3, method.init=1, shrinkage.init=0.95, nsimEIM=200)
+zinegbinomial(lphi = "logit", lmunb = "loge", lk = "loge", ephi = list(),
+ emunb = list(), ek = list(), iphi = NULL, ik = NULL, zero = -3,
+ method.init = 1, shrinkage.init = 0.95, nsimEIM = 200)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -96,16 +96,19 @@ zinegbinomial(lphi="logit", lmunb = "loge", lk = "loge", ephi=list(),
\code{p0} which is the estimate of \eqn{P(Y=0)}. Note that \eqn{P(Y=0)}
is not the parameter \eqn{\phi}{phi}.
+
If \eqn{k} is large then the use of \pkg{VGAM} family function
\code{\link{zipoisson}} is probably preferable.
This follows because the Poisson is the limiting distribution of a
negative binomial as \eqn{k} tends to infinity.
+
The zero-\emph{deflated} negative binomial distribution cannot
be handled with this family function. It can be handled
with the zero-altered negative binomial distribution; see
\code{\link{zanegbinomial}}.
+
}
\section{Warning }{
@@ -125,22 +128,39 @@ zinegbinomial(lphi="logit", lmunb = "loge", lk = "loge", ephi=list(),
\seealso{
\code{\link{Zinegbin}},
\code{\link{negbinomial}},
- \code{\link[stats:Poisson]{rpois}}.
+ \code{\link[stats:Poisson]{rpois}},
+ \code{\link{CommonVGAMffArguments}}.
}
\examples{
-nbdat = data.frame(x = runif(nn <- 1000))
-nbdat = transform(nbdat, phi = logit(-0.5+1*x, inverse=TRUE),
- munb = exp(3+x),
- k = exp(0+2*x))
-nbdat = transform(nbdat, y1 = rzinegbin(nn, phi, mu=munb, size=k),
- y2 = rzinegbin(nn, phi, mu=munb, size=k))
+# Example 1
+nbdat <- data.frame(x2 = runif(nn <- 1000))
+nbdat <- transform(nbdat, phi = logit(-0.5 + 1 * x2, inverse = TRUE),
+ munb = exp(3 + x2),
+ k = exp(0 + 2*x2))
+nbdat <- transform(nbdat, y1 = rzinegbin(nn, phi, mu = munb, size = k),
+ y2 = rzinegbin(nn, phi, mu = munb, size = k))
with(nbdat, table(y1)["0"] / sum(table(y1)))
-fit = vglm(cbind(y1,y2) ~ x, zinegbinomial(zero=NULL), nbdat, trace=TRUE)
-coef(fit, matrix=TRUE)
+fit <- vglm(cbind(y1, y2) ~ x2, zinegbinomial(zero = NULL), nbdat)
+coef(fit, matrix = TRUE)
summary(fit)
head(cbind(fitted(fit), with(nbdat, (1-phi) * munb)))
round(vcov(fit), 3)
+
+
+# Example 2: RR-ZINB could also be called a COZIVGLM-ZINB-2
+nbdat <- data.frame(x2 = runif(nn <- 2000))
+nbdat <- transform(nbdat, x3 = runif(nn))
+nbdat <- transform(nbdat, eta1 = 3 + 1 * x2 + 2 * x3)
+nbdat <- transform(nbdat, phi = logit(-1.5 + 0.5 * eta1, inverse = TRUE),
+ munb = exp(eta1),
+ k = exp(4))
+nbdat <- transform(nbdat, y1 = rzinegbin(nn, phi, mu = munb, size = k))
+with(nbdat, table(y1)["0"] / sum(table(y1)))
+rrzinb <- rrvglm(y1 ~ x2 + x3, zinegbinomial(zero = NULL), nbdat,
+ Index.corner = 2, szero = 3, trace = TRUE)
+coef(rrzinb, matrix = TRUE)
+Coef(rrzinb)
}
\keyword{models}
\keyword{regression}
diff --git a/man/zipoisson.Rd b/man/zipoisson.Rd
index cd9edf2..8cc72d3 100644
--- a/man/zipoisson.Rd
+++ b/man/zipoisson.Rd
@@ -1,5 +1,6 @@
\name{zipoisson}
\alias{zipoisson}
+\alias{zipoissonff}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Zero-Inflated Poisson Distribution Family Function }
\description{
@@ -8,39 +9,52 @@
}
\usage{
-zipoisson(lphi="logit", llambda = "loge", ephi=list(), elambda =list(),
- iphi = NULL, method.init=1, shrinkage.init=0.8, zero = NULL)
+zipoissonff(llambda = "loge", lprobp = "logit",
+ elambda = list(), eprobp = list(),
+ ilambda = NULL, iprobp = NULL, method.init = 1,
+ shrinkage.init = 0.8, zero = -2)
+zipoisson(lphi ="logit", llambda = "loge",
+ ephi = list(), elambda = list(),
+ iphi = NULL, ilambda = NULL, method.init = 1,
+ shrinkage.init = 0.8, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lphi}{
- Link function for the parameter \eqn{\phi}{phi}.
- See \code{\link{Links}} for more choices.
-
- }
- \item{llambda}{
- Link function for the usual \eqn{\lambda}{lambda} parameter.
+ \item{lphi, llambda}{
+ Link function for the parameter \eqn{\phi}{phi}
+ and the usual \eqn{\lambda}{lambda} parameter.
See \code{\link{Links}} for more choices.
}
\item{ephi, elambda}{
List. Extra argument for the respective links.
See \code{earg} in \code{\link{Links}} for general information.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
- \item{iphi}{
+ \item{iphi, ilambda}{
Optional initial values for \eqn{\phi}{phi}, whose values must lie
- between 0 and 1. The default is to compute an initial value internally.
+ between 0 and 1.
+ Optional initial values for \eqn{\lambda}{lambda}, whose values must
+ be positive.
+ The defaults are to compute an initial value internally for each.
If a vector then recycling is used.
}
+ \item{lprobp, eprobp, iprobp}{
+ Corresponding arguments for the other parameterization.
+ See details below.
+
+ }
+
\item{method.init}{
An integer with value \code{1} or \code{2} which
specifies the initialization method for \eqn{\lambda}{lambda}.
If failure to converge occurs try another value
and/or else specify a value for \code{shrinkage.init}
and/or else specify a value for \code{iphi}.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
\item{shrinkage.init}{
@@ -49,40 +63,62 @@ zipoisson(lphi="logit", llambda = "loge", ephi=list(), elambda =list(),
a value of 0 means the individual response values are used,
and a value of 1 means the median or mean is used.
This argument is used in conjunction with \code{method.init}.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
\item{zero}{
An integer specifying which linear/additive predictor is modelled as
intercepts only. If given, the value must be either 1 or 2, and the
- default is none of them. Setting \code{zero=1} makes \eqn{\phi}{phi}
+ default is none of them. Setting \code{zero = 1} makes \eqn{\phi}{phi}
a single parameter.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
}
\details{
- The model is a mixture of a Poisson distribution and the value 0;
+ This model is a mixture of a Poisson distribution and the value 0;
it has value 0 with probability \eqn{\phi}{phi} else is
Poisson(\eqn{\lambda}{lambda}) distributed.
- The model can be written
- \deqn{P(Y=0) = \phi + (1-\phi) \exp(-\lambda),}{%
- P(Y=0) = phi + (1-phi) * exp(-lambda),}
+ Thus there are two sources for zero values, and \eqn{\phi}{phi}
+ is the probability of a \emph{structural zero}.
+ The model for \code{zipoisson()} can be written
+ \deqn{P(Y = 0) = \phi + (1-\phi) \exp(-\lambda),}{%
+ P(Y = 0) = phi + (1-phi) * exp(-lambda),}
and for \eqn{y=1,2,\ldots},
- \deqn{P(Y=y) = (1-\phi) \exp(-\lambda) \lambda^y / y!.}{%
- P(Y=y) = (1-phi) * exp(-lambda) * lambda^y / y!.}
+ \deqn{P(Y = y) = (1-\phi) \exp(-\lambda) \lambda^y / y!.}{%
+ P(Y = y) = (1-phi) * exp(-lambda) * lambda^y / y!.}
Here, the parameter \eqn{\phi}{phi} satisfies
\eqn{0 < \phi < 1}{0 < phi < 1}.
The mean of \eqn{Y} is \eqn{(1-\phi) \lambda}{(1-phi)*lambda} and these
- are returned as the fitted values. By default, the two linear/additive
- predictors are \eqn{(logit(\phi), \log(\lambda))^T}{(logit(phi),
- log(lambda))^T}.
+ are returned as the fitted values.
+ By default, the two linear/additive predictors are
+ \eqn{(logit(\phi), \log(\lambda))^T}{(logit(phi), log(lambda))^T}.
This function implements Fisher scoring.
+
+ The \pkg{VGAM} family function \code{zipoissonff()} has a few
+ changes compared to \code{zipoisson()}.
+ These are:
+ (i) the order of the linear/additive predictors is switched so the
+ Poisson mean comes first;
+ (ii) \code{probp} is now the probability of the Poisson component,
+ i.e., \code{probp} is \code{1-phi};
+ (iii) it can handle multiple responses;
+ (iv) argument \code{zero} has a new default so that the \code{probp}
+ is an intercept-only by default.
+ Now \code{zipoissonff()} is generally recommended over
+ \code{zipoisson()}, and definitely recommended over
+ \code{\link{yip88}}.
+
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
- The object is used by modelling functions such as \code{\link{vglm}}
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
and \code{\link{vgam}}.
+
}
\references{
Thas, O. and Rayner, J. C. W. (2005)
@@ -90,29 +126,48 @@ zipoisson(lphi="logit", llambda = "loge", ephi=list(), elambda =list(),
\emph{Biometrics},
\bold{61}, 808--815.
+
Data: Angers, J-F. and Biswas, A. (2003)
A Bayesian analysis of zero-inflated generalized Poisson model.
\emph{Computational Statistics & Data Analysis},
\bold{42}, 37--46.
+
Cameron, A. C. and Trivedi, P. K. (1998)
\emph{Regression Analysis of Count Data}.
Cambridge University Press: Cambridge.
+
+ Yee, T. W. (2010)
+ Two-parameter reduced-rank vector generalized linear models.
+ \emph{In preparation}.
+
+
}
\author{ T. W. Yee }
\note{
For intercept-models, the \code{misc} slot has a component called
- \code{p0} which is the estimate of \eqn{P(Y=0)}. Note that \eqn{P(Y=0)}
- is not the parameter \eqn{\phi}{phi}. This family function currently
- cannot handle a multivariate response.
+ \code{p0} which is the estimate of \eqn{P(Y = 0)}. Note that
+ \eqn{P(Y = 0)} is not the parameter \eqn{\phi}{phi}. This family
+ function currently cannot handle a multivariate response.
- This family function is now recommended above \code{\link{yip88}}.
The zero-\emph{deflated} Poisson distribution cannot be handled with
this family function. It can be handled with the zero-altered Poisson
distribution; see \code{\link{zapoisson}}.
+
+ The use of this \pkg{VGAM} family function with \code{\link{rrvglm}}
+ can result in a so-called COZIGAM or COZIGLM.
+ That is, a reduced-rank zero-inflated Poisson model (RR-ZIP)
+ is a constrained zero-inflated generalized linear model.
+ See \pkg{COZIGAM}.
+ A RR-ZINB model can also be fitted easily;
+ see \code{\link{zinegbinomial}}.
+ Jargon-wise, a COZIGLM might be better described as a
+ COZIVGLM-ZIP.
+
+
}
\section{Warning }{
@@ -126,7 +181,9 @@ zipoisson(lphi="logit", llambda = "loge", ephi=list(), elambda =list(),
\code{method.init},
\code{shrinkage.init},
\code{iphi}, and/or
- \code{zero=1} if there are explanatory variables.
+ \code{zipoisson(zero = 1)} if there are explanatory variables.
+ The default for \code{zipoissonff()} is to model the
+ structural zero probability as an intercept-only.
}
@@ -134,40 +191,71 @@ zipoisson(lphi="logit", llambda = "loge", ephi=list(), elambda =list(),
\code{\link{zapoisson}},
\code{\link{Zipois}},
\code{\link{yip88}},
+ \code{\link{rrvglm}},
\code{\link{zipebcom}},
\code{\link[stats:Poisson]{rpois}}.
}
\examples{
-zipdat = data.frame(x = runif(nn <- 2000))
-zipdat = transform(zipdat, phi = logit(-0.5 + 1*x, inverse=TRUE),
- lambda = loge( 0.5 + 2*x, inverse=TRUE))
-zipdat = transform(zipdat, y = rzipois(nn, lambda, phi))
-with(zipdat, table(y))
-fit = vglm(y ~ x, zipoisson, zipdat, trace=TRUE)
-coef(fit, matrix=TRUE) # These should agree with the above values
-
-
-# Another example: McKendrick (1926). Data from 223 Indian village households
-cholera = data.frame(ncases = 0:4, # Number of cholera cases,
- wfreq = c(168, 32, 16, 6, 1)) # Frequencies
-fit = vglm(ncases ~ 1, zipoisson, wei=wfreq, data=cholera, trace=TRUE)
-coef(fit, matrix=TRUE)
-with(cholera, cbind(actual=wfreq, fitted= round(
- dzipois(ncases, lambda=Coef(fit)[2], phi=Coef(fit)[1]) * sum(wfreq), dig=2)))
-
-
-# Another example: data from Angers and Biswas (2003)
-abdat = data.frame(y = 0:7, w = c(182, 41, 12, 2, 2, 0, 0, 1))
-abdat = subset(abdat, w>0)
-fit = vglm(y ~ 1, zipoisson(lphi=probit, iphi=0.3), abdat, wei=w, trace=TRUE)
-fit at misc$prob0 # Estimate of P(Y=0)
-coef(fit, matrix=TRUE)
+# Example 1: simulated ZIP data
+zdata <- data.frame(x2 = runif(nn <- 2000))
+zdata <- transform(zdata, phi1 = logit(-0.5 + 1*x2, inverse = TRUE),
+ phi2 = logit( 0.5 - 1*x2, inverse = TRUE),
+ Phi1 = logit(-0.5 , inverse = TRUE),
+ Phi2 = logit( 0.5 , inverse = TRUE),
+ lambda1 = loge( 0.5 + 2*x2, inverse = TRUE),
+ lambda2 = loge( 0.5 + 2*x2, inverse = TRUE))
+zdata <- transform(zdata, y1 = rzipois(nn, lambda1, Phi1),
+ y2 = rzipois(nn, lambda2, Phi2))
+
+with(zdata, table(y1)) # Eyeball the data
+with(zdata, table(y2))
+fit1 <- vglm(y1 ~ x2, zipoisson(zero = 1), zdata, crit = "c")
+fit2 <- vglm(y2 ~ x2, zipoisson(zero = 1), zdata, crit = "c")
+coef(fit1, matrix = TRUE) # These should agree with the above values
+coef(fit2, matrix = TRUE) # These should agree with the above values
+
+# Fit both simultaneously, using a different parameterization:
+fit12 <- vglm(cbind(y1, y2) ~ x2, zipoissonff, zdata, crit = "c")
+coef(fit12, matrix = TRUE) # These should agree with the above values
+
+
+# Example 2: McKendrick (1926). Data from 223 Indian village households
+cholera <- data.frame(ncases = 0:4, # Number of cholera cases,
+ wfreq = c(168, 32, 16, 6, 1)) # Frequencies
+fit <- vglm(ncases ~ 1, zipoisson, wei = wfreq, cholera, trace = TRUE)
+coef(fit, matrix = TRUE)
+with(cholera, cbind(actual = wfreq,
+ fitted = round(dzipois(ncases, lambda = Coef(fit)[2],
+ phi = Coef(fit)[1]) *
+ sum(wfreq), dig = 2)))
+
+# Example 3: data from Angers and Biswas (2003)
+abdata <- data.frame(y = 0:7, w = c(182, 41, 12, 2, 2, 0, 0, 1))
+abdata <- subset(abdata, w > 0)
+fit <- vglm(y ~ 1, zipoisson(lphi = probit, iphi = 0.3),
+ abdata, weight = w, trace = TRUE)
+fit at misc$prob0 # Estimate of P(Y = 0)
+coef(fit, matrix = TRUE)
Coef(fit) # Estimate of phi and lambda
fitted(fit)
-with(abdat, weighted.mean(y, w)) # Compare this with fitted(fit)
+with(abdata, weighted.mean(y, w)) # Compare this with fitted(fit)
summary(fit)
+
+
+# Example 4: This RR-ZIP is known as a COZIGAM or COZIVGLM-ZIP
+rrzip <- rrvglm(Alopacce ~ bs(WaterCon), zipoissonff(zero = NULL),
+ hspider, trace = TRUE)
+coef(rrzip, matrix = TRUE)
+Coef(rrzip)
+summary(rrzip)
+\dontrun{plotvgam(rrzip, lcol = "blue")}
}
\keyword{models}
\keyword{regression}
+% Yee, T. W. (2010)
+% An alternative to quasi-Poisson vs. negative binomial
+% regression: the reduced-rank negative binomial model.
+% \emph{In preparation}.
+
diff --git a/src/caqo3.c b/src/caqo3.c
index aac8551..c41c7d3 100644
--- a/src/caqo3.c
+++ b/src/caqo3.c
@@ -133,7 +133,7 @@ void fvlmz9iyC_enbin9(double lfu2qhid[], double hdqsx7bk[], double nm0eljqk[],
double *n2kersmx, int *f8yswcat, int *dvhw1ulq, int *zy1mchbf,
double *ux3nadiw, double *rsynp1go, int *sguwj9ty);
-void Yee_vbfa(int psdvgce3[], double *doubvec, double he7mqnvy[], double tlgduey8[],
+void Yee_vbfa(int psdvgce3[], double *fjcasv7g, double he7mqnvy[], double tlgduey8[],
double rbne6ouj[], double hdnw2fts[], double lamvec[], double wbkq9zyi[],
int ezlgm2up[], int lqsahu0r[], int which[],
double kispwgx3[], double m0ibglfx[],
@@ -2149,7 +2149,7 @@ void vcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[],
f7svlajr, qfx3vhct, c5aesxkul, wr0lbopv, vtsou9pz, zaupqv9b, xlpjcg3s,
sedf7mxb, kcm6jfob, lensmo = (xwdf5ltg == 1 ? 2 : 4) * *afpc0kns;
double rpto5qwb, dn3iasxug, wiptsjx8, bh2vgiay, uaf2xgqy, vsoihn1r,
- rsynp1go, doubvec[6], zpcqv3uj, ghdetj8v;
+ rsynp1go, fjcasv7g[6], zpcqv3uj, ghdetj8v = 0.0;
double *fpdlcqk9kispwgx3;
double hmayv1xt = 0.0, Totdev = 0.0e0;
@@ -2167,12 +2167,12 @@ void vcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[],
double *wkumc9idhdnw2fts, *wkumc9idwbkq9zyi;
- doubvec[0] = 0.001; // bf.qaltf0nz
- doubvec[1] = 0.0; // ghdetj8v
- doubvec[2] = -1.5; // low
- doubvec[3] = 1.5; // high
- doubvec[4] = 1.0e-4; // tol
- doubvec[5] = 2.0e-8; // eps
+ fjcasv7g[0] = 0.001;
+ fjcasv7g[1] = 0.0;
+ fjcasv7g[2] = -1.5;
+ fjcasv7g[3] = 1.5;
+ fjcasv7g[4] = 1.0e-4;
+ fjcasv7g[5] = 2.0e-8;
wkumc9idui8ysltq = Calloc((*ftnjamu2 * *wy1vqfzu) * (*afpc0kns * *wy1vqfzu), double);
wkumc9idlxyst1eb = Calloc( *qfozcl5b * *ftnjamu2 , double);
@@ -2295,7 +2295,7 @@ void vcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[],
}
}
- Yee_vbfa(psdvgce3, doubvec,
+ Yee_vbfa(psdvgce3, fjcasv7g,
mbvnaor6, ghz9vuba + (qes4mujl-1) * *ftnjamu2,
rbne6ouj + (qes4mujl-1) * *ftnjamu2,
hdnw2fts + sedf7mxb + ehtjigf4 + hnpt1zym[0] - 1,
@@ -2312,6 +2312,7 @@ void vcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[],
nbzjkpi3, acpios9q, jwbkl9fp);
+
y7sdgtqi[3 + *afpc0kns + *afpc0kns] = ghdetj8v;
xumj5dnk = psdvgce3[13];
if (xumj5dnk != 0) {
diff --git a/src/vgam3.c b/src/vgam3.c
index 41a7e59..52050c5 100644
--- a/src/vgam3.c
+++ b/src/vgam3.c
@@ -74,7 +74,7 @@ void fapc0tnbewg7qruh(double ci1oyxas[], double tlgduey8[], double rbne6ouj[],
double tdcb8ilk[] // Added 20100313
);
void Yee_vbfa(
- int psdvgce3[], double *doubvec, double he7mqnvy[], double tlgduey8[],
+ int psdvgce3[], double *fjcasv7g, double he7mqnvy[], double tlgduey8[],
double rbne6ouj[], double hdnw2fts[], double lamvec[], double wbkq9zyi[],
int ezlgm2up[], int lqsahu0r[], int which[],
double kispwgx3[], double m0ibglfx[],
@@ -651,6 +651,7 @@ void Free_fapc0tnbvsuff9(double *wkumc9idwk1a, double *wkumc9idwk1b,
}
+
void vsuff9(int *ftnjamu2, int *lqsahu0r, int ezlgm2up[],
double sjwyig9t[], double tlgduey8[], double rbne6ouj[],
double pygsw6ko[], double pasjmo8g[], double eshvo2ic[],
@@ -679,6 +680,7 @@ void vsuff9(int *ftnjamu2, int *lqsahu0r, int ezlgm2up[],
imk5wjxg = *wy1vqfzu * (*wy1vqfzu + 1) / 2,
n2colb = *kgwmz4ip * *kgwmz4ip,
n3colb = *kgwmz4ip * (*kgwmz4ip + 1) / 2;
+ wkumc9ideshvo2ic = conmat; wkumc9idonxjvw8u = conmat;
wkumc9idwk1a = Calloc(zyojx5hw , double);
wkumc9idwk1b = Calloc(*wy1vqfzu , double);
@@ -1419,7 +1421,7 @@ void fapc0tnbewg7qruh(double ci1oyxas[], double tlgduey8[], double rbne6ouj[],
}
-void Yee_vbfa(int psdvgce3[], double *doubvec, double he7mqnvy[], double tlgduey8[],
+void Yee_vbfa(int psdvgce3[], double *fjcasv7g, double he7mqnvy[], double tlgduey8[],
double rbne6ouj[], double hdnw2fts[], double lamvec[], double wbkq9zyi[],
int ezlgm2up[], int lqsahu0r[], int which[],
double kispwgx3[], double m0ibglfx[],
@@ -1449,10 +1451,10 @@ void Yee_vbfa(int psdvgce3[], double *doubvec, double he7mqnvy[], double tlgduey
double tdcb8ilk[4];
itdcb8ilk[0] = psdvgce3[15];
- tdcb8ilk[0] = doubvec[2];
- tdcb8ilk[1] = doubvec[3];
- tdcb8ilk[2] = doubvec[4];
- tdcb8ilk[3] = doubvec[5];
+ tdcb8ilk[0] = fjcasv7g[2];
+ tdcb8ilk[1] = fjcasv7g[3];
+ tdcb8ilk[2] = fjcasv7g[4];
+ tdcb8ilk[3] = fjcasv7g[5];
wy1vqfzu = psdvgce3 + 7;
ftnjamu2 = psdvgce3;
@@ -1468,8 +1470,8 @@ void Yee_vbfa(int psdvgce3[], double *doubvec, double he7mqnvy[], double tlgduey
npjlv3mr = psdvgce3[12];
ldk = psdvgce3[14];
- zpcqv3uj = doubvec + 0;;
- ghdetj8v = doubvec + 1;
+ zpcqv3uj = fjcasv7g + 0;;
+ ghdetj8v = fjcasv7g + 1;
fapc0tnbvbfa1(ftnjamu2, wy1vqfzu, ezlgm2up, lqsahu0r, which,
he7mqnvy, tlgduey8, rbne6ouj,
diff --git a/src/zeta3.c b/src/zeta3.c
index 285ea64..41711f9 100644
--- a/src/zeta3.c
+++ b/src/zeta3.c
@@ -186,3 +186,46 @@ void vbecoef(double kxae8glp[]) {
}
+
+
+void conmax_Z(double *lamvec, double *nuvec, double *bqelz3cy,
+ int *nlength, int *kpzavbj3,
+ double *qaltf0nz) {
+
+
+ double *pq6zdcwxk, ssum = 0.0, denom = 0.0, yq6lorbx, prevterm;
+ int ayfnwr1v;
+
+ *qaltf0nz = 1.0e-6;
+
+ if (*kpzavbj3 == 0) {
+ pq6zdcwxk = bqelz3cy;
+ for (ayfnwr1v = 0; ayfnwr1v < *nlength; ayfnwr1v++) {
+ prevterm = 1.0 + *lamvec;
+ denom = 1.0;
+ *pq6zdcwxk = prevterm;
+ yq6lorbx = 2.0;
+
+ if (*nuvec == 0.0 && *lamvec >= 1.0) {
+ Rprintf("Error: series will not converge. Returning 0.0\n");
+ *pq6zdcwxk = 0.0;
+ } else {
+ while (prevterm > *qaltf0nz) {
+ denom = denom * pow(yq6lorbx, *lamvec);
+ prevterm = prevterm * *lamvec / denom;
+ *pq6zdcwxk += prevterm;
+ yq6lorbx += 1.0;
+ }
+ }
+ lamvec++;
+ nuvec++;
+ pq6zdcwxk++;
+ }
+ } else if (*kpzavbj3 == 1) {
+
+ } else if (*kpzavbj3 == 2) {
+
+ }
+}
+
+
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-vgam.git
More information about the debian-science-commits
mailing list