[r-cran-vgam] 11/63: Import Upstream version 0.7-7
Andreas Tille
tille at debian.org
Tue Jan 24 13:54:22 UTC 2017
This is an automated email from the git hooks/post-receive script.
tille pushed a commit to branch master
in repository r-cran-vgam.
commit 78245e566c4a6c576682c5133f1499917df5a248
Author: Andreas Tille <tille at debian.org>
Date: Tue Jan 24 14:16:47 2017 +0100
Import Upstream version 0.7-7
---
DESCRIPTION | 6 +-
NAMESPACE | 6 +-
NEWS | 27 +++
R/aamethods.q | 4 +-
R/cao.fit.q | 17 +-
R/coef.vlm.q | 2 +-
R/cqo.fit.q | 12 +-
R/family.binomial.q | 452 ++++++++++++++++++++++++++++++++++++++----------
R/family.categorical.q | 6 +-
R/family.extremes.q | 4 +-
R/family.functions.q | 8 +-
R/family.genetic.q | 2 +-
R/family.glmgam.q | 2 +-
R/family.normal.q | 4 +-
R/family.qreg.q | 7 +-
R/family.rcqo.q | 129 ++++++++------
R/family.rrr.q | 153 ++++++++++------
R/family.univariate.q | 25 +--
R/family.zeroinf.q | 26 +--
R/model.matrix.vglm.q | 2 +-
R/mux.q | 18 ++
R/plot.vglm.q | 2 +-
R/predict.vgam.q | 4 +-
R/residuals.vlm.q | 6 +-
R/rrvglm.fit.q | 18 +-
R/summary.vglm.q | 2 +-
R/vgam.R | 2 +-
R/vgam.fit.q | 19 +-
R/vglm.fit.q | 29 ++--
R/vlm.wfit.q | 2 +-
man/acat.Rd | 2 +-
man/alaplace3.Rd | 47 ++++-
man/alsqreg.Rd | 13 +-
man/amlbinomial.Rd | 5 +-
man/amlexponential.Rd | 5 +-
man/amlpoisson.Rd | 5 +-
man/binom2.or.Rd | 51 ++++--
man/binom2.orUC.Rd | 127 ++++++++++++++
man/cqo.Rd | 4 +
man/fisherz.Rd | 13 +-
man/gamma1.Rd | 3 +-
man/lgammaff.Rd | 1 +
man/lms.bcn.Rd | 1 +
man/mbinomial.Rd | 5 +-
man/notdocumentedyet.Rd | 2 +-
man/predict.qrrvglm.Rd | 86 +++++++++
man/rcqo.Rd | 20 ++-
man/zinbUC.Rd | 8 +-
man/zipebcom.Rd | 214 +++++++++++++++++++++++
man/zipoisson.Rd | 17 +-
src/testf90.f90 | 66 +++++--
51 files changed, 1331 insertions(+), 360 deletions(-)
diff --git a/DESCRIPTION b/DESCRIPTION
index 8be3782..bb043a1 100755
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,6 +1,6 @@
Package: VGAM
-Version: 0.7-6
-Date: 2008-03-17
+Version: 0.7-7
+Date: 2008-05-14
Title: Vector Generalized Linear and Additive Models
Author: Thomas W. Yee <t.yee at auckland.ac.nz>
Maintainer: Thomas Yee <t.yee at auckland.ac.nz>
@@ -15,4 +15,4 @@ License: GPL-2
URL: http://www.stat.auckland.ac.nz/~yee/VGAM
LazyLoad: yes
LazyData: yes
-Packaged: Mon Mar 17 19:11:07 2008; yee
+Packaged: Wed May 14 11:18:25 2008; yee
diff --git a/NAMESPACE b/NAMESPACE
index 1e71d16..d34113f 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -211,11 +211,15 @@ AA.Aa.aa, AB.Ab.aB.ab2, AB.Ab.aB.ab, ABO, acat,
beta.ab, betaff, betaffqn,
dbetageom, pbetageom, rbetageom, betageometric,
betaprime,
-betaII, binom2.or, binom2.rho, binomialff, biplot.rrvglm, brat,
+betaII,
+zipebcom,
+binom2.or, dbinom2.or, rbinom2.or,
+binom2.rho, binomialff, biplot.rrvglm, brat,
bratt, Brat, calibrate.qrrvglm.control, calibrate.qrrvglm,
calibrate, cao.control,
cao, ccoef, cdf.lmscreg, cgo, chisq, clo,
Coef.qrrvglm, Coef, Coef.rrvglm, Coef.vlm,
+predict.qrrvglm,
cratio, cumulative, scumulative, deplot.lmscreg, dirichlet,
exponential, G1G2G3)
diff --git a/NEWS b/NEWS
index ece6420..4fc85e2 100755
--- a/NEWS
+++ b/NEWS
@@ -5,6 +5,33 @@
**************************************************
+ CHANGES IN VGAM VERSION 0.7-7
+
+NEW FEATURES
+
+ o Labelling changes: binom2.or() uses "oratio" instead of "OR" (stands
+ for the odds ratio).
+ o New VGAM family functions:
+ zipebcom().
+ o New functions: dbinom2.or(), rbinom2.or().
+ o binom2.or() has new arguments 'imu1', 'imu2' and 'ioratio' for
+ inputting optional marginal probabilities and odds ratio.
+ The third element of the score vector uses a new formula.
+ o [dpqr]zinb() has arguments prob and munb set to NULL by default.
+ o Compatible with R 2.7.0.
+
+
+BUG FIXES
+
+ o gaussianff()@loglikelihood was buggy.
+ o all(trivial.constraints(Blist)) changed to
+ all(trivial.constraints(Blist) == 1) to avoid a warning in R 2.7.0.
+ Ditto for 'all(findex)' and 'any(diff(Alphavec))'.
+ o qtriangle(0.3, theta=0.3) used to fail.
+ o gharmonic() handles a negative argument s.
+
+
+
CHANGES IN VGAM VERSION 0.7-6
NEW FEATURES
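
A minimal sketch (not part of the upstream sources) of the 0.7-7 NEWS items
above for binom2.or(): simulating a bivariate binary response with the new
rbinom2.or() and passing the new 'ioratio' starting value.  The data,
coefficients and starting odds ratio are invented, and VGAM 0.7-7 is assumed
to be installed.

library(VGAM)

set.seed(1)
n <- 500
x <- runif(n)
# rbinom2.or() is one of the new functions; with exchangeable=TRUE the
# second marginal probability defaults to the first.
ymat <- rbinom2.or(n, mu1 = plogis(-1 + 2*x), oratio = exp(1),
                   exchangeable = TRUE)

# 'imu1', 'imu2' and 'ioratio' are the new optional starting values.
fit <- vglm(ymat ~ x, binom2.or(exchangeable = TRUE, ioratio = 2))
coef(fit, matrix = TRUE)   # the third linear predictor is now labelled "oratio"
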
diff --git a/R/aamethods.q b/R/aamethods.q
index 02bdfd6..f735ecc 100644
--- a/R/aamethods.q
+++ b/R/aamethods.q
@@ -81,7 +81,7 @@ setClass("vglmff", representation(
"inverse" = "function",
"last" = "expression",
"link" = "function",
- "loglikelihood"= "function", # problem: zz function() NULL if unspecified
+ "loglikelihood"= "function",
"middle" = "expression",
"middle2" = "expression",
"summary.dispersion" = "logical",
@@ -99,7 +99,7 @@ setClass("vglmff", representation(
"inverse" = "function",
"last" = "expression",
"link" = "function",
- "loglikelihood"= "function", # problem: zz function() NULL if unspecified
+ "loglikelihood"= "function",
"middle" = "expression",
"middle2" = "expression",
"summary.dispersion" = "logical",
diff --git a/R/cao.fit.q b/R/cao.fit.q
index 2e42b7c..0001107 100644
--- a/R/cao.fit.q
+++ b/R/cao.fit.q
@@ -78,7 +78,7 @@ cao.fit <- function(x, y, w=rep(1, length(x[, 1])),
tc1 = trivial.constraints(constraints)
- if(all(findex))
+ if(all(findex == 1))
stop("No covariates to form latent variables from.")
colx1.index = names.colx1.index = NULL
dx2 = dimnames(x)[[2]]
@@ -667,7 +667,7 @@ if(exists("flush.console")) flush.console()
coefficients = ans1$beta,
df1.nl = df1.nl,
df2.nl = if(Rank == 2) df2.nl else NULL,
- df.residual = n*M - qrank - sum(ans1$df - 1), # zz ??
+ df.residual = n*M - qrank - sum(ans1$df - 1),
fitted = ans1$fv, # NOS x n
kindex = ans1$kindex,
predictors = matrix(ans1$etamat, n, M, byrow=TRUE),
@@ -689,7 +689,9 @@ calldcaof = function(cmatrix,
n, M,
othint, othdbl,
alldump=FALSE) {
- if(alldump) stop("zz really used?")
+
+
+ if(alldump) stop("really used?")
if(exists("flush.console")) flush.console()
if(!Nice21) stop("Nice21 must be TRUE")
@@ -898,7 +900,7 @@ if(exists("flush.console")) flush.console()
coefficients=ans1$beta,
df1.nl=ans1$spardf[ NOS+(1:NOS)] - 1,
df2.nl=ans1$spardf[3*NOS+(1:NOS)] - 1,
- df.residual = n*M - qrank - sum(ans1$df - 1), # zz ??
+ df.residual = n*M - qrank - sum(ans1$df - 1),
fitted=ans1$fv,
kindex = ans1$kindex,
predictors=matrix(ans1$etamat, n, M, byrow=TRUE),
@@ -1363,9 +1365,9 @@ predict.cao <- function (object, newdata=NULL,
attr(X, "assign") = as.save # Restored
}
- offset <- if (!is.null(off.num<-attr(tt,"offset")))
+ offset <- if (!is.null(off.num<-attr(tt,"offset"))) {
eval(attr(tt,"variables")[[off.num+1]], newdata)
- else if (!is.null(object@offset))
+ } else if (!is.null(object@offset))
eval(object@call$offset, newdata)
if(is.smart(object) && length(object@smart.prediction)) {
@@ -1376,7 +1378,6 @@ predict.cao <- function (object, newdata=NULL,
attr(X, "assign") <- attrassigndefault(X, tt)
}
- # zz 30/8/06; use object@extra$Cmat instead? # Not 100% sure here
cancoefs = ccoef(object)
lvmat = X[,ocontrol$colx2.index,drop=FALSE] %*% cancoefs # n x Rank
@@ -1621,7 +1622,7 @@ persp.cao = function(x,
rugplot=FALSE,
...) {
object = x # don't like x as the primary argument
- coefobj = Coef(object) # (Rotate=FALSE, IToleranc=TRUE) # zzzz
+ coefobj = Coef(object)
if((Rank <- coefobj@Rank) > 2)
stop("object must be a rank-1 or rank-2 model")
fvmat = fitted(object)
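
The switch from all(trivial.constraints(...)) to all(trivial.constraints(...) == 1)
here (and in coef.vlm.q, cqo.fit.q, family.rrr.q and model.matrix.vglm.q below)
matches the NEWS bug-fix entry about R 2.7.0.  A tiny illustration, with
'flags' standing in for the 0/1 vector that trivial.constraints() returns:

flags <- c(1, 1, 0)    # numeric 0/1, not logical
all(flags)             # same result, but R 2.7.0+ warns about coercing
                       # an argument of type 'double' to logical
all(flags == 1)        # explicit comparison, no coercion warning
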
diff --git a/R/coef.vlm.q b/R/coef.vlm.q
index 3b5ff07..5df7239 100644
--- a/R/coef.vlm.q
+++ b/R/coef.vlm.q
@@ -18,7 +18,7 @@ coefvlm <- function(object, matrix.out=FALSE, label=TRUE, compress=TRUE)
xij <- object@control$xij
Blist <- object@constraints
- if(!length(xij) && all(trivial.constraints(Blist))) {
+ if(!length(xij) && all(trivial.constraints(Blist) == 1)) {
B <- matrix(ans, nrow=ncolx, ncol=M, byrow=TRUE)
} else {
B <- matrix(as.numeric(NA), nrow=ncolx, ncol=M)
diff --git a/R/cqo.fit.q b/R/cqo.fit.q
index b230fcb..60892e1 100644
--- a/R/cqo.fit.q
+++ b/R/cqo.fit.q
@@ -239,7 +239,7 @@ checkCMCO <- function(Blist, control, modelno) {
Blist2[[k]] = (Blist2[[k]])[c(TRUE,FALSE),,drop=FALSE]
}
- if(!all(trivial.constraints(Blist2)))
+ if(!all(trivial.constraints(Blist2) == 1))
stop("the constraint matrices for the non-Norrr terms are not trivial")
if(!trivial.constraints(Blist1[[1]]))
stop("the constraint matrices for intercept term is not trivial")
@@ -335,7 +335,7 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
" that will be overwritten by reduced-rank regression", sep=""))
}
- if(all(findex))
+ if(all(findex == 1))
stop("use vglm(), not rrvglm()!")
colx1.index = names.colx1.index = NULL
dx2 = dimnames(x)[[2]]
@@ -446,7 +446,7 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
dn <- labels(x)
yn <- dn[[1]]
xn <- dn[[2]]
- residuals <- z - fv # zz - offset ?? # not sure 3/10/05
+ residuals <- z - fv
if(M==1) {
residuals <- as.vector(residuals)
names(residuals) <- yn
@@ -466,10 +466,10 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
names(mu) <- names(fv)
}
- df.residual <- 55 - 8 - Rank*p2 # zz n.big==55, rank==8
+ df.residual <- 55 - 8 - Rank*p2
fit <- list(assign=asgn,
coefficients=coefs,
- constraints=Blist, # B.list? zz
+ constraints=Blist,
df.residual=df.residual,
df.total=n*M,
fitted.values=mu,
@@ -652,7 +652,7 @@ if(FALSE) {
constraints=replace.constraints(constraints,diag(M),rrcontrol$colx2.index)
nice31 = (!control$EqualTol || control$ITolerances) &&
- all(trivial.constraints(constraints))
+ all(trivial.constraints(constraints) == 1)
}
NOS = ifelse(modelno==3 || modelno==5, M/2, M)
diff --git a/R/family.binomial.q b/R/family.binomial.q
index e8b1dd0..5a4e408 100644
--- a/R/family.binomial.q
+++ b/R/family.binomial.q
@@ -215,130 +215,226 @@ betabinomial <- function(lmu="logit", lrho="logit",
+dbinom2.or = function(mu1,
+ mu2=if(exchangeable) mu1 else stop("'mu2' not specified"),
+ oratio=1,
+ exchangeable=FALSE,
+ tol=0.001,
+ colnames=c("00", "01", "10", "11"),
+ ErrorCheck=TRUE)
+{
+ if(ErrorCheck) {
+ if(!is.Numeric(mu1, positive=TRUE) || max(mu1) >= 1)
+ stop("bad input for argument 'mu1'")
+ if(!is.Numeric(mu2, positive=TRUE) || max(mu2) >= 1)
+ stop("bad input for argument 'mu2'")
+ if(!is.Numeric(oratio, positive=TRUE))
+ stop("bad input for argument 'oratio'")
+ if(!is.Numeric(tol, positive=TRUE, allow=1) || tol > 0.1)
+ stop("bad input for argument \"tol\"")
+ if(exchangeable && max(abs(mu1 - mu2)) > 0.00001)
+ stop("argument 'exchangeable' is TRUE but 'mu1' and 'mu2' differ")
+ }
+
+ n = max(length(mu1), length(mu2), length(oratio))
+ oratio = rep(oratio, len=n)
+ mu1 = rep(mu1, len=n)
+ mu2 = rep(mu2, len=n)
+
+ a.temp = 1 + (mu1+mu2)*(oratio-1)
+ b.temp = -4 * oratio * (oratio-1) * mu1 * mu2
+ temp = sqrt(a.temp^2 + b.temp)
+ p11 = ifelse(abs(oratio-1) < tol, mu1*mu2, (a.temp-temp)/(2*(oratio-1)))
+ p01 = mu2 - p11
+ p10 = mu1 - p11
+ p00 = 1 - p11 - p01 - p10
+ matrix(c(p00,p01,p10,p11), n, 4, dimnames=list(NULL,colnames))
+}
+
+
+
+
+rbinom2.or = function(n, mu1,
+ mu2=if(exchangeable) mu1 else stop("'mu2' not specified"),
+ oratio=1,
+ exchangeable=FALSE,
+ tol=0.001,
+ twoCols=TRUE,
+ colnames=if(twoCols) c("y1","y2") else c("00", "01", "10", "11"),
+ ErrorCheck=TRUE)
+{
+ if(ErrorCheck) {
+ if(!is.Numeric(n, integer=TRUE, posit=TRUE, allow=1))
+ stop("bad input for argument 'n'")
+ if(!is.Numeric(mu1, positive=TRUE) || max(mu1) >= 1)
+ stop("bad input for argument 'mu1'")
+ if(!is.Numeric(mu2, positive=TRUE) || max(mu2) >= 1)
+ stop("bad input for argument 'mu2'")
+ if(!is.Numeric(oratio, positive=TRUE))
+ stop("bad input for argument 'oratio'")
+ if(!is.Numeric(tol, positive=TRUE, allow=1) || tol > 0.1)
+ stop("bad input for argument \"tol\"")
+ if(exchangeable && max(abs(mu1 - mu2)) > 0.00001)
+ stop("argument 'exchangeable' is TRUE but 'mu1' and 'mu2' differ")
+ }
+
+ dmat = dbinom2.or(mu1=mu1, mu2=mu2, oratio=oratio, exchang=exchangeable,
+ tol=tol, ErrorCheck=ErrorCheck)
+
+ answer = matrix(0, n, 2, dimnames=list(NULL, if(twoCols) colnames else NULL))
+ yy = runif(n)
+ cs1 = dmat[,"00"] + dmat[,"01"]
+ cs2 = cs1 + dmat[,"10"]
+ index = (dmat[,"00"] < yy) & (yy <= cs1)
+ answer[index,2] = 1
+ index = (cs1 < yy) & (yy <= cs2)
+ answer[index,1] = 1
+ index = (yy > cs2)
+ answer[index,] = 1
+ if(twoCols) answer else {
+ answer4 = matrix(0, n, 4, dimnames=list(NULL, colnames))
+ answer4[cbind(1:n, 1 + 2*answer[,1] + answer[,2])] = 1
+ answer4
+ }
+}
+
-binom2.or <- function(lmu="logit", lmu1=lmu, lmu2=lmu, lor="loge",
- emu=list(), emu1=emu, emu2=emu, eor=list(),
- zero=3, exchangeable=FALSE, tol=0.001)
+
+
+binom2.or = function(lmu="logit", lmu1=lmu, lmu2=lmu, loratio="loge",
+ emu=list(), emu1=emu, emu2=emu, eoratio=list(),
+ imu1=NULL, imu2=NULL, ioratio = NULL,
+ zero=3, exchangeable=FALSE, tol=0.001)
{
if(mode(lmu) != "character" && mode(lmu) != "name")
- lmu <- as.character(substitute(lmu))
+ lmu = as.character(substitute(lmu))
if(mode(lmu1) != "character" && mode(lmu1) != "name")
- lmu1 <- as.character(substitute(lmu1))
+ lmu1 = as.character(substitute(lmu1))
if(mode(lmu2) != "character" && mode(lmu2) != "name")
- lmu2 <- as.character(substitute(lmu2))
- if(mode(lor) != "character" && mode(lor) != "name")
- lor <- as.character(substitute(lor))
+ lmu2 = as.character(substitute(lmu2))
+ if(mode(loratio) != "character" && mode(loratio) != "name")
+ loratio = as.character(substitute(loratio))
if(is.logical(exchangeable) && exchangeable && ((lmu1 != lmu2) ||
!all.equal(emu1, emu2)))
stop("exchangeable=TRUE but marginal links are not equal")
- if(!is.Numeric(tol, positive=TRUE, allow=1))
+ if(!is.Numeric(tol, positive=TRUE, allow=1) || tol > 0.1)
stop("bad input for argument \"tol\"")
if(!is.list(emu1)) emu1 = list()
if(!is.list(emu2)) emu2 = list()
- if(!is.list(eor)) eor = list()
+ if(!is.list(eoratio)) eoratio = list()
new("vglmff",
- blurb=c("Palmgren model\n",
- "Links: ",
- namesof("mu1", lmu1, earg=emu1), ", ",
- namesof("mu2", lmu2, earg=emu2), "; ",
- namesof("OR", lor, earg=eor)),
+ blurb=c("Bivariate binomial regression with an odds ratio\n",
+ "Links: ",
+ namesof("mu1", lmu1, earg=emu1), ", ",
+ namesof("mu2", lmu2, earg=emu2), "; ",
+ namesof("oratio", loratio, earg=eoratio)),
constraints=eval(substitute(expression({
- constraints <- cm.vgam(matrix(c(1,1,0,0,0,1),3,2), x,
- .exchangeable, constraints,
- intercept.apply=TRUE)
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ constraints = cm.vgam(matrix(c(1,1,0,0,0,1),3,2), x,
+ .exchangeable, constraints,
+ intercept.apply=TRUE)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .exchangeable=exchangeable, .zero=zero ))),
deviance=Deviance.categorical.data.vgam,
initialize=eval(substitute(expression({
eval(process.binomial2.data.vgam)
- predictors.names <- c(namesof("mu1", .lmu1, earg= .emu1, short=TRUE),
- namesof("mu2", .lmu2, earg= .emu2, short=TRUE),
- namesof("OR", .lor, earg= .eor, short=TRUE))
- }), list( .lmu1=lmu1, .lmu2=lmu2,
- .emu1=emu1, .emu2=emu2, .eor=eor,
- .lor=lor ))),
+ predictors.names = c(namesof("mu1", .lmu1, earg= .emu1, short=TRUE),
+ namesof("mu2", .lmu2, earg= .emu2, short=TRUE),
+ namesof("oratio", .loratio, earg= .eoratio, short=TRUE))
+
+ if(!length(etastart)) {
+ pmargin = cbind(mu[,3]+mu[,4], mu[,2]+mu[,4])
+ ioratio = if(length( .ioratio)) rep( .ioratio, len=n) else
+ mu[,4]*mu[,1]/(mu[,2]*mu[,3])
+ if(length( .imu1)) pmargin[,1] = .imu1
+ if(length( .imu2)) pmargin[,2] = .imu2
+ etastart = cbind(theta2eta(pmargin[,1], .lmu1, earg= .emu1),
+ theta2eta(pmargin[,2], .lmu2, earg= .emu2),
+ theta2eta(ioratio, .loratio, earg= .eoratio))
+ }
+ }), list( .lmu1=lmu1, .lmu2=lmu2, .loratio=loratio,
+ .emu1=emu1, .emu2=emu2, .eoratio=eoratio,
+ .imu1=imu1, .imu2=imu2, .ioratio=ioratio ))),
inverse=eval(substitute(function(eta, extra=NULL) {
- pm <- cbind(eta2theta(eta[,1], .lmu1, earg= .emu1),
- eta2theta(eta[,2], .lmu2, earg= .emu2))
- or <- eta2theta(eta[,3], .lor, earg= .eor)
- a <- 1 + (pm[,1]+pm[,2])*(or-1)
- b <- -4 * or * (or-1) * pm[,1] * pm[,2]
- temp <- sqrt(a^2+b)
- pj4 <- ifelse(abs(or-1) < .tol, pm[,1]*pm[,2], (a-temp)/(2*(or-1)))
- pj2 <- pm[,2] - pj4
- pj3 <- pm[,1] - pj4
+ pmargin = cbind(eta2theta(eta[,1], .lmu1, earg= .emu1),
+ eta2theta(eta[,2], .lmu2, earg= .emu2))
+ oratio = eta2theta(eta[,3], .loratio, earg= .eoratio)
+ a.temp = 1 + (pmargin[,1]+pmargin[,2])*(oratio-1)
+ b.temp = -4 * oratio * (oratio-1) * pmargin[,1] * pmargin[,2]
+ temp = sqrt(a.temp^2 + b.temp)
+ pj4 = ifelse(abs(oratio-1) < .tol, pmargin[,1]*pmargin[,2],
+ (a.temp-temp)/(2*(oratio-1)))
+ pj2 = pmargin[,2] - pj4
+ pj3 = pmargin[,1] - pj4
cbind("00" = 1-pj4-pj2-pj3, "01" = pj2, "10" = pj3, "11" = pj4)
}, list( .tol=tol, .lmu1=lmu1, .lmu2=lmu2,
- .emu1=emu1, .emu2=emu2, .eor=eor,
- .lor=lor ))),
+ .emu1=emu1, .emu2=emu2, .eoratio=eoratio,
+ .loratio=loratio ))),
last=eval(substitute(expression({
- misc$link <- c("mu1"= .lmu1, "mu2"= .lmu2, "OR"= .lor)
- misc$earg <- list(mu1 = .emu1, mu2 = .emu2, OR = .eor)
- misc$tol <- .tol
+ misc$link = c("mu1"= .lmu1, "mu2"= .lmu2, "oratio"= .loratio)
+ misc$earg = list(mu1 = .emu1, mu2 = .emu2, oratio = .eoratio)
+ misc$tol = .tol
+ misc$expected = TRUE
}), list( .tol=tol, .lmu1=lmu1, .lmu2=lmu2,
- .emu1=emu1, .emu2=emu2, .eor=eor,
- .lor=lor ))),
+ .emu1=emu1, .emu2=emu2, .eoratio=eoratio,
+ .loratio=loratio ))),
link=eval(substitute(function(mu, extra=NULL) {
- pm <- cbind(mu[,3]+mu[,4], mu[,2]+mu[,4])
- or <- mu[,4]*mu[,1]/(mu[,2]*mu[,3])
- cbind(theta2eta(pm[,1], .lmu1, earg= .emu1),
- theta2eta(pm[,2], .lmu2, earg= .emu2),
- theta2eta(or, .lor, earg= .eor))
+ pmargin = cbind(mu[,3]+mu[,4], mu[,2]+mu[,4])
+ oratio = mu[,4]*mu[,1] / (mu[,2]*mu[,3])
+ cbind(theta2eta(pmargin[,1], .lmu1, earg= .emu1),
+ theta2eta(pmargin[,2], .lmu2, earg= .emu2),
+ theta2eta(oratio, .loratio, earg= .eoratio))
}, list( .lmu1=lmu1, .lmu2=lmu2,
- .emu1=emu1, .emu2=emu2, .eor=eor,
- .lor=lor ))),
+ .emu1=emu1, .emu2=emu2, .eoratio=eoratio,
+ .loratio=loratio ))),
loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
if(residuals) stop("loglikelihood residuals not implemented yet") else
sum(w * y * log(mu)),
vfamily=c("binom2.or", "binom2"),
deriv=eval(substitute(expression({
- pm <- cbind(mu[,3]+mu[,4], mu[,2]+mu[,4])
- or <- mu[,4]*mu[,1]/(mu[,2]*mu[,3])
-
- a <- 1 + (pm[,1]+pm[,2])*(or-1)
- b <- -4 * or * (or-1) * pm[,1] * pm[,2]
- temp <- sqrt(a^2+b)
-
- coeff1 <- -0.5 + (2*or*pm[,2]-a)/(2*temp)
- d1 <- coeff1*(y[,1]/mu[,1]-y[,3]/mu[,3])-
- (1+coeff1)*(y[,2]/mu[,2]-y[,4]/mu[,4])
+ pmargin = cbind(mu[,3]+mu[,4], mu[,2]+mu[,4])
+ oratio = mu[,4]*mu[,1] / (mu[,2]*mu[,3])
+ a.temp = 1 + (pmargin[,1]+pmargin[,2])*(oratio-1)
+ b.temp = -4 * oratio * (oratio-1) * pmargin[,1] * pmargin[,2]
+ temp9 = sqrt(a.temp^2 + b.temp)
+
+ coeff12 = -0.5 + (2*oratio*pmargin - a.temp) / (2*temp9)
+ dl.dmu1 = coeff12[,2] * (y[,1]/mu[,1]-y[,3]/mu[,3]) -
+ (1+coeff12[,2]) * (y[,2]/mu[,2]-y[,4]/mu[,4])
- coeff2 <- -0.5 + (2*or*pm[,1]-a)/(2*temp)
- d2 <- coeff2*(y[,1]/mu[,1]-y[,2]/mu[,2])-
- (1+coeff2)*(y[,3]/mu[,3]-y[,4]/mu[,4])
+ dl.dmu2 = coeff12[,1] * (y[,1]/mu[,1]-y[,2]/mu[,2]) -
+ (1+coeff12[,1]) * (y[,3]/mu[,3]-y[,4]/mu[,4])
- coeff3 <- (y[,1]/mu[,1]-y[,2]/mu[,2]-y[,3]/mu[,3]+y[,4]/mu[,4])
- d3 <- ifelse(abs(or-1) < .tol,
- coeff3 * pm[,1] * (1-pm[,1]) * pm[,2] * (1-pm[,2]),
- (1/(or-1)) * coeff3 * ( (pm[,1]+pm[,2])*(1-a/temp)/2 +
- (2*or-1)*pm[,1]*pm[,2]/temp - (a-temp)/(2*(or-1)) ))
- w * cbind(d1 * dtheta.deta(pm[,1], .lmu1, earg= .emu1),
- d2 * dtheta.deta(pm[,2], .lmu2, earg= .emu2),
- d3 * dtheta.deta(or, .lor, earg= .eor))
- }), list( .tol=tol, .lmu1=lmu1, .lmu2=lmu2,
- .emu1=emu1, .emu2=emu2, .eor=eor,
- .lor=lor ))),
+ coeff3 = (y[,1]/mu[,1] - y[,2]/mu[,2] - y[,3]/mu[,3] + y[,4]/mu[,4])
+ Vab = 1 / (1/mu[,1] + 1/mu[,2] + 1/mu[,3] + 1/mu[,4])
+ dp11.doratio = Vab / oratio
+ dl.doratio = coeff3 * dp11.doratio
+
+ w * cbind(dl.dmu1 * dtheta.deta(pmargin[,1], .lmu1, earg= .emu1),
+ dl.dmu2 * dtheta.deta(pmargin[,2], .lmu2, earg= .emu2),
+ dl.doratio * dtheta.deta(oratio, .loratio, earg= .eoratio))
+ }), list( .lmu1=lmu1, .lmu2=lmu2,
+ .emu1=emu1, .emu2=emu2, .eoratio=eoratio,
+ .loratio=loratio ))),
weight=eval(substitute(expression({
- Vab <- 1/(1/mu[,1] + 1/mu[,2] + 1/mu[,3] + 1/mu[,4])
- deltapi <- mu[,3]*mu[,2] - mu[,4]*mu[,1]
- delta <- mu[,1]*mu[,2]*mu[,3]*mu[,4]
- pq <- pm[,1:2]*(1-pm[,1:2])
-
- wz <- matrix(0, n, 4)
- wz[,iam(1,1,M)] <- dtheta.deta(pm[,1], .lmu1, earg= .emu1)^2 *
- pq[,2] * Vab / delta
- wz[,iam(2,2,M)] <- dtheta.deta(pm[,2], .lmu2, earg= .emu2)^2 *
- pq[,1] * Vab / delta
- wz[,iam(3,3,M)] <- Vab *
- (dtheta.deta(or, .lor, earg= .eor) / dtheta.deta(or, "loge"))^2
- wz[,iam(1,2,M)] <- Vab * deltapi *
- dtheta.deta(pm[,1], .lmu1, earg= .emu1) *
- dtheta.deta(pm[,2], .lmu2, earg= .emu2) / delta
- w * wz
+ Deltapi = mu[,3]*mu[,2] - mu[,4]*mu[,1]
+ myDelta = mu[,1] * mu[,2] * mu[,3] * mu[,4]
+ pqmargin = pmargin * (1-pmargin)
+
+ wz = matrix(0, n, 4)
+ wz[,iam(1,1,M)] = (pqmargin[,2] * Vab / myDelta) *
+ dtheta.deta(pmargin[,1], .lmu1, earg= .emu1)^2
+ wz[,iam(2,2,M)] = (pqmargin[,1] * Vab / myDelta) *
+ dtheta.deta(pmargin[,2], .lmu2, earg= .emu2)^2
+ wz[,iam(3,3,M)] = (Vab / oratio^2) *
+ dtheta.deta(oratio, .loratio, earg= .eoratio)^2
+ wz[,iam(1,2,M)] = (Vab * Deltapi / myDelta) *
+ dtheta.deta(pmargin[,1], .lmu1, earg= .emu1) *
+ dtheta.deta(pmargin[,2], .lmu2, earg= .emu2)
+ w * wz
}), list( .lmu1=lmu1, .lmu2=lmu2,
- .emu1=emu1, .emu2=emu2, .eor=eor,
- .lor=lor ))))
+ .emu1=emu1, .emu2=emu2, .eoratio=eoratio,
+ .loratio=loratio ))))
}
@@ -978,6 +1074,182 @@ seq2binomial = function(lprob1="logit", lprob2="logit",
+zipebcom = function(lmu12="cloglog", lphi12="logit", loratio="loge",
+ emu12=list(), ephi12=list(), eoratio=list(),
+ imu12=NULL, iphi12=NULL, ioratio = NULL,
+ zero=2:3, tol=0.001,
+ addRidge=0.001)
+{
+
+
+
+ if(mode(lphi12) != "character" && mode(lphi12) != "name")
+ lphi12 = as.character(substitute(lphi12))
+ if(mode(loratio) != "character" && mode(loratio) != "name")
+ loratio = as.character(substitute(loratio))
+ if(!is.Numeric(tol, positive=TRUE, allow=1) || tol > 0.1)
+ stop("bad input for argument \"tol\"")
+ if(!is.Numeric(addRidge, allow=1, posit=TRUE) || addRidge > 0.5)
+ stop("bad input for argument \"addRidge\"")
+ if(!is.list(emu12)) emu12 = list()
+ if(!is.list(ephi12)) ephi12 = list()
+ if(!is.list(eoratio)) eoratio = list()
+ if(lmu12 != "cloglog")
+ warning("argument 'lmu12' should be 'cloglog'")
+
+ new("vglmff",
+ blurb=c("Exchangeable bivariate ", lmu12, " odds-ratio model based on\n",
+ "a zero-inflated Poisson distribution\n\n",
+ "Links: ",
+ namesof("mu12", lmu12, earg=emu12), ", ",
+ namesof("phi12", lphi12, earg=ephi12), ", ",
+ namesof("oratio", loratio, earg=eoratio)),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ eval(process.binomial2.data.vgam)
+ predictors.names = c(
+ namesof("mu12", .lmu12, earg= .emu12, short=TRUE),
+ namesof("phi12", .lphi12, earg= .ephi12, short=TRUE),
+ namesof("oratio", .loratio, earg= .eoratio, short=TRUE))
+
+ propY1.eq.0 = weighted.mean(y[,'00'], w) + weighted.mean(y[,'01'], w)
+ propY2.eq.0 = weighted.mean(y[,'00'], w) + weighted.mean(y[,'10'], w)
+ if(length( .iphi12) && any(.iphi12 > propY1.eq.0))
+ warning("iphi12 must be less than the sample proportion of Y1==0")
+ if(length( .iphi12) && any(.iphi12 > propY2.eq.0))
+ warning("iphi12 must be less than the sample proportion of Y2==0")
+
+ if(!length(etastart)) {
+ pstar.init = ((mu[,3]+mu[,4]) + (mu[,2]+mu[,4])) / 2
+ phi.init = if(length(.iphi12)) rep(.iphi12, len=n) else
+ min(propY1.eq.0 * 0.95, propY2.eq.0 * 0.95, pstar.init/1.5)
+ oratio.init = if(length( .ioratio)) rep( .ioratio, len=n) else
+ mu[,4]*mu[,1]/(mu[,2]*mu[,3])
+ mu12.init = if(length(.imu12)) rep(.imu12, len=n) else
+ pstar.init / (1-phi.init)
+ etastart = cbind(
+ theta2eta(mu12.init, .lmu12, earg= .emu12),
+ theta2eta(phi.init, .lphi12, earg= .ephi12),
+ theta2eta(oratio.init, .loratio, earg= .eoratio))
+ }
+ }), list( .lmu12=lmu12, .lphi12=lphi12, .loratio=loratio,
+ .emu12=emu12, .ephi12=ephi12, .eoratio=eoratio,
+ .imu12=imu12, .iphi12=iphi12, .ioratio=ioratio ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ A1vec = eta2theta(eta[,1], .lmu12, earg= .emu12)
+ phivec = eta2theta(eta[,2], .lphi12, earg= .ephi12)
+ pmargin = matrix((1 - phivec) * A1vec, nrow(eta), 2)
+ oratio = eta2theta(eta[,3], .loratio, earg= .eoratio)
+ a.temp = 1 + (pmargin[,1]+pmargin[,2])*(oratio-1)
+ b.temp = -4 * oratio * (oratio-1) * pmargin[,1] * pmargin[,2]
+ temp = sqrt(a.temp^2 + b.temp)
+ pj4 = ifelse(abs(oratio-1) < .tol, pmargin[,1]*pmargin[,2],
+ (a.temp-temp)/(2*(oratio-1)))
+ pj2 = pmargin[,2] - pj4
+ pj3 = pmargin[,1] - pj4
+ cbind("00" = 1-pj4-pj2-pj3, "01" = pj2, "10" = pj3, "11" = pj4)
+ }, list( .tol=tol,
+ .lmu12=lmu12, .lphi12=lphi12, .loratio=loratio,
+ .emu12=emu12, .ephi12=ephi12, .eoratio=eoratio ))),
+ last=eval(substitute(expression({
+ misc$link = c("mu12"= .lmu12, "phi12" = .lphi12, "oratio"= .loratio)
+ misc$earg = list("mu12"= .emu12, "phi12"= .ephi12, "oratio"= .eoratio)
+ misc$tol = .tol
+ misc$expected = TRUE
+ misc$addRidge = .addRidge
+ }), list( .tol=tol, .addRidge = addRidge,
+ .lmu12=lmu12, .lphi12=lphi12, .loratio=loratio,
+ .emu12=emu12, .ephi12=ephi12, .eoratio=eoratio ))),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * y * log(mu)),
+ vfamily=c("zipebcom"),
+ deriv=eval(substitute(expression({
+ A1vec = eta2theta(eta[,1], .lmu12, earg= .emu12)
+ smallno = .Machine$double.eps^(2/4)
+ A1vec[A1vec > 1.0 -smallno] = 1.0 - smallno
+
+
+ phivec = eta2theta(eta[,2], .lphi12, earg= .ephi12)
+ pmargin = matrix((1 - phivec) * A1vec, n, 2)
+ oratio = eta2theta(eta[,3], .loratio, earg= .eoratio)
+
+
+
+ Vab = 1 / (1/mu[,1] + 1/mu[,2] + 1/mu[,3] + 1/mu[,4])
+ Vabc = 1/mu[,1] + 1/mu[,2]
+ denom3 = 2 * oratio * mu[,2] + mu[,1] + mu[,4]
+ temp1 = oratio * mu[,2] + mu[,4]
+ dp11star.dp1unstar = 2*(1-phivec)*Vab * Vabc
+
+ dp11star.dphi1 = -2 * A1vec * Vab * Vabc
+
+ dp11star.doratio = Vab / oratio
+
+ yandmu = (y[,1]/mu[,1] - y[,2]/mu[,2] - y[,3]/mu[,3] + y[,4]/mu[,4])
+ dp11.doratio = Vab / oratio
+ check.dl.doratio = yandmu * dp11.doratio
+
+ cyandmu = (y[,2]+y[,3])/mu[,2] - 2 * y[,1]/mu[,1]
+ dl.dmu1 = dp11star.dp1unstar * yandmu + (1-phivec) * cyandmu
+ dl.dphi1 = dp11star.dphi1 * yandmu - A1vec * cyandmu
+ dl.doratio = check.dl.doratio
+
+ dthetas.detas = cbind(dtheta.deta(A1vec, .lmu12, earg= .emu12),
+ dtheta.deta(phivec, .lphi12, earg= .ephi12),
+ dtheta.deta(oratio, .loratio, earg= .eoratio))
+ w * cbind(dl.dmu1, dl.dphi1, dl.doratio) * dthetas.detas
+ }), list( .tol=tol,
+ .lmu12=lmu12, .lphi12=lphi12, .loratio=loratio,
+ .emu12=emu12, .ephi12=ephi12, .eoratio=eoratio ))),
+ weight=eval(substitute(expression({
+ myvarfun1 = function(y, mu, a=1, b=0) (a-2*b)^2 / mu[,1] +
+ (b-a)^2 * (1/mu[,2] + 1/mu[,3]) + a^2 / mu[,4]
+
+ mycovfun1 = function(y, mu, a1=1, b1=0, a2=1, b2=0)
+ (a1-2*b1)*(a2-2*b2) / mu[,1] +
+ (b1-a1) * (b2-a2) * (1/mu[,2] + 1/mu[,3]) +
+ a1 * a2 / mu[,4]
+
+ wz = matrix(0, n, 4)
+ alternwz11 = 2*(1-phivec)^2 *(2/mu[,1] + 1/mu[,2] - 2*Vab*Vabc^2) *
+ (dthetas.detas[,1])^2
+ wz[,iam(1,1,M)] = alternwz11
+
+
+ alternwz22 = 2* A1vec^2 *(2/mu[,1] + 1/mu[,2] - 2*Vab*Vabc^2) *
+ (dthetas.detas[,2])^2
+ wz[,iam(2,2,M)] = alternwz22
+
+
+
+
+
+ alternwz12 = -2*A1vec*(1-phivec)*(2/mu[,1] + 1/mu[,2] - 2*Vab*Vabc^2) *
+ dthetas.detas[,1] * dthetas.detas[,2]
+ wz[,iam(1,2,M)] = alternwz12
+
+
+
+
+ alternwz33 = (Vab / oratio^2) * dthetas.detas[,3]^2
+ wz[,iam(3,3,M)] = alternwz33
+
+
+
+
+
+
+ wz[,1:2] = wz[,1:2] * (1 + .addRidge)
+
+ w * wz
+ }), list( .tol=tol, .addRidge = addRidge,
+ .lmu12=lmu12, .lphi12=lphi12, .loratio=loratio,
+ .emu12=emu12, .ephi12=ephi12, .eoratio=eoratio ))))
+}
+
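
A quick sanity check (illustrative only) of the new dbinom2.or() helper added
above: the four joint probabilities sum to one and reproduce the requested
marginals and odds ratio.

library(VGAM)

pmat <- dbinom2.or(mu1 = 0.3, mu2 = 0.6, oratio = 3)
pmat                              # columns "00", "01", "10", "11"
rowSums(pmat)                     # 1
pmat[, "10"] + pmat[, "11"]       # marginal P(Y1 = 1) = 0.3
pmat[, "01"] + pmat[, "11"]       # marginal P(Y2 = 1) = 0.6
pmat[, "11"] * pmat[, "00"] / (pmat[, "10"] * pmat[, "01"])   # odds ratio = 3
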
diff --git a/R/family.categorical.q b/R/family.categorical.q
index 53c048c..c8e0687 100644
--- a/R/family.categorical.q
+++ b/R/family.categorical.q
@@ -995,8 +995,8 @@ bratt = function(refgp="last",
names(misc$link) = c(paste("alpha",uindex,sep=""), "alpha0")
misc$refgp = .refgp
misc$refvalue = .refvalue
- misc$alpha = alpha # zz; last one saved (ok since n=1)
- misc$alpha0 = alpha0 # zz; last one saved (ok since n=1)
+ misc$alpha = alpha
+ misc$alpha0 = alpha0
}), list( .refgp=refgp, .refvalue=refvalue ))),
loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
if(residuals) stop("loglikelihood residuals not implemented yet") else
@@ -1494,7 +1494,7 @@ scumulative = function(link="logit", earg = list(),
J = extra$J
misc$link = c(rep( .link, length=J),
rep( .lscale, length=J))[interleave.VGAM(M, M=2)]
- names(misc$link) = predictors.names # zz mynames
+ names(misc$link) = predictors.names
misc$earg = vector("list", M)
names(misc$earg) = names(misc$link)
for(ii in 1:J) misc$earg[[2*ii-1]] = .earg
diff --git a/R/family.extremes.q b/R/family.extremes.q
index c6895d6..7fa17f9 100644
--- a/R/family.extremes.q
+++ b/R/family.extremes.q
@@ -286,7 +286,7 @@ gev <- function(llocation="identity",
EulerM = -digamma(1)
wz[iszero,iam(2,2,M)] = (pi^2/6 + (1-EulerM)^2) / sigma^2
- wz[iszero,iam(3,3,M)] = 2.4236 # Solved numerically zz
+ wz[iszero,iam(3,3,M)] = 2.4236
wz[iszero,iam(1,2,M)] = (digamma(2) + 2*(EulerM-1)) / sigma^2
wz[iszero,iam(1,3,M)]= -(trigamma(1)/2 + digamma(1)*
(digamma(1)/2+1))/sigma
@@ -575,7 +575,7 @@ egev <- function(llocation="identity",
pp/kay) / (sigma * kay^2)
if(any(iszero)) {
wz[iszero,iam(2,2,M)] = (pi^2/6 + (1-EulerM)^2) / sigma^2
- wz[iszero,iam(3,3,M)] = 2.4236 # Solved numerically zz
+ wz[iszero,iam(3,3,M)] = 2.4236
wz[iszero,iam(1,2,M)] = (digamma(2) + 2*(EulerM-1)) / sigma^2
wz[iszero,iam(1,3,M)]= -(trigamma(1)/2 + digamma(1)*
(digamma(1)/2+1))/sigma
diff --git a/R/family.functions.q b/R/family.functions.q
index 90cd86d..cc5dd61 100644
--- a/R/family.functions.q
+++ b/R/family.functions.q
@@ -157,11 +157,11 @@ matrix.power <- function(wz, M, power, fast=TRUE)
index <- as.vector( matrix(1, 1, M) %*% is.na(temp) )
- index <- index == 0 # zz <=?
+ index <- index == 0
if(!all(index)) {
warning(paste("Some weight matrices have negative",
"eigenvalues. They\nwill be assigned NAs"))
- temp[,!index] <- 1 # zz; Replace by a pos value
+ temp[,!index] <- 1
}
WW <- mux55(evects, temp, M=M)
@@ -177,8 +177,8 @@ rss.vgam <- function(z, wz, M)
if(M==1)
return(sum(c(wz) * c(z^2)))
- wzz <- mux22(t(wz), z, M, as.mat=TRUE) # else mux2(wz, z)
- ans <- sum(wzz * z)
+ wz.z <- mux22(t(wz), z, M, as.mat=TRUE) # else mux2(wz, z)
+ ans <- sum(wz.z * z)
ans
}
diff --git a/R/family.genetic.q b/R/family.genetic.q
index 3a83dbd..e4ab14f 100644
--- a/R/family.genetic.q
+++ b/R/family.genetic.q
@@ -15,7 +15,7 @@ G1G2G3 <- function(link="logit", earg = list(), ip1=NULL, ip2=NULL, iF=NULL)
if(!is.list(earg)) earg = list()
new("vglmff",
- blurb=c("G1/G2/G3 zzphenotype\n\n",
+ blurb=c("G1/G2/G3 phenotype\n\n",
"Links: ",
namesof("p1", link, earg= earg), ", ",
namesof("p2", link, earg= earg), ", ",
diff --git a/R/family.glmgam.q b/R/family.glmgam.q
index a3d8c67..c7a771c 100644
--- a/R/family.glmgam.q
+++ b/R/family.glmgam.q
@@ -1267,7 +1267,7 @@ mbino <- function()
if(any(y != 0 & y != 1))
stop("response vector must have 0 or 1 values only")
xrle = rle(mvar)
- if(length(unique(mvar)) != length(xrel$zz))
+ if(length(unique(mvar)) != length(xrel$length))
stop("extra$mvar must take on contiguous values")
temp9 = factor(mvar)
diff --git a/R/family.normal.q b/R/family.normal.q
index 65fabe6..2c50d36 100644
--- a/R/family.normal.q
+++ b/R/family.normal.q
@@ -100,7 +100,7 @@ gaussianff = function(dispersion=0, parallel=FALSE, zero=NULL)
M = if(is.matrix(y)) ncol(y) else 1
n = if(is.matrix(y)) nrow(y) else length(y)
wz = VGAM.weights.function(w=w, M=M, n=n)
- temp = rss.vgam(y, wz=wz, M=M)
+ temp = rss.vgam(y-mu, wz=wz, M=M)
-0.5 * temp
},
link=function(mu, extra=NULL) mu,
@@ -201,7 +201,7 @@ posnormal1 = function(lmean="identity", lsd="loge",
mymu + mysd * dnorm(-mymu/mysd) / (1-pnorm(-mymu/mysd))
}, list( .lmean=lmean, .lsd=lsd, .emean=emean, .esd=esd ))),
last=eval(substitute(expression({
- misc$link = c("mean"= .lmean, "sd"= .lsd) # zz mu or mean ?
+ misc$link = c("mean"= .lmean, "sd"= .lsd)
misc$earg = list("mean"= .emean, "sd"= .esd )
misc$expected = TRUE
}), list( .lmean=lmean, .lsd=lsd, .emean=emean, .esd=esd ))),
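
The gaussianff()@loglikelihood fix above swaps rss.vgam(y, ...) for
rss.vgam(y - mu, ...); for a single response (M = 1) rss.vgam() is just a
weighted sum of squares (see the family.functions.q hunk above), so the
corrected slot returns the usual Gaussian kernel.  A toy check in plain R,
illustrative only:

w  <- rep(1, 5)
y  <- c(1.2, 0.7, 2.1, 1.9, 0.4)
mu <- rep(mean(y), 5)
-0.5 * sum(w * (y - mu)^2)   # corrected kernel: depends on the fitted mu
-0.5 * sum(w * y^2)          # old, buggy value: ignored mu entirely
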
diff --git a/R/family.qreg.q b/R/family.qreg.q
index cb99344..df8db5b 100644
--- a/R/family.qreg.q
+++ b/R/family.qreg.q
@@ -940,7 +940,7 @@ lms.yjn <- function(percentiles=c(25,50,75),
as.double(gleg.abs), as.double(gleg.wts), as.integer(n),
as.integer(length(gleg.abs)), as.double(lambda),
as.double(mymu), as.double(sigma), answer=double(3*n),
- eps=as.double(1.0e-5))$ans #zz adjust eps for more accuracy
+ eps=as.double(1.0e-5))$ans
dim(temp9) = c(3,n)
wz[,iam(1,1,M)] = temp9[1,]
wz[,iam(1,2,M)] = temp9[2,]
@@ -1052,7 +1052,6 @@ alsqreg <- function(w.als=1, parallel=FALSE,
if(!is.Numeric(w.als, posit=TRUE))
stop("'w.als' must be a vector of positive values")
- print("hi 13/3/08")
if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
method.init > 3) stop("argument \"method.init\" must be 1, 2 or 3")
if(mode(lexpectile) != "character" && mode(lexpectile) != "name")
@@ -1693,8 +1692,6 @@ qregal = function(tau=c(0.25, 0.5, 0.75),
extra$n = n
extra$y.names = y.names = paste("tau=", round(extra$tau, dig=.digt),
sep="")
- print("y.names")
- print( y.names )
extra$individual = FALSE
predictors.names = c(
namesof("scale", .lscale, earg=.escale, tag=FALSE),
@@ -1724,8 +1721,6 @@ qregal = function(tau=c(0.25, 0.5, 0.75),
.ilocation=ilocation ))),
inverse=eval(substitute(function(eta, extra=NULL) {
eta = as.matrix(eta)
- print("eta[1:5,]")
- print( eta[1:5,] )
xi.ans = matrix(0, nrow(eta), ncol(eta)-1)
for(ii in 1:(ncol(eta)-1))
xi.ans[,ii] = eta2theta(eta[,ii+1], .llocation, earg= .elocation)
diff --git a/R/family.rcqo.q b/R/family.rcqo.q
index 16214dc..c3635fc 100644
--- a/R/family.rcqo.q
+++ b/R/family.rcqo.q
@@ -17,7 +17,8 @@ rcqo <- function(n, p, S,
loabundance = if(EqualMaxima) hiabundance else 10,
hiabundance = 100,
sdlv = c(1.5/2^(0:3))[1:Rank],
- sdOptima = ifelse(ESOptima, 1.5/Rank, 1) * sdlv,
+ sdOptima = ifelse(ESOptima, 1.5/Rank, 1) *
+ ifelse(scalelv, sdlv, 1),
sdTolerances = 0.25,
Kvector = 1,
Shape = 1,
@@ -26,46 +27,48 @@ rcqo <- function(n, p, S,
rhox = 0.5,
breaks = 4, # ignored unless family="ordinal"
seed = NULL,
- Crow1positive=TRUE
+ Crow1positive=TRUE,
+ xmat = NULL, # Can be input
+ scalelv = TRUE
) {
family = match.arg(family, c("poisson","negbinomial", "binomial-poisson",
"Binomial-negbinomial", "ordinal-poisson",
"Ordinal-negbinomial","gamma2"))[1]
if(!is.Numeric(n, integer=TRUE, posit=TRUE, allow=1))
- stop("bad input for argument \"n\"")
+ stop("bad input for argument 'n'")
if(!is.Numeric(p, integer=TRUE, posit=TRUE, allow=1) || p < 1 + Rank)
- stop("bad input for argument \"p\"")
+ stop("bad input for argument 'p'")
if(!is.Numeric(S, integer=TRUE, posit=TRUE, allow=1))
- stop("bad input for argument \"S\"")
+ stop("bad input for argument 'S'")
if(!is.Numeric(Rank, integer=TRUE, posit=TRUE, allow=1) || Rank > 4)
- stop("bad input for argument \"Rank\"")
+ stop("bad input for argument 'Rank'")
if(!is.Numeric(Kvector, posit=TRUE))
- stop("bad input for argument \"Kvector\"")
+ stop("bad input for argument 'Kvector'")
if(!is.Numeric(rhox) || abs(rhox) >= 1)
- stop("bad input for argument \"rhox\"")
+ stop("bad input for argument 'rhox'")
if(length(seed) && !is.Numeric(seed, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"seed\"")
+ stop("bad input for argument 'seed'")
if(!is.logical(EqualTolerances) || length(EqualTolerances)>1)
- stop("bad input for argument \"EqualTolerances)\"")
+ stop("bad input for argument 'EqualTolerances)'")
if(!is.logical(sqrt) || length(sqrt)>1)
- stop("bad input for argument \"sqrt)\"")
+ stop("bad input for argument 'sqrt)'")
if(family != "negbinomial" && sqrt)
- warning("argument \"sqrt\" is used only with family=\"negbinomial\"")
+ warning("argument 'sqrt' is used only with family='negbinomial'")
if(!EqualTolerances && !is.Numeric(sdTolerances, posit=TRUE))
- stop("bad input for argument \"sdTolerances\"")
+ stop("bad input for argument 'sdTolerances'")
if(!is.Numeric(loabundance, posit=TRUE))
- stop("bad input for argument \"loabundance\"")
+ stop("bad input for argument 'loabundance'")
if(!is.Numeric(sdlv, posit=TRUE))
- stop("bad input for argument \"sdlv\"")
+ stop("bad input for argument 'sdlv'")
if(!is.Numeric(sdOptima, posit=TRUE))
- stop("bad input for argument \"sdOptima\"")
+ stop("bad input for argument 'sdOptima'")
if(EqualMaxima && loabundance != hiabundance)
- stop(paste("arguments \"loabundance)\" and \"hiabundance)\" must",
+ stop(paste("arguments 'loabundance)' and 'hiabundance)' must",
"be equal when EqualTolerances=TRUE"))
if(any(loabundance > hiabundance))
stop("loabundance > hiabundance is not allowed")
if(!is.logical(Crow1positive)) {
- stop("bad input for argument \"Crow1positive)\"")
+ stop("bad input for argument 'Crow1positive)'")
} else {
Crow1positive = rep(Crow1positive, len=Rank)
}
@@ -75,26 +78,40 @@ rcqo <- function(n, p, S,
sdTolerances = rep(sdTolerances, len=Rank)
AA = sdOptima / 3^0.5
if(Rank > 1 && any(diff(sdlv) > 0))
- stop("argument \"sdlv)\" must be a vector with decreasing values")
+ stop("argument 'sdlv)' must be a vector with decreasing values")
- if(!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE))
- runif(1) # initialize the RNG if necessary
- if(is.null(seed)) {
- RNGstate <- get(".Random.seed", envir = .GlobalEnv)
- } else {
- R.seed <- get(".Random.seed", envir = .GlobalEnv)
- set.seed(seed)
- RNGstate <- structure(seed, kind = as.list(RNGkind()))
- on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv))
- }
+ if(FALSE)
+ change.seed.expression = expression({
+ if(!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) {
+ runif(1) # initialize the RNG if necessary
+ }
+ if(is.null(seed)) {
+ RNGstate <- get(".Random.seed", envir = .GlobalEnv)
+ } else {
+ R.seed <- get(".Random.seed", envir = .GlobalEnv)
+ set.seed(seed)
+ RNGstate <- structure(seed, kind = as.list(RNGkind()))
+ on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv))
+ }
+ })
+ change.seed.expression = expression({
+ if(length(seed)) set.seed(seed)
+ })
+ eval(change.seed.expression)
V = matrix(rhox, p-1, p-1)
diag(V) = 1
L = chol(V)
- xmat = matrix(rnorm(n*(p-1)), n, p-1) %*% L
- xmat = scale(xmat, center=TRUE)
- xnames = paste("x", 2:p, sep="")
- dimnames(xmat) = list(as.character(1:n), xnames)
+ if(length(xmat)) {
+ xnames = colnames(xmat)
+ } else {
+ eval(change.seed.expression)
+ xmat = matrix(rnorm(n*(p-1)), n, p-1) %*% L
+ xmat = scale(xmat, center=TRUE)
+ xnames = paste("x", 2:p, sep="")
+ dimnames(xmat) = list(as.character(1:n), xnames)
+ }
+ eval(change.seed.expression)
ccoefs = matrix(rnorm((p-1)*Rank), p-1, Rank)
lvmat = cbind(xmat %*% ccoefs)
if(Rank > 1) {
@@ -110,10 +127,17 @@ rcqo <- function(n, p, S,
lvmat[,r] = -lvmat[,r]
}
- for(r in 1:Rank) {
- sdlvr = sd(lvmat[,r])
- lvmat[,r] = lvmat[,r] * sdlv[r] / sdlvr
- ccoefs[,r] = ccoefs[,r] * sdlv[r] / sdlvr
+ if(scalelv) {
+ for(r in 1:Rank) {
+ sdlvr = sd(lvmat[,r])
+ lvmat[,r] = lvmat[,r] * sdlv[r] / sdlvr
+ ccoefs[,r] = ccoefs[,r] * sdlv[r] / sdlvr
+ }
+ } else {
+ sdlvr = NULL
+ for(r in 1:Rank) {
+ sdlvr = c(sdlvr, sd(lvmat[,r]))
+ }
}
if(ESOptima) {
if(!is.Numeric(S^(1/Rank), integ=TRUE) || S^(1/Rank) < 2)
@@ -140,6 +164,7 @@ rcqo <- function(n, p, S,
optima = matrix(unlist(optima), S, Rank) # Make sure it is a matrix
} else {
optima = matrix(1, S, Rank)
+ eval(change.seed.expression)
for(r in 1:Rank) {
optima[,r] = rnorm(n=S, sd=sdOptima[r])
}
@@ -152,6 +177,7 @@ rcqo <- function(n, p, S,
names(Kvector) = ynames
lvnames = if(Rank==1) "lv" else paste("lv", 1:Rank, sep="")
Tols = if(EqualTolerances) matrix(1, S, Rank) else {
+ eval(change.seed.expression)
temp = matrix(1, S, Rank)
if(S > 1)
for(r in 1:Rank) {
@@ -167,6 +193,7 @@ rcqo <- function(n, p, S,
dimnames(optima) = list(ynames, lvnames)
loeta = log(loabundance) # May be a vector
hieta = log(hiabundance)
+ eval(change.seed.expression)
logmaxima = runif(S, min=loeta, max=hieta) # loeta and hieta may be vector
names(logmaxima) = ynames
etamat = matrix(logmaxima,n,S,byrow=TRUE) # eta=log(mu) only; intercept term
@@ -182,6 +209,7 @@ rcqo <- function(n, p, S,
"poisson"=1, "binomial-poisson"=1, "ordinal-poisson"=1,
"negbinomial"=2, "Binomial-negbinomial"=2, "Ordinal-negbinomial"=2,
"gamma2"=3)
+ eval(change.seed.expression)
if(rootdist == 1) {
ymat = matrix(rpois(n*S, lam=exp(etamat)), n, S)
} else if(rootdist == 2) {
@@ -192,7 +220,7 @@ rcqo <- function(n, p, S,
Shape = matrix(Shape, n, S, byrow=TRUE)
ymat = matrix(rgamma(n*S, shape=Shape, scale=exp(etamat)/Shape),n,S)
if(Log) ymat = log(ymat)
- } else stop("'rootdist' unmatched")
+ } else stop("argument 'rootdist' unmatched")
tmp1 = NULL
if(any(family == c("ordinal-poisson","Ordinal-negbinomial"))) {
@@ -223,12 +251,13 @@ rcqo <- function(n, p, S,
attr(ans, "optima") = optima
attr(ans, "Log") = Log
attr(ans, "lv") = lvmat
+ attr(ans, "eta") = etamat
attr(ans, "EqualTolerances") = EqualTolerances
attr(ans, "EqualMaxima") = EqualMaxima || all(loabundance == hiabundance)
attr(ans, "ESOptima") = ESOptima
- attr(ans, "seed") = RNGstate
+ attr(ans, "seed") = seed # RNGstate
attr(ans, "sdTolerances") = sdTolerances
- attr(ans, "sdlv") = sdlv
+ attr(ans, "sdlv") = if(scalelv) sdlv else sdlvr
attr(ans, "sdOptima") = sdOptima
attr(ans, "Shape") = Shape
attr(ans, "sqrt") = sqrt
@@ -259,19 +288,20 @@ dcqo <- function(x, p, S,
if(mode(family) != "character" && mode(family) != "name")
family = as.character(substitute(family))
- family = match.arg(family, c("poisson", "binomial", "negbinomial", "ordinal"))[1]
+ family = match.arg(family, c("poisson", "binomial",
+ "negbinomial", "ordinal"))[1]
if(!is.Numeric(p, integer=TRUE, posit=TRUE, allow=1) || p < 2)
- stop("bad input for argument \"p\"")
+ stop("bad input for argument 'p'")
if(!is.Numeric(S, integer=TRUE, posit=TRUE, allow=1))
- stop("bad input for argument \"S\"")
+ stop("bad input for argument 'S'")
if(!is.Numeric(Rank, integer=TRUE, posit=TRUE, allow=1))
- stop("bad input for argument \"Rank\"")
+ stop("bad input for argument 'Rank'")
if(length(seed) && !is.Numeric(seed, integer=TRUE, posit=TRUE))
- stop("bad input for argument \"seed\"")
+ stop("bad input for argument 'seed'")
if(!is.logical(EqualTolerances) || length(EqualTolerances)>1)
- stop("bad input for argument \"EqualTolerances)\"")
+ stop("bad input for argument 'EqualTolerances)'")
if(EqualMaxima && loabundance != hiabundance)
- stop(paste("\"loabundance)\" and \"hiabundance)\" must",
+ stop(paste("'loabundance)' and 'hiabundance)' must",
"be equal when EqualTolerances=TRUE"))
if(length(seed)) set.seed(seed)
@@ -286,13 +316,14 @@ dcqo <- function(x, p, S,
hieta = log(hiabundance)
logmaxima = runif(S, min=loeta, max=hieta)
- etamat = matrix(logmaxima, n, S, byrow=TRUE) # eta=log(mu) only; intercept term
+ etamat = matrix(logmaxima,n,S,byrow=TRUE) # eta=log(mu) only; intercept term
for(jay in 1:S) {
optmat = matrix(optima[jay,], n, Rank, byrow=TRUE)
tolmat = matrix(Tols[jay,], n, Rank, byrow=TRUE)
temp = cbind((lvmat - optmat) * tolmat)
for(r in 1:Rank)
- etamat[,jay] = etamat[,jay] - 0.5 * temp[,r] * (lvmat[,r] - optmat[jay,r])
+ etamat[,jay] = etamat[,jay] - 0.5 * temp[,r] *
+ (lvmat[,r] - optmat[jay,r])
}
ymat = if(family == "negbinomial") {
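
An illustrative sketch (invented values) of the two rcqo() arguments added
above: supplying a pre-built environmental matrix through 'xmat', and
'scalelv = FALSE' to leave the latent variables unscaled, in which case the
"sdlv" attribute records their observed standard deviations.

library(VGAM)

set.seed(123)
n <- 100; p <- 4; S <- 6
myx <- matrix(rnorm(n * (p - 1)), n, p - 1,
              dimnames = list(NULL, paste("x", 2:p, sep = "")))
ydata <- rcqo(n, p, S, family = "poisson", seed = 123,
              xmat = myx, scalelv = FALSE)
attr(ydata, "sdlv")       # now the observed sd of each latent variable
head(attr(ydata, "lv"))
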
diff --git a/R/family.rrr.q b/R/family.rrr.q
index 8732474..ce3e7f4 100644
--- a/R/family.rrr.q
+++ b/R/family.rrr.q
@@ -82,7 +82,7 @@ valt <- function(x, z, U, Rank=1,
Criterion <- as.character(substitute(Criterion))
Criterion <- match.arg(Criterion, c("rss", "coefficients"))[1]
- if(any(diff(Alphavec)) <= 0)
+ if(any(diff(Alphavec) <= 0))
stop("Alphavec must be an increasing sequence")
if(!is.matrix(z))
@@ -273,7 +273,7 @@ lm2qrrvlm.model.matrix = function(x, Blist, C, control, assign=TRUE,
if(assign) {
asx = attr(x, "assign")
asx = vector("list", ncol(new.lv.model.matrix))
- names(asx) = names(cmat2) # wrong zz
+ names(asx) = names(cmat2)
for(i in 1:length(names(asx))) {
asx[[i]] = i
}
@@ -583,9 +583,9 @@ rrr.derivative.expression <- expression({
constraints=replace.constraints(constraints,diag(M),rrcontrol$colx2.index)
nice31 = (!control$EqualTol || control$ITolerances) &&
- all(trivial.constraints(constraints))
+ all(trivial.constraints(constraints) == 1)
- theta0 <- c(Cmat) # zz; Possibly bad because of normalization?
+ theta0 <- c(Cmat)
if(is.R()) assign(".VGAM.dot.counter", 0, envir = VGAMenv) else
.VGAM.dot.counter <<- 0
if(control$OptimizeWrtC) {
@@ -1184,6 +1184,7 @@ printCoef.qrrvglm = function(x, ...) {
setMethod("summary", "qrrvglm", function(object, ...)
summary.qrrvglm(object, ...))
+
predict.qrrvglm <- function(object,
newdata=NULL,
type=c("link", "response", "lv", "terms"),
@@ -1206,65 +1207,117 @@ predict.qrrvglm <- function(object,
if(type=="terms")
stop("can't handle type='terms' yet")
- na.act = object@na.action
- object@na.action = list()
M = object@misc$M
Rank = object@control$Rank
+ na.act = object@na.action
+ object@na.action = list()
+
+ if(!length(newdata) && type=="response" && length(object@fitted.values)) {
+ if(length(na.act)) {
+ return(napredict(na.act[[1]], object@fitted.values))
+ } else {
+ return(object@fitted.values)
+ }
+ }
+
+ attrassignlm <- function(object, ...)
+ attrassigndefault(model.matrix(object), object@terms)
+
+ attrassigndefault <- function(mmat, tt) {
+ if (!inherits(tt, "terms"))
+ stop("need terms object")
+ aa <- attr(mmat, "assign")
+ if (is.null(aa))
+ stop("argument is not really a model matrix")
+ ll <- attr(tt, "term.labels")
+ if (attr(tt, "intercept") > 0)
+ ll <- c("(Intercept)", ll)
+ aaa <- factor(aa, labels = ll)
+ split(order(aa), aaa)
+ }
+
+ if(!length(newdata)) {
+ X <- model.matrixvlm(object, type="lm", ...)
+ offset <- object@offset
+ tt <- object@terms$terms # terms(object)
+ if(is.R() && !length(object@x))
+ attr(X, "assign") <- attrassignlm(X, tt)
+ } else {
+ if(is.smart(object) && length(object@smart.prediction)) {
+ setup.smart("read", smart.prediction=object@smart.prediction)
+ }
+
+ tt <- object@terms$terms # terms(object) # 11/8/03; object@terms$terms
+ X <- model.matrix(delete.response(tt), newdata, contrasts =
+ if(length(object@contrasts)) object@contrasts else NULL,
+ xlev = object@xlevels)
+
+ if(is.R() && nrow(X)!=nrow(newdata)) {
+ as.save = attr(X, "assign")
+ X = X[rep(1, nrow(newdata)),,drop=FALSE]
+ dimnames(X) = list(dimnames(newdata)[[1]], "(Intercept)")
+ attr(X, "assign") = as.save # Restored
+ }
+
+ offset <- if (!is.null(off.num<-attr(tt,"offset"))) {
+ eval(attr(tt,"variables")[[off.num+1]], newdata)
+ } else if (!is.null(object@offset))
+ eval(object@call$offset, newdata)
+
+ if(any(c(offset) != 0)) stop("currently cannot handle nonzero offsets")
+
+ if(is.smart(object) && length(object@smart.prediction)) {
+ wrapup.smart()
+ }
+
+ if(is.R())
+ attr(X, "assign") <- attrassigndefault(X, tt)
+ }
+
+ ocontrol = object@control
+
+ Rank = ocontrol$Rank
+ NOS = ncol(object@y)
+ sppnames = dimnames(object@y)[[2]]
+ modelno = ocontrol$modelno # 1,2,3,5 or 0
+ M = if(any(slotNames(object) == "predictors") &&
+ is.matrix(object@predictors)) ncol(object@predictors) else
+ object@misc$M
+ MSratio = M / NOS # First value is g(mean) = quadratic form in lv
+ if(MSratio != 1) stop("can only handle MSratio==1 for now")
+
+
if(length(newdata)) {
- p1 = length(object@control$colx1.index)
Coefs = Coef(object, varlvI = varlvI, reference = reference)
- temptype = ifelse(type=="link", "response", type[1])
-
- conmat = replace.constraints(vector("list", object@misc$p),
- diag(M), object@control$colx1.index)
- conmat = replace.constraints(conmat, Coefs@A,
- object@control$colx2.index)
- names(conmat) = object@misc$colnames.x
- object@constraints = conmat # To get RR-VGLM type eta
-
- newcoefs = lm2vlm.model.matrix(object@x, conmat, xij=object@control$xij)
- newcoefs = dimnames(newcoefs)[[2]]
- newcoefs = coef(object)[newcoefs] # Fixes up order of coefficients
- c.index = is.na(newcoefs) # Indices corresponding to C matrix
- newcoefs[c.index] = t(Coefs@C) # Fill in the rest (C) of the outerprod
-
- object@coefficients = newcoefs
-
- pvlm = predict.vlm(object, newdata=newdata, type=temptype,
- se.fit=se.fit, deriv=deriv,
- dispersion=dispersion, extra=extra, ...)
-
- newcoefs[c.index] = 0 # Trick; set C==0 here to give B1 terms only
- object@coefficients = newcoefs
- pvlm1 = if(!length(object at control$colx1.index)) 0 else
- predict.vlm(object, newdata=newdata, type=temptype,
- se.fit=se.fit, deriv=deriv,
- dispersion=dispersion, extra=extra, ...)
-
- lvmat = if(object@control$Corner)
- (pvlm - pvlm1)[,object@control$Index.corner,drop=FALSE] else
- stop("corner constraints needed")
-
- for(j in 1:M)
- pvlm[,j] = pvlm[,j] + (if(Rank==1) (lvmat^2 * Coefs@D[,,j]) else
- (((lvmat %*% Coefs@D[,,j]) * lvmat) %*% rep(1, Rank)))
+ X1mat = X[,ocontrol$colx1.index,drop=FALSE]
+ X2mat = X[,ocontrol$colx2.index,drop=FALSE]
+ lvmat = as.matrix(X2mat %*% Coefs@C) # n x Rank
+
+ etamat = as.matrix(X1mat %*% Coefs@B1 + lvmat %*% t(Coefs@A))
+ whichSpecies = 1:NOS # Do it all for all species
+ for(sppno in 1:length(whichSpecies)) {
+ thisSpecies = whichSpecies[sppno]
+ Dmat = matrix(Coefs@D[,,thisSpecies], Rank, Rank)
+ etamat[,thisSpecies] = etamat[,thisSpecies] +
+ mux34(lvmat, Dmat, symm=TRUE)
+ }
} else {
- pvlm = predict.vglm(object, type=type, se.fit=se.fit,
- deriv=deriv, dispersion=dispersion,
- extra=extra, ...)
+ etamat = object@predictors
}
pred = switch(type,
- response={ fv = if(length(newdata)) object@family@inverse(pvlm, extra) else
- pvlm
+ response={
+ fv = if(length(newdata)) object@family@inverse(etamat, extra) else
+ fitted(object)
if(M > 1 && is.matrix(fv)) {
dimnames(fv) <- list(dimnames(fv)[[1]],
dimnames(object@fitted.values)[[2]])
}
fv
},
- link = pvlm,
+ link = etamat,
+ lv=stop("failure here"),
terms=stop("failure here"))
if(!length(newdata) && length(na.act)) {
@@ -1624,7 +1677,7 @@ get.rrvglm.se1 <- function(fit, omit13= FALSE, kill.all= FALSE,
cov11 = cov1122[log.vec11, log.vec11, drop=FALSE]
cov12 = cov1122[ log.vec11, -log.vec11, drop=FALSE]
cov22 = cov1122[-log.vec11, -log.vec11, drop=FALSE]
- cov13 = delct.da %*% cov33 # zz; this always seems to be negative
+ cov13 = delct.da %*% cov33
if(omit13)
@@ -2299,7 +2352,7 @@ lvplot.rrvglm = function(object,
nuhat = x2mat %*% Cmat
if(!plot.it) return(as.matrix(nuhat))
- index.nosz = 1:M # index of no structural zeros; zz
+ index.nosz = 1:M
allmat = rbind(if(A) Amat else NULL,
if(C) Cmat else NULL,
if(scores) nuhat else NULL)
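
A hedged end-to-end sketch of the rewritten predict.qrrvglm() above, which now
builds predictions for newdata directly from the Coef() slots (B1, A, C and D)
instead of refitting a VLM.  The data are simulated with rcqo(); CQO fits are
iterative, so the call may need tuning (e.g. the Bestof control argument) in
practice.

library(VGAM)

set.seed(1)
mydata <- rcqo(n = 100, p = 4, S = 5, family = "poisson", seed = 1)
fit <- cqo(cbind(y1, y2, y3, y4, y5) ~ x2 + x3 + x4,
           family = poissonff, data = mydata, trace = FALSE)
newdf <- mydata[1:5, ]
predict(fit, newdata = newdf, type = "response")   # dispatches to predict.qrrvglm()
predict(fit, newdata = newdf, type = "link")
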
diff --git a/R/family.univariate.q b/R/family.univariate.q
index 215a410..9c58dfe 100644
--- a/R/family.univariate.q
+++ b/R/family.univariate.q
@@ -903,10 +903,9 @@ zetaff = function(link="loge", earg=list(), init.p=NULL)
gharmonic = function(n, s=1, lognexponent=0) {
+
if(!is.Numeric(n, integ=TRUE, posit=TRUE))
stop("bad input for argument \"n\"")
- if(!is.Numeric(s, posit=TRUE))
- stop("bad input for argument \"s\"")
if(!is.Numeric(lognexponent, allow=1))
stop("bad input for argument \"lognexponent\"")
if(length(n) == 1 && length(s) == 1) {
@@ -1018,7 +1017,7 @@ zipf = function(N=NULL, link="loge", earg=list(), init.s=NULL)
}), list( .link=link, .earg=earg, .init.s=init.s, .N=N ))),
inverse=eval(substitute(function(eta, extra=NULL) {
ss = eta2theta(eta, .link, earg= .earg)
- gharmonic(extra$N, s=ss-1) / gharmonic(extra$N, s=ss)
+ gharmonic(extra$N, s=ss - 1) / gharmonic(extra$N, s=ss)
}, list( .link=link, .earg=earg ))),
last=eval(substitute(expression({
misc$expected = FALSE
@@ -2872,7 +2871,7 @@ normal1 = function(lmean="identity", lsd="loge",
weight=expression({
wz = matrix(as.numeric(NA), n, 2) # diagonal matrix; y is one-column too
ed2l.dmu2 = -1 / sd^2
- ed2l.dsd2 = -2 / sd^2 # zz; replace 2 by 0.5 ??
+ ed2l.dsd2 = -2 / sd^2
wz[,iam(1,1,M)] = -w * ed2l.dmu2 * dmu.deta^2
wz[,iam(2,2,M)] = -w * ed2l.dsd2 * dsd.deta^2
wz
@@ -5355,9 +5354,9 @@ stoppa = function(y0,
}), list( .link.theta=link.theta, .link.alpha=link.alpha ))),
weight=eval(substitute(expression({
ed2l.dalpha = 1/alpha^2 + theta * (2 * log(extra$y0) * (digamma(2)-
- digamma(theta+4)) -
- (trigamma(1)+trigamma(theta+3)) / alpha^3) /
- (alpha * (theta+1) * (theta+2) / n) # zz / sum(w)
+ digamma(theta+4)) - (trigamma(1) +
+ trigamma(theta+3)) / alpha^3) / (alpha *
+ (theta+1) * (theta+2) / n)
ed2l.dtheta = 1 / theta^2
ed2l.dalphatheta = (digamma(2)-digamma(theta+2)) / (alpha*(theta+1))
wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
@@ -7524,7 +7523,7 @@ paretoIV = function(location=0,
"Links: ", namesof("scale", lscale, earg=escale ), ", ",
namesof("inequality", linequality, earg=einequality ), ", ",
namesof("shape", lshape, earg=eshape ), "\n",
- "Mean: location + scale * NA"), # zz
+ "Mean: location + scale * NA"),
initialize=eval(substitute(expression({
if(ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
@@ -7653,7 +7652,7 @@ paretoIII = function(location=0,
location, ", scale > 0, inequality > 0, \n",
"Links: ", namesof("scale", lscale, earg=escale ), ", ",
namesof("inequality", linequality, earg=einequality ), "\n",
- "Mean: location + scale * NA"), # zz
+ "Mean: location + scale * NA"),
initialize=eval(substitute(expression({
if(ncol(cbind(y)) != 1)
stop("the response must be a vector or a one-column matrix")
@@ -7760,7 +7759,7 @@ paretoII = function(location=0,
location, ", scale > 0, shape > 0,\n",
"Links: ", namesof("scale", lscale, earg=escale ), ", ",
namesof("shape", lshape, earg=eshape ), "\n",
- "Mean: location + scale * NA"), # zz
+ "Mean: location + scale * NA"),
initialize=eval(substitute(expression({
if(ncol(cbind(y)) != 1)
stop("the response must be a vector or a one-column matrix")
@@ -7775,7 +7774,7 @@ paretoII = function(location=0,
shape.init = if(length( .ishape)) .ishape else NULL
if(!length(shape.init) || !length(scale.init)) {
probs = (1:4)/5
- scale.init.0 = 1 # zz; have to put some value here...
+ scale.init.0 = 1
ytemp = quantile(x=log(y-location+scale.init.0), probs=probs)
fittemp = lsfit(x=log1p(-probs), y=ytemp, int=TRUE)
if(!length(shape.init))
@@ -8780,6 +8779,7 @@ alaplace1 = function(tau = NULL,
dfmu.init = 3,
method.init=1, zero=NULL) {
+
if(!is.Numeric(kappa, posit=TRUE))
stop("bad input for argument \"kappa\"")
if(length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
@@ -8846,6 +8846,7 @@ alaplace1 = function(tau = NULL,
location.init = if(length(.ilocation)) rep(.ilocation, len=n) else
rep(location.init, len=n)
location.init = matrix(location.init, n, M)
+ if( .llocation == "loge") location.init = abs(location.init)
etastart =
cbind(theta2eta(location.init, .llocation, earg= .elocation))
}
@@ -9663,7 +9664,7 @@ qtriangle = function(p, theta, lower=0, upper=1) {
pstar = (p - (theta-lower)/(upper-lower)) / (1 -
(theta-lower)/(upper-lower))
qstar = cbind(1 - sqrt(1-pstar), 1 + sqrt(1-pstar))
- qstar = qstar[Pos,]
+ qstar = qstar[Pos,,drop=FALSE]
qstar = ifelse(qstar[,1] >= 0 & qstar[,1] <= 1, qstar[,1], qstar[,2])
ans[Pos] = theta[Pos] + qstar * (upper-theta)[Pos]
}
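
The qtriangle() change above (adding drop=FALSE) guards against the single-row
indexing pitfall, which appears to be what made qtriangle(0.3, theta=0.3) fail
per the NEWS.  A plain-R illustration:

qstar <- cbind(c(0.2, 0.8), c(0.9, 0.1))
Pos <- c(TRUE, FALSE)                    # only one row selected
is.matrix(qstar[Pos, ])                  # FALSE: dropped to a plain vector
is.matrix(qstar[Pos, , drop = FALSE])    # TRUE: stays a 1 x 2 matrix, so
                                         # qstar[,1] and qstar[,2] still work
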
diff --git a/R/family.zeroinf.q b/R/family.zeroinf.q
index 2728ac4..a5cc327 100644
--- a/R/family.zeroinf.q
+++ b/R/family.zeroinf.q
@@ -108,7 +108,7 @@ yip88 = function(link.lambda="loge", n.arg=NULL)
suma = extra$sumw
phi = (1 - temp5[1] - suma/narg) / (1 - temp5[1])
phi = if(phi < 0 || phi>1) NA else phi # phi is a probability
- misc$phi = phi # zz call it $p0 = phi ??
+ misc$phi = phi
}
}), list( .link.lambda=link.lambda ))),
loglikelihood=eval(substitute(
@@ -807,9 +807,9 @@ rzibinom = function(n, size, prob, phi=0) {
-dzinb = function(x, phi, size, prob, munb, log=FALSE) {
- if (!missing(munb)) {
- if (!missing(prob))
+dzinb = function(x, phi, size, prob=NULL, munb=NULL, log=FALSE) {
+ if (length(munb)) {
+ if (length(prob))
stop("'prob' and 'munb' both specified")
prob <- size/(size + munb)
}
@@ -825,9 +825,9 @@ dzinb = function(x, phi, size, prob, munb, log=FALSE) {
ifelse(x==0, phi + (1-phi) * ans, (1-phi) * ans)
}
-pzinb = function(q, phi, size, prob, munb) {
- if (!missing(munb)) {
- if (!missing(prob))
+pzinb = function(q, phi, size, prob=NULL, munb=NULL) {
+ if (length(munb)) {
+ if (length(prob))
stop("'prob' and 'munb' both specified")
prob <- size/(size + munb)
}
@@ -837,9 +837,9 @@ pzinb = function(q, phi, size, prob, munb) {
phi + (1-phi) * ans
}
-qzinb = function(p, phi, size, prob, munb) {
- if (!missing(munb)) {
- if (!missing(prob))
+qzinb = function(p, phi, size, prob=NULL, munb=NULL) {
+ if (length(munb)) {
+ if (length(prob))
stop("'prob' and 'munb' both specified")
prob <- size/(size + munb)
}
@@ -857,9 +857,9 @@ qzinb = function(p, phi, size, prob, munb) {
ans
}
-rzinb = function(n, phi, size, prob, munb) {
- if (!missing(munb)) {
- if (!missing(prob))
+rzinb = function(n, phi, size, prob=NULL, munb=NULL) {
+ if (length(munb)) {
+ if (length(prob))
stop("'prob' and 'munb' both specified")
prob <- size/(size + munb)
}
diff --git a/R/model.matrix.vglm.q b/R/model.matrix.vglm.q
index 41db2b5..d9a35c0 100644
--- a/R/model.matrix.vglm.q
+++ b/R/model.matrix.vglm.q
@@ -67,7 +67,7 @@ lm2vlm.model.matrix <- function(x, Blist=NULL, assign.attributes=TRUE,
}
}
n <- nrow(x)
- if(all(trivial.constraints(Blist)) && !length(Aarray)) {
+ if(all(trivial.constraints(Blist) == 1) && !length(Aarray)) {
xbig <- if(M > 1) kronecker(x, diag(M)) else x
ncolBlist <- rep(M, ncol(x))
} else {
diff --git a/R/mux.q b/R/mux.q
index 862e0f8..3bb2fdc 100644
--- a/R/mux.q
+++ b/R/mux.q
@@ -3,6 +3,24 @@
+
+mux34 <- function(xmat, cc, symmetric=FALSE)
+{
+
+ if(!is.matrix(xmat))
+ xmat <- as.matrix(xmat)
+ d <- dim(xmat)
+ n <- d[1]
+ R <- d[2]
+ if(length(cc) == 1) cc = matrix(cc, 1, 1)
+ if(!is.matrix(cc)) stop("'cc' is not a matrix")
+ c(dotFortran(name="vgamf90mux34", as.double(xmat), as.double(cc),
+ as.integer(n), as.integer(R),
+ as.integer(symmetric), ans=as.double(rep(0.0, n)),
+ NAOK=TRUE)$ans)
+}
+
+
mux2 <- function(cc, xmat)
{
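
Judging from the new Fortran subroutine vgamf90mux34 further down in this
diff (src/testf90.f90), mux34(xmat, cc, symmetric) appears to return the
quadratic form t(xmat[i,]) %*% cc %*% xmat[i,] for each row i of xmat.
A minimal pure-R cross-check, assuming that reading (not part of the package):

set.seed(1)
xmat <- matrix(rnorm(12), nrow=4, ncol=3)
cc   <- crossprod(matrix(rnorm(9), 3, 3))   # a symmetric 3 x 3 matrix
pureR <- rowSums((xmat %*% cc) * xmat)      # quadratic forms, one per row
pureR
# If VGAM 0.7-7 is installed, VGAM:::mux34(xmat, cc, symmetric=TRUE) is
# expected to agree with pureR (up to floating point), assuming the
# interpretation above is correct.
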
diff --git a/R/plot.vglm.q b/R/plot.vglm.q
index a6fdd16..bd83a9b 100644
--- a/R/plot.vglm.q
+++ b/R/plot.vglm.q
@@ -356,7 +356,7 @@ plotvlm <- function(object, residuals=NULL, rugplot= FALSE, ...)
plotvglm <- function(x, residuals=NULL, smooths= FALSE,
rugplot= FALSE, id.n= FALSE, ...)
{
- stop("this function hasn't been written yet") # zz
+ stop("this function hasn't been written yet")
diff --git a/R/predict.vgam.q b/R/predict.vgam.q
index 079a2f5..3471d60 100644
--- a/R/predict.vgam.q
+++ b/R/predict.vgam.q
@@ -68,7 +68,7 @@ predict.vgam <- function(object, newdata=NULL,
if(!length(newdata)) {
if(type=="link") {
if(se.fit) {
- stop("cannot handle this option (se.fit=TRUE) currently") # zz
+ stop("cannot handle this option (se.fit=TRUE) currently")
} else {
if(length(na.act)) {
answer = napredict(na.act[[1]], object@predictors)
@@ -81,7 +81,7 @@ predict.vgam <- function(object, newdata=NULL,
} else
if(type=="response") {
if(se.fit) {
- stop("cannot handle this option (se.fit=TRUE) currently") # zz
+ stop("cannot handle this option (se.fit=TRUE) currently")
} else {
if(length(na.act)) {
return(napredict(na.act[[1]], object@fitted.values))
diff --git a/R/residuals.vlm.q b/R/residuals.vlm.q
index 021c6e7..84336c3 100644
--- a/R/residuals.vlm.q
+++ b/R/residuals.vlm.q
@@ -111,7 +111,7 @@ residualsvglm <- function(object,
deviance = {
n <- object@misc$n
- y <- as.matrix(object@y) # zz as.matrix
+ y <- as.matrix(object@y)
mu <- object@fitted.values
@@ -138,7 +138,7 @@ residualsvglm <- function(object,
},
ldot = {
n <- object@misc$n
- y <- as.matrix(object@y) # zz as.matrix
+ y <- as.matrix(object@y)
mu <- object@fitted
w <- object@prior.weights
if(is.null(w))
@@ -157,7 +157,7 @@ residualsvglm <- function(object,
response = {
y <- object@y
- mu <- fitted(object) # zz object@fitted
+ mu <- fitted(object)
true.mu <- object@misc$true.mu
if(is.null(true.mu))
diff --git a/R/rrvglm.fit.q b/R/rrvglm.fit.q
index 6f138de..f064fb2 100644
--- a/R/rrvglm.fit.q
+++ b/R/rrvglm.fit.q
@@ -163,7 +163,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
wz = checkwz(wz, M=M, trace=trace, wzeps=control$wzepsilon)
- wz = matrix(wz, nrow=n) # zz 3/10/05
+ wz = matrix(wz, nrow=n)
U <- vchol(wz, M=M, n=n, silent=!trace)
tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=n)
z = eta + vbacksub(U, tvfor, M, n) - offset # Contains \bI \bnu
@@ -252,7 +252,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
" that will be overwritten by reduced-rank regression", sep=""))
}
- if(all(findex))
+ if(all(findex == 1))
stop("use vglm(), not rrvglm()!")
colx1.index = names.colx1.index = NULL
dx2 = dimnames(x)[[2]]
@@ -306,7 +306,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
Blist <- process.constraints(constraints, x, M, specialCM=specialCM)
nice31 = control$Quadratic && (!control$EqualTol || control$ITolerances) &&
- all(trivial.constraints(Blist))
+ all(trivial.constraints(Blist) == 1)
Blist = Blist.save = replace.constraints(Blist, Amat, colx2.index)
@@ -466,8 +466,8 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
asgn <- attr(xbig.save, "assign")
if(nice31) {
- coefs <- rep(0, len=length(xn.big)) # zz
- rank <- p.big # zz 3/10/05
+ coefs <- rep(0, len=length(xn.big))
+ rank <- p.big
} else {
coefs <- tfit$coefficients
names(coefs) <- xn.big
@@ -480,7 +480,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
stop("rrvglm only handles full-rank models (currently)")
if(nice31) {
- R <- matrix(as.numeric(NA), 5, 5) # zz 3/10/05
+ R <- matrix(as.numeric(NA), 5, 5)
} else {
R <- if(is.R()) tfit$qr$qr[1:p.big, 1:p.big, drop=FALSE] else {
if(backchat) tfit$qr[1:p.big, 1:p.big, drop=FALSE] else
@@ -494,7 +494,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
}
if(nice31) {
- effects <- rep(0, len=77) # zz 3/10/05
+ effects <- rep(0, len=77)
} else {
effects <- tfit$effects
neff <- rep("", n.big)
@@ -508,7 +508,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
xn <- dn[[2]]
if(nice31) {
- residuals <- z - fv # zz - offset ?? # not sure 3/10/05
+ residuals <- z - fv
if(M==1) {
residuals <- as.vector(residuals)
names(residuals) <- yn
@@ -516,7 +516,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
dimnames(residuals) <- list(yn, predictors.names)
}
} else {
- residuals <- z - tfit$predictors # zz - offset ??
+ residuals <- z - tfit$predictors
if(M==1) {
tfit$predictors <- as.vector(tfit$predictors)
residuals <- as.vector(residuals)
diff --git a/R/summary.vglm.q b/R/summary.vglm.q
index 984d0f4..d2798ac 100644
--- a/R/summary.vglm.q
+++ b/R/summary.vglm.q
@@ -207,7 +207,7 @@ vcovvlm <- function(object, dispersion=NULL, untransform=FALSE) {
if(!object@misc$intercept.only)
stop("object must be an intercept-only fit, i.e., y ~ 1 is the response")
- if(!all(trivial.constraints(constraints(object))))
+ if(!all(trivial.constraints(constraints(object)) == 1))
stop("object must have trivial constraints")
M = object@misc$M
diff --git a/R/vgam.R b/R/vgam.R
index 3ca3583..434efe8 100644
--- a/R/vgam.R
+++ b/R/vgam.R
@@ -118,7 +118,7 @@ vgam <- function(formula,
- nonparametric <- length(smoothers$s) > 0 # zz; different
+ nonparametric <- length(smoothers$s) > 0
if(nonparametric) {
ff <- apply(aa$factors[smoothers[["s"]],,drop=FALSE], 2, any)
diff --git a/R/vgam.fit.q b/R/vgam.fit.q
index 1e202e8..562dbbe 100644
--- a/R/vgam.fit.q
+++ b/R/vgam.fit.q
@@ -32,8 +32,7 @@ vgam.fit <- function(x, y, w, mf,
# --------------------------------------------------------------
new.s.call <- expression({
- if(c.list$one.more)
- {
+ if(c.list$one.more) {
fv <- if(backchat && M>1 && nonparametric)
matrix(c.list$fit,n,M,byrow=TRUE) else c.list$fit
new.coeffs <- c.list$coeff
@@ -49,10 +48,9 @@ vgam.fit <- function(x, y, w, mf,
old.crit <- new.crit
- new.crit <-
- switch(criterion,
- coefficients=new.coeffs,
- tfun(mu=mu, y=y, w=w, res=FALSE, eta=eta, extra))
+ new.crit <- switch(criterion,
+ coefficients=new.coeffs,
+ tfun(mu=mu, y=y, w=w, res=FALSE, eta=eta, extra))
if(trace) {
cat("VGAM ", bf, " loop ", iter, ": ", criterion, "= ")
@@ -74,8 +72,7 @@ vgam.fit <- function(x, y, w, mf,
flush.console()
if(!is.finite(one.more) || !is.logical(one.more)) one.more = FALSE
- if(one.more)
- {
+ if(one.more) {
iter <- iter + 1
deriv.mu <- eval(family@deriv)
wz <- eval(family@weight)
@@ -137,7 +134,7 @@ vgam.fit <- function(x, y, w, mf,
- smooth.frame <- mf # zz
+ smooth.frame <- mf
assignx <- attr(x, "assign")
which <- assignx[smooth.labels]
@@ -249,7 +246,7 @@ vgam.fit <- function(x, y, w, mf,
rank <- tfit$rank
if(rank < ncol(x))
stop("rank < ncol(x) is bad")
- } else rank <- ncol(x) # zz 8/12/01 I think rank is all wrong
+ } else rank <- ncol(x)
R <- if(is.R()) tfit$qr$qr[1:p.big, 1:p.big, drop=FALSE] else {
if(backchat) tfit$qr[1:p.big, 1:p.big, drop=FALSE] else
@@ -347,7 +344,7 @@ vgam.fit <- function(x, y, w, mf,
if(se.fit && length(fit$s.xargument)) {
misc$varassign <-
- varassign(Blist, names(fit$s.xargument)) # zz or constraints?
+ varassign(Blist, names(fit$s.xargument))
}
diff --git a/R/vglm.fit.q b/R/vglm.fit.q
index 0a84a2f..daaf923 100644
--- a/R/vglm.fit.q
+++ b/R/vglm.fit.q
@@ -31,8 +31,7 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
n <- dim(x)[1]
new.s.call <- expression({
- if(c.list$one.more)
- {
+ if(c.list$one.more) {
fv <- if(backchat) {
if(M>1) matrix(c.list$fit,n,M,byrow=TRUE) else c.list$fit
} else c.list$fit
@@ -76,8 +75,7 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
(criterion!="coefficients" &&
(if(minimize.criterion) new.crit > old.crit else
new.crit < old.crit)))
- if(take.half.step)
- {
+ if(take.half.step) {
stepsize <- 2 * min(orig.stepsize, 2*stepsize)
new.coeffs.save <- new.coeffs
if(trace)
@@ -117,29 +115,27 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
( minimize.criterion && new.crit < old.crit) ||
(!minimize.criterion && new.crit > old.crit))
break
- }
+ } # of repeat
if(trace)
cat("\n")
- if(too.small)
- {
+ if(too.small) {
warning(paste("iterations terminated because",
"half-step sizes are very small"))
one.more <- FALSE
- } else
- {
+ } else {
if(trace) {
- cat("VGLM linear loop ", iter, ": ", criterion, "= ")
+ cat("VGLM linear loop ",
+ iter, ": ", criterion, "= ")
- uuuu =
- switch(criterion,
+ uuuu = switch(criterion,
coefficients=if(is.R())
format(new.crit, dig=round(2-log10(epsilon))) else
format(round(new.crit, round(2-log10(epsilon)))),
format(round(new.crit, 4)))
switch(criterion,
- coefficients={if(length(new.crit) > 2) cat("\n");
+ coefficients={if(length(new.crit) > 2) cat("\n");
cat(uuuu, fill=TRUE, sep=", ")},
cat(uuuu, fill=TRUE, sep=", "))
}
@@ -154,8 +150,7 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
flush.console()
if(!is.logical(one.more)) one.more = FALSE
- if(one.more)
- {
+ if(one.more) {
iter <- iter + 1
deriv.mu <- eval(slot(family, "deriv"))
wz <- eval(slot(family, "weight"))
@@ -300,7 +295,7 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
c.list$coeff <- tfit$coefficients
- tfit$predictors <- tfit$fitted.values # zz + offset??
+ tfit$predictors <- tfit$fitted.values
c.list$fit <- tfit$fitted.values
c.list <- eval(new.s.call)
@@ -356,7 +351,7 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
xn <- dn[[2]]
- residuals <- z - tfit$predictors # zz - offset ??
+ residuals <- z - tfit$predictors
if(M==1) {
tfit$predictors <- as.vector(tfit$predictors)
residuals <- as.vector(residuals)
diff --git a/R/vlm.wfit.q b/R/vlm.wfit.q
index 5002927..beb2726 100644
--- a/R/vlm.wfit.q
+++ b/R/vlm.wfit.q
@@ -54,7 +54,7 @@ vlm.wfit <- function(x, z, Blist, wz=NULL, U=NULL,
}
ans <- if(!is.R()) lm.fit.qr(x=xbig, y=z.big, qr=qr, ...) else
- lm.fit(xbig, z.big, ...) # zz; also qr=qr,
+ lm.fit(xbig, z.big, ...)
if(rss) {
ans$rss <- sum(ans$resid^2)
diff --git a/man/acat.Rd b/man/acat.Rd
index e256289..32a2edf 100644
--- a/man/acat.Rd
+++ b/man/acat.Rd
@@ -38,7 +38,7 @@ acat(link = "loge", earg = list(),
By default, the linear/additive predictors used are
\eqn{\eta_j = \log(P[Y=j+1]/P[Y=j])}{eta_j = log(P[Y=j+1]/P[Y=j])}
for \eqn{j=1,\ldots,M}.
- If \code{reverse} is \code{TRUE}, then
+ If \code{reverse} is \code{TRUE} then
\eqn{\eta_j = \log(P[Y=j]/P[Y=j+1])}{eta_j=log(P[Y=j]/P[Y=j+1])}
will be used.
diff --git a/man/alaplace3.Rd b/man/alaplace3.Rd
index 4e47eda..2ae0157 100644
--- a/man/alaplace3.Rd
+++ b/man/alaplace3.Rd
@@ -196,8 +196,7 @@ economics, engineering, and finance},
Boston: Birkhauser.
Yee, T. W. (2008)
- Quantile regression by scoring
- an asymmetric Laplace distribution.
+ Quantile regression for counts and binomial proportions.
In preparation.
}
@@ -228,6 +227,12 @@ Boston: Birkhauser.
\code{alaplace1()} with one \eqn{\tau}{tau} at a time but
using offsets and an intercept-only model.
+ A second method for solving the noncrossing quantile problem is
+ illustrated below in Example 3.
+ This is called the \emph{accumulative quantile method} (AQM)
+ and details are in Yee (2008).
+ It does not make the strong parallelism assumption.
+
The functions \code{alaplace2()} and \code{\link{laplace}}
differ slightly in terms of the parameterizations.
@@ -254,7 +259,7 @@ fitp = vgam(y ~ s(x, df=mydof), alaplace1(tau=mytau, llocation="loge",
parallelLoc=TRUE), data=alldat, trace=TRUE)
\dontrun{
-par(xpd=TRUE, las=1)
+par(las=1)
mylwd = 1.5
with(alldat, plot(x, jitter(y, factor=0.5), col="red",
main="Example 1; green: parallelLoc=TRUE",
@@ -287,6 +292,42 @@ with(alldat, plot(x, jitter(y, factor=0.5), col="red", ylab="y",
with(alldat, matlines(x, fitted(fitp2), col="blue", lty="solid", lwd=mylwd))
with(alldat, matlines(x, fitted(fitp3), col="black", lty="solid", lwd=mylwd))
}
+
+
+
+# Example 3: noncrossing regression quantiles using a trick: obtain
+# successive solutions which are added to previous solutions; use a log
+# link to ensure increasing quantiles at any value of x.
+
+mytau = seq(0.1, 0.9, by=0.1)
+answer = matrix(0, nrow(alldat), length(mytau)) # Stores the quantiles
+alldat = transform(alldat, offsety=y*0)
+usetau = mytau
+for(ii in 1:length(mytau)) {
+# cat("\n\nii =", ii, "\n")
+ alldat = transform(alldat, usey=y-offsety)
+ iloc = ifelse(ii==1, with(alldat, median(y)), 1.0) # Well-chosen!
+ mydf = ifelse(ii==1, 5, 3) # Maybe less smoothing will help
+ lloc = ifelse(ii==1, "loge", "loge") # 2nd value must be "loge"
+ fit3 = vglm(usey ~ ns(x, df=mydf), data=alldat, trace=TRUE,
+ fam=alaplace1(tau=usetau[ii], lloc=lloc, iloc=iloc))
+ answer[,ii] = (if(ii==1) 0 else answer[,ii-1]) + fitted(fit3)
+ alldat = transform(alldat, offsety=answer[,ii])
+}
+
+# Plot the results.
+\dontrun{
+with(alldat, plot(x, y, col="blue",
+ main=paste("Noncrossing and nonparallel; tau =",
+ paste(mytau, collapse=", "))))
+with(alldat, matlines(x, answer, col="red", lty=1))
+
+# Zoom in near the origin.
+with(alldat, plot(x, y, col="blue", xlim=c(0, 0.2), ylim=0:1,
+ main=paste("Noncrossing and nonparallel; tau =",
+ paste(mytau, collapse=", "))))
+with(alldat, matlines(x, answer, col="red", lty=1))
+}
}
\keyword{models}
\keyword{regression}
diff --git a/man/alsqreg.Rd b/man/alsqreg.Rd
index da92da6..ef638eb 100644
--- a/man/alsqreg.Rd
+++ b/man/alsqreg.Rd
@@ -96,6 +96,10 @@ alsqreg(w.als=1, parallel=FALSE, lexpectile = "identity",
If \code{w.als} has more than one value then the value returned by
the slot is the sum taken over all the \code{w.als} values.
+ This \pkg{VGAM} family function could well be renamed \code{amlnormal()}
+ instead, given the other function names \code{\link{amlpoisson}},
+ \code{\link{amlbinomial}}, etc.
+
}
%\section{Warning }{
@@ -111,7 +115,7 @@ alsqreg(w.als=1, parallel=FALSE, lexpectile = "identity",
\code{\link{amlbinomial}},
\code{\link{amlexponential}},
\code{\link{bminz}},
- \code{\link{alaplace2}},
+ \code{\link{alaplace1}},
\code{\link{lms.bcn}} and similar variants are alternative
methods for quantile regression.
@@ -130,7 +134,8 @@ coef(fit, matrix=TRUE)
\dontrun{
# Quantile plot
with(bminz, plot(age, BMI, col="blue", main=
- paste(round(fit@extra$percentile, dig=1), "percentile curve")))
+ paste(round(fit@extra$percentile, dig=1),
+ "expectile-percentile curve")))
with(bminz, lines(age, c(fitted(fit)), col="black"))
}
@@ -145,7 +150,7 @@ findw = function(w, percentile=50) {
\dontrun{
# Quantile plot
with(bminz, plot(age, BMI, col="blue", las=1, main=
- "25, 50 and 75 percentile curves"))
+ "25, 50 and 75 expectile-percentile curves"))
}
for(myp in c(25,50,75)) {
# Note: uniroot() can only find one root at a time
@@ -174,7 +179,7 @@ coef(fit3, matrix=TRUE)
# Quantile plot
with(bminz, plot(age, BMI, col="blue", main=
paste(paste(round(fit3@extra$percentile, dig=1), collapse=", "),
- "percentile curves")))
+ "expectile-percentile curves")))
with(bminz, matlines(age, fitted(fit3), col=1:fit3@extra$M, lwd=2))
with(bminz, lines(age, c(fitted(fit )), col="black")) # For comparison
}
diff --git a/man/amlbinomial.Rd b/man/amlbinomial.Rd
index e5ffb2c..89832b3 100644
--- a/man/amlbinomial.Rd
+++ b/man/amlbinomial.Rd
@@ -95,7 +95,8 @@ amlbinomial(w.aml=1, parallel=FALSE, digw=4, link="logit", earg=list())
\seealso{
\code{\link{amlpoisson}},
\code{\link{amlexponential}},
- \code{\link{alsqreg}}.
+ \code{\link{alsqreg}},
+ \code{\link{alaplace1}}.
}
@@ -117,7 +118,7 @@ par(mfrow=c(1,2))
# Quantile plot
with(mydat, plot(x, jitter(y), col="blue", las=1, main=
paste(paste(round(fit@extra$percentile, dig=1), collapse=", "),
- "expectile curves")))
+ "percentile-expectile curves")))
with(mydat, matlines(x, fitted(fit), lwd=2, col="blue", lty=1))
diff --git a/man/amlexponential.Rd b/man/amlexponential.Rd
index 8df359c..be7c1f6 100644
--- a/man/amlexponential.Rd
+++ b/man/amlexponential.Rd
@@ -106,7 +106,8 @@ amlexponential(w.aml=1, parallel=FALSE, method.init=1, digw=4,
\code{\link{exponential}},
\code{\link{amlbinomial}},
\code{\link{amlpoisson}},
- \code{\link{alsqreg}}.
+ \code{\link{alsqreg}},
+ \code{\link{alaplace1}}.
}
@@ -126,7 +127,7 @@ par(mfrow=c(1,2))
# Quantile plot
with(mydat, plot(x, sqrt(y), col="blue", las=1, main=
paste(paste(round(fit@extra$percentile, dig=1), collapse=", "),
- "expectile curves")))
+ "percentile-expectile curves")))
with(mydat, matlines(x, sqrt(fitted(fit)), lwd=2, col="blue", lty=1))
# Compare the fitted expectiles with the quantiles
diff --git a/man/amlpoisson.Rd b/man/amlpoisson.Rd
index 79019f6..3f8cce4 100644
--- a/man/amlpoisson.Rd
+++ b/man/amlpoisson.Rd
@@ -125,7 +125,8 @@ amlpoisson(w.aml=1, parallel=FALSE, method.init=1, digw=4,
}
\seealso{
\code{\link{alsqreg}},
- \code{\link{amlbinomial}}.
+ \code{\link{amlbinomial}},
+ \code{\link{alaplace1}}.
}
@@ -141,7 +142,7 @@ fit@extra
# Quantile plot
with(mydat, plot(x, jitter(y), col="blue", las=1, main=
paste(paste(round(fit@extra$percentile, dig=1), collapse=", "),
- "percentile curves")))
+ "percentile-expectile curves")))
with(mydat, matlines(x, fitted(fit), lwd=2))
}
}
diff --git a/man/binom2.or.Rd b/man/binom2.or.Rd
index b2e1e19..8f6bb69 100644
--- a/man/binom2.or.Rd
+++ b/man/binom2.or.Rd
@@ -1,16 +1,18 @@
\name{binom2.or}
\alias{binom2.or}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Bivariate Logistic Regression }
+\title{ Bivariate Binary Regression with an Odds Ratio (Family Function) }
\description{
Fits a Palmgren (bivariate logistic regression) model to two binary
- responses. Actually, a bivariate logistic/probit/cloglog/cauchit
+ responses. Actually, a bivariate logistic/probit/cloglog/cauchit
model can be fitted.
+ The odds ratio is used as a measure of dependency.
}
\usage{
-binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, lor = "loge",
- emu=list(), emu1=emu, emu2=emu, eor=list(),
+binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
+ emu=list(), emu1=emu, emu2=emu, eoratio=list(),
+ imu1=NULL, imu2=NULL, ioratio = NULL,
zero = 3, exchangeable = FALSE, tol = 0.001)
}
%- maybe also 'usage' for other objects documented here.
@@ -26,12 +28,19 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, lor = "loge",
probabilities.
}
- \item{lor}{
+ \item{loratio}{
Link function applied to the odds ratio.
See \code{\link{Links}} for more choices.
}
- \item{emu, emu1, emu2, eor}{
+ \item{imu1, imu2, ioratio}{
+ Optional initial values for the marginal probabilities and odds ratio.
+ See \code{\link{CommonVGAMffArguments}} for more details.
+ In general, good initial values are often required, so use these
+ arguments if convergence failure occurs.
+
+ }
+ \item{emu, emu1, emu2, eoratio}{
List. Extra argument for each of the links.
See \code{earg} in \code{\link{Links}} for general information.
@@ -55,7 +64,7 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, lor = "loge",
\details{
Known also as the \emph{Palmgren model}, the bivariate logistic model is
a full-likelihood based model defined as two logistic regressions plus
- \code{log(OR) = eta3} where \code{eta3} is the third linear/additive
+ \code{log(oratio) = eta3} where \code{eta3} is the third linear/additive
predictor relating the odds ratio to explanatory variables.
Explicitly, the default model is
\deqn{logit[P(Y_j=1)] = \eta_j,\ \ \ j=1,2}{%
@@ -70,12 +79,14 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, lor = "loge",
likelihood is specified. The two binary responses are independent
if and only if the odds ratio is unity, or equivalently, the log odds
ratio is zero.
+ Fisher scoring is implemented.
The default models \eqn{\eta_3}{eta3} as a single parameter only,
i.e., an intercept-only model, but this can be circumvented by setting
- \code{zero=NULL} to model the odds ratio as a function of all the
+ \code{zero=NULL} in order to model the odds ratio as a function of all the
explanatory variables.
- The function \code{binom2.or} can handle \code{\link{probit}},
+ The function \code{binom2.or} can handle other probability link
+ functions such as \code{\link{probit}},
\code{\link{cloglog}} and \code{\link{cauchit}} links as well, so is
quite general. In fact, the two marginal probabilities can each have
a different link function. A similar model is the \emph{bivariate
@@ -89,8 +100,8 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, lor = "loge",
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
- The object is used by modelling functions such as \code{\link{vglm}},
- \code{\link{rrvglm}} and \code{\link{vgam}}.
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
When fitted, the \code{fitted.values} slot of the object contains the
four joint probabilities, labelled as \eqn{(Y_1,Y_2)}{(Y1,Y2)} = (0,0),
@@ -124,6 +135,7 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, lor = "loge",
columns correspond to \eqn{(Y_1,Y_2)}{(Y1,Y2)} = (0,0), (0,1), (1,0),
(1,1) respectively), or a two-column matrix where each column has two
distinct values.
+ The function \code{\link{rbinom2.or}} may be used to generate such data.
By default, a constant odds ratio is fitted because \code{zero=3}.
Set \code{zero=NULL} if you want the odds ratio to be modelled as a
@@ -141,8 +153,10 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, lor = "loge",
}
\seealso{
+ \code{\link{rbinom2.or}},
\code{\link{binom2.rho}},
\code{\link{loglinb2}},
+ \code{\link{zipebcom}},
\code{\link{coalminers}},
\code{\link{binomialff}},
\code{\link{logit}},
@@ -159,13 +173,16 @@ fitted(fit)
summary(fit)
coef(fit, matrix=TRUE)
\dontrun{
-attach(coalminers)
-matplot(Age, fitted(fit), type="l", las=1, xlab="(age - 42) / 5",
- main=paste("B=Breathlessness, W=Wheeze; 1=(B=0,W=0),",
- "2=(B=0,W=1), 3=(B=1,W=0), 4=(B=1,W=1)"))
-matpoints(Age, fit@y, col=1:4)
-detach(coalminers)
+with(coalminers, matplot(Age, fitted(fit), type="l", las=1,
+ xlab="(age - 42) / 5", lwd=2))
+with(coalminers, matpoints(Age, fit@y, col=1:4))
+legend(x=-4, y=0.5, lty=1:4, col=1:4, lwd=2,
+ legend=c("1 = (Breathlessness=0, Wheeze=0)",
+ "2 = (Breathlessness=0, Wheeze=1)",
+ "3 = (Breathlessness=1, Wheeze=0)",
+ "4 = (Breathlessness=1, Wheeze=1)"))
}
}
\keyword{models}
\keyword{regression}
+
diff --git a/man/binom2.orUC.Rd b/man/binom2.orUC.Rd
new file mode 100644
index 0000000..416c6c4
--- /dev/null
+++ b/man/binom2.orUC.Rd
@@ -0,0 +1,127 @@
+\name{Binom2.or}
+\alias{Binom2.or}
+\alias{dbinom2.or}
+\alias{rbinom2.or}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Bivariate Binary Regression with an Odds Ratio }
+\description{
+ Density and random generation for a bivariate binary regression model
+ using an odds ratio as the measure of dependency.
+
+}
+\usage{
+rbinom2.or(n, mu1,
+ mu2=if(exchangeable) mu1 else stop("'mu2' not specified"),
+ oratio=1, exchangeable=FALSE, tol=0.001, twoCols=TRUE,
+ colnames=if(twoCols) c("y1","y2") else c("00", "01", "10", "11"),
+ ErrorCheck=TRUE)
+dbinom2.or(mu1,
+ mu2=if(exchangeable) mu1 else stop("'mu2' not specified"),
+ oratio=1, exchangeable=FALSE, tol=0.001,
+ colnames=c("00", "01", "10", "11"),
+ ErrorCheck=TRUE)
+
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{n}{
+ number of observations. Must be a single positive integer.
+ The arguments \code{mu1}, \code{mu2}, \code{oratio} are recycled to
+ length \code{n}.
+
+ }
+ \item{mu1, mu2}{
+ The marginal probabilities.
+ Only \code{mu1} is needed if \code{exchangeable=TRUE}.
+ Values should be between 0 and 1.
+
+ }
+ \item{oratio}{
+ Odds ratio. Must be numeric and positive.
+ The default value of unity means the responses are statistically
+ independent.
+
+ }
+ \item{exchangeable}{
+ Logical. If \code{TRUE}, the two marginal probabilities are constrained
+ to be equal.
+
+ }
+ \item{twoCols}{
+ Logical.
+ If \code{TRUE}, then an \eqn{n} \eqn{\times}{*} \eqn{2} matrix of 1s
+ and 0s is returned.
+ If \code{FALSE}, then an \eqn{n} \eqn{\times}{*} \eqn{4} matrix of 1s
+ and 0s is returned.
+
+ }
+ \item{colnames}{
+ The \code{dimnames} argument of
+ \code{\link[base]{matrix}} is assigned \code{list(NULL, colnames)}.
+
+ }
+ \item{tol}{
+ Tolerance for testing independence. Should be some
+ small positive numerical value.
+
+ }
+ \item{ErrorCheck}{
+ Logical. Do some error checking of the input parameters?
+
+ }
+
+}
+\details{
+ The function \code{rbinom2.or} generates data coming from a bivariate
+ binary response model.
+ The data might be fitted with the \pkg{VGAM} family function
+ \code{\link{binom2.or}}.
+
+ The function \code{dbinom2.or} does not really compute the density
+ (because that does not make sense here) but rather returns the
+ four joint probabilities.
+
+}
+\value{
+ The function \code{rbinom2.or} returns
+ either a 2 or 4 column matrix of 1s and 0s, depending on the argument
+ \code{twoCols}.
+
+ The function \code{dbinom2.or} returns
+ a 4 column matrix of joint probabilities; each row adds up to unity.
+
+}
+\author{ T. W. Yee }
+\seealso{
+ \code{\link{binom2.or}}.
+
+}
+\examples{
+# Example 1
+nn = 2000
+ymat = rbinom2.or(n=nn, mu1=0.8, oratio=exp(2), exch=TRUE)
+(mytab = table(ymat[,1], ymat[,2]))
+(myor = mytab["0","0"] * mytab["1","1"] / (mytab["1","0"] * mytab["0","1"]))
+fit = vglm(ymat ~ 1, binom2.or(exch=TRUE))
+coef(fit, matrix=TRUE)
+
+
+# Example 2
+x = sort(runif(nn))
+mu1 = logit(-2+4*x, inv=TRUE)
+mu2 = logit(-1+3*x, inv=TRUE)
+dmat = dbinom2.or(mu1=mu1, mu2=mu2, oratio=exp(2))
+ymat = rbinom2.or(n=nn, mu1=mu1, mu2=mu2, oratio=exp(2))
+fit2 = vglm(ymat ~ x, binom2.or)
+coef(fit2, matrix=TRUE)
+\dontrun{
+matplot(x, dmat, lty=1:4, col=1:4, type="l", main="Joint probabilities",
+ ylim=0:1, lwd=2)
+legend(x=0, y=0.5, lty=1:4, col=1:4, lwd=2,
+ legend=c("1 = (y1=0, y2=0)", "2 = (y1=0, y2=1)",
+ "3 = (y1=1, y2=0)", "4 = (y1=1, y2=1)"))
+}
+}
+\keyword{distribution}
+
+
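
A quick sanity check of the new dbinom2.or(), assuming only the documented
default column order c("00", "01", "10", "11"): the odds ratio recovered from
the four joint probabilities should reproduce the oratio argument.

pmat = dbinom2.or(mu1=0.7, mu2=0.5, oratio=3)
rowSums(pmat)                                            # should be 1
pmat[,"00"] * pmat[,"11"] / (pmat[,"01"] * pmat[,"10"])  # should be 3 (up to rounding)
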
diff --git a/man/cqo.Rd b/man/cqo.Rd
index d5b90ca..50d1bca 100644
--- a/man/cqo.Rd
+++ b/man/cqo.Rd
@@ -381,6 +381,7 @@ Constrained additive ordination.
\seealso{
\code{\link{qrrvglm.control}},
\code{\link{Coef.qrrvglm}},
+ \code{\link{predict.qrrvglm}},
\code{\link{rcqo}},
\code{\link{cao}},
\code{\link{uqo}},
@@ -407,6 +408,7 @@ contains further information and examples.
# Example 1; Fit an unequal tolerances model to the hunting spiders data
data(hspider)
hspider[,1:6]=scale(hspider[,1:6]) # Standardize the environmental variables
+set.seed(1234)
p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
@@ -461,10 +463,12 @@ lvplot(p2, ellips=FALSE, label=TRUE, xlim=c(-3,4),
# Example 3: species packing model with presence/absence data
+set.seed(2345)
n = 200; p = 5; S = 5
mydata = rcqo(n, p, S, fam="binomial", hiabundance=4,
EqualTol=TRUE, ESOpt=TRUE, EqualMax=TRUE)
myform = attr(mydata, "formula")
+set.seed(1234)
b1 = cqo(myform, fam=binomialff(mv=TRUE, link="cloglog"), data=mydata)
sort(b1@misc$deviance.Bestof) # A history of all the iterations
\dontrun{
diff --git a/man/fisherz.Rd b/man/fisherz.Rd
index b754fed..fc4dbe4 100644
--- a/man/fisherz.Rd
+++ b/man/fisherz.Rd
@@ -20,10 +20,10 @@ fisherz(theta, earg = list(), inverse = FALSE, deriv = 0,
}
\item{earg}{
Optional list. Extra argument for passing in additional information.
- Values of \code{theta} which are less than or equal to -1 can be
+ Values of \code{theta} which are less than or equal to \eqn{-1} can be
replaced by the \code{bminvalue} component of the list \code{earg}
before computing the link function value.
- Values of \code{theta} which are greater than or equal to 1 can be
+ Values of \code{theta} which are greater than or equal to \eqn{1} can be
replaced by the \code{bmaxvalue} component of the list \code{earg}
before computing the link function value.
See \code{\link{Links}} for general information about \code{earg}.
@@ -51,9 +51,9 @@ fisherz(theta, earg = list(), inverse = FALSE, deriv = 0,
}
\details{
The \code{fisherz} link function is commonly used for parameters that
- lie between -1 and 1.
- Numerical values of \code{theta} close to -1 or 1 or out of range
- result in
+ lie between \eqn{-1} and \eqn{1}.
+ Numerical values of \code{theta} close to \eqn{-1} or \eqn{1} or
+ out of range result in
\code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
The arguments \code{short} and \code{tag} are used only if
\code{theta} is character.
@@ -79,7 +79,8 @@ fisherz(theta, earg = list(), inverse = FALSE, deriv = 0,
\author{ Thomas W. Yee }
\note{
- Numerical instability may occur when \code{theta} is close to -1 or 1.
+ Numerical instability may occur when \code{theta} is close to \eqn{-1} or
+ \eqn{1}.
One way of overcoming this is to use \code{earg}.
The link function \code{\link{rhobit}} is very similar to \code{fisherz},
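
For reference, Fisher's z transformation is z = 0.5 * log((1+theta)/(1-theta))
= atanh(theta), which is presumably what fisherz() computes for theta strictly
inside (-1, 1); a minimal sketch, not taken from the package documentation:

theta <- c(-0.9, 0, 0.5, 0.9)
atanh(theta)   # 0.5 * log((1+theta)/(1-theta))
# With VGAM attached, fisherz(theta) is expected to agree with atanh(theta),
# and fisherz(atanh(theta), inverse=TRUE) should recover theta (i.e. tanh).
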
diff --git a/man/gamma1.Rd b/man/gamma1.Rd
index 303d4dc..163f420 100644
--- a/man/gamma1.Rd
+++ b/man/gamma1.Rd
@@ -65,7 +65,8 @@ New York: Wiley-Interscience, Third edition.
}
\seealso{
- \code{\link{gamma2.ab}} for the 2-parameter gamma distribution.
+ \code{\link{gamma2.ab}} for the 2-parameter gamma distribution,
+ \code{\link{lgammaff}}.
}
\examples{
diff --git a/man/lgammaff.Rd b/man/lgammaff.Rd
index 581bec3..67ec548 100644
--- a/man/lgammaff.Rd
+++ b/man/lgammaff.Rd
@@ -111,6 +111,7 @@ New York: Wiley.
\code{\link{rlgamma}},
\code{\link{ggamma}},
\code{\link{prentice74}},
+\code{\link{gamma1}},
\code{\link[base:Special]{lgamma}}.
}
\examples{
diff --git a/man/lms.bcn.Rd b/man/lms.bcn.Rd
index e04b961..e5972b4 100644
--- a/man/lms.bcn.Rd
+++ b/man/lms.bcn.Rd
@@ -143,6 +143,7 @@ number corresponding to the highest likelihood value.
\code{\link{deplot.lmscreg}},
\code{\link{cdf.lmscreg}},
\code{\link{bminz}},
+\code{\link{alaplace1}},
\code{\link{alsqreg}}.
}
diff --git a/man/mbinomial.Rd b/man/mbinomial.Rd
index 3f4c0f8..4d45fc0 100644
--- a/man/mbinomial.Rd
+++ b/man/mbinomial.Rd
@@ -126,7 +126,9 @@ mbinomial(mvar=NULL, link="logit", earg=list(),
}
\examples{
\dontrun{
-# cf. Hastie and Tibshirani (1990) p.209. The variable n must be even.
+# Cf. Hastie and Tibshirani (1990) p.209. The variable n must be even.
+# Here, the intercept for each matched set accounts for x3 which is
+# the confounder or matching variable.
n = 700 # Requires a big machine with lots of memory. Expensive wrt time
n = 100 # This requires a reasonably big machine.
mydat = data.frame(x2 = rnorm(n), x3 = rep(rnorm(n/2), each=2))
@@ -140,6 +142,7 @@ mydat = transform(mydat, y = c(y1, 1-y1),
ID = factor(c(row(etamat))))
fit = vglm(y ~ 1 + ID + x2, trace=TRUE,
fam = mbinomial(mvar = ~ ID - 1), data=mydat)
+dimnames(coef(fit, mat=TRUE))
coef(fit, mat=TRUE)
summary(fit)
fitted(fit)[1:5]
diff --git a/man/notdocumentedyet.Rd b/man/notdocumentedyet.Rd
index 307712c..fc90ea7 100644
--- a/man/notdocumentedyet.Rd
+++ b/man/notdocumentedyet.Rd
@@ -207,7 +207,7 @@
\alias{predict.glm}
\alias{predict.lm}
\alias{predict.mlm}
-\alias{predict.qrrvglm}
+% \alias{predict.qrrvglm}
\alias{predict.rrvglm}
\alias{predict.uqo}
\alias{predict.vgam}
diff --git a/man/predict.qrrvglm.Rd b/man/predict.qrrvglm.Rd
new file mode 100644
index 0000000..13fb90a
--- /dev/null
+++ b/man/predict.qrrvglm.Rd
@@ -0,0 +1,86 @@
+\name{predict.qrrvglm}
+\alias{predict.qrrvglm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Predict Method for a CQO fit }
+\description{
+ Predicted values based on a constrained quadratic ordination (CQO)
+ object.
+
+}
+\usage{
+predict.qrrvglm(object, newdata=NULL,
+ type=c("link", "response", "lv", "terms"),
+ se.fit=FALSE, deriv=0, dispersion=NULL,
+ extra=object@extra, varlvI = FALSE, reference = NULL, ...)
+
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ Object of class inheriting from \code{"qrrvglm"}. }
+ \item{newdata}{
+ An optional data frame in which to look for variables with which
+ to predict. If omitted, the fitted linear predictors are used.
+
+ }
+ \item{type, se.fit, dispersion, extra}{
+ See \code{\link{predict.vglm}}.
+
+ }
+ \item{deriv}{ Derivative. Currently only 0 is handled. }
+ \item{varlvI, reference}{
+ Arguments passed into \code{\link{Coef.qrrvglm}}.
+ }
+ \item{\dots}{ Currently undocumented. }
+}
+\details{
+ Obtains predictions
+ from a fitted CQO object.
+ This function currently has several limitations; it is
+ unfinished.
+
+% and optionally estimates standard errors of those predictions
+
+}
+\value{
+ See \code{\link{predict.vglm}}.
+}
+\references{
+Yee, T. W. (2004)
+A new technique for maximum-likelihood
+canonical Gaussian ordination.
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
+
+}
+
+\author{ T. W. Yee }
+\note{
+ This function is not robust and has not been checked fully.
+
+}
+
+\seealso{
+ \code{\link{cqo}}.
+
+}
+
+\examples{
+data(hspider)
+hspider[,1:6]=scale(hspider[,1:6]) # Standardize the environmental variables
+set.seed(1234)
+p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
+ Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ fam=poissonff, data=hspider, Crow1positive=FALSE, ITol=TRUE)
+sort(p1@misc$deviance.Bestof) # A history of all the iterations
+
+predict(p1)[1:3,]
+
+# The following should be all zeros
+max(abs(predict(p1, new=hspider[1:3,]) - predict(p1)[1:3,]))
+max(abs(predict(p1, new=hspider[1:3,], type="res") - fitted(p1)[1:3,]))
+}
+\keyword{models}
+\keyword{regression}
+
+
diff --git a/man/rcqo.Rd b/man/rcqo.Rd
index a362395..ce40afa 100644
--- a/man/rcqo.Rd
+++ b/man/rcqo.Rd
@@ -13,10 +13,10 @@ rcqo(n, p, S, Rank = 1,
EqualMaxima = FALSE, EqualTolerances = TRUE, ESOptima = FALSE,
loabundance = if (EqualMaxima) hiabundance else 10,
hiabundance = 100, sdlv = c(1.5/2^(0:3))[1:Rank],
- sdOptima = ifelse(ESOptima, 1.5/Rank, 1) * sdlv,
+ sdOptima = ifelse(ESOptima, 1.5/Rank, 1) * ifelse(scalelv, sdlv, 1),
sdTolerances = 0.25, Kvector = 1, Shape = 1,
sqrt = FALSE, Log = FALSE, rhox = 0.5, breaks = 4,
- seed = NULL, Crow1positive=TRUE)
+ seed = NULL, Crow1positive=TRUE, xmat = NULL, scalelv = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -199,6 +199,18 @@ rcqo(n, p, S, Rank = 1,
See \code{\link{qrrvglm.control}} for details.
}
+ \item{xmat}{
+ The
+ \eqn{n \times (p-1)}{n * (p-1)}
+ environmental matrix can be inputted.
+
+ }
+ \item{scalelv}{
+ Logical. If \code{FALSE} the argument
+ \code{sdlv} is ignored and no scaling of the latent variable
+ values is performed.
+
+ }
}
\details{
This function generates data coming from a constrained quadratic
@@ -275,6 +287,10 @@ rcqo(n, p, S, Rank = 1,
equal to successive values of \code{sdlv}.
}
+ \item{"eta"}{
+ The linear/additive predictor value.
+
+ }
\item{"optima"}{
The \eqn{S \times R}{S * R} matrix of species' optima.
diff --git a/man/zinbUC.Rd b/man/zinbUC.Rd
index 818448f..b2a0c45 100644
--- a/man/zinbUC.Rd
+++ b/man/zinbUC.Rd
@@ -12,10 +12,10 @@
}
\usage{
-dzinb(x, phi, size, prob, munb, log=FALSE)
-pzinb(q, phi, size, prob, munb)
-qzinb(p, phi, size, prob, munb)
-rzinb(n, phi, size, prob, munb)
+dzinb(x, phi, size, prob=NULL, munb=NULL, log=FALSE)
+pzinb(q, phi, size, prob=NULL, munb=NULL)
+qzinb(p, phi, size, prob=NULL, munb=NULL)
+rzinb(n, phi, size, prob=NULL, munb=NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
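
With the new NULL defaults above, supplying munb is intended to be equivalent
to supplying prob = size/(size + munb), as in the R changes earlier in this
diff. A small usage sketch with hypothetical values:

x  <- 0:5
d1 <- dzinb(x, phi=0.2, size=2, munb=3)
d2 <- dzinb(x, phi=0.2, size=2, prob=2/(2+3))  # prob = size/(size+munb) = 0.4
max(abs(d1 - d2))                              # should be 0 (up to floating point)
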
diff --git a/man/zipebcom.Rd b/man/zipebcom.Rd
new file mode 100644
index 0000000..0dca0fb
--- /dev/null
+++ b/man/zipebcom.Rd
@@ -0,0 +1,214 @@
+\name{zipebcom}
+\alias{zipebcom}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Exchangeable bivariate cloglog odds-ratio model from a
+ zero-inflated Poisson distribution }
+\description{
+ Fits an exchangeable bivariate odds-ratio model to two binary
+ responses with a complementary log-log link.
+ The data are assumed to come from a zero-inflated Poisson distribution
+ that has been converted to presence/absence.
+
+}
+\usage{
+zipebcom(lmu12="cloglog", lphi12="logit", loratio="loge",
+ emu12=list(), ephi12=list(), eoratio=list(),
+ imu12=NULL, iphi12=NULL, ioratio = NULL,
+ zero=2:3, tol=0.001, addRidge=0.001)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lmu12, emu12, imu12}{
+ Link function, extra argument and optional initial values for
+ the first (and second) marginal probabilities.
+ Arguments \code{lmu12} and \code{emu12} should be left alone.
+ Argument \code{imu12} may be of length 2 (one element for each response).
+
+ }
+ \item{lphi12}{
+ Link function applied to the \eqn{\phi}{phi} parameter of the
+ zero-inflated Poisson distribution (see \code{\link{zipoisson}}).
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{loratio}{
+ Link function applied to the odds ratio.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{iphi12, ioratio}{
+ Optional initial values for \eqn{\phi}{phi} and the odds ratio.
+ See \code{\link{CommonVGAMffArguments}} for more details.
+ In general, good initial values (especially for \code{iphi12})
+ are often required, so use these
+ arguments if convergence failure occurs.
+ If inputted, the value of \code{iphi12} cannot be more than the sample
+ proportions of zeros in either response.
+
+ }
+ \item{ephi12, eoratio}{
+ List. Extra argument for each of the links.
+ See \code{earg} in \code{\link{Links}} for general information.
+
+ }
+ \item{zero}{
+ Which linear/additive predictor is modelled as an intercept only?
+ A \code{NULL} means none.
+ The default has both \eqn{\phi}{phi} and the odds ratio as
+ not being modelled as a function of the explanatory variables (apart
+ from an intercept).
+
+ }
+ \item{tol}{
+ Tolerance for testing independence.
+ Should be some small positive numerical value.
+
+ }
+ \item{addRidge}{
+ Some small positive numerical value.
+ The first two diagonal elements of the working weight matrices are
+ multiplied by \code{1+addRidge} to make them diagonally dominant and
+ therefore positive-definite.
+
+ }
+}
+\details{
+ This \pkg{VGAM} family function fits an exchangeable bivariate odds
+ ratio model (\code{\link{binom2.or}}) with a \code{\link{cloglog}} link.
+ The data are assumed to come from a zero-inflated Poisson (ZIP) distribution
+ that has been converted to presence/absence.
+ Explicitly, the default model is
+ \deqn{cloglog[P(Y_j=1)/(1-\phi)] = \eta_1,\ \ \ j=1,2}{%
+ cloglog[P(Y_j=1)/(1-phi)] = eta_1,\ \ \ j=1,2}
+ for the (exchangeable) marginals, and
+ \deqn{logit[\phi] = \eta_2,}{%
+ logit[phi] = eta_2,}
+ for the mixing parameter, and
+ \deqn{\log[P(Y_{00}=1) P(Y_{11}=1) / (P(Y_{01}=1) P(Y_{10}=1))] = \eta_3,}{%
+ log[P(Y_{00}=1) P(Y_{11}=1) / (P(Y_{01}=1) P(Y_{10}=1))] = eta_3,}
+ specifies the dependency between the two responses. Here, the responses
+ equal 1 for a success and a 0 for a failure, and the odds ratio is often
+ written \eqn{\psi=p_{00}p_{11}/(p_{10}p_{01})}{psi=p00 p11 / (p10 p01)}.
+ We have \eqn{p_{10} = p_{01}}{p10 = p01} because of the exchangeability.
+
+ The second linear/additive predictor models the \eqn{\phi}{phi}
+ parameter (see \code{\link{zipoisson}}).
+ The third linear/additive predictor is the same as \code{\link{binom2.or}},
+ viz., the log odds ratio.
+
+ Suppose dataset1 comes from a Poisson distribution that has been
+ converted to presence/absence, and that both marginal probabilities
+ are the same (exchangeable).
+ Then \code{binom2.or("cloglog", exch=TRUE)} is appropriate.
+ Now suppose dataset2 comes from a \emph{zero-inflated} Poisson
+ distribution. The first linear/additive predictor of \code{zipebcom()}
+ applied to dataset2
+ is the same as that of
+ \code{binom2.or("cloglog", exch=TRUE)}
+ applied to dataset1.
+ That is, the \eqn{\phi}{phi} has been taken care
+ of by \code{zipebcom()} so that it is just like the simpler
+ \code{\link{binom2.or}}.
+
+ Note that, for \eqn{\eta_1}{eta_1},
+ \code{mu12 = prob12 / (1-phi12)} where \code{prob12} is the probability
+ of a 1 under the ZIP model.
+ Here, \code{mu12} corresponds to \code{mu1} and \code{mu2} in the
+ \code{\link{binom2.or}}-Poisson model.
+
+ If \eqn{\phi=0}{phi=0} then \code{zipebcom()} should be equivalent to
+ \code{binom2.or("cloglog", exch=TRUE)}.
+ Full details are given in Yee and Dirnbock (2008).
+
+ The leading \eqn{2 \times 2}{2 x 2} submatrix of the expected
+ information matrix (EIM) has rank 1, not 2. This is because
+ the parameters corresponding to the first two
+ linear/additive predictors are unidentifiable. The quick fix
+ around this problem is to use the \code{addRidge} adjustment.
+ The model is fitted by maximum likelihood estimation since the full
+ likelihood is specified. Fisher scoring is implemented.
+
+ The default models \eqn{\eta_2}{eta2} and \eqn{\eta_3}{eta3} as
+ single parameters only, but this
+ can be circumvented by setting \code{zero=NULL} in order to model the
+ \eqn{\phi}{phi} and odds ratio as a function of all the explanatory
+ variables.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+
+ When fitted, the \code{fitted.values} slot of the object contains the
+ four joint probabilities, labelled as \eqn{(Y_1,Y_2)}{(Y1,Y2)} = (0,0),
+ (0,1), (1,0), (1,1), respectively.
+ These estimated probabilities should be extracted with the \code{fitted}
+ generic function.
+
+}
+\references{
+
+ Yee, T. W. and Dirnbock, T. (2008)
+ A model for species presence/absence data at two time
+ points based on an odds ratio and zero-inflated Poisson
+ distribution.
+ In preparation.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ The \code{"12"} in the argument names reminds the user of the
+ exchangeability assumption.
+ The name of this \pkg{VGAM} family function stands for
+ \emph{zero-inflated Poisson exchangeable bivariate complementary
+ log-log odds-ratio model} or ZIP-EBCOM.
+
+ See \code{\link{binom2.or}} for details that are pertinent to this
+ \pkg{VGAM} family function too.
+ Even better initial values are usually needed here.
+
+}
+\seealso{
+ \code{\link{binom2.or}},
+ \code{\link{zipoisson}},
+ \code{\link{cloglog}},
+ \code{\link{CommonVGAMffArguments}}.
+
+}
+\examples{
+mydat = data.frame(x = seq(0, 1, len=(nsites <- 2000)))
+mydat = transform(mydat, eta1 = -3 + 5 * x,
+ phi1 = logit(-1, inverse=TRUE),
+ oratio = exp(2))
+mydat = transform(mydat, mu12 = cloglog(eta1, inverse=TRUE) * (1-phi1))
+tmat = with(mydat, rbinom2.or(nsites, mu1=mu12, oratio=oratio, exch=TRUE))
+mydat = transform(mydat, ybin1 = tmat[,1], ybin2 = tmat[,2])
+
+with(mydat, table(ybin1,ybin2)) / nsites # For interest only
+\dontrun{
+# Various plots of the data, for interest only
+par(mfrow=c(2,2))
+with(mydat, plot(jitter(ybin1) ~ x, col="blue"))
+
+with(mydat, plot(jitter(ybin2) ~ jitter(ybin1), col="blue"))
+
+with(mydat, plot(x, mu12, col="blue", type="l", ylim=0:1,
+ ylab="Probability", main="Marginal probability and phi"))
+with(mydat, abline(h=phi1[1], col="red", lty="dashed"))
+
+tmat2 = with(mydat, dbinom2.or(mu1=mu12, oratio=oratio, exch=TRUE))
+with(mydat, matplot(x, tmat2, col=1:4, type="l", ylim=0:1,
+ ylab="Probability", main="Joint probabilities"))
+}
+
+# Now fit the model to the data.
+fit = vglm(cbind(ybin1,ybin2) ~ x, fam=zipebcom, dat=mydat, trace=TRUE)
+
+coef(fit, matrix=TRUE)
+summary(fit)
+vcov(fit)
+}
+\keyword{models}
+\keyword{regression}
+
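
A sketch of the relation mu12 = prob12 / (1 - phi) stated in the details
section, using quantities already constructed in the example above (note that
the example's mu12 variable plays the role of prob12, the marginal probability
of a 1):

# cloglog^{-1}(eta1) is what the first linear/additive predictor models;
# dividing the marginal probability by (1 - phi) recovers it exactly here.
with(mydat, range(cloglog(eta1, inverse=TRUE) - mu12 / (1 - phi1)))  # ~ c(0, 0)
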
diff --git a/man/zipoisson.Rd b/man/zipoisson.Rd
index 6f5b5f5..e1f29ca 100644
--- a/man/zipoisson.Rd
+++ b/man/zipoisson.Rd
@@ -60,22 +60,27 @@ zipoisson(lphi="logit", llambda = "loge",
}
}
\details{
- This function uses Fisher scoring and is based on
+ The model is a mixture of a Poisson distribution and the value 0;
+ it has value 0 with probability \eqn{\phi}{phi} else is
+ Poisson(\eqn{\lambda}{lambda}) distributed.
+ The model can be written
\deqn{P(Y=0) = \phi + (1-\phi) \exp(-\lambda),}{%
P(Y=0) = phi + (1-phi) * exp(-lambda),}
and for \eqn{y=1,2,\ldots},
\deqn{P(Y=y) = (1-\phi) \exp(-\lambda) \lambda^y / y!.}{%
P(Y=y) = (1-phi) * exp(-lambda) * lambda^y / y!.}
- The parameter \eqn{\phi}{phi} satisfies \eqn{0 < \phi < 1}{0 < phi < 1}.
+ Here, the parameter \eqn{\phi}{phi} satisfies
+ \eqn{0 < \phi < 1}{0 < phi < 1}.
The mean of \eqn{Y} is \eqn{(1-\phi) \lambda}{(1-phi)*lambda} and these
are returned as the fitted values. By default, the two linear/additive
predictors are \eqn{(logit(\phi), \log(\lambda))^T}{(logit(phi),
log(lambda))^T}.
+ This function implements Fisher scoring.
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
- The object is used by modelling functions such as \code{\link{vglm}},
+ The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
}
@@ -90,6 +95,10 @@ zipoisson(lphi="logit", llambda = "loge",
\emph{Computational Statistics & Data Analysis},
\bold{42}, 37--46.
+ Cameron, A. C. and Trivedi, P. K. (1998)
+ \emph{Regression Analysis of Count Data}.
+ Cambridge University Press: Cambridge.
+
}
\author{ T. W. Yee }
\note{
@@ -121,7 +130,9 @@ zipoisson(lphi="logit", llambda = "loge",
\code{\link{zapoisson}},
\code{\link{Zipois}},
\code{\link{yip88}},
+ \code{\link{zipebcom}},
\code{\link[stats:Poisson]{rpois}}.
+
}
\examples{
x = runif(n <- 2000)
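
The rewritten details above describe the ZIP as a two-component mixture with
mean (1 - phi) * lambda; a minimal simulation sketch, not part of the package
examples, confirming the two displayed formulas:

set.seed(123)
nn <- 10000; phi <- 0.3; lambda <- 4
y  <- ifelse(runif(nn) < phi, 0, rpois(nn, lambda))
mean(y)        # should be near (1 - phi) * lambda = 2.8
mean(y == 0)   # should be near phi + (1 - phi) * exp(-lambda)
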
diff --git a/src/testf90.f90 b/src/testf90.f90
index 48aa4b9..9af38f5 100644
--- a/src/testf90.f90
+++ b/src/testf90.f90
@@ -1,18 +1,13 @@
-! Test F90 subroutines; cannot be translated by ratfor
-! 20080225
-! Author: T. W. Yee
-!module VGAMf90 ! =============================================
-subroutine VGAM_F90_fill9(vec, veclen, ansvec)
+
+subroutine vgamf90fill9(vec, veclen, ansvec)
implicit none
-! 20080225
-integer :: veclen
+integer :: veclen
double precision :: vec(veclen), ansvec(veclen)
double precision, allocatable :: workspace1(:)
-! Local variables
integer :: iii
allocate(workspace1(veclen))
@@ -22,9 +17,60 @@ do iii = 1, veclen
end do
deallocate(workspace1)
-end subroutine VGAM_F90_fill9
+end subroutine vgamf90fill9
+
+
+
+
+
+
+
+
+subroutine vgamf90mux34(xmat, Dmat, nrowx, ncolx, symmetric, ansvec)
+implicit none
+
+
+integer :: nrowx, ncolx, symmetric
+double precision :: xmat(nrowx,ncolx), Dmat(ncolx,ncolx), ansvec(nrowx)
+
+integer :: iii, jay, kay
+
+if(ncolx .eq. 1) then
+ do iii = 1, nrowx
+ ansvec(iii) = Dmat(1,1) * xmat(iii, 1)**2
+ end do
+ return
+end if
+
+if(symmetric .eq. 1) then
+ do iii = 1, nrowx
+ ansvec(iii) = 0.0d0
+ do jay = 1, ncolx
+ ansvec(iii) = ansvec(iii) + Dmat(jay,jay) * xmat(iii, jay)**2
+ end do
+ if(ncolx .gt. 1) then
+ do jay = 1, ncolx
+ do kay = jay+1, ncolx
+ ansvec(iii) = ansvec(iii) + 2.0 * Dmat(jay,kay) * &
+ xmat(iii, jay) * xmat(iii, kay)
+ end do
+ end do
+ end if
+ end do
+else
+ do iii = 1, nrowx
+ ansvec(iii) = 0.0d0
+ do jay = 1, ncolx
+ do kay = 1, ncolx
+ ansvec(iii) = ansvec(iii) + &
+ Dmat(jay,kay) * xmat(iii, jay) * xmat(iii, kay)
+ end do
+ end do
+ end do
+end if
+return
+end subroutine vgamf90mux34
-!end module VGAMf90 ! =========================================
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-vgam.git