[cvxopt] 18/64: Imported Upstream version 0.9.3

Andreas Tille tille at debian.org
Wed Jul 20 11:23:50 UTC 2016


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository cvxopt.

commit 3ac301c218142d53e18d4e8c7212e0a0a5347758
Author: Andreas Tille <tille at debian.org>
Date:   Wed Jul 20 08:26:54 2016 +0200

    Imported Upstream version 0.9.3
---
 INSTALL                                 |    2 +-
 LICENSE                                 |    2 +-
 doc/base.tex                            |    2 +-
 doc/base_sparse.tex                     |    2 +-
 doc/blas.tex                            |  122 +-
 doc/c-api.tex                           |    2 +-
 doc/coneprog.tex                        |  770 +++++--
 doc/copyright.tex                       |   46 +
 doc/cvxopt.tex                          |   62 +-
 doc/figures/floorplan.pdf               |  Bin 17885 -> 17887 bytes
 doc/figures/normappr.pdf                |  Bin 21648 -> 21648 bytes
 doc/figures/portfolio1.pdf              |  Bin 21166 -> 21164 bytes
 doc/figures/portfolio2.pdf              |  Bin 22818 -> 22818 bytes
 doc/intro.tex                           |    2 +-
 doc/lapack.tex                          |  296 +--
 doc/modeling.tex                        |    6 +-
 doc/printing.tex                        |   20 +-
 doc/solvers.tex                         |  515 ++---
 doc/spsolvers.tex                       |   46 +-
 examples/book/chap6/basispursuit        |   71 +-
 examples/book/chap6/cvxfit              |    5 +-
 examples/book/chap6/tv                  |   66 +-
 examples/doc/chap4/acent                |    1 -
 examples/doc/chap8/conelp               |   21 +
 examples/doc/chap8/coneqp               |   23 +
 examples/doc/chap8/l1                   |    7 +-
 examples/doc/{chap9 => chap8}/l1regls   |   83 +-
 examples/doc/chap8/mcsdp                |    4 +-
 examples/doc/{chap9 => chap8}/portfolio |    2 +-
 examples/doc/chap8/qcl1                 |    2 +-
 examples/doc/chap8/sdp                  |    2 +-
 examples/doc/chap8/socp                 |    2 +-
 examples/doc/chap9/gp                   |    2 +-
 examples/doc/chap9/l2ac                 |   79 +
 src/C/amd.c                             |    4 +-
 src/C/base.c                            |    4 +-
 src/C/blas.c                            |    4 +-
 src/C/cholmod.c                         |    4 +-
 src/C/cvxopt.h                          |    6 +-
 src/C/dense.c                           |    6 +-
 src/C/dsdp.c                            |    4 +-
 src/C/fftw.c                            |    4 +-
 src/C/glpk.c                            |    4 +-
 src/C/gsl.c                             |    4 +-
 src/C/lapack.c                          |    4 +-
 src/C/misc.h                            |    4 +-
 src/C/sparse.c                          |    8 +-
 src/C/umfpack.c                         |    4 +-
 src/python/__init__.py                  |   24 +-
 src/python/coneprog.py                  | 3456 ++++++++++++++-----------------
 src/python/cvxprog.py                   | 1465 +++----------
 src/python/info.py                      |    6 +-
 src/python/misc.py                      | 1302 +++++++++++-
 src/python/modeling.py                  |    4 +-
 src/python/mosek.py                     |    4 +-
 src/python/printing.py                  |    5 +-
 src/python/solvers.py                   |   14 +-
 src/setup.py                            |    2 +-
 58 files changed, 4494 insertions(+), 4117 deletions(-)

diff --git a/INSTALL b/INSTALL
index 5bbd30c..f70676c 100644
--- a/INSTALL
+++ b/INSTALL
@@ -1,4 +1,4 @@
-Installation instructions for CVXOPT Version 0.9.2.
+Installation instructions for CVXOPT Version 0.9.3.
 
 The package requires version 2.3 or newer of Python, and is built from 
 source, so the header files and libraries for Python must be installed, 
diff --git a/LICENSE b/LICENSE
index 0b667ff..9833a5e 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-CVXOPT version 0.9.2.  Copyright (c) 2004-2007 J. Dahl and L. Vandenberghe.
+CVXOPT version 0.9.3.  Copyright (c) 2004-2008 J. Dahl and L. Vandenberghe.
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
diff --git a/doc/base.tex b/doc/base.tex
index 47ea1f6..d602cce 100644
--- a/doc/base.tex
+++ b/doc/base.tex
@@ -25,7 +25,7 @@ lists of matrices and numbers.
 \item If \var{x} is a number (Python \intgr, \flt\ or \cmplx), a matrix
 is created with the dimensions specified by \var{size} and with all the 
 coefficients equal to \var{x}.  
-The default value of \var{size} is $(1,1)$, and the default value
+The default value of \var{size} is \tm{(1,1)}, and the default value
 of \var{tc} is the type of \var{x}.
 If necessary, the type of \var{x} is converted (from integer to double
 when used to create a matrix of type \dtc, and from integer or
diff --git a/doc/base_sparse.tex b/doc/base_sparse.tex
index 561068c..6cb65ee 100644
--- a/doc/base_sparse.tex
+++ b/doc/base_sparse.tex
@@ -620,7 +620,7 @@ from that of the matrix product.
 \begin{funcdesc}{syrk}{A, C\optional{, uplo='L'\optional{, 
 trans='N'\optional{, alpha=1.0\optional{, beta=0.0\optional{, 
 partial=False}}}}}}
-Rank-$k$ update of a sparse or dense real or complex symmetric
+Rank-\tm{k} update of a sparse or dense real or complex symmetric
 matrix:
 \[
  C := \alpha AA^T + \beta C \quad (\mathrm{trans} = \mathrm{'N'}), 
diff --git a/doc/blas.tex b/doc/blas.tex
index 8baf421..df0bb83 100644
--- a/doc/blas.tex
+++ b/doc/blas.tex
@@ -48,17 +48,17 @@ arguments that specify the structure.
 
 \begin{description}
 \item[Vector] 
-A real or complex $n$-vector is represented by a \mtrx\ of type 
-\dtc\ or \ztc\ and length $n$, with the entries of the vector stored in 
+A real or complex \tm{n}-vector is represented by a \mtrx\ of type 
+\dtc\ or \ztc\ and length \tm{n}, with the entries of the vector stored in 
 column-major order. 
 
 \item[General matrix]
-A general real or complex $m$ by $n$ matrix is represented by 
-a real or complex \mtrx\ of size ($m$, $n$).
+A general real or complex \tm{m} by \tm{n} matrix is represented by 
+a real or complex \mtrx\ of size (\tm{m}, \tm{n}).
 
 \item[Symmetric matrix]
-A real or complex symmetric matrix of order $n$ is represented
-by a real or complex \mtrx\ of size ($n$, $n$), and a character 
+A real or complex symmetric matrix of order \tm{n} is represented
+by a real or complex \mtrx\ of size (\tm{n}, \tm{n}), and a character 
 argument \var{uplo} with two possible values:  
 \code{'L'} and \code{'U'}.
 If \var{uplo} is \code{'L'}, the lower triangular part of the
@@ -84,12 +84,12 @@ X[0,n-1] & X[1,n-1] & X[2,n-1] & \cdots & X[n-1,n-1]
 \EEAS
 
 \item[Complex Hermitian matrix]
-A complex Hermitian matrix of order $n$ is represented
-by a \mtrx\ of type \ztc\ and size ($n$, $n$), and
+A complex Hermitian matrix of order \tm{n} is represented
+by a \mtrx\ of type \ztc\ and size (\tm{n}, \tm{n}), and
 a character argument \var{uplo} with the same meaning as for symmetric 
 matrices.
-A complex \mtrx\ {\var X} of size ($n$, $n$) can represent the Hermitian  
-matrices
+A complex \mtrx\ {\var X} of size (\tm{n}, \tm{n}) can represent the 
+Hermitian matrices
 \BEAS
 &
 \left[\begin{array}{ccccc}
@@ -109,14 +109,14 @@ X[n-1,0] & X[n-1,1] & X[n-1,2] & \cdots & \Re X[n-1,n-1]
 \EEAS
 
 \item[Triangular matrix]
-A real or complex triangular matrix of order $n$ is represented
-by a real or complex \mtrx\ of size ($n$, $n$), and two 
+A real or complex triangular matrix of order \tm{n} is represented
+by a real or complex \mtrx\ of size (\tm{n}, \tm{n}), and two 
 character arguments: an argument \var{uplo} with possible values 
 \code{'L'} and \code{'U'} to distinguish between lower and upper 
 triangular matrices, and an argument \var{diag} with possible values 
 \code{'U'} and \code{'N'} to distinguish between unit and non-unit 
 triangular matrices.  A square \mtrx\ {\var X} of size 
-($n$, $n$) can represent the triangular matrices
+(\tm{n}, \tm{n}) can represent the triangular matrices
 \BEAS
 & \left[\begin{array}{ccccc}
 X[0,0]   & 0        & 0        & \cdots & 0 \\
@@ -149,16 +149,16 @@ X[0,0]   & X[0,1]   & X[0,2]   & \cdots & X[0,n-1] \\
 \EEAS
 
 \item[General band matrix]
-A general real or complex $m$ by $n$ band matrix  with $k_l$
-subdiagonals and $k_u$ superdiagonals is represented by a real or 
-complex \mtrx\ \var{X} of size ($k_l+k_u+1$, $n$), and the two 
-integers $m$ and $k_l$.   
+A general real or complex \tm{m} by \tm{n} band matrix  with \tm{\s{k}{l}}
+subdiagonals and \tm{\s{k}{u}} superdiagonals is represented by a real or 
+complex \mtrx\ \var{X} of size (\tm{\s{k}{l}+\s{k}{u}+1}, \tm{n}), 
+and the two integers \tm{m} and \tm{\s{k}{l}}.   
 The diagonals of the band matrix are stored in the rows of \var{X}, 
 starting at the top diagonal, and shifted horizontally so that the 
 entries of the 
-$k$th column of the band matrix are stored in column $k$ of 
-{\var X}.  A \mtrx\ {\var X} of size ($k_l+k_u+1$, $n$) therefore
-represents the $m$ by $n$ band matrix
+\tm{k}th column of the band matrix are stored in column \tm{k} of 
+{\var X}.  A \mtrx\ {\var X} of size (\tm{\s{k}{l}+\s{k}{u}+1}, \tm{n}) 
+therefore represents the \tm{m} by \tm{n} band matrix
 \[
 \left[ \begin{array}{ccccccc}
 X[k_u,0]     & X[k_u-1,1]     & X[k_u-2,2]     & \cdots & X[0,k_u] & 0               & \cdots \\
@@ -172,17 +172,17 @@ X[k_u+k_l,0] & X[k_u+k_l-1,1] & X[k_u+k_l-2,2] & \cdots &  &  & \\
 \]
 
 \item[Symmetric band matrix]
-A real or complex symmetric band matrix of order $n$ with $k$
+A real or complex symmetric band matrix of order \tm{n} with \tm{k}
 subdiagonals, is represented by a real or complex matrix \var{X} of 
-size ($k+1$, $n$), and an argument {\it uplo} to indicate 
+size (\tm{k+1}, \tm{n}), and an argument {\it uplo} to indicate 
 whether the subdiagonals ({\it uplo} is \code{'L'}) or superdiagonals 
 ({\it uplo} is \code{'U'}) are stored.
-The $k+1$ diagonals are stored as rows of \var{X}, starting at the top 
+The \tm{k+1} diagonals are stored as rows of \var{X}, starting at the top 
 diagonal (\ie, the main diagonal if {\it uplo} is \code{'L'},  or
-the $k$th superdiagonal if {\it uplo} is \code{'U'}) and shifted
+the \tm{k}th superdiagonal if {\it uplo} is \code{'U'}) and shifted
 horizontally so that the entries of the 
-$k$th column of the band matrix are stored in column $k$ of 
-{\var X}.  A \mtrx\ \var{X} of size ($k+1$, $n$) can therefore
+\tm{k}th column of the band matrix are stored in column \tm{k} of 
+{\var X}.  A \mtrx\ \var{X} of size (\tm{k+1}, \tm{n}) can therefore
 represent the band matrices 
 \BEAS
 & \left[ \begin{array}{ccccccc}
@@ -207,10 +207,10 @@ X[0,k]   & X[1,k]   & X[2,k]   & \cdots &  &  & \\
 \EEAS
 
 \item[Hermitian  band matrix]
-A complex Hermitian band matrix of order $n$ with $k$ 
+A complex Hermitian band matrix of order \tm{n} with \tm{k} 
 subdiagonals is represented by a complex matrix of size 
-($k+1$, $n$) and an argument \var{uplo}.  
-A \mtrx\ \var{X} of size ($k+1$, $n$) can represent the band
+(\tm{k+1}, \tm{n}) and an argument \var{uplo}.  
+A \mtrx\ \var{X} of size (\tm{k+1}, \tm{n}) can represent the band
 matrices 
 \BEAS
 & \left[ \begin{array}{ccccccc}
@@ -235,11 +235,11 @@ X[k,0] & X[k-1,1] & X[k-2,2] & \cdots &  &  & \\
 \EEAS
 
 \item[Triangular band matrix]
-A triangular band matrix of order $n$ with $k$ subdiagonals or
+A triangular band matrix of order \tm{n} with \tm{k} subdiagonals or
 superdiagonals is represented by a real or complex matrix of size 
-($k+1$, $n$) and two character arguments \var{uplo} and 
+(\tm{k+1}, \tm{n}) and two character arguments \var{uplo} and 
 \var{diag}.  
-A \mtrx\ \var{X} of size ($k+1$, $n$) can represent the band
+A \mtrx\ \var{X} of size (\tm{k+1}, \tm{n}) can represent the band
 matrices 
 \BEAS
 & \left[ \begin{array}{cccc}
@@ -304,20 +304,20 @@ Euclidean norm of a vector:  returns
 \begin{funcdesc}{asum}{x}
 1-Norm of a vector: returns 
 \[
-\|x\|_1 \quad \mbox{($x$ real)}, \qquad  
-\|\Re x\|_1 + \|\Im x\|_1 \quad \mbox{($x$ complex)}.
+\|x\|_1 \quad \mbox{(\tm{x} real)}, \qquad  
+\|\Re x\|_1 + \|\Im x\|_1 \quad \mbox{(\tm{x} complex)}.
 \]
 \end{funcdesc}
 
 \begin{funcdesc}{iamax}{x}
 Returns 
 \[
- \argmax_{k=0,\ldots,n-1} |x_k| \quad \mbox{($x$ real)}, \qquad
+ \argmax_{k=0,\ldots,n-1} |x_k| \quad \mbox{(\tm{x} real)}, \qquad
  \argmax_{k=0,\ldots,n-1} |\Re x_k| + |\Im x_k| \quad 
- \mbox{($x$ complex)}. 
+ \mbox{(\tm{x} complex)}. 
 \]
 If more than one coefficient achieves the maximum, the index of the 
-first $k$ is returned.  
+first \tm{k} is returned.  
 \end{funcdesc}
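
As a brief illustration of these two functions (a small editorial sketch, not
part of the patched sources), with a short real vector:
\begin{verbatim}
>>> from cvxopt.base import matrix
>>> from cvxopt import blas
>>> x = matrix([1.0, -3.0, 2.0])
>>> blas.asum(x)
6.0
>>> blas.iamax(x)
1
\end{verbatim}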
 
 \begin{funcdesc}{swap}{x, y}
@@ -391,7 +391,7 @@ Matrix-vector  product with a real symmetric matrix:
 \[
    y := \alpha A x + \beta y,
 \]
-where $A$ is a real symmetric matrix.  
+where \tm{A} is a real symmetric matrix.  
 The arguments \var{A}, \var{x} and {\var y} must have 
 type \dtc\ and \var{alpha} and \var{beta} must be real.
 \end{funcdesc}
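
For example (an illustrative sketch added here, not from the patched text;
only the lower triangle of \var{A} is referenced):
\begin{verbatim}
>>> from cvxopt.base import matrix
>>> from cvxopt import blas
>>> A = matrix([[1.0, 2.0], [0.0, 3.0]])   # 'L' storage of [[1, 2], [2, 3]]
>>> x = matrix([1.0, 1.0])
>>> y = matrix([0.0, 0.0])
>>> blas.symv(A, x, y)    # y := A*x, so y = (1+2, 2+3) = (3.0, 5.0)
\end{verbatim}
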
@@ -403,7 +403,7 @@ matrix:
 \[
    y := \alpha A x + \beta y,
 \]
-where $A$ is real symmetric or complex Hermitian.
+where \tm{A} is real symmetric or complex Hermitian.
 The arguments \var{A}, \var{x} and {\var y} must have the same
 type (\dtc\ or \ztc).  
 Complex values of \var{alpha} and \var{beta} are only
@@ -418,7 +418,7 @@ x := Ax \quad (\mathrm{trans} = \mathrm{'N'}), \qquad
 x := A^T x \quad (\mathrm{trans} = \mathrm{'T'}), \qquad
 x := A^H x \quad (\mathrm{trans} = \mathrm{'C'}), 
 \]
-where $A$ is square and triangular.
+where \tm{A} is square and triangular.
 The arguments \var{A} and \var{x} must have the same type (\dtc\ or \ztc).
 \end{funcdesc}
 
@@ -430,7 +430,7 @@ x := A^{-1}x \quad (\mathrm{trans} = \mathrm{'N'}), \qquad
 x := A^{-T}x \quad (\mathrm{trans} = \mathrm{'T'}), \qquad 
 x := A^{-H}x \quad (\mathrm{trans} = \mathrm{'C'}), 
 \]
-where $A$ is square and triangular with nonzero diagonal 
+where \tm{A} is square and triangular with nonzero diagonal 
 elements.  The arguments \var{A} and \var{x} must have the same type 
 (\dtc\ or \ztc).
 \end{funcdesc}
@@ -444,8 +444,8 @@ y := \alpha A^T x + \beta y \quad (\mathrm{trans} = \mathrm{'T'}),
 \qquad 
 y := \alpha A^H x + \beta y \quad (\mathrm{trans} = \mathrm{'C'}),
 \]
-where  $A$ is a rectangular band matrix with $m$ rows and 
-$k_l$ subdiagonals.
+where  \tm{A} is a rectangular band matrix with \tm{m} rows and 
+\tm{\s{k}{l}} subdiagonals.
 The arguments \var{A}, \var{x} and {\var y} must have the same
 type (\dtc\ or \ztc).
 Complex values of \var{alpha} and \var{beta} are only allowed if \var{A} is
@@ -458,7 +458,7 @@ Matrix-vector  product with a real symmetric band matrix:
 \[
  y := \alpha Ax + \beta y,
 \]
-where $A$ is a real symmetric band matrix.
+where \tm{A} is a real symmetric band matrix.
 The arguments \var{A}, \var{x} and {\var y} must have type \dtc\ and 
 \var{alpha} and \var{beta} must be real.
 \end{funcdesc}
@@ -470,7 +470,7 @@ band matrix:
 \[
  y := \alpha Ax + \beta y,
 \]
-where $A$ is a real symmetric or complex Hermitian band matrix.
+where \tm{A} is a real symmetric or complex Hermitian band matrix.
 The arguments \var{A}, \var{x} and {\var y} must have the same type
 (\dtc\ or \ztc).  
 Complex values of \var{alpha} and \var{beta} are only allowed if 
@@ -497,7 +497,7 @@ x := A^{-1}x \quad (\mathrm{trans} = \mathrm{'N'}), \qquad
 x := A^{-T} x \quad (\mathrm{trans} = \mathrm{'T'}), \qquad
 x := A^{-H} x \quad (\mathrm{trans} = \mathrm{'C'}), 
 \]
-where $A$ is a triangular band matrix of with nonzero diagonal 
+where \tm{A} is a triangular band matrix with nonzero diagonal 
 elements.
 The arguments \var{A} and \var{x} must have the same type 
 (\dtc\ or \ztc).  
@@ -508,7 +508,7 @@ General rank-1 update:
 \[ 
 A := A + \alpha x y^H,
 \]
-where $A$ is a general matrix.
+where \tm{A} is a general matrix.
 The arguments \var{A}, \var{x} and \var{y} must have the same type 
 (\dtc\ or \ztc).  
 Complex values of \var{alpha} are only allowed if \var{A} is complex.
@@ -519,7 +519,7 @@ General rank-1 update:
 \[ 
 A := A + \alpha x y^T, 
 \]
-where $A$ is a general matrix.
+where \tm{A} is a general matrix.
 The arguments \var{A}, \var{x} and \var{y} must have the same type 
 (\dtc\ or \ztc).  
 Complex values of \var{alpha} are only allowed if \var{A} is complex.
@@ -530,7 +530,7 @@ Symmetric rank-1 update:
 \[
  A := A + \alpha xx^T,
 \]
-where $A$ is a real symmetric matrix.
+where \tm{A} is a real symmetric matrix.
 The arguments \var{A} and \var{x} must have type \dtc.  
 \var{alpha} must be a real number.
 \end{funcdesc}
@@ -540,7 +540,7 @@ Hermitian rank-1 update:
 \[
  A := A + \alpha xx^H, 
 \]
-where $A$ is a real symmetric or complex Hermitian matrix.
+where \tm{A} is a real symmetric or complex Hermitian matrix.
 The arguments \var{A} and \var{x} must have the same type 
 (\dtc\ or \ztc).  
 \var{alpha} must be a real number.
@@ -552,7 +552,7 @@ Symmetric rank-2  update:
 \[
  A := A + \alpha (xy^T + yx^T),
 \]
-where $A$ is a real symmetric matrix.
+where \tm{A} is a real symmetric matrix.
 The arguments \var{A}, \var{x} and \var{y} must have type \dtc.  
 \var{alpha} must be real.
 \end{funcdesc}
@@ -563,7 +563,7 @@ Symmetric rank-2  update:
 \[
  A := A + \alpha xy^H + \bar \alpha yx^H,
 \]
-where $A$ is a a real symmetric or complex Hermitian matrix.
+where \tm{A} is a real symmetric or complex Hermitian matrix.
 The arguments \var{A}, \var{x} and \var{y} must have the same type  
 (\dtc\ or \ztc).  
 Complex values of \var{alpha} are only allowed if \var{A} is complex.
@@ -635,8 +635,8 @@ if \var{A} is complex.
 
 \begin{funcdesc}{symm}{A, B, C\optional{, side='L'\optional{, 
 uplo='L'\optional{, alpha=1.0\optional{,  beta=0.0}}}}}
-Product of a real or complex symmetric matrix $A$ and a general 
-matrix $B$:
+Product of a real or complex symmetric matrix \tm{A} and a general 
+matrix \tm{B}:
 \[
  C := \alpha AB + \beta C \quad (\mathrm{side} = \mathrm{'L'}), \qquad 
  C := \alpha BA + \beta C \quad (\mathrm{side} = \mathrm{'R'}). 
@@ -689,7 +689,7 @@ Solution of a nonsingular triangular system of equations:
  A^T & \mathrm{transA} = \mathrm{'T'} \\
  A^H & \mathrm{transA} = \mathrm{'C'}, \end{array} \right.
 \]
-where $A$ is triangular and $B$ is a general matrix.
+where \tm{A} is triangular and \tm{B} is a general matrix.
 The arguments \var{A} and \var{B} must have the same type (\dtc\ or 
 \ztc).   Complex values of \var{alpha} are only allowed if \var{A} is 
 complex.
@@ -703,7 +703,7 @@ Rank-{\it k} update of a real or complex symmetric matrix {\it C}:
  \qquad 
  C := \alpha A^TA + \beta C \quad (\mathrm{trans} = \mathrm{'T'}), 
 \]
-where $A$ is a general matrix.
+where \tm{A} is a general matrix.
 The arguments \var{A} and \var{C} must have the same type (\dtc\ or 
 \ztc).  Complex values of \var{alpha} and \var{beta} are only allowed 
 if \var{A} is complex.
@@ -711,13 +711,13 @@ if \var{A} is complex.
 
 \begin{funcdesc}{herk}{A, C\optional{, uplo='L'\optional{, 
 trans='N'\optional{, alpha=1.0\optional{, beta=0.0}}}}}
-Rank-$k$ update of a real symmetric or complex Hermitian matrix $C$:
+Rank-\tm{k} update of a real symmetric or complex Hermitian matrix \tm{C}:
 \[
  C := \alpha AA^H + \beta C \quad (\mathrm{trans} = \mathrm{'N'}), 
  \qquad 
  C := \alpha A^HA + \beta C \quad (\mathrm{trans} = \mathrm{'C'}),
 \]
-where $A$ is a general matrix.
+where \tm{A} is a general matrix.
 The arguments \var{A} and \var{C} must have the same type (\dtc\ or
 \ztc).  \var{alpha} and \var{beta} must be real.
 \end{funcdesc}
@@ -731,7 +731,7 @@ Rank-{\it 2k} update of a real or complex symmetric matrix {\it C}:
  C := \alpha (A^TB + B^TA) + \beta C \quad 
   (\mathrm{trans} = \mathrm{'T'}). 
 \]
-$A$ and $B$ are general real or complex matrices.
+\tm{A} and \tm{B} are general real or complex matrices.
 The arguments \var{A}, \var{B} and \var{C} must have the same
 type.  Complex values of \var{alpha} and \var{beta} are only 
 allowed if \var{A} is complex.
@@ -739,14 +739,14 @@ allowed if \var{A} is complex.
 
 \begin{funcdesc}{her2k}{A, B, C\optional{, uplo='L'\optional{, 
 trans='N'\optional{, alpha=1.0\optional{, beta=0.0}}}}}
-Rank-$2k$ update of a real symmetric or complex Hermitian matrix $C$:
+Rank-\tm{2k} update of a real symmetric or complex Hermitian matrix \tm{C}:
 \[
  C := \alpha AB^H + \bar \alpha BA^H + \beta C \quad 
   (\mathrm{trans} = \mathrm{'N'}), \qquad 
  C := \alpha A^HB + \bar\alpha B^HA + \beta C \quad 
   (\mathrm{trans} = \mathrm{'C'}), 
 \]
-where $A$ and $B$ are general matrices.
+where \tm{A} and \tm{B} are general matrices.
 The arguments \var{A}, \var{B} and \var{C} must have the same type 
 (\dtc\ or \ztc).  Complex values of \var{alpha} are only allowed if 
 \var{A} is complex.  \var{beta} must be real.
diff --git a/doc/c-api.tex b/doc/c-api.tex
index bd24774..73d0389 100644
--- a/doc/c-api.tex
+++ b/doc/c-api.tex
@@ -100,7 +100,7 @@ for each column of the matrix the index of the first element in
 \code{\var{colptr}[0]} is \code{0}, and for 
 \code{\var{k} = 0, 1, \ldots, \var{ncols}-1},
 \code{\var{colptr}[k+1]} is equal to \code{\var{colptr}[k]} plus the 
-number of nonzeros in column $k$ of the matrix.
+number of nonzeros in column \tm{k} of the matrix.
 Thus, \code{\var{colptr}[\var{ncols}]} is equal to \var{nnz}, the 
 number of nonzero entries.
 \end{description}
diff --git a/doc/coneprog.tex b/doc/coneprog.tex
index 1f9f096..1ee7bec 100644
--- a/doc/coneprog.tex
+++ b/doc/coneprog.tex
@@ -1,31 +1,36 @@
 \chapter{Cone Programming (\module{cvxopt.solvers})}
 \label{chap:coneprog}
 
-A \emph{cone (linear) program} is an optimization problem of the form 
+In this chapter we consider convex optimization problems of the form
 \[
  \begin{array}{ll}
- \mbox{minimize}   & c^T x \\
+ \mbox{minimize}   & (1/2) x^TPx + q^T x \\
  \mbox{subject to} & G x \preceq h \\ & Ax = b.
  \end{array}
 \]
-The inequality is a generalized inequality with respect to a proper convex
-cone.  The \module{cvxopt.solvers} module provides functions for solving 
-cone programs with constraints that include (scalar) linear inequalities, 
+The linear inequality is a generalized inequality with respect to a 
+proper convex cone.  It may include componentwise vector inequalities, 
 second-order cone inequalities, and linear matrix inequalities.  
-The main solver, described in section~\ref{s-conelp}, is 
-\function{conelp()}.
-For convenience (and backward compatibility), simpler interfaces to this
-function are also provided that handle pure linear programs, second-order 
-cone programs, and semidefinite programs.  These are described in 
+The main solvers are \function{conelp()} and \function{coneqp()},
+described in sections~\ref{s-conelp} and~\ref{s-coneqp}.
+The function \function{conelp()} is restricted to problems with
+linear cost functions, but can detect primal and dual infeasibility.
+The function \function{coneqp()} solves the general quadratic problem, 
+but requires the problem to be primal and dual feasible.
+For convenience (and backward compatibility), simpler interfaces to these
+functions are also provided that handle pure linear programs, 
+quadratic programs, second-order cone programs, and semidefinite 
+programs.  These are described in 
 sections~\ref{s-lpsolver}--\ref{s-sdpsolver}.
-In section~\ref{s-conelp-struct} we explain how customized solvers
-can be implemented that exploit structure in specific classes of problems.
-The last two sections describe optional interfaces to external solvers,
+In section~\ref{s-conelp-struct} we explain how custom solvers
+can be implemented that exploit structure in cone programs.  
+The last two sections describe optional interfaces to external solvers, 
 and the algorithm parameters that control the cone programming solvers.
 
-\section{General Solver} \label{s-conelp}
-\begin{funcdesc}{conelp}{c, G, h, dims\optional{, A, b\optional{,
-primalstart\optional{, dualstart\optional{, kktsolver}}}}}
+\section{Linear Cone Programs} \label{s-conelp}
+
+\begin{funcdesc}{conelp}{c, G, h\optional{, dims\optional{, A, b\optional{,
+primalstart\optional{, dualstart\optional{, kktsolver}}}}}}
 Solves a pair of primal and dual cone programs
 \BEQ \label{e-conelp}
  \begin{array}[t]{ll}
@@ -39,9 +44,9 @@ Solves a pair of primal and dual cone programs
    & z \succeq 0.
  \end{array}
 \EEQ
-The primal variables are $x$ and the slack variable $s$.  
-The dual variables are $y$ and $z$.  The inequalities are 
-interpreted as $s \in C$, $z\in C$, where $C$ is a cone defined as a 
+The primal variables are \tm{x} and the slack variable \tm{s}.  
+The dual variables are \tm{y} and \tm{z}.  The inequalities are 
+interpreted as $s \in C$, $z\in C$, where \tm{C} is a cone defined as a 
 Cartesian product of a nonnegative orthant, a number of second-order 
 cones, and a number of positive semidefinite cones:
 \[
@@ -52,63 +57,64 @@ with
 \[
 C_0 = 
  \{ u \in \reals^l \;| \; u_k \geq 0, \; k=1, \ldots,l\}, \qquad 
-C_{k+1} = \{ (u_0, u_1) \in \reals \times \reals^{q_{k}-1} \; | \;
+C_{k+1} = \{ (u_0, u_1) \in \reals \times \reals^{r_{k}-1} \; | \;
    u_0 \geq \|u_1\|_2 \},  \quad k=0,\ldots, M-1, \qquad 
 C_{k+M+1} = \left\{ \svec(u) \; | \;
-  u \in \symm^{p_k}_+ \right\}, \quad k=0,\ldots,N-1.
+  u \in \symm^{t_k}_+ \right\}, \quad k=0,\ldots,N-1.
 \]
-Here $\svec(u)$ denotes a symmetric matrix $u$ stored as a vector 
-in column major order.  
+In this definition, $\svec(u)$ denotes a symmetric matrix \tm{u} stored 
+as a vector in column major order.  The structure of \tm{C} is specified 
+by \var{dims}.  This argument is a dictionary with three fields. 
+\begin{description}
+\item[\var{dims['l']}:] \tm{l}, the dimension of the nonnegative orthant
+ (a nonnegative integer).
+\item[\var{dims['q']}:] $[r_0, \ldots, r_{M-1}]$, 
+ a list with the dimensions of the second-order cones (positive integers).
+\item[\var{dims['s']}:] $[t_0, \ldots, t_{N-1}]$, 
+ a list with the dimensions of the positive semidefinite cones
+ (nonnegative integers).
+\end{description}
+The default value of \var{dims} is \code{\{'l': G.size[0], 'q': [], 
+'s': []\}}, \ie, by default the inequality is interpreted as a 
+componentwise vector inequality. 
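
For instance (an editorial sketch, not part of the patch), a cone built from
a two-dimensional nonnegative orthant, one second-order cone of dimension 3,
and one positive semidefinite cone of order 2 would be described as follows;
the value \tm{K} computed below is the corresponding number of rows of
\var{G} and \var{h}.
\begin{verbatim}
>>> dims = {'l': 2, 'q': [3], 's': [2]}
>>> K = dims['l'] + sum(dims['q']) + sum([k**2 for k in dims['s']])
>>> K
9
\end{verbatim}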
 
 The arguments \var{c}, \var{h} and \var{b} are real single-column dense 
 matrices.  \var{G} and \var{A} are real dense or sparse matrices.
-The default values for \var{A} and \var{b} are sparse matrices with 
-zero rows, meaning that there are no equality constraints.  
 The number of rows of \var{G} and \var{h} is equal to
 \[
- K = l + \sum_{k=0}^{M-1} q_k + \sum_{k=0}^{N-1} p_k^2.
+ K = l + \sum_{k=0}^{M-1} r_k + \sum_{k=0}^{N-1} t_k^2.
 \]
 The columns of \var{G} and \var{h} are vectors in
 \[
-\reals^l \times \reals^{q_0} \times \cdots \times 
-\reals^{q_{M-1}} \times \reals^{p_0^2}  \times \cdots \times 
-\reals^{p_{N-1}^2},
+\reals^l \times \reals^{r_0} \times \cdots \times 
+\reals^{r_{M-1}} \times \reals^{t_0^2}  \times \cdots \times 
+\reals^{t_{N-1}^2},
 \]
-where the last $N$ components represent symmetric matrices stored in 
+where the last \tm{N} components represent symmetric matrices stored in 
 column major order.  The strictly upper triangular entries of these 
 matrices are not accessed (i.e.,  the symmetric matrices are stored
 in the 'L'-type column major order used in the \module{blas} and
 \module{lapack} modules).
-
-The argument \var{dims} is a dictionary with the dimensions of the 
-cones.  It has three fields. 
-\begin{description}
-\item[\var{dims['l']}:] $l$, the dimension of the nonnegative orthant
- (a nonnegative integer).
-\item[\var{dims['q']}:] $[q_0, \ldots, q_{M-1}]$, 
-a list with the dimensions of the second-order cones (positive integers).
-\item[\var{dims['s']}:] $[p_0, \ldots, p_{N-1}]$, 
-a list with the dimensions of the positive semidefinite cones
-(nonnegative integers).
-\end{description}
+The default values for \var{A} and \var{b} are matrices with 
+zero rows, meaning that there are no equality constraints.  
 
 \var{primalstart} is a dictionary with keys \code{'x'} and \code{'s'}, 
 used as an optional primal starting point.   
 \code{primalstart['x']} and 
 \code{primalstart['s']} are real dense matrices of size
-$(n,1)$ and $(K,1)$, respectively, where $n$ is the
+\tm{(n,1)} and \tm{(K,1)}, respectively, where \tm{n} is the
 length of \var{c}.
 The vector \code{primalstart['s']} must be strictly positive with respect
-to the cone $C$.
+to the cone \tm{C}.
 
 \var{dualstart} is a dictionary with keys \code{'y'} and \code{'z'}, 
 used as an optional dual starting point.
 \code{dualstart['y']} and 
 \code{dualstart['z']} are real dense matrices of size
-$(p,1)$ and $(K,1)$, respectively, where $p$ is the 
+\tm{(p,1)} and \tm{(K,1)}, respectively, where \tm{p} is the 
 number of rows in \var{A}.
 The vector \code{dualstart['z']} must be strictly positive with respect
-to the cone $C$.
+to the cone \tm{C}.
 
 The role of the optional argument \var{kktsolver} is explained in 
 section~\ref{s-conelp-struct}.  
@@ -120,7 +126,7 @@ The \code{'status'} field  is a string with possible values
 and \code{'unknown'}.  The meaning of the other fields depends on the 
 value of \code{'status'}.
 \begin{description}
-\item[\code{'optimal'.}] In this case the \code{'x'}, \code{'s'}, 
+\item[\code{'optimal'}] In this case the \code{'x'}, \code{'s'}, 
 \code{'y'} and \code{'z'} entries contain the primal and dual solutions,
 which approximately satisfy
 \[
@@ -128,7 +134,7 @@ which approximately satisfy
  s \succeq 0, \qquad z \succeq 0,  \qquad s^T z =0.
 \]
 
-\item[\code{'primal infeasible'.}]  
+\item[\code{'primal infeasible'}]
 The \code{'x'} and \code{'s'} entries are \None, and the \code{'y'}, 
 \code{'z'} entries provide an approximate certificate of 
 infeasibility, \ie, vectors that approximately satisfy
@@ -136,7 +142,7 @@ infeasibility, \ie, vectors that approximately satisfy
  G^T z + A^T y = 0, \qquad h^T z + b^T y = -1, \qquad z \succeq 0.
 \]
 
-\item[\code{'dual infeasible'.}]  
+\item[\code{'dual infeasible'}]  
 The \code{'y'} and \code{'z'} entries are \None, and the \code{'x'} 
 and \code{'s'} entries contain an approximate certificate of dual 
 infeasibility 
@@ -144,7 +150,7 @@ infeasibility
  Gx + s = 0, \qquad Ax=0, \qquad  c^T x = -1, \qquad s \succeq 0.
 \]
 
-\item[\code{'unknown'}.] The \code{'x'}, \code{'s'}, \code{'y'}, 
+\item[\code{'unknown'}] The \code{'x'}, \code{'s'}, \code{'y'}, 
 \code{'z'} entries are \None.
 \end{description}
 
@@ -153,8 +159,8 @@ It is required that
 \Rank(A) = p, \qquad 
 \Rank(\left[\begin{array}{c} G \\ A \end{array}\right]) = n,
 \]
-where $p$ is the number or rows of $A$ and $n$ is the number of columns 
-of $G$ and $A$.
+where \tm{p} is the number of rows of \tm{A} and \tm{n} is the number of 
+columns of \tm{G} and \tm{A}.
 \end{funcdesc}
 
 As an example we solve the problem
@@ -219,8 +225,8 @@ As an example we solve the problem
 
 Only the entries of \var{G} and \var{h} defining the lower triangular 
 portions of the coefficients in the linear matrix inequalities are 
-accessed.  This means we 
-obtain the same result if we define \var{G} and \var{h} as below. 
+accessed.  We obtain the same result if we define \var{G} and \var{h} as 
+below. 
 \begin{verbatim}
 >>> G = matrix([[ 16., 7.,  24.,  -8.,   8.,  -1.,  0., -1.,  0.,  0.,   7.,  -5.,   1.,  0.,   1.,  -7.,  0.,  0.,  -4.], 
                 [-14., 2.,   7., -13., -18.,   3.,  0.,  0., -1.,  0.,   3.,  13.,  -6.,  0.,  12., -10.,  0.,  0., -28.],
@@ -228,6 +234,155 @@ obtain the same result if we define \var{G} and \var{h} as below.
 >>> h = matrix( [ -3., 5.,  12.,  -2., -14., -13., 10.,  0.,  0.,  0.,  68., -30., -19.,  0.,  99.,  23.,  0.,  0.,  10.] )
 \end{verbatim}
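
To spell out the storage convention (an editorial note, not from the patched
text), a 2-by-2 symmetric coefficient [[a, b], [b, c]] occupies four
consecutive entries of a column of \var{G}, stored in column major order;
the entry in the strictly upper triangular position is ignored:
\begin{verbatim}
>>> from cvxopt.base import matrix
>>> a, b, c = 1.0, 2.0, 3.0
>>> col = matrix([a, b, 0.0, c])   # positions (0,0), (1,0), (0,1) ignored, (1,1)
\end{verbatim}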
 
+\section{Quadratic Cone Programs} \label{s-coneqp}
+\begin{funcdesc}{coneqp}{P, q\optional{, G, h\optional{, 
+ dims\optional{, A, b\optional{, initvals\optional{, kktsolver}}}}}}
+Solves a pair of primal and dual quadratic cone programs
+\BEQ \label{e-coneqp}
+ \begin{array}[t]{ll}
+ \mbox{minimize} & (1/2) x^T Px + q^T x \\
+ \mbox{subject to} & G x + s = h \\ & Ax = b \\ & s \succeq 0
+ \end{array}
+\qquad\qquad\qquad\qquad
+ \begin{array}[t]{ll}
+ \mbox{maximize} & -(1/2) (q+G^Tz+A^Ty)^T P^\dagger
+            (q+G^Tz+A^Ty) -h^T z - b^T y \\
+ \mbox{subject to} & q + G^T z + A^T y \in \Range(P) \\ & z \succeq 0.
+ \end{array}
+\EEQ
+The primal variables are \tm{x} and the slack variable \tm{s}.  
+The dual variables are \tm{y} and \tm{z}.  The inequalities are 
+interpreted as $s \in C$, $z\in C$, where \tm{C} is a cone defined as a 
+Cartesian product of a nonnegative orthant, a number of second-order 
+cones, and a number of positive semidefinite cones:
+\[
+C = C_0 \times C_1 \times \cdots \times C_M \times C_{M+1} \times
+ \cdots \times C_{M+N}
+\]
+with
+\[
+C_0 = \{ u \in \reals^l \;| \; u_k \geq 0, \; k=1, \ldots,l\}, \qquad 
+C_{k+1} = \{ (u_0, u_1) \in \reals \times \reals^{r_{k}-1} \; | \;
+   u_0 \geq \|u_1\|_2 \},  \quad k=0,\ldots, M-1, \qquad 
+C_{k+M+1} = \left\{ \svec(u) \; | \;
+  u \in \symm^{t_k}_+ \right\}, \quad k=0,\ldots,N-1.
+\]
+In this definition, $\svec(u)$ denotes a symmetric matrix \tm{u} stored as
+a vector in column major order.  The structure of \tm{C} is specified by 
+\var{dims}.  This argument is a dictionary with three fields. 
+\begin{description}
+\item[\var{dims['l']}:] \tm{l}, the dimension of the nonnegative orthant
+ (a nonnegative integer).
+\item[\var{dims['q']}:] $[r_0, \ldots, r_{M-1}]$, 
+ a list with the dimensions of the second-order cones (positive integers).
+\item[\var{dims['s']}:] $[t_0, \ldots, t_{N-1}]$, 
+ a list with the dimensions of the positive semidefinite cones
+ (nonnegative integers).
+\end{description}
+The default value of \var{dims} is \code{\{'l': G.size[0], 'q': [], 
+'s': []\}}, \ie, by default the inequality is interpreted as a 
+componentwise vector inequality. 
+
+\var{P} is a square dense or sparse real matrix, representing a 
+positive semidefinite symmetric matrix in \code{'L'} storage, \ie, only 
+the lower triangular part of \var{P} is referenced.  
+\var{q} is a real single-column dense matrix.
+
+The arguments \var{h} and \var{b} are real single-column dense 
+matrices.  \var{G} and \var{A} are real dense or sparse matrices.
+The number of rows of \var{G} and \var{h} is equal to
+\[
+ K = l + \sum_{k=0}^{M-1} r_k + \sum_{k=0}^{N-1} t_k^2.
+\]
+The columns of \var{G} and \var{h} are vectors in
+\[
+\reals^l \times \reals^{r_0} \times \cdots \times 
+\reals^{r_{M-1}} \times \reals^{t_0^2}  \times \cdots \times 
+\reals^{t_{N-1}^2},
+\]
+where the last \tm{N} components represent symmetric matrices stored in 
+column major order.  The strictly upper triangular entries of these 
+matrices are not accessed (i.e.,  the symmetric matrices are stored
+in the 'L'-type column major order used in the \module{blas} and
+\module{lapack} modules).
+The default values for \var{G}, \var{h}, \var{A} and \var{b} are 
+matrices with zero rows, meaning that there are no inequality or
+equality constraints.  
+
+\var{initvals} is a dictionary with keys \code{'x'}, \code{'s'}, 
+\code{'y'}, \code{'z'} used as an optional starting point.   
+The vectors \code{initvals['s']} and \code{initvals['z']} 
+must be strictly positive with respect to the cone \tm{C}.
+If the argument \var{initvals} or any of the four entries in it is missing, 
+default starting points are used for the corresponding variables.
+
+The role of the optional argument \var{kktsolver} is explained in 
+section~\ref{s-conelp-struct}.  
+
+\function{coneqp()} returns a dictionary with keys \code{'status'}, 
+\code{'x'}, \code{'s'}, \code{'y'}, \code{'z'}.  
+The \code{'status'} field  is a string with possible values
+\code{'optimal'} and \code{'unknown'}.  
+\begin{description}
+\item[\code{'optimal'}] In this case the \code{'x'}, \code{'s'}, 
+\code{'y'} and \code{'z'} entries contain primal and dual 
+solutions, which approximately satisfy
+\[
+ Gx+s = h, \qquad Ax = b, \qquad Px + G^Tz + A^T y + q = 0, \qquad
+ s \succeq 0, \qquad z \succeq 0, \qquad s^T z  = 0.
+\]
+\item[\code{'unknown'}] The \code{'x'}, \code{'s'}, \code{'y'}, 
+\code{'z'} entries are \None.
+\end{description}
+
+It is required that the problem is solvable and that 
+\[
+\Rank(A) = p, \qquad 
+\Rank(\left[\begin{array}{c} P \\ G \\ A \end{array}\right]) = n,
+\]
+where \tm{p} is the number of rows of \tm{A} and \tm{n} is the number of columns 
+of \tm{G} and \tm{A}.
+\end{funcdesc}
+
+As an example, we solve a constrained least-squares problem
+\[
+ \begin{array}{ll}
+ \mbox{minimize} & \|Ax - b\|_2^2 \\
+ \mbox{subject to} &  x \succeq 0 \\
+    & \|x\|_2 \leq 1 
+ \end{array}
+\]
+with 
+\[
+ A = \left[ \begin{array}{rrr}
+  0.3 &  0.6 & -0.3 \\
+ -0.4 &  1.2 &  0.0   \\
+ -0.2 & -1.7 &  0.6  \\
+ -0.4 &  0.3 & -1.2 \\
+  1.3 & -0.3 & -2.0 
+ \end{array} \right], \qquad 
+ b = \left[ \begin{array}{r} 1.5 \\ 0.0 \\ -1.2 \\ -0.7 \\ 0.0 
+ \end{array} \right]. 
+\]
+\begin{verbatim}
+>>> from cvxopt import base, solvers
+>>> from cvxopt.base import matrix
+>>> A = matrix([ [ .3, -.4,  -.2,  -.4,  1.3 ], 
+                 [ .6, 1.2, -1.7,   .3,  -.3 ],
+                 [-.3,  .0,   .6, -1.2, -2.0 ] ])
+>>> b = matrix([ 1.5, .0, -1.2, -.7, .0])
+>>> m, n = A.size
+>>> I = matrix(0.0, (n,n))
+>>> I[::n+1] = 1.0
+>>> G = matrix([-I, matrix(0.0, (1,n)), I])
+>>> h = matrix(n*[0.0] + [1.0] + n*[0.0])
+>>> dims = {'l': n, 'q': [n+1], 's': []}
+>>> x = solvers.coneqp(A.T*A, -A.T*b, G, h, dims)['x']
+>>> print x
+[ 7.26e-01]
+[ 6.18e-01]
+[ 3.03e-01]
+\end{verbatim}
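
As a quick follow-up check (added here for illustration, not part of the
original listing), the norm constraint and the residual can be evaluated at
the computed solution with \module{blas} routines:
\begin{verbatim}
>>> from cvxopt import blas
>>> print blas.nrm2(x)        # Euclidean norm of x; at most 1 up to solver tolerance
>>> print blas.nrm2(A*x - b)  # least-squares residual ||Ax - b||_2
\end{verbatim}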
 
 \section{Linear Programming} \label{s-lpsolver}
 The function \function{lp()} is an interface to \function{conelp()} for 
@@ -249,20 +404,19 @@ Solves the pair of primal and dual linear programs
    & z \succeq 0.
  \end{array}
 \]
-All inequalities are componentwise vector inequalities.
+The inequalities are componentwise vector inequalities.
 
 The \var{solver} argument is used to choose among three solvers.  
-When it is omitted or \None, the CVXOPT function 
-\function{solvers.conelp()} is used.   
-The external solvers GLPK and MOSEK (if installed) can be 
-selected by setting \code{\var{solver} = 'glpk'} or 
-\code{\var{solver} = 'mosek'}; see section~\ref{s-external}.
+When it is omitted or \None, the CVXOPT function \function{conelp()} is 
+used.   The external solvers GLPK and MOSEK (if installed) can be selected
+by setting \code{\var{solver} = 'glpk'} or \code{\var{solver} = 'mosek'}; 
+see section~\ref{s-external}.
 
 The meaning of the other arguments and the return value are the same as 
 for \function{conelp()} called with 
 \code{dims = \{'l': G.size[0], 'q': [], 's': []\}}. 
-No certificates of primal or dual infeasibility are returned with the 
-\code{solver = 'glpk'} option. 
+The initial values are ignored when \code{solver = 'mosek'} or 
+\code{solver = 'glpk'}.
 \end{funcdesc}
 
 As a simple example we solve the LP
@@ -286,6 +440,136 @@ As a simple example we solve the LP
 [ 1.00e+00]
 \end{verbatim}
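
To illustrate the \var{solver} argument (an editorial sketch with made-up
data, assuming GLPK has been installed), the same call pattern selects the
external solver:
\begin{verbatim}
>>> from cvxopt.base import matrix
>>> from cvxopt import solvers
>>> c = matrix([1.0, 1.0])
>>> G = matrix([[-1.0, 0.0], [0.0, -1.0]])     # encodes x >= 0
>>> h = matrix([0.0, 0.0])
>>> sol = solvers.lp(c, G, h)                  # default: conelp()
>>> sol = solvers.lp(c, G, h, solver='glpk')   # external GLPK solver
\end{verbatim}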
 
+\section{Quadratic Programming} \label{s-qp}
+The function \function{qp()} is an interface to \function{coneqp()} for 
+quadratic programs.  It also provides the option of using the quadratic 
+programming solver from MOSEK.
+\begin{funcdesc}{qp}{P, q\optional{, G, h \optional{, A, b\optional{,
+solver\optional{, initvals}}}}}
+Solves the pair of primal and dual convex quadratic programs 
+\[
+\begin{array}[t]{ll}
+\mbox{minimize} & (1/2) x^TPx + q^T x \\
+\mbox{subject to} & Gx \preceq h \\ & Ax = b.
+\end{array}
+\qquad\qquad\qquad\qquad
+ \begin{array}[t]{ll}
+ \mbox{maximize} & -(1/2) (q+G^Tz+A^Ty)^T P^\dagger
+            (q+G^Tz+A^Ty) -h^T z - b^T y \\
+ \mbox{subject to} & q + G^T z + A^T y \in \Range(P) \\ & z \succeq 0.
+ \end{array}
+\]
+The inequalities are componentwise vector inequalities.
+
+The default CVXOPT solver is used when the \var{solver} argument
+is absent or \None.  The MOSEK solver (if installed) can be 
+selected by setting 
+\code{\var{solver} = 'mosek'}; see section~\ref{s-external}.
+The meaning of the other arguments and the return value is the same as 
+for \function{coneqp()} called with 
+\code{dims = \{'l': G.size[0], 'q': [], 's': []\}}.
+
+When \code{solver = 'mosek'} is used, the initial values are ignored,
+and the \code{'status'} string in the solution dictionary 
+can take four possible values: \code{'optimal'},
+\code{'unknown'}, 
+\code{'primal infeasible'}, \code{'dual infeasible'}. 
+\begin{description}
+\item [\code{'primal infeasible'}]  
+This means that a certificate of primal infeasibility has been found.   
+The \code{'x'} and \code{'s'} 
+entries are \None, and the
+\code{'z'} and \code{'y'} entries are vectors that approximately satisfy
+\[
+ G^Tz + A^T y = 0, \qquad h^Tz + b^Ty = -1, \qquad z \succeq 0.
+\]
+
+\item [\code{'dual infeasible'}]  This means that a certificate of
+dual infeasibility has been found.   The \code{'z'} and \code{'y'}
+entries are \None, and the \code{'x'} and \code{'s'} entries are
+vectors that approximately satisfy
+\[
+ Px = 0, \qquad q^Tx = -1, \qquad Gx + s = 0, \qquad Ax=0, \qquad
+ s \succeq  0.
+\]
+\end{description}
+\end{funcdesc}
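
Before turning to the book example below, here is a minimal direct call
(an editorial sketch with illustrative data, not taken from the text):
\begin{verbatim}
>>> from cvxopt.base import matrix
>>> from cvxopt.solvers import qp
>>> P = matrix([[2.0, .5], [.5, 1.0]])
>>> q = matrix([1.0, 1.0])
>>> G = matrix([[-1.0, 0.0], [0.0, -1.0]])   # encodes x >= 0
>>> h = matrix([0.0, 0.0])
>>> A = matrix([[1.0], [1.0]])               # 1-by-2 equality constraint matrix
>>> b = matrix(1.0)
>>> sol = qp(P, q, G, h, A, b)
>>> print sol['x']
\end{verbatim}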
+
+As an example we compute the trade-off curve on page 187
+of the book \citetitle{http://www.stanford.edu/\~{}boyd/cvxbook}{Convex 
+Optimization}, by solving the quadratic program 
+\[
+\begin{array}{ll}
+\mbox{minimize} & -\bar p^T x + \mu x^T S x \\
+\mbox{subject to} & \ones^T x = 1, \quad x \succeq 0
+\end{array}
+\]
+for a sequence of positive values of $\mu$. 
+The code below computes the trade-off curve and produces two figures 
+using the \ulink{Matplotlib}{http://matplotlib.sourceforge.net} package.
+\begin{center}
+\includegraphics[width=10cm]{figures/portfolio1.eps}
+\hspace*{\fill}
+\includegraphics[width=10cm]{figures/portfolio2.eps}
+\end{center}
+
+\begin{verbatim}
+from math import sqrt
+from cvxopt.base import matrix
+from cvxopt.blas import dot 
+from cvxopt.solvers import qp
+import pylab
+
+# Problem data.
+n = 4
+S = matrix([[ 4e-2,  6e-3, -4e-3,    0.0 ], 
+            [ 6e-3,  1e-2,  0.0,     0.0 ],
+            [-4e-3,  0.0,   2.5e-3,  0.0 ],
+            [ 0.0,   0.0,   0.0,     0.0 ]])
+pbar = matrix([.12, .10, .07, .03])
+G = matrix(0.0, (n,n))
+G[::n+1] = -1.0
+h = matrix(0.0, (n,1))
+A = matrix(1.0, (1,n))
+b = matrix(1.0)
+
+# Compute trade-off.
+N = 100
+mus = [ 10**(5.0*t/N-1.0) for t in xrange(N) ]
+portfolios = [ qp(mu*S, -pbar, G, h, A, b)['x'] for mu in mus ]
+returns = [ dot(pbar,x) for x in portfolios ]
+risks = [ sqrt(dot(x, S*x)) for x in portfolios ]
+
+# Plot trade-off curve and optimal allocations.
+pylab.figure(1, facecolor='w')
+pylab.plot(risks, returns)
+pylab.xlabel('standard deviation')
+pylab.ylabel('expected return')
+pylab.axis([0, 0.2, 0, 0.15])
+pylab.title('Risk-return trade-off curve (fig 4.12)')
+pylab.yticks([0.00, 0.05, 0.10, 0.15])
+
+pylab.figure(2, facecolor='w')
+c1 = [ x[0] for x in portfolios ] 
+c2 = [ x[0] + x[1] for x in portfolios ]
+c3 = [ x[0] + x[1] + x[2] for x in portfolios ] 
+c4 = [ x[0] + x[1] + x[2] + x[3] for x in portfolios ]
+pylab.fill(risks + [.20], c1 + [0.0], '#F0F0F0') 
+pylab.fill(risks[-1::-1] + risks, c2[-1::-1] + c1, facecolor = '#D0D0D0') 
+pylab.fill(risks[-1::-1] + risks, c3[-1::-1] + c2, facecolor = '#F0F0F0') 
+pylab.fill(risks[-1::-1] + risks, c4[-1::-1] + c3, facecolor = '#D0D0D0') 
+pylab.axis([0.0, 0.2, 0.0, 1.0])
+pylab.xlabel('standard deviation')
+pylab.ylabel('allocation')
+pylab.text(.15,.5,'x1')
+pylab.text(.10,.7,'x2')
+pylab.text(.05,.7,'x3')
+pylab.text(.01,.7,'x4')
+pylab.title('Optimal allocations (fig 4.12)')
+pylab.show()
+\end{verbatim}
+
+
 \section{Second-Order Cone Programming} \label{s-socpsolver}
 The function \function{socp()} is a simpler interface to 
 \function{conelp()} for cone programs with no linear matrix inequality 
@@ -319,20 +603,22 @@ are componentwise vector inequalities.
 In the other inequalities, it is assumed that the variables are partitioned
 as
 \[
- s_k = (s_{k0}, s_{k1}) \in\reals\times\reals^{q_{k}-1}, \qquad 
- z_k = (z_{k0}, z_{k1}) \in\reals\times\reals^{q_{k}-1}.
+ s_k = (s_{k0}, s_{k1}) \in\reals\times\reals^{r_{k}-1}, \qquad 
+ z_k = (z_{k0}, z_{k1}) \in\reals\times\reals^{r_{k}-1}, \qquad
+ k=1,\ldots,M.
 \]
 The input argument \var{c} is a real single-column dense matrix.
-The arguments \var{Gl} and \var{hl} are the coefficient matrix $G_0$
-and the righthand side $h_0$ of the componentwise inequalities.
+The arguments \var{Gl} and \var{hl} are the coefficient matrix 
+\tm{\s{G}{0}} and the righthand side \tm{\s{h}{0}} 
+of the componentwise inequalities.
 \var{Gl} is a real dense or sparse matrix; \var{hl} is a real single-column
 dense matrix.   The default values for \var{Gl} and \var{hl} are matrices
 with zero rows.
 
-The argument \var{Gq} is a list of $M$ dense or sparse matrices 
-$G_1$, \ldots, $G_M$. 
-The argument \var{hq} is a list of $M$ dense single-column matrices 
-$h_1$, \ldots, $h_M$.  
+The argument \var{Gq} is a list of \tm{M} dense or sparse matrices 
+\tm{\s{G}{1}}, \ldots, \tm{\s{G}{M}}. 
+The argument \var{hq} is a list of \tm{M} dense single-column matrices 
+\tm{\s{h}{1}}, \ldots, \tm{\s{h}{M}}.  
 The elements of \var{Gq} and \var{hq} must have at least one row.
 The default values of \var{Gq} and \var{hq} are empty lists.
 
@@ -352,17 +638,17 @@ equality constraints.
 primal, respectively, dual starting points.
 \var{primalstart} has elements \code{'x'}, \code{'sl'}, \code{'sq'}.
 \code{primalstart['x']} and \code{primalstart['sl']} are single-column
- dense matrices with the initial values of $x$ and $s_0$; 
+ dense matrices with the initial values of \tm{x} and \tm{\s{s}{0}}; 
 \code{primalstart['sq']} is a list of single-column matrices with the
-initial values of $s_1$, \ldots, $s_M$.
+initial values of \tm{\s{s}{1}}, \ldots, \tm{\s{s}{M}}.
 The initial values must satisfy the inequalities in the primal problem 
 strictly, but not necessarily the equality constraints.
 
 \var{dualstart} has elements \code{'y'}, \code{'zl'}, \code{'zq'}.
 \code{dualstart['y']} and \code{dualstart['zl']} are single-column dense 
-matrices with the initial values of $y$ and $z_0$.
+matrices with the initial values of \tm{y} and \tm{\s{z}{0}}.
 \code{dualstart['zq']} is a list of single-column matrices with the 
-initial values of $z_1$, \ldots, $z_M$.  These values must
+initial values of \tm{\s{z}{1}}, \ldots, \tm{\s{z}{M}}.  These values must
 satisfy the dual inequalities strictly, but not necessarily the equality
 constraint.
 
@@ -379,7 +665,6 @@ The \var{'sq'} and \var{'zq'} fields are lists with the primal slacks and
 dual variables associated with the second-order cone inequalities.
 \end{funcdesc}
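
Conceptually (an editorial sketch of the relation between the two interfaces,
not the library's internal code), the cone dimensions that correspond to such
a problem in the \function{conelp()} format can be written as:
\begin{verbatim}
def socp_dims(Gl, Gq):
    # Cone dimensions in the conelp() format for the socp() arguments Gl, Gq.
    return {'l': Gl.size[0], 'q': [Gk.size[0] for Gk in Gq], 's': []}
\end{verbatim}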
 
-
 As an example, we solve  the second-order cone program
 \[
 \begin{array}{ll}
@@ -458,27 +743,27 @@ The inequalities
 are componentwise vector inequalities.   The other inequalities
 are matrix inequalities (\ie, they require the lefthand sides 
 to be positive semidefinite).
-We use the notation $\svec(z)$ to denote a symmetric matrix $z$ 
+We use the notation $\svec(z)$ to denote a symmetric matrix \tm{z} 
 stored in column major order as a column vector.
 
-The input argument \var{c} is a dense real matrix with one column of
-length $n$.
-The arguments \var{Gl} and \var{hl} are the coefficient matrix $G_0$
-and the righthand side $h_0$ of the componentwise inequalities.
+The input argument \var{c} is a real single-column dense matrix.
+The arguments \var{Gl} and \var{hl} are the coefficient matrix 
+\tm{\s{G}{0}} and the righthand side \tm{\s{h}{0}} of the 
+componentwise inequalities.
 \var{Gl} is a real dense or sparse matrix; \var{hl} is a real single-column
 dense matrix.   The default values for \var{Gl} and \var{hl} are matrices
 with zero rows.
 
-\var{Gs} and \var{hs} are lists of length $N$ that specify the 
+\var{Gs} and \var{hs} are lists of length \tm{N} that specify the 
 linear matrix inequality constraints.
-\var{Gs} is a list of $N$ dense or sparse real matrices  
-$G_1$, \ldots, $G_M$.
+\var{Gs} is a list of \tm{N} dense or sparse real matrices  
+\tm{\s{G}{1}}, \ldots, \tm{\s{G}{N}}.
 The columns of these matrices can be interpreted as 
 symmetric matrices stored in column major order, using the BLAS 'L'-type
 storage (\ie, only the entries corresponding to lower triangular positions
 are accessed). 
-\var{hs} is a list of $N$ dense symmetric matrices $h_1$,
-\ldots, $h_N$.
+\var{hs} is a list of \tm{N} dense symmetric matrices \tm{\s{h}{1}},
+\ldots, \tm{\s{h}{N}}.
 Only the lower triangular elements of these matrices are accessed.
 The default values for \code{Gs} and \code{hs} are empty lists.
 
@@ -498,18 +783,18 @@ The optional argument \var{primalstart} is a dictionary with keys
 \code{'x'}, \code{'sl'}, and \code{'ss'}, used as an optional primal 
 starting point. 
 \code{primalstart['x']} and \code{primalstart['sl']} are single-column
-dense matrices with the initial values of $x$ and $s_0$; 
+dense matrices with the initial values of \tm{x} and \tm{\s{s}{0}}; 
 \code{primalstart['ss']} is a list of square matrices with the
-initial values of $s_1$, \ldots, $s_N$.
+initial values of \tm{\s{s}{1}}, \ldots, \tm{\s{s}{N}}.
 The initial values must satisfy the inequalities in the primal problem 
 strictly, but not necessarily the equality constraints.
 
 \var{dualstart} is a dictionary with keys \code{'y'}, \code{'zl'}, 
 \code{'zs'}, used as an optional dual starting point.
 \code{dualstart['y']} and \code{dualstart['zl']} are single-column dense 
-matrices with the initial values of $y$ and $z_0$.
+matrices with the initial values of \tm{y} and \tm{\s{z}{0}}.
 \code{dualstart['zs']} is a list of square matrices with the 
-initial values of $z_1$, \ldots, $z_N$.  These values must
+initial values of \tm{\s{z}{1}}, \ldots, \tm{\s{z}{N}}.  These values must
 satisfy the dual inequalities strictly, but not necessarily the equality
 constraint.
 
@@ -601,34 +886,38 @@ triangular entries need to be provided, so in the example \var{h} and
 
 
 \section{Exploiting Structure} \label{s-conelp-struct}
-By default, the function \function{conelp()} exploits no problem 
+By default, the functions \function{conelp()} and \function{coneqp()} 
+exploit no problem 
 structure except (to some limited extent) sparsity.  
 Two mechanisms are provided for implementing customized solvers that 
 take advantage of problem structure.
 
 \begin{description}
 \item[\emph{Providing a function for solving KKT equations.}]
-The most expensive step of each iteration of \function{conelp()} is the
+The most expensive step of each iteration of 
+\function{conelp()} or \function{coneqp()} is the
 solution of a set of linear equations (`KKT equations') of the form
 \BEQ \label{e-conelp-kkt}
  \left[\begin{array}{ccc}
-  0 & A^T & G^T \\
+  P & A^T & G^T \\
   A & 0   & 0  \\
   G & 0   & -W^T W \end{array}\right]
  \left[\begin{array}{c} u_x \\ u_y \\ u_z \end{array}\right]
- = \left[\begin{array}{c} b_x \\ b_y \\ b_z \end{array}\right].
+ = \left[\begin{array}{c} b_x \\ b_y \\ b_z \end{array}\right]
 \EEQ
-The matrix $W$ depends on the current iterates and is defined as follows.
-We use the notation of~section~\ref{s-conelp}.  Suppose 
+(with \tm{P=0} in \function{conelp()}).
+The matrix \tm{W} depends on the current iterates and is defined as 
+follows.  We use the notation of sections~\ref{s-conelp} 
+and~\ref{s-coneqp}.  Suppose 
 \[
  u = \left(u_\mathrm{l}, \; u_{\mathrm{q},0}, \; \ldots, \; 
  u_{\mathrm{q},M-1}, \; \svec{(u_{\mathrm{s},0})}, \; \ldots, \; 
   \svec{(u_{\mathrm{s},N-1})}\right), \qquad
  u_\mathrm{l} \in\reals^l, \qquad 
- u_{\mathrm{q},k} \in\reals^{q_k}, \quad k = 0,\ldots,M-1, \qquad 
- u_{\mathrm{s},k} \in\symm^{p_k},  \quad k = 0,\ldots,N-1.
+ u_{\mathrm{q},k} \in\reals^{r_k}, \quad k = 0,\ldots,M-1, \qquad 
+ u_{\mathrm{s},k} \in\symm^{t_k},  \quad k = 0,\ldots,N-1.
 \]
-Then $W$ is a block-diagonal matrix, 
+Then \tm{W} is a block-diagonal matrix, 
 \[
  Wu = \left( W_\mathrm{l} u_\mathrm{l}, \;
  W_{\mathrm{q},0} u_{\mathrm{q},0}, \; \ldots, \;
@@ -639,7 +928,7 @@ Then $W$ is a block-diagonal matrix,
 with the following diagonal blocks.
 \BIT
 \item The first block is a \emph{positive diagonal scaling} with a 
- vector $d$:
+ vector \tm{d}:
 \[
   W_\mathrm{l} = \diag(d), \qquad W_\mathrm{l}^{-1} = \diag(d)^{-1}.
 \]
@@ -648,7 +937,7 @@ This transformation is symmetric:
   W_\mathrm{l}^T = W_\mathrm{l}. 
 \]
 
-\item The next $M$ blocks are positive multiples of \emph{hyperbolic 
+\item The next \tm{M} blocks are positive multiples of \emph{hyperbolic 
  Householder transformations}:
 \[
   W_{\mathrm{q},k} = \beta_k ( 2 v_k v_k^T - J),
@@ -667,7 +956,7 @@ These transformations are also symmetric:
   W_{\mathrm{q},k}^T = W_{\mathrm{q},k}. 
 \]
 
-\item The last $N$ blocks are \emph{congruence transformations} with 
+\item The last \tm{N} blocks are \emph{congruence transformations} with 
  nonsingular matrices:
 \[
   W_{\mathrm{s},k} \svec{(u_{\mathrm{s},k})} = 
@@ -676,7 +965,7 @@ These transformations are also symmetric:
  \svec{(r_k^{-T} u_{\mathrm{s},k} r_k^{-1})}, \qquad
  k = 0,\ldots,N-1.
 \]
-In  general, this operation is not symmetric, and
+In  general, this operation is not symmetric: 
 \[
   W_{\mathrm{s},k}^T \svec{(u_{\mathrm{s},k})} = 
   \svec{(r_k u_{\mathrm{s},k} r_k^T)}, \qquad
@@ -687,10 +976,11 @@ In  general, this operation is not symmetric, and
  k = 0,\ldots,N-1.
 \]
 \EIT
-It is often possible to exploit structure in the coefficient matrices
-$G$ and $A$ to solve~(\ref{e-conelp-kkt}) faster than by 
+It is often possible to exploit problem structure to 
+solve~(\ref{e-conelp-kkt}) faster than by 
 standard methods.  The last argument \var{kktsolver} of 
-\function{conelp()} allows the user to supply a Python  function for 
+\function{conelp()} and \function{coneqp()} allows the user to 
+supply a Python  function for 
 solving the KKT equations.
 This function will be called as \samp{f = kktsolver(W)}, where 
 \var{W} is a dictionary that contains the parameters of the scaling:
@@ -698,12 +988,12 @@ This function will be called as \samp{f = kktsolver(W)}, where
 \BIT
 \item \code{W['d']} is the positive vector that defines the diagonal
  scaling.   \code{W['di']} is its componentwise inverse.
-\item \code{W['beta']} and \code{W['v']} are lists of length $M$ with 
+\item \code{W['beta']} and \code{W['v']} are lists of length \tm{M} with 
  the coefficients and vectors that define the hyperbolic Householder 
  transformations.
-\item \code{W['r']} is a list of length $N$ with the matrices that
+\item \code{W['r']} is a list of length \tm{N} with the matrices that
 define the congruence transformations.  
- \code{W['rti']} is  a list of length $N$ with the transposes of the 
+ \code{W['rti']} is  a list of length \tm{N} with the transposes of the 
  inverses of the matrices in \code{W['r']}.
 \EIT
 
@@ -716,16 +1006,25 @@ the last component scaled, \ie, on exit,
 \[
   b_x := u_x, \qquad b_y := u_y, \qquad b_z := W u_z.
 \]
+In other words, the function returns the solution of
+\[
+ \left[\begin{array}{ccc}
+  P & A^T & G^TW^{-1} \\
+  A & 0   & 0  \\
+  G & 0   & -W^T \end{array}\right]
+ \left[\begin{array}{c} \hat u_x \\ \hat u_y \\ \hat u_z \end{array}\right]
+ = \left[\begin{array}{c} b_x \\ b_y \\ b_z \end{array}\right].
+\]
 
 \item[\emph{Specifying constraints via Python functions}.]
-In the default use of \function{conelp()}, the arguments \var{G} and 
-\var{A} are the coefficient matrices in the constraints 
-of~(\ref{e-conelp}).
-It is also possible to specify these matrices by providing Python functions
-that evaluate the corresponding matrix-vector products and their adjoints.
-
-If the argument \var{G} of \function{conelp()} is a Python
-function, it should be defined as follows:
+In the default use of \function{conelp()} and \function{coneqp()}, the 
+linear constraints and the quadratic term in the objective are 
+parameterized by CVXOPT matrices \var{G}, \var{A}, \var{P}.  It is 
+possible to specify these parameters via Python functions that evaluate 
+the corresponding matrix-vector products and their adjoints.
+
+If the argument \var{G} of \function{conelp()} or \function{coneqp()} 
+is a Python function, it should be defined as follows:
 \begin{funcdesc}{\var{G}}{\var{x}, \var{y} \optional{, 
 \var{alpha}\optional{, \var{beta}\optional{, \var{trans}}}}} 
 This evaluates the matrix-vector products
@@ -752,7 +1051,19 @@ The default values of the optional arguments must be
 \code{alpha = 1.0}, \code{beta = 0.0}, \code{trans = 'N'}.
 \end{funcdesc}
 
-If \var{G} or \var{A} are Python functions, then the argument 
+If the argument \var{P} of \function{coneqp()} is a Python function,
+then it must be defined as follows:
+\begin{funcdesc}{\var{P}}{\var{x}, \var{y} \optional{, 
+\var{alpha}\optional{, \var{beta}}}} 
+This evaluates the matrix-vector product
+\[
+y := \alpha Px + \beta y.
+\]
+The default values of the optional arguments must be
+\code{alpha = 1.0}, \code{beta = 0.0}. 
+\end{funcdesc}
+
+If \var{G}, \var{A}, or \var{P} is a Python function, then the argument 
 \var{kktsolver} must also be provided.
 \end{description}
 
@@ -840,8 +1151,7 @@ def l1(P, q):
         where D1 = diag(di[:m])^2, D2 = diag(di[m:])^2 and di = W['di'].
         """
         
-        # Factor A = 4*P'*D*P where D = d1.*d2 ./(d1+d2) and
-        # d1 = di[:m].^2, d2 = di[m:].^2.
+        # Factor A = 4*P'*D*P where D = d1.*d2 ./(d1+d2) and d1 = di[:m].^2, d2 = di[m:].^2.
 
         di = W['di']
         d1, d2 = di[:m]**2, di[m:]**2
@@ -852,9 +1162,8 @@ def l1(P, q):
         def f(x, y, z):
 
             """
-            On entry bx, bz are stored in x, z.
-            On exit x, z contain the solution, with z scaled: z./di is 
-            returned instead of z. 
+            On entry bx, bz are stored in x, z.  On exit x, z contain the solution, 
+            with z scaled: z./di is returned instead of z. 
             """"
 
             # Solve for x[:n]:
@@ -932,10 +1241,9 @@ def mcsdp(w):
         """
         Congruence transformation
 
-	    x := alpha * r'*x*r.
+            x := alpha * r'*x*r.
 
-        r is a matrix of size (n, n). 
-        x is a matrix of size (n**2, 1), representing a symmetric matrix stored in column major order.
+        r is a square matrix of order n.  x is a vector of length n**2
+        that represents a symmetric matrix stored in column major order.
         """
 
         # Scale diagonal of x by 1/2.  
@@ -943,10 +1251,12 @@ def mcsdp(w):
     
         # a := tril(x)*r 
         a = +r
-        blas.trmm(x, a, side = 'L')
+        tx = matrix(x, (n,n))
+        blas.trmm(tx, a, side = 'L')
 
         # x := alpha*(a*r' + r*a') 
-        blas.syr2k(r, a, x, trans = 'T', alpha = alpha)
+        blas.syr2k(r, a, tx, trans = 'T', alpha = alpha)
+        x[:] = tx[:]
 
     dims = {'l': 0, 'q': [], 's': [n]}
 
@@ -968,9 +1278,9 @@ def mcsdp(w):
 
         # Cholesky factorization of tsq = t.*t.
         tsq = t**2
-	lapack.potrf(tsq)
+        lapack.potrf(tsq)
 
-	def f(x, y, z):
+        def f(x, y, z):
             """
             On entry, x contains bx, y is empty, and z contains bz stored 
             in column major order.
@@ -1001,7 +1311,7 @@ def mcsdp(w):
             #    = -vec(rti' * (diag(x) + bz) * rti 
             cngrnc(rti, z, alpha = -1.0)
 
-	return f
+        return f
 
     sol = solvers.conelp(c, G, w[:], dims, kktsolver = F) 
     return sol['x'], sol['z']
@@ -1016,8 +1326,8 @@ In the second example, we use a similar trick to solve the problem
  \mbox{subject to} & \|Au - b\|_2 \leq 1.
  \end{array}
 \]
-The code below is efficient, if we assume that the number of rows in $A$ 
-is greater than or equal to the number of columns.
+The code below is efficient if the number of rows in \tm{A} is 
+greater than or equal to the number of columns.
 
 \begin{verbatim}
 def qcl1(A, b):
@@ -1143,8 +1453,162 @@ def qcl1(A, b):
         return None, None
 \end{verbatim}
 
+\item[Example: 1-norm regularized least-squares] 
+As an example that illustrates how structure can be exploited
+in \function{coneqp()}, we consider the 1-norm regularized 
+least-squares problem
+\[
+\begin{array}{ll}
+\mbox{minimize} & \|Ax - y\|_2^2 + \|x\|_1
+\end{array}
+\]
+with variable \tm{x}.  The problem is equivalent to the quadratic 
+program
+\[
+ \begin{array}{ll}
+ \mbox{minimize} & \|Ax - y\|_2^2 + \ones^T u \\
+ \mbox{subject to} & -u \preceq x \preceq u
+ \end{array}
+\]
+with variables \tm{x} and \tm{u}.  The implementation below is 
+efficient when \tm{A} has many more columns than rows. 
+
+\begin{verbatim}
+from cvxopt.base import matrix, spdiag, mul, div
+from cvxopt import base, blas, lapack, solvers
+import math
+
+def l1regls(A, y):
+    """
+    
+    Returns the solution of l1-norm regularized least-squares problem
+  
+        minimize || A*x - y ||_2^2  + || x ||_1.
+
+    """
+
+    m, n = A.size
+    q = matrix(1.0, (2*n,1))
+    q[:n] = -2.0 * A.T * y
+
+    def P(u, v, alpha = 1.0, beta = 0.0 ):
+        """
+            v := alpha * 2.0 * [ A'*A, 0; 0, 0 ] * u + beta * v 
+        """
+        v *= beta
+        v[:n] += alpha * 2.0 * A.T * (A * u[:n])
+
+
+    def G(u, v, alpha=1.0, beta=0.0, trans='N'):
+        """
+            v := alpha*[I, -I; -I, -I] * u + beta * v  (trans = 'N' or 'T')
+        """
+
+        v *= beta
+        v[:n] += alpha*(u[:n] - u[n:])
+        v[n:] += alpha*(-u[:n] - u[n:])
+
+    h = matrix(0.0, (2*n,1))
+
+
+    # Customized solver for the KKT system 
+    #
+    #     [  2.0*A'*A  0    I      -I     ] [x[:n] ]     [bx[:n] ]
+    #     [  0         0   -I      -I     ] [x[n:] ]  =  [bx[n:] ].
+    #     [  I        -I   -D1^-1   0     ] [zl[:n]]     [bzl[:n]]
+    #     [ -I        -I    0      -D2^-1 ] [zl[n:]]     [bzl[n:]]
+    #
+    # where D1 = W['di'][:n]**2, D2 = W['di'][n:]**2.
+    #    
+    # We first eliminate zl and x[n:]:
+    #
+    #     ( 2*A'*A + 4*D1*D2*(D1+D2)^-1 ) * x[:n] = 
+    #         bx[:n] - (D2-D1)*(D1+D2)^-1 * bx[n:] + 
+    #         D1 * ( I + (D2-D1)*(D1+D2)^-1 ) * bzl[:n] - 
+    #         D2 * ( I - (D2-D1)*(D1+D2)^-1 ) * bzl[n:]           
+    #
+    #     x[n:] = (D1+D2)^-1 * ( bx[n:] - D1*bzl[:n]  - D2*bzl[n:] ) 
+    #         - (D2-D1)*(D1+D2)^-1 * x[:n]         
+    #
+    #     zl[:n] = D1 * ( x[:n] - x[n:] - bzl[:n] )
+    #     zl[n:] = D2 * (-x[:n] - x[n:] - bzl[n:] ).
+    #
+    # The first equation has the form
+    #
+    #     (A'*A + D)*x[:n]  =  rhs
+    #
+    # and is equivalent to
+    #
+    #     [ D    A' ] [ x[:n] ]  = [ rhs ]
+    #     [ A   -I  ] [ v     ]    [ 0   ].
+    #
+    # It can be solved as 
+    #
+    #     ( A*D^-1*A' + I ) * v = A * D^-1 * rhs
+    #     x[:n] = D^-1 * ( rhs - A'*v ).
+
+    S = matrix(0.0, (m,m))
+    Asc = matrix(0.0, (m,n))
+    v = matrix(0.0, (m,1))
+
+    def Fkkt(W):
+
+        # Factor 
+        #
+        #     S = A*D^-1*A' + I 
+        #
+        # where D = 2*D1*D2*(D1+D2)^-1, D1 = d[:n]**-2, D2 = d[n:]**-2.
+
+        d1, d2 = W['di'][:n]**2, W['di'][n:]**2
 
+        # ds is square root of diagonal of D
+        ds = math.sqrt(2.0) * div( mul( W['di'][:n], W['di'][n:]), 
+            base.sqrt(d1+d2) )
+        d3 =  div(d2 - d1, d1 + d2)
+     
+        # Asc = A*diag(d)^-1/2
+        Asc = A * spdiag(ds**-1)
+
+        # S = I + A * D^-1 * A'
+        blas.syrk(Asc, S)
+        S[::m+1] += 1.0 
+        lapack.potrf(S)
 
+        def g(x, y, z):
+
+            x[:n] = 0.5 * ( x[:n] - mul(d3, x[n:]) + 
+                mul(d1, z[:n] + mul(d3, z[:n])) - mul(d2, z[n:] - 
+                mul(d3, z[n:])) )
+            x[:n] = div( x[:n], ds) 
+
+            # Solve
+            #
+            #     S * v = 0.5 * A * D^-1 * ( bx[:n] - 
+            #         (D2-D1)*(D1+D2)^-1 * bx[n:] + 
+            #         D1 * ( I + (D2-D1)*(D1+D2)^-1 ) * bzl[:n] - 
+            #         D2 * ( I - (D2-D1)*(D1+D2)^-1 ) * bzl[n:] )
+                
+            blas.gemv(Asc, x, v)
+            lapack.potrs(S, v)
+            
+            # x[:n] = D^-1 * ( rhs - A'*v ).
+            blas.gemv(Asc, v, x, alpha=-1.0, beta=1.0, trans='T')
+            x[:n] = div(x[:n], ds)
+
+            # x[n:] = (D1+D2)^-1 * ( bx[n:] - D1*bzl[:n]  - D2*bzl[n:] ) 
+            #         - (D2-D1)*(D1+D2)^-1 * x[:n]         
+            x[n:] = div( x[n:] - mul(d1, z[:n]) - mul(d2, z[n:]), d1+d2 )\
+                - mul( d3, x[:n] )
+                
+            # zl[:n] = D1^1/2 * (  x[:n] - x[n:] - bzl[:n] )
+            # zl[n:] = D2^1/2 * ( -x[:n] - x[n:] - bzl[n:] ).
+            z[:n] = mul( W['di'][:n],  x[:n] - x[n:] - z[:n] ) 
+            z[n:] = mul( W['di'][n:], -x[:n] - x[n:] - z[n:] ) 
+
+        return g
+
+    return solvers.coneqp(P, q, G, h, kktsolver = Fkkt)['x'][:n]
+\end{verbatim}
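+
+A hypothetical usage sketch (the data below are made up, with more columns 
+than rows as assumed above):
+\begin{verbatim}
+from cvxopt.base import matrix
+
+A = matrix([ [1.0, 2.0], [0.5, -1.0], [3.0, 1.0], [-2.0, 0.5] ])   # 2 by 4
+y = matrix([1.0, -1.0])
+x = l1regls(A, y)
+print x
+\end{verbatim}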
 \end{description}
 
 
@@ -1186,6 +1650,9 @@ adding entries with the following key values.
 \item[\code{'reltol'}] relative accuracy (default: \code{1e-6}).
 \item[\code{'feastol'}] tolerance for feasibility conditions (default:
 \code{1e-7}).
+\item[\code{'refinement'}] number of iterative refinement steps when
+ solving KKT equations (default: 0 if the problem has no second-order
+ cone or matrix inequality constraints; 1 otherwise).
 \end{description}
 For example, the command
 \begin{verbatim}
@@ -1193,6 +1660,7 @@ For example the command
 >>> solvers.options['show_progress'] = False
 \end{verbatim}
 turns off the screen output during calls to the solvers.
+
 The tolerances \var{abstol}, \var{reltol} and \var{feastol} have the
 following meaning.   \function{conelp()} terminates with 
 status \code{'optimal'} if
@@ -1209,7 +1677,7 @@ s \succeq 0, \qquad z \succeq 0, \qquad
 and
 \[
  s^T z \leq \epsilon_\mathrm{abs} \qquad \mbox{or} \qquad
-\left( \min\left\{c^Tx,  h^T z + b^Ty \right\} < 0, \quad 
+\left( \min\left\{c^Tx,  h^T z + b^Ty \right\} < 0 \quad \mbox{and} \quad
  \frac{s^Tz} {-\min\{c^Tx, h^Tz + b^T y\}} \leq \epsilon_\mathrm{rel} 
 \right).
 \]
@@ -1231,11 +1699,40 @@ c^Tx = -1.
 The functions \function{lp()}, \function{socp()} and \function{sdp()} call 
 \function{conelp()} and hence use the same stopping criteria.
 
+The function \function{coneqp()} terminates with 
+status \code{'optimal'} if
+\[
+s \succeq 0, \qquad z \succeq 0, \qquad 
+\qquad 
+ \frac{\|Gx + s - h\|_2} {\max\{1,\|h\|_2\}} \leq \epsilon_\mathrm{feas}, 
+\qquad 
+\frac{\|Ax-b\|_2}{\max\{1,\|b\|_2\}} \leq \epsilon_\mathrm{feas}, 
+\qquad
+\frac{\|Px + G^Tz +  A^Ty + q\|_2}{\max\{1,\|q\|_2\}} \leq 
+ \epsilon_\mathrm{feas}, 
+\]
+and
+\[
+s^T z \leq \epsilon_\mathrm{abs}
+\qquad \mbox{or} \qquad \left( \frac{1}{2}x^TPx + q^Tx < 0 \quad 
+\mbox{and}\quad \frac{s^Tz} {-(1/2)x^TPx - q^Tx} \leq 
+\epsilon_\mathrm{rel} \right)
+\qquad \mbox{or} \qquad
+\left( L(x,y,z) > 0 \quad \mbox{and} \quad \frac{s^Tz}
+{L(x,y,z)} \leq \epsilon_\mathrm{rel} \right)
+\]
+where
+\[
+L(x,y,z) = \frac{1}{2}x^TPx + q^Tx  + z^T (Gx-h) + y^T(Ax-b).
+\]
+The function \function{qp()} calls \function{coneqp()} and hence uses 
+the same stopping criteria.
+
 The control parameters listed in the GLPK documentation are 
-set to their default values and can also be customized by making 
+set to their default values and can be customized by making 
 an entry in \member{solvers.options}.
 The keys in the dictionary are strings with the name of the GLPK 
-parameter.  The command
+parameter.  For example, the command
 \begin{verbatim}
 >>> from cvxopt import solvers 
 >>> solvers.options['LPX_K_MSGLEV'] = 0
@@ -1259,7 +1756,8 @@ For example the commands
 turn off the screen output during calls of  \function{lp()} 
 or \function{socp()} with the \code{'mosek'} option.
 
-The following control parameters affect the DSDP algorithm:
+The following control parameters in \member{solvers.options} affect the 
+execution of the DSDP algorithm:
 \begin{description}
 \item[\code{'DSDP\_Monitor'}] the interval (in number of iterations)
  at which output is printed to the screen
diff --git a/doc/copyright.tex b/doc/copyright.tex
new file mode 100644
index 0000000..addc2b7
--- /dev/null
+++ b/doc/copyright.tex
@@ -0,0 +1,46 @@
+\chapter*{Copyright and License}
+
+Copyright \copyright{2004-2008} J. Dahl \& L. Vandenberghe. 
+
+CVXOPT is free software; you can redistribute it and/or modify
+it under the terms of the 
+\ulink{GNU General Public License}{http://www.gnu.org/licenses/gpl-3.0.html}
+as published by the Free Software Foundation; either version 3 of the 
+License, or (at your option) any later version.
+
+CVXOPT is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
+See the
+\ulink{GNU General Public License}{http://www.gnu.org/licenses/gpl-3.0.html}
+for more details. \\
+
+\hrule 
+
+The CVXOPT distribution includes source code for part of the SuiteSparse 
+suite of sparse matrix algorithms, including:
+\BIT
+\item AMD Version 2.2.0.  
+ Copyright (c) 2007 by Timothy A.\ Davis, Patrick R.\ Amestoy, and 
+ Iain S.\ Duff.  
+\item CHOLMOD Version 1.6.0.  
+ Copyright (c) 2005-2007 by University of Florida, Timothy A. Davis 
+ and W. Hager.
+\item COLAMD version 2.7.0.  Copyright (c) 1998-2007 by Timothy A.\ Davis.
+\item UMFPACK Version 5.2.0.  Copyright (c) 1995-2006 by Timothy A.\  Davis.
+\EIT
+
+These packages are licensed under the terms of the 
+\ulink{GNU General Public License, version 2 or higher}
+{http://www.gnu.org/licenses/old-licenses/gpl-2.0.html} 
+(UMFPACK, the Supernodal module of CHOLMOD) and the
+\ulink{GNU Lesser General Public License, version 2.1 or higher}
+{http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html} 
+(the other CHOLMOD modules, AMD, COLAMD).
+For copyright and license details, consult the README files in the source 
+directories or the website listed below.
+
+\begin{quote}
+Availability: \ulink{www.cise.ufl.edu/research/sparse}
+{http://www.cise.ufl.edu/research/sparse}.
+\end{quote}
diff --git a/doc/cvxopt.tex b/doc/cvxopt.tex
index 85b8ace..b265095 100644
--- a/doc/cvxopt.tex
+++ b/doc/cvxopt.tex
@@ -1,8 +1,5 @@
 \documentclass{book}
 \usepackage{graphicx}
-%\usepackage{html,graphicx}
-
-\parindent 0pt
 
 \def\BIT{\begin{itemize}}
 \def\EIT{\end{itemize}}
@@ -26,6 +23,13 @@
 \newcommand{\symm}{{\mbox{\bf S}}}  
 \newcommand{\op}{\mathop{\mathrm{op}}}
 \newcommand{\svec}{\mathop{\mathbf{vec}}}
+\newcommand{\Range}{\mbox{\textrm{range}}}
+
+% In-line formulas are typeset differently in tex4ht and latex2html.
+\newcommand{\tm}[1]{$#1$}
+%\newcommand{\tm}[1]{\textit{#1}}
+\newcommand{\s}[2]{#1_#2}
+%\newcommand{\s}[2]{{#1}\_{#2}}
 
 % redefine Python markup
 \newcommand{\code}[1]{{\tt #1}}
@@ -74,63 +78,17 @@
 \newcommand{\mtrx}{\class{matrix}}
 \newcommand{\spmtrx}{\class{spmatrix}}
 
+\parindent 0pt
 
 \title{CVXOPT User's Guide} 
 \author{Joachim Dahl \& Lieven Vandenberghe}
-\date{Release 0.9.2 -- December 27, 2007} 
-
+\date{Release 0.9.3 -- February 24, 2008} 
 \begin{document}
 \Configure{crosslinks*}{next}{prev}{up}{}
 
 \maketitle
 
-\chapter*{Copyright and License}
-
-Copyright \copyright{2004-2007} J. Dahl \& L. Vandenberghe. 
-
-CVXOPT is free software; you can redistribute it and/or modify
-it under the terms of the 
-\ulink{GNU General Public License}{http://www.gnu.org/licenses/gpl-3.0.html}
-as published by the Free Software Foundation; either version 3 of the 
-License, or (at your option) any later version.
-
-CVXOPT is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
-See the
-\ulink{GNU General Public License}{http://www.gnu.org/licenses/gpl-3.0.html}
-for more details. \\
-
-\hrule 
-
-The CVXOPT distribution includes source code for part of the SuiteSparse 
-suite of sparse matrix algorithms, including:
-\BIT
-\item AMD Version 2.2.0.  
- Copyright (c) 2007 by Timothy A.\ Davis, Patrick R.\ Amestoy, and 
- Iain S.\ Duff.  
-\item CHOLMOD Version 1.6.0.  
- Copyright (c) 2005-2007 by University of Florida, Timothy A. Davis 
- and W. Hager.
-\item COLAMD version 2.7.0.  Copyright (c) 1998-2007 by Timothy A.\ Davis.
-\item UMFPACK Version 5.2.0.  Copyright (c) 1995-2006 by Timothy A.\  Davis.
-\EIT
-
-These packages are licensed under the terms of the 
-\ulink{GNU General Public License, version 2 or higher}
-{http://www.gnu.org/licenses/old-licenses/gpl-2.0.html} 
-(UMFPACK, the Supernodal module of CHOLMOD) and the
-\ulink{GNU Lesser General Public License, version 2.1 or higher}
-{http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html} 
-(the other CHOLMOD modules, AMD, COLAMD).
-For copyright and license details, consult the README files in the source 
-directories or the website listed below.
-
-\begin{quote}
-Availability: \ulink{www.cise.ufl.edu/research/sparse}
-{http://www.cise.ufl.edu/research/sparse}.
-\end{quote}
-
+\input{copyright}
 \input{intro}
 \input{base}
 \input{blas}
diff --git a/doc/figures/floorplan.pdf b/doc/figures/floorplan.pdf
index 0603069..9852f21 100644
Binary files a/doc/figures/floorplan.pdf and b/doc/figures/floorplan.pdf differ
diff --git a/doc/figures/normappr.pdf b/doc/figures/normappr.pdf
index f47e697..d1a9b8c 100644
Binary files a/doc/figures/normappr.pdf and b/doc/figures/normappr.pdf differ
diff --git a/doc/figures/portfolio1.pdf b/doc/figures/portfolio1.pdf
index 3f639b3..efebecf 100644
Binary files a/doc/figures/portfolio1.pdf and b/doc/figures/portfolio1.pdf differ
diff --git a/doc/figures/portfolio2.pdf b/doc/figures/portfolio2.pdf
index bc95053..0a603f3 100644
Binary files a/doc/figures/portfolio2.pdf and b/doc/figures/portfolio2.pdf differ
diff --git a/doc/intro.tex b/doc/intro.tex
index 613445a..1209880 100644
--- a/doc/intro.tex
+++ b/doc/intro.tex
@@ -10,7 +10,7 @@ optimization applications straightforward by building on Python's
 extensive standard library and on the strengths of Python as a 
 high-level programming language.  
 
-Release 0.9.2 of CVXOPT includes routines for basic linear algebra 
+Release 0.9.3 of CVXOPT includes routines for basic linear algebra 
 calculations, interfaces to efficient libraries for solving dense and 
 sparse linear equations, convex optimization solvers written in Python,
 interfaces to a few other optimization libraries, 
diff --git a/doc/lapack.tex b/doc/lapack.tex
index c1534e2..dad040d 100644
--- a/doc/lapack.tex
+++ b/doc/lapack.tex
@@ -3,7 +3,7 @@
 
 The module \module{cvxopt.lapack} includes functions for 
 solving dense sets of linear equations, for the corresponding matrix 
-factorizations (LU, Cholesky, LDL$\mathrm{{}^T}$),
+factorizations (LU, Cholesky, $\mathrm{LDL^T}$),
 for solving least-squares and least-norm problems, for QR 
 factorization, for symmetric eigenvalue problems and for singular 
 value decomposition.  
@@ -29,12 +29,12 @@ Solves
 \[ 
    A X = B,
 \]
-where $A$ and $B$ are real or complex matrices, with $A$
+where \tm{A} and \tm{B} are real or complex matrices, with \tm{A}
 square and nonsingular.  On exit, \var{B} is replaced by the solution.  
 The arguments \var{A} and \var{B} must have the same type (\dtc\ or
 \ztc).  
 The optional argument \var{ipiv} is an integer matrix of length at 
-least $n$.  
+least \tm{n}.  
 If \var{ipiv} is provided, then \function{gesv()} solves the system, 
 replaces \var{A} with its triangular factors, and returns the 
 permutation matrix in \var{ipiv}.
@@ -62,11 +62,11 @@ complex matrix,
 \[
   A = PLU 
 \]
-where $A$ is $m$ by $n$.
+where \tm{A} is \tm{m} by \tm{n}.
 The argument \var{ipiv} is an integer matrix of length at least
-$\min\{m, n\}$.
-On exit, the lower triangular part of \var{A} is replaced by $L$,
-the upper triangular part by $U$, and the permutation matrix is 
+\tm{\min\{m, n\}}.
+On exit, the lower triangular part of \var{A} is replaced by \tm{L},
+the upper triangular part by \tm{U}, and the permutation matrix is 
 returned in \var{ipiv}.
 Raises an \code{ArithmeticError} if the matrix is not full rank.
 \end{funcdesc}
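+
+For illustration, a short sketch (the data are made up) that factors a 
+matrix with \function{getrf()} and then solves with the companion routine 
+\function{getrs()}:
+\begin{verbatim}
+from cvxopt.base import matrix
+from cvxopt import lapack
+
+A = matrix([ [2.0, 1.0], [1.0, 3.0] ])   # 2 by 2 coefficient matrix
+B = matrix([5.0, 10.0])                  # right-hand side
+ipiv = matrix(0, (2,1))                  # integer pivot vector
+lapack.getrf(A, ipiv)                    # A is overwritten with its LU factors
+lapack.getrs(A, ipiv, B)                 # B is overwritten with the solution
+print B
+\end{verbatim}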
@@ -119,39 +119,42 @@ Solves
 \[ 
    A X = B,
 \]
-where $A$ and $B$ are real or complex matrices, with $A$
-$n$ by $n$ and banded with $k_l$ subdiagonals.  
+where \tm{A} and \tm{B} are real or complex matrices, with \tm{A}
+\tm{n} by \tm{n} and banded with \tm{\s{k}{l}} subdiagonals.  
 The arguments \var{A} and \var{B} must have the same type (\dtc\ or
 \ztc).  
 
 The optional argument \var{ipiv} is an integer matrix of length at 
-least $n$.
+least \tm{n}.
 If \var{ipiv} is provided, then \var{A} must have 
-$2k_l + k_u + 1$ rows.  On entry the diagonals of $A$ are
-stored in rows $k_l + 1$ to $2k_l + k_u +1$ of the \var{A}, using
+\tm{2\s{k}{l} + \s{k}{u} + 1} rows.  On entry the diagonals of \tm{A} are
+stored in rows \tm{\s{k}{l} + 1} to 
+\tm{2\s{k}{l} + \s{k}{u} +1} of the \var{A}, using
 the BLAS format for general band matrices 
 (see section~\ref{s-conventions}).
 On exit, the factorization is returned in {\var A} and \var{ipiv}.
 
 If \var{ipiv} is not provided, then \var{A} must have 
-$k_l + k_u + 1$ rows.  On entry the diagonals of $A$ are stored in the 
+\tm{\s{k}{l} + \s{k}{u} + 1} rows.  
+On entry the diagonals of \tm{A} are stored in the 
 rows of \var{A}, following the standard format for general band matrices. 
 In this case, \function{gbsv()} does not modify \var{A} on exit and does
 not return the factorization.
 
-On exit, \var{B} is replaced by the solution $X$.  
+On exit, \var{B} is replaced by the solution \tm{X}.  
 Raises an \code{ArithmeticError} if the matrix is singular.
 \end{funcdesc}
 
 \begin{funcdesc}{gbtrf}{A, m, kl, ipiv}
-LU factorization of a general $m$ by $n$ real or complex band 
-matrix with $k_l$ subdiagonals.
+LU factorization of a general \tm{m} by \tm{n} real or complex band 
+matrix with \tm{\s{k}{l}} subdiagonals.
 The matrix is stored using the BLAS format for general band matrices
 (see section~\ref{s-conventions}), by providing the 
-diagonals (stored as rows of a $k_u + k_l + 1$ by $n$ matrix),
-the number of rows $m$, and the number of subdiagonals $k_l$.
+diagonals (stored as rows of a \tm{\s{k}{u} + \s{k}{l} + 1} by 
+\tm{n} matrix),
+the number of rows \tm{m}, and the number of subdiagonals \tm{\s{k}{l}}.
 The argument \var{ipiv} is an integer matrix of length at least
-$\min\{m, n\}$.
+\tm{\min\{m, n\}}.
 On exit, \var{A} and \var{ipiv} contain the details of the factorization.
 Raises an \code{ArithmeticError} if the matrix is not full rank.
 \end{funcdesc}
@@ -163,8 +166,9 @@ Solves a set of linear equations
  A^TX=B \quad (\mathrm{trans} = \mathrm{'T'}), \qquad 
  A^HX=B \quad (\mathrm{trans} = \mathrm{'C'}), 
 \]
-with $A$ a general band matrix with $k_l$ subdiagonals, given the 
-LU factorization computed by \function{gbsv()} or \function{gbtrf()}.
+with \tm{A} a general band matrix with \tm{\s{k}{l}} subdiagonals, 
+given the LU factorization computed by 
+\function{gbsv()} or \function{gbtrf()}.
 On entry, \var{A} and \var{ipiv} must contain the factorization
 as computed by \function{gbsv()} or \function{gbtrf()}.  
 On exit, \var{B} is overwritten with the solution. 
@@ -235,7 +239,7 @@ An alternative method uses \function{gbtrf()} for the factorization.
 [-2.38e-02]
 \end{verbatim}
 
-The following functions can be used for tridiagonal matrices They use a 
+The following functions can be used for tridiagonal matrices. They use a 
 simpler matrix format that stores the diagonals in three separate 
 vectors.
 
@@ -244,24 +248,24 @@ Solves
 \[ 
    A X = B,
 \]
-where $A$ is an $n$ by $n$ tridiagonal matrix, 
-with subdiagonal stored as a matrix \var{dl} of length $n-1$, 
-diagonal stored as a matrix \var{d} of length $n$, and superdiagonal 
-stored as a matrix \var{du} of length $n-1$.  
+where \tm{A} is an \tm{n} by \tm{n} tridiagonal matrix, 
+with subdiagonal stored as a matrix \var{dl} of length \tm{n-1}, 
+diagonal stored as a matrix \var{d} of length \tm{n}, and superdiagonal 
+stored as a matrix \var{du} of length \tm{n-1}.  
 The four arguments must have the same type (\dtc\ or \ztc).
 On exit \var{dl}, \var{d}, \var{du} are overwritten with the details of 
-the LU factorization of $A$, and \var{B} is overwritten with the solution 
-$X$.  
+the LU factorization of \tm{A}, and \var{B} is overwritten with the solution 
+\tm{X}.  
 Raises an \code{ArithmeticError} if the matrix is singular.
 \end{funcdesc}
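+
+A brief sketch with made-up data (a diagonally dominant tridiagonal system):
+\begin{verbatim}
+from cvxopt.base import matrix
+from cvxopt import lapack
+
+dl = matrix([1.0, 1.0])          # subdiagonal
+d  = matrix([4.0, 4.0, 4.0])     # diagonal
+du = matrix([2.0, 2.0])          # superdiagonal
+B  = matrix([7.0, 13.0, 9.0])
+lapack.gtsv(dl, d, du, B)        # B is overwritten with the solution
+print B
+\end{verbatim}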
 
 \begin{funcdesc}{gttrf}{dl, d, du, du2, ipiv}
-LU factorization of an $n$ by $n$ tridiagonal matrix with
-subdiagonal $d_l$, diagonal $d$ and superdiagonal $d_u$.
+LU factorization of an \tm{n} by \tm{n} tridiagonal matrix with
+subdiagonal \tm{\s{d}{l}}, diagonal \tm{d} and superdiagonal \tm{\s{d}{u}}.
 \var{dl}, \var{d} and \var{du} must have the same type.
-\var{du2} is a matrix of length $n-2$, and of the same type as 
+\var{du2} is a matrix of length \tm{n-2}, and of the same type as 
 \var{dl}.
-\var{ipiv} is an \itc\ matrix of length $n$.
+\var{ipiv} is an \itc\ matrix of length \tm{n}.
 On exit, the five arguments contain the details of the factorization.
 Raises an \code{ArithmeticError} if the matrix is singular.
 \end{funcdesc}
@@ -273,11 +277,11 @@ Solves a set of linear equations
  A^TX=B \quad (\mathrm{trans} = \mathrm{'T'}), \qquad 
  A^HX=B \quad (\mathrm{trans} = \mathrm{'C'}), 
 \]
-where $A$ is an $n$ by $n$ tridiagonal matrix.
+where \tm{A} is an \tm{n} by \tm{n} tridiagonal matrix.
 The arguments \var{dl}, \var{d}, \var{du}, \var{du2} and \var{ipiv}
 contain the details of the LU factorization as returned by 
 \function{gttrf()}.
-On exit, \var{B} is overwritten with the solution $X$. 
+On exit, \var{B} is overwritten with the solution \tm{X}. 
 \var{B} must have the same type as \var{dl}.
 \end{funcdesc}
 
@@ -288,7 +292,7 @@ Solves
 \[ 
    A X = B,
 \]
-where $A$ is a real symmetric or complex Hermitian positive 
+where \tm{A} is a real symmetric or complex Hermitian positive 
 definite matrix.
 On exit, \var{B} is replaced by the solution, and \var{A} is 
 overwritten with the Cholesky factor.
@@ -302,7 +306,7 @@ Cholesky factorization
 \[
  A = LL^T \qquad \mbox{or} \qquad A = LL^H
 \]
-of a positive definite real symmetric or complex Hermitian matrix $A$.  
+of a positive definite real symmetric or complex Hermitian matrix \tm{A}.  
 On exit, the lower triangular part of \var{A} 
 (if \var{uplo} is \code{'L'}) or the upper triangular part 
 (if \var{uplo} is \code{'U'}) is overwritten with the Cholesky factor 
@@ -374,8 +378,8 @@ Solves
 \[
  AX=B
 \]
-where $A$ is a real symmetric or complex Hermitian positive definite
-band matrix.  On entry, the diagonals of $A$ are stored in \var{A}, 
+where \tm{A} is a real symmetric or complex Hermitian positive definite
+band matrix.  On entry, the diagonals of \tm{A} are stored in \var{A}, 
 using the BLAS format for symmetric or Hermitian band matrices
 (see section~\ref{s-conventions}).  On exit, \var{B} is replaced by the
 solution, and \var{A} is overwritten with the Cholesky factor (in the
@@ -390,7 +394,7 @@ Cholesky factorization
  A = LL^T \qquad \mbox{or} \qquad A = LL^H
 \]
 of a positive definite real symmetric or complex Hermitian band matrix
-$A$.  On entry, the diagonals of $A$ are stored in \var{A}, 
+\tm{A}.  On entry, the diagonals of \tm{A} are stored in \var{A}, 
 using the BLAS format for symmetric or Hermitian band matrices.
 On exit, \var{A} contains the Cholesky factor, in the BLAS format
 for triangular band matrices.
@@ -417,29 +421,29 @@ Solves
 \[ 
    A X = B,
 \]
-where $A$ is an $n$ by $n$ positive definite real symmetric 
+where \tm{A} is an \tm{n} by \tm{n} positive definite real symmetric 
 or complex Hermitian tridiagonal matrix.  Its diagonal 
-is stored as a \dtc\ matrix \var{d} of length $n$ and 
-its subdiagonal as a \dtc\ or \ztc\ matrix \var{e} of length $n-1$.
+is stored as a \dtc\ matrix \var{d} of length \tm{n} and 
+its subdiagonal as a \dtc\ or \ztc\ matrix \var{e} of length \tm{n-1}.
 The arguments \var{e} and \var{B} must have the same type.  
-On exit \var{d} contains the diagonal elements of $D$ in 
-the LDL${}\mathrm{^T}$ or LDL${}\mathrm{^H}$ factorization 
-of $A$, and \var{e} contains the subdiagonal elements of the unit 
-lower bidiagonal matrix $L$.  
-\var{B} is overwritten with the solution $X$.  
+On exit \var{d} contains the diagonal elements of \tm{D} in 
+the $\mathrm{LDL^T}$ or $\mathrm{LDL^H}$ factorization 
+of \tm{A}, and \var{e} contains the subdiagonal elements of the unit 
+lower bidiagonal matrix \tm{L}.  
+\var{B} is overwritten with the solution \tm{X}.  
 Raises an \code{ArithmeticError} if the matrix is singular.
 \end{funcdesc}
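+
+A brief sketch with made-up data (a positive definite tridiagonal system):
+\begin{verbatim}
+from cvxopt.base import matrix
+from cvxopt import lapack
+
+d = matrix([4.0, 4.0, 4.0])      # diagonal
+e = matrix([-1.0, -1.0])         # subdiagonal
+B = matrix([3.0, 2.0, 3.0])
+lapack.ptsv(d, e, B)             # d, e get the factorization; B the solution
+print B
+\end{verbatim}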
 
 \begin{funcdesc}{pttrf}{d, e}
-LDL${}\mathrm{^T}$ or LDL${}\mathrm{^H}$ factorization of an $n$ by 
-$n$ positive definite real symmetric or complex Hermitian tridiagonal 
-matrix $A$.
+$\mathrm{LDL^T}$ or $\mathrm{LDL^H}$ factorization of an \tm{n} by 
+\tm{n} positive definite real symmetric or complex Hermitian tridiagonal 
+matrix \tm{A}.
 On entry, the argument \var{d} is a \dtc\ matrix with the diagonal elements
-of $A$.  The argument \var{e} is \dtc\ or \ztc\ matrix with
-the subdiagonal elements of $A$.
-On exit \var{d} contains the diagonal elements of $D$, and \var{e} 
+of \tm{A}.  The argument \var{e} is a \dtc\ or \ztc\ matrix with
+the subdiagonal elements of \tm{A}.
+On exit \var{d} contains the diagonal elements of \tm{D}, and \var{e} 
 contains the subdiagonal elements of the unit lower bidiagonal matrix 
-$L$.  Raises an \code{ArithmeticError} if the matrix is singular.
+\tm{L}.  Raises an \code{ArithmeticError} if the matrix is singular.
 \end{funcdesc}
 
 \begin{funcdesc}{pttrs}{d, e, B\optional{, uplo='L'}}
@@ -447,16 +451,16 @@ Solves a set of linear equations
 \[
  AX=B 
 \]
-where $A$ is an $n$ by $n$ positive definite real symmetric 
-or complex Hermitian tridiagonal matrix, given its LDL${}\mathrm{^T}$ or 
-LDL${}\mathrm{^H}$ factorization.
-The argument \var{d} is the diagonal of the diagonal matrix $D$.
+where \tm{A} is an \tm{n} by \tm{n} positive definite real symmetric 
+or complex Hermitian tridiagonal matrix, given its $\mathrm{LDL^T}$ or 
+$\mathrm{LDL^H}$ factorization.
+The argument \var{d} is the diagonal of the diagonal matrix \tm{D}.
 The argument \var{uplo} only matters for complex matrices.
 If \var{uplo} is \code{'L'}, then on exit \var{e} contains the subdiagonal 
-elements of the unit bidiagonal matrix $L$.
+elements of the unit bidiagonal matrix \tm{L}.
 If \var{uplo} is \code{'U'}, then \var{e} contains the complex
-conjugates of the elements of the unit bidiagonal matrix $L$.
-On exit, \var{B} is overwritten with the solution $X$. 
+conjugates of the elements of the unit bidiagonal matrix \tm{L}.
+On exit, \var{B} is overwritten with the solution \tm{X}. 
 \var{B} must have the same type as \var{e}.
 \end{funcdesc}
 
@@ -467,12 +471,12 @@ Solves
 \[
  AX=B
 \] 
-where $A$ is a real or complex symmetric matrix  of order $n$.
+where \tm{A} is a real or complex symmetric matrix  of order \tm{n}.
 On exit, \var{B} is replaced by the solution.  
 The matrices \var{A} and \var{B} must have the same type (\dtc\ or 
 \ztc).
 The optional argument \var{ipiv} is an integer matrix of length at 
-least equal to $n$.
+least equal to \tm{n}.
 If \var{ipiv} is provided, \function{sysv()} solves the system and 
 returns the factorization in \var{A} and \var{ipiv}.
 If \var{ipiv} is not specified, \function{sysv()} solves the
@@ -482,12 +486,12 @@ Raises an \code{ArithmeticError} if the matrix is singular.
 \end{funcdesc}
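+
+For illustration, a sketch with made-up data (a symmetric indefinite 
+system, solved without returning the factorization):
+\begin{verbatim}
+from cvxopt.base import matrix
+from cvxopt import lapack
+
+A = matrix([ [2.0, 3.0], [3.0, -1.0] ])   # symmetric indefinite
+B = matrix([5.0, 2.0])
+lapack.sysv(A, B)                         # B is overwritten with the solution
+print B
+\end{verbatim}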
 
 \begin{funcdesc}{sytrf}{A, ipiv\optional{, uplo='L'}}
-LDL${}\mathrm{^T}$ factorization 
+$\mathrm{LDL^T}$ factorization 
 \[
  PAP^T = LDL^T
 \]
-of a real or complex symmetric matrix $A$ of order $n$.
-\var{ipiv} is an \itc\ matrix of length at least $n$.
+of a real or complex symmetric matrix \tm{A} of order \tm{n}.
+\var{ipiv} is an \itc\ matrix of length at least \tm{n}.
 On exit, \var{A} and \var{ipiv} contain the factorization.
 Raises an \code{ArithmeticError} if the matrix is singular.
 \end{funcdesc}
@@ -497,14 +501,14 @@ Solves
 \[
   A X = B
 \]
-given the LDL${}\mathrm{^T}$ factorization computed by 
+given the $\mathrm{LDL^T}$ factorization computed by 
 \function{sytrf()} or \function{sysv()}. \var{B} must have the same
 type as \var{A}.
 \end{funcdesc}
 
 \begin{funcdesc}{sytri}{A, ipiv\optional{, uplo='L'}}
 Computes the inverse of a real or complex symmetric matrix.
-On entry, \var{A} and \var{ipiv} contain the LDL${}\mathrm{^T}$ 
+On entry, \var{A} and \var{ipiv} contain the $\mathrm{LDL^T}$ 
 factorization computed by \function{sytrf()} or \function{sysv()}. 
 On exit, \var{A} contains the inverse.
 \end{funcdesc}
@@ -514,12 +518,12 @@ Solves
 \[
  A X = B
 \]
-where $A$ is a real symmetric or complex Hermitian of order $n$.
+where \tm{A} is a real symmetric or complex Hermitian matrix of order \tm{n}.
 On exit, \var{B} is replaced by the solution.
 The matrices \var{A} and \var{B} must have the same type (\dtc\ or 
 \ztc).
 The optional argument \var{ipiv} is an integer matrix of length at 
-least $n$.  
+least \tm{n}.  
 If \var{ipiv} is provided, then \function{hesv()} solves the system and 
 returns the factorization in \var{A} and \var{ipiv}.
 If \var{ipiv} is not specified, then \function{hesv()} solves the
@@ -529,12 +533,12 @@ Raises an \code{ArithmeticError} if the matrix is singular.
 \end{funcdesc}
 
 \begin{funcdesc}{hetrf}{A, ipiv\optional{, uplo='L'}}
-LDL${}\mathrm{^H}$ factorization 
+$\mathrm{LDL^H}$ factorization 
 \[
  PAP^T = LDL^H
 \]
-of a real symmetric or complex Hermitian matrix of order $n$.
-\var{ipiv} is an \itc\ matrix of length at least $n$.
+of a real symmetric or complex Hermitian matrix of order \tm{n}.
+\var{ipiv} is an \itc\ matrix of length at least \tm{n}.
 On exit, \var{A} and \var{ipiv} contain the factorization.
 Raises an \code{ArithmeticError} if the matrix is singular.
 \end{funcdesc}
@@ -544,13 +548,13 @@ Solves
 \[
  A X = B
 \]
-given the LDL${}\mathrm{^H}$ factorization computed by 
+given the $\mathrm{LDL^H}$ factorization computed by 
 \function{hetrf()} or \function{hesv()}.
 \end{funcdesc}
 
 \begin{funcdesc}{hetri}{A, ipiv\optional{, uplo='L'}}
 Computes the inverse of a real symmetric or complex Hermitian  matrix.
-On entry, \var{A} and \var{ipiv} contain the LDL${}\mathrm{^H}$ 
+On entry, \var{A} and \var{ipiv} contain the $\mathrm{LDL^H}$ 
 factorization computed by \function{hetrf()} or \function{hesv()}. 
 On exit, \var{A} contains the inverse.
 \end{funcdesc}
@@ -576,8 +580,8 @@ Solves a triangular set of equations
  A^TX=B \quad (\mathrm{trans} = \mathrm{'T'}), \qquad 
  A^HX=B \quad (\mathrm{trans} = \mathrm{'C'}), 
 \]
-where $A$ is real or complex and triangular of order $n$, 
-and \var{B} is a matrix with $n$ rows.
+where \tm{A} is real or complex and triangular of order \tm{n}, 
+and \var{B} is a matrix with \tm{n} rows.
 \var{A} and \var{B} are matrices with the same type (\dtc\ or \ztc).
 \function{trtrs()} is similar to \function{blas.trsm()}, except
 that it raises an \code{ArithmeticError} if a diagonal element of 
@@ -586,7 +590,7 @@ values).
 \end{funcdesc}
 
 \begin{funcdesc}{trtri}{A\optional{, uplo='L'\optional{, diag='N'}}}
-Computes the inverse of a real or complex triangular matrix $A$.  
+Computes the inverse of a real or complex triangular matrix \tm{A}.  
 On exit, \var{A} contains the inverse.
 \end{funcdesc}
 
@@ -598,28 +602,28 @@ Solves a triangular set of equations
  A^TX=B \quad (\mathrm{trans} = \mathrm{'T'}), \qquad 
  A^HX=B \quad (\mathrm{trans} = \mathrm{'C'}), 
 \]
-where $A$ is real or complex triangular band matrix of order $n$, 
-and \var{B} is a matrix with $n$ rows.
-The diagonals of $A$ are stored in \var{A} using the BLAS conventions 
+where \tm{A} is a real or complex triangular band matrix of order \tm{n}, 
+and \var{B} is a matrix with \tm{n} rows.
+The diagonals of \tm{A} are stored in \var{A} using the BLAS conventions 
 for triangular band matrices. 
 \var{A} and \var{B} are matrices with the same type (\dtc\ or \ztc).
-On exit, \var{B} is replaced by the solution $X$.
+On exit, \var{B} is replaced by the solution \tm{X}.
 \end{funcdesc}
 
 \section{Least-Squares and Least-Norm Problems}
 \begin{funcdesc}{gels}{A, B\optional{, trans='N'}}
 Solves least-squares and least-norm problems with a full rank 
-$m$ by $n$ matrix $A$.
+\tm{m} by \tm{n} matrix \tm{A}.
 
 \begin{enumerate}
-\item \var{trans} is \code{'N'}.  If $m$ is greater than or equal
-to $n$, \function{gels()} solves the least-squares problem
+\item \var{trans} is \code{'N'}.  If \tm{m} is greater than or equal
+to \tm{n}, \function{gels()} solves the least-squares problem
 \[
  \begin{array}{ll} 
  \mbox{minimize} & \|AX-B\|_F.
  \end{array} 
 \]
-If $m$ is less than or equal to $n$, \function{gels()} solves 
+If \tm{m} is less than or equal to \tm{n}, \function{gels()} solves 
 the least-norm problem
 \[
  \begin{array}{ll} 
@@ -629,7 +633,7 @@ the least-norm problem
 \]
 
 \item \var{trans} is \code{'T'} or \code{'C'} and \var{A} and \var{B}
-are real.  If $m$ is greater than or equal to $n$,
+are real.  If \tm{m} is greater than or equal to \tm{n},
 \function{gels()} solves the least-norm problem
 \[
  \begin{array}{ll} 
@@ -637,7 +641,7 @@ are real.  If $m$ is greater than or equal to $n$,
  \mbox{subject to} & A^TX=B.
  \end{array}
 \]
-If $m$ is less than or equal to $n$, \function{gels()} solves 
+If \tm{m} is less than or equal to \tm{n}, \function{gels()} solves 
 the least-squares problem
 \[
  \begin{array}{ll} 
@@ -646,7 +650,7 @@ the least-squares problem
 \]
 
 \item \var{trans} is \code{'C'} and \var{A} and \var{B}
-are complex. If $m$ is greater than or equal to $n$, 
+are complex. If \tm{m} is greater than or equal to \tm{n}, 
 \function{gels()} solves the least-norm problem
 \[
  \begin{array}{ll} 
@@ -654,7 +658,7 @@ are complex. If $m$ is greater than or equal to $n$,
  \mbox{subject to} & A^HX=B.
  \end{array}
 \]
-If $m$ is less than or equal to $n$, \function{gels()} solves 
+If \tm{m} is less than or equal to \tm{n}, \function{gels()} solves 
 the least-squares problem
 \[
  \begin{array}{ll} 
@@ -664,11 +668,11 @@ the least-squares problem
 \end{enumerate}
 \var{A} and \var{B} must have the same typecode (\dtc\ or \ztc).
 \var{trans} = \code{'T'} is not allowed if \var{A} is complex.
-On exit, the solution $X$ is stored as the leading submatrix 
+On exit, the solution \tm{X} is stored as the leading submatrix 
 of \var{B}.
 The array \var{A} is overwritten with details of the QR or the LQ 
-factorization of $A$.
-Note that \function{gels()} does not check whether $A$ is full rank.
+factorization of \tm{A}.
+Note that \function{gels()} does not check whether \tm{A} is full rank.
 \end{funcdesc}
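+
+A short sketch with made-up data (an overdetermined least-squares problem):
+\begin{verbatim}
+from cvxopt.base import matrix
+from cvxopt import lapack
+
+A = matrix([ [1.0, 1.0, 1.0], [0.0, 1.0, 2.0] ])   # 3 by 2
+b = matrix([1.0, 2.0, 2.0])
+B = +b                      # gels() overwrites its right-hand side
+lapack.gels(A, B)           # A is overwritten with its QR factorization
+x = B[:2]                   # the solution is the leading 2 by 1 submatrix
+print x
+\end{verbatim}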
 
 \begin{funcdesc}{geqrf}{A, tau}
@@ -676,15 +680,15 @@ QR factorization of a real or complex matrix \var{A}:
 \[
   A = Q R.
 \]
-If \var{A} is $m$ by $n$, then $Q$ is $m$ by $m$ 
-and orthogonal/unitary, and \var{R} is $m$ by $n$
-and upper triangular (if $m$ is greater than or equal to $n$), 
-or upper trapezoidal (if $m$ is less than or equal to $n$).  
+If \var{A} is \tm{m} by \tm{n}, then \tm{Q} is \tm{m} by \tm{m} 
+and orthogonal/unitary, and \var{R} is \tm{m} by \tm{n}
+and upper triangular (if \tm{m} is greater than or equal to \tm{n}), 
+or upper trapezoidal (if \tm{m} is less than or equal to \tm{n}).  
 \var{tau}  is a matrix of the same type as {\var A} and of length at 
-least $\min\{m, n\}$.
-On exit, $R$ is stored in the upper triangular part of \var{A}.
-The matrix $Q$ is stored as a product of $\min\{m, n\}$
-elementary reflectors in the first $\min\{m,n\}$ columns 
+least \tm{\min\{m, n\}}.
+On exit, \tm{R} is stored in the upper triangular part of \var{A}.
+The matrix \tm{Q} is stored as a product of \tm{\min\{m, n\}}
+elementary reflectors in the first \tm{\min\{m,n\}} columns 
 of \var{A} and in \var{tau}.
 \end{funcdesc}
 
@@ -699,8 +703,8 @@ Product with a real orthogonal matrix:
  Q^T & \mathrm{trans} = \mathrm{'T'},
 \end{array}\right.
 \]
-where $Q$ is square and orthogonal.  
-$Q$ is stored in \var{A} and \var{tau} as a product 
+where \tm{Q} is square and orthogonal.  
+\tm{Q} is stored in \var{A} and \var{tau} as a product 
 of min\{\var{A}.\member{size}[0], \var{A}.\member{size}[1]\} 
 elementary reflectors, as computed by \function{geqrf()}.
 \end{funcdesc}
@@ -717,8 +721,8 @@ Product with a real orthogonal or complex unitary matrix:
  Q^H & \mathrm{trans} = \mathrm{'C'},
 \end{array}\right.
 \]
-$Q$ is square and orthogonal or unitary.  
-$Q$ is stored in \var{A} and \var{tau} as a product of 
+\tm{Q} is square and orthogonal or unitary.  
+\tm{Q} is stored in \var{A} and \var{tau} as a product of 
 min\{\var{A}.\member{size}[0], \var{A}.\member{size}[1]\} 
 elementary reflectors, as computed by \function{geqrf()}.
 The arrays \var{A}, \var{tau} and \var{C} must have the same type.
@@ -748,14 +752,14 @@ by a direct call to \function{gels()}, and by separate calls to
 
 \section{Symmetric and Hermitian Eigenvalue Decomposition}
 The first four routines compute all or selected  eigenvalues and 
-eigenvectors of a real symmetric matrix $A$:
+eigenvectors of a real symmetric matrix \tm{A}:
 \[
  A = V\diag(\lambda)V^T,\qquad  V^TV = I.
 \]
 
 \begin{funcdesc}{syev}{A, W\optional{, jobz='N'\optional{, uplo='L'}}}
-Eigenvalue decomposition of a real symmetric matrix of order $n$.
-\var{W} is a real matrix of length at least $n$.
+Eigenvalue decomposition of a real symmetric matrix of order \tm{n}.
+\var{W} is a real matrix of length at least \tm{n}.
 On exit, \var{W} contains the eigenvalues in ascending order.
 If \var{jobz} is \code{'V'}, the eigenvectors are also computed
 and returned in \var{A}.
@@ -773,15 +777,16 @@ algorithm.  It is faster on large problems, but also uses more memory.
 range='A'\optional{, uplo='L'\optional{, vl=0.0, vu=0.0\optional{, 
 il=1, iu=1\optional{, Z=\None}}}}}}}
 Computes selected eigenvalues and eigenvectors of a real symmetric 
-matrix \var{A} of order $n$.
+matrix \var{A} of order \tm{n}.
 
-\var{W} is a real matrix of length at least $n$.
+\var{W} is a real matrix of length at least \tm{n}.
 On exit, \var{W} contains the eigenvalues in ascending order.
 If \var{range} is \code{'A'}, all the eigenvalues are computed.
-If \var{range} is \code{'I'}, eigenvalues $i_l$ through $i_u$
+If \var{range} is \code{'I'}, eigenvalues \tm{\s{i}{l}} through 
+\tm{\s{i}{u}}
 are computed, where $1 \leq i_l \leq i_u \leq n$. 
 If \var{range} is \code{'V'}, the eigenvalues in the interval 
-$(v_l,v_u]$ are computed. 
+\tm{(\s{v}{l},\s{v}{u}]} are computed. 
 
 If \var{jobz} is \code{'V'}, the (normalized) eigenvectors are 
 computed, and returned in \var{Z}.  If \var{jobz} is \code{'N'}, the 
@@ -789,8 +794,8 @@ eigenvectors are not computed.  In both cases, the contents of \var{A}
 are destroyed on exit.
 \var{Z} is optional (and not referenced) if \var{jobz} is \code{'N'}.
 It is required if \var{jobz} is \code{'V'} and must have at least
-$n$ columns if \var{range} is \code{'A'} or \code{'V'} and  at
-least $i_u-i_l+1$ columns if \var{range} is \code{'I'}.
+\tm{n} columns if \var{range} is \code{'A'} or \code{'V'} and  at
+least \tm{\s{i}{u}-\s{i}{l}+1} columns if \var{range} is \code{'I'}.
 
 \function{syevx()} returns the number of computed eigenvalues.
 \end{funcdesc}
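+
+For illustration, a sketch with made-up data using \function{syev()}:
+\begin{verbatim}
+from cvxopt.base import matrix
+from cvxopt import lapack
+
+A = matrix([ [3.0, 1.0], [1.0, 2.0] ])   # symmetric 2 by 2
+W = matrix(0.0, (2,1))
+lapack.syev(A, W, jobz = 'V')            # eigenvalues in W, eigenvectors in A
+print W
+\end{verbatim}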
@@ -814,7 +819,7 @@ For real symmetric matrices they are identical to the corresponding
 
 \begin{funcdesc}{heev}{A, W\optional{, jobz='N'\optional{, uplo='L'}}}
 Eigenvalue decomposition of a real symmetric or complex Hermitian
-matrix of order $n$.
+matrix of order \tm{n}.
 The calling sequence is identical to \function{syev()}, except that 
 \var{A} can be real or complex.
 \end{funcdesc}
@@ -827,7 +832,7 @@ This is an alternative to \function{heev()}.
 range='A'\optional{, uplo='L'\optional{, vl=0.0, vu=0.0 \optional{, 
 il=1, iu=n\optional{, Z=\None}}}}}}}
 Computes selected eigenvalues and eigenvectors of a real symmetric 
-or complex Hermitian matrix of order $n$.
+or complex Hermitian matrix of order \tm{n}.
 The calling sequence is identical to \function{syevx()},
 except that \var{A} can be real or complex.
 \var{Z} must have the same type as \var{A}.
@@ -846,7 +851,7 @@ Three types of generalized eigenvalue problems can be solved:
  ABZ = Z\diag(\lambda) \quad \mbox{(type 2)}, \qquad 
  BAZ = Z\diag(\lambda) \quad \mbox{(type 3)}, 
 \EEQ
-with $A$ and $B$ real symmetric or complex Hermitian, and $B$ positive 
+with \tm{A} and \tm{B} real symmetric or complex Hermitian, and \tm{B} positive 
 definite.
 The matrix of eigenvectors is normalized as follows:
 \[
@@ -857,12 +862,12 @@ The matrix of eigenvectors is normalized as follows:
 \begin{funcdesc}{sygv}{A, B, W\optional{, itype=1\optional{, 
 jobz='N'\optional{, uplo='L'}}}}
 Solves the generalized eigenproblem~(\ref{e-gevd}) for real symmetric 
-matrices of order $n$, stored in real matrices \var{A} and \var{B}.
+matrices of order \tm{n}, stored in real matrices \var{A} and \var{B}.
 \var{itype} is an integer with possible values 1, 2, 3, and specifies
 the type of eigenproblem.
-\var{W} is a real matrix of length at least $n$.
+\var{W} is a real matrix of length at least \tm{n}.
 On exit, it contains the eigenvalues in ascending order.
-On exit, \var{B} contains the Cholesky factor of $B$.
+On exit, \var{B} contains the Cholesky factor of \tm{B}.
 If \var{jobz} is \code{'V'}, the eigenvectors are computed
 and returned in \var{A}.
 If \var{jobz} is \code{'N'}, the eigenvectors are not returned and the 
@@ -872,7 +877,7 @@ contents of \var{A} are destroyed.
 \begin{funcdesc}{hegv}{A, B, W\optional{, itype=1\optional{, 
 jobz='N'\optional{, uplo='L'}}}}
 Generalized eigenvalue problem~(\ref{e-gevd}) of real symmetric or 
-complex Hermitian matrix of order $n$.
+complex Hermitian matrix of order \tm{n}.
 The calling sequence is identical to \function{sygv()},
 except that \var{A} and \var{B} can be real or complex.
 \end{funcdesc}
@@ -886,10 +891,10 @@ Singular value decomposition
 \[
  A = U \Sigma V^T, \qquad A = U \Sigma V^H
 \]
-of a real or complex $m$ by $n$ matrix \var{A}.
+of a real or complex \tm{m} by \tm{n} matrix \var{A}.
 
-\var{S} is a real matrix of length at least $\min\{m, n\}$.
-On exit, its first  $\min\{m, n\}$ elements are the 
+\var{S} is a real matrix of length at least \tm{\min\{m, n\}}.
+On exit, its first  \tm{\min\{m, n\}} elements are the 
 singular values in descending order.
 
 The argument \var{jobu} controls how many left singular vectors are
@@ -899,9 +904,9 @@ If \var{jobu} is \code{'N'}, no left singular vectors are
 computed.
 If \var{jobu} is \code{'A'}, all left singular vectors are computed 
 and returned as columns of \var{U}.
-If \var{jobu} is \code{'S'}, the first $\min\{m, n\}$ left 
+If \var{jobu} is \code{'S'}, the first \tm{\min\{m, n\}} left 
 singular vectors are computed and returned as columns of \var{U}.
-If \var{jobu} is \code{'O'}, the first $\min\{m, n\}$ left 
+If \var{jobu} is \code{'O'}, the first \tm{\min\{m, n\}} left 
 singular vectors are computed and returned as columns of \var{A}.
 The argument \var{U} is \None\ (if \var{jobu} is \code{'N'}
 or \code{'A'}) or a matrix of the same type as \var{A}.
@@ -912,10 +917,10 @@ and \code{'O'}.
 If \var{jobvt} is \code{'N'}, no right singular vectors are 
 computed.  If \var{jobvt} is \code{'A'}, all right singular vectors 
 are computed and returned as rows of \var{Vt}.
-If \var{jobvt} is \code{'S'}, the first $\min\{m, n\}$ right 
+If \var{jobvt} is \code{'S'}, the first \tm{\min\{m, n\}} right 
 singular vectors are computed and their (conjugate) transposes are
 returned as rows of \var{Vt}.
-If \var{jobvt} is \code{'O'}, the first $\min\{m, n\}$ right 
+If \var{jobvt} is \code{'O'}, the first \tm{\min\{m, n\}} right 
 singular vectors are computed and their (conjugate) transposes 
 are returned as rows of \var{A}.
 Note that the (conjugate) transposes of the right singular vectors 
@@ -928,40 +933,40 @@ On exit, the contents of \var{A} are destroyed.
 
 \begin{funcdesc}{gesdd}{A, S\optional{, jobz='N'\optional{, 
 U=\None\optional{, Vt=\None}}}} 
-Singular value decomposition of a real or complex $m$ by $n$ 
+Singular value decomposition of a real or complex \tm{m} by \tm{n} 
 matrix \var{A}.  This function is based on a divide-and-conquer 
 algorithm and is faster than \function{gesvd()}.
 
-\var{S} is a real matrix of length at least $\min\{m, n\}$.
-On exit, its first $\min\{m, n\}$ elements are the 
+\var{S} is a real matrix of length at least \tm{\min\{m, n\}}.
+On exit, its first \tm{\min\{m, n\}} elements are the 
 singular values in descending order.
 
 The argument \var{jobz} controls how many singular vectors are
 computed.  The possible values are \code{'N'}, \code{'A'}, \code{'S'} 
 and \code{'O'}. 
 If \var{jobz} is \code{'N'}, no singular vectors are computed.
-If \var{jobz} is \code{'A'}, all $m$ left singular vectors are 
-computed and returned as columns of \var{U} and all $n$ right 
+If \var{jobz} is \code{'A'}, all \tm{m} left singular vectors are 
+computed and returned as columns of \var{U} and all \tm{n} right 
 singular vectors are computed and returned as rows of \var{Vt}.
-If \var{jobz} is \code{'S'}, the first $\min\{m, n\}$ left 
+If \var{jobz} is \code{'S'}, the first \tm{\min\{m, n\}} left 
 and right singular vectors are computed and returned as columns of 
 \var{U} and rows of \var{Vt}.
-If \var{jobz} is \code{'O'} and $m$ is greater than or equal
-to $n$, the first $n$ left singular vectors are returned as
-columns of \var{A} and the $n$ right singular vectors are returned
-as rows of \var{Vt}.  If \var{jobz} is \code{'O'} and $m$ is less 
-than $n$, the $m$ left singular vectors are returned as columns
-of \var{U} and the first $m$ right singular vectors are returned 
+If \var{jobz} is \code{'O'} and \tm{m} is greater than or equal
+to \tm{n}, the first \tm{n} left singular vectors are returned as
+columns of \var{A} and the \tm{n} right singular vectors are returned
+as rows of \var{Vt}.  If \var{jobz} is \code{'O'} and \tm{m} is less 
+than \tm{n}, the \tm{m} left singular vectors are returned as columns
+of \var{U} and the first \tm{m} right singular vectors are returned 
 as rows of \var{A}.  
 Note that the (conjugate) transposes of the right singular vectors 
 are returned in \var{Vt} or \var{A}.
 
 The argument \var{U} can be \None\ (if \var{jobz} is \code{'N'}
-or \code{'A'} of \var{jobz} is \code{'O'} and $m$ is greater than
-or equal to  $n$)  or a matrix of the same type as \var{A}.
+or \code{'A'} or \var{jobz} is \code{'O'} and \tm{m} is greater than
+or equal to  \tm{n})  or a matrix of the same type as \var{A}.
 The argument \var{Vt} can be \None\ (if \var{jobz} is \code{'N'}
-or \code{'A'} or \var{jobz} is \code{'O'} and $m$ is less than
-$n$) or a matrix of the same type as \var{A}.
+or \code{'A'} or \var{jobz} is \code{'O'} and \tm{m} is less than
+\tm{n}) or a matrix of the same type as \var{A}.
 
 On exit, the contents of \var{A} are destroyed.
 \end{funcdesc}
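+
+A brief sketch with made-up data, computing a thin singular value 
+decomposition with \function{gesvd()}:
+\begin{verbatim}
+from cvxopt.base import matrix
+from cvxopt import lapack
+
+A  = matrix([ [1.0, 0.0, 1.0], [0.0, 1.0, 1.0] ])   # 3 by 2
+S  = matrix(0.0, (2,1))
+U  = matrix(0.0, (3,2))
+Vt = matrix(0.0, (2,2))
+lapack.gesvd(A, S, jobu = 'S', jobvt = 'S', U = U, Vt = Vt)
+print S                                  # singular values in descending order
+\end{verbatim}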
@@ -979,7 +984,7 @@ a positive definite set of linear equations
 \[
  A^T \diag(b-Ax)^{-2} A v = -\diag(b-Ax)^{-1}\ones
 \]
-(where $A$ has rows $a_i^T$), and a suitable step size is determined 
+(where \tm{A} has rows $a_i^T$), and a suitable step size is determined 
 by a backtracking line search.
 
 We use the level-3 BLAS function \function{syrk()} to form the Hessian 
@@ -1006,7 +1011,6 @@ def acent(A,b):
     m, n = A.size
     x = matrix(0.0, (n,1))
     H = matrix(0.0, (n,n))
-    g = matrix(0.0, (n,1))
 
     for iter in xrange(MAXITERS):
         
diff --git a/doc/modeling.tex b/doc/modeling.tex
index ce5817d..b358698 100644
--- a/doc/modeling.tex
+++ b/doc/modeling.tex
@@ -352,7 +352,7 @@ Linear equality and inequality constraints of the form
 \[
   f(x_1,\ldots,x_n) = 0, \qquad f(x_1,\ldots,x_n) \preceq  0, 
 \]
-where $f$ is a convex function, are represented by \pytype{constraint}
+where \tm{f} is a convex function, are represented by \pytype{constraint}
 objects.  Equality constraints are created by expressions of the form 
 \begin{quote}
 \code{\var{f1} == \var{f2}}. 
@@ -383,7 +383,7 @@ for a variable of length 5.
 >>> c3 = (sum(x) == 2)
 \end{verbatim}
 
-The built-in fucntion \function{len()} returns the dimension of the
+The built-in function \function{len()} returns the dimension of the
 constraint function.
 
 Constraints have four public attributes.
@@ -405,7 +405,7 @@ Its value is initialized as \None, and can be modified
 by making an assignment to \var{c}.\member{multiplier}.\member{value}.
 \end{memberdesc}
 
-\begin{memberdesc}{constraint}{name}
+\begin{memberdesc}{name}
 The name of the constraint.  Changing the name of a constraint
 also changes the name of the multiplier of \var{c}.
 For example, the command \code{\var{c}.\member{name} = 'newname'} also 
diff --git a/doc/printing.tex b/doc/printing.tex
index 276b008..d410acf 100644
--- a/doc/printing.tex
+++ b/doc/printing.tex
@@ -51,15 +51,15 @@ respectively.
 In order to make the built-in Python functions \function{repr()} and 
 \function{str()} accessible for further customization, two functions
 are provided in \module{cvxopt.base}.  
-The function \function{base.matrix_repr()} is used when \function{repr()}
+The function \function{base.matrix\_repr()} is used when \function{repr()}
 is called with a matrix argument;
-and \function{base.matrix_str()} is used when \function{str()}
+and \function{base.matrix\_str()} is used when \function{str()}
 is called with a matrix argument.  By default, the functions are set to
-\function{printing.matrix_repr_default()} and
-\function{printing.matrix_str_default()}, respectively,
+\function{printing.matrix\_repr\_default()} and
+\function{printing.matrix\_str\_default()}, respectively,
 but they can be redefined to any other Python functions. 
 For example, if we prefer \code{"A"} to return the same output
-as \code{"print A"}, we can simply redefine \function{base.matrix_repr()} 
+as \code{"print A"}, we can simply redefine \function{base.matrix\_repr()} 
 as shown below.
 \begin{verbatim}
 >>> from cvxopt import base, printing
@@ -74,10 +74,10 @@ as shown below.
 
 The formatting for sparse matrices is similar.  
 The functions \function{repr()} and \function{str()} for sparse
-matrices are \function{base.spmatrix_repr()} 
-and \function{base.spmatrix_str()}, respectively.
-By default, they are set to \function{printing.spmatrix_repr_default()}
-and \function{printing.spmatrix_repr_str()}.
+matrices are \function{base.spmatrix\_repr()} 
+and \function{base.spmatrix\_str()}, respectively.
+By default, they are set to \function{printing.spmatrix\_repr\_default()}
+and \function{printing.spmatrix\_str\_default()}.
 
 \begin{verbatim}
 >>> from cvxopt import base, printing 
@@ -102,7 +102,7 @@ and \function{printing.spmatrix_repr_str()}.
 
 As can be seen from the example, the default behaviour is to print
 the entire matrix including structural zeros. An alternative triplet 
-printing style is defined in \function{printing.spmatrix_str_triplet}. 
+printing style is defined in \function{printing.spmatrix\_str\_triplet}. 
 \begin{verbatim}
 >>> base.spmatrix_str = printing.spmatrix_str_triplet
 >>> print A
diff --git a/doc/solvers.tex b/doc/solvers.tex
index c7f0416..69e2f11 100644
--- a/doc/solvers.tex
+++ b/doc/solvers.tex
@@ -1,34 +1,32 @@
-\chapter{Nonlinear Convex Programming (\module{cvxopt.solvers})}
+\chapter{Nonlinear Convex Optimization (\module{cvxopt.solvers})}
 \label{chap:solvers}
 
-The functions in this chapter are intended for nonlinear convex 
+In this chapter we consider nonlinear convex 
 optimization problems of the form 
 \[ 
  \begin{array}{ll}
  \mbox{minimize} & f_0(x) \\
  \mbox{subject to} & f_k(x) \leq 0, \quad k=1,\ldots,m \\
   & G x \preceq h  \\ 
-  & A x = b,
+  & A x = b.
  \end{array}
 \]
-with the functions $f_k$ convex and twice differentiable.
-The linear inequalities are generalized inequality with respect to a 
-proper convex cone, defined as a product of a nonnegative orthant,
-second-order cones, and positive semidefinite cones.   
-
-The most important functions in this chapter are 
-\function{solvers.cp()} and \function{solvers.cpl()},
-described in sections~\ref{s-cp} and~\ref{s-cpl}.   
-There are also functions for two 
-special problem classes: quadratic programming (section~\ref{s-qp}) and 
-geometric programming~(section~\ref{s-gp}).
-In section~\ref{s-nlcp} we explain how customized solvers can be 
-implemented that exploit structure in specific classes of problems.
+The functions \tm{\s{f}{k}} are convex and twice differentiable, and the linear
+inequalities are generalized inequalities with respect to a proper convex 
+cone, defined as a product of a nonnegative orthant, second-order cones, 
+and positive semidefinite cones.   
+
+The basic functions are \function{cp()} and \function{cpl()}, described in 
+sections~\ref{s-cp} and~\ref{s-cpl}.   A simpler interface for geometric 
+programming problems is discussed in section~\ref{s-gp}.
+In section~\ref{s-nlcp} we explain how custom solvers can be implemented 
+that exploit structure in specific classes of problems.
 The last section describes the algorithm parameters that control the 
 solvers.
 
 \section{General Solver} \label{s-cp}
-\begin{funcdesc}{cp}{F\optional{, G, h\optional{, dims\optional{, A, b\optional{, kktsolver}}}}}
+\begin{funcdesc}{cp}{F\optional{, G, h\optional{, dims\optional{, 
+A, b\optional{, kktsolver}}}}}
 Solves a convex optimization problem
 \BEQ \label{e-nlcp}
  \begin{array}{ll}
@@ -46,35 +44,32 @@ sequences.
 \begin{itemize}
 \item \code{F()} returns a tuple (\var{m}, \var{x0}), where \var{m} is 
  the number of nonlinear constraints and \var{x0} is a point in the 
- domain of $f$.  \var{x0} is a dense real matrix of size 
- $(n, 1)$.
-
-\item \code{F(x)}, with \var{x} a dense real matrix of size 
- $(n,1)$, returns a tuple (\var{f}, \var{Df}).  
- \var{f} is a dense real matrix of size $(m+1,1)$, with 
- \code{\var{f}[\var{k}]} equal to $f_k(x)$. 
- (If $m$ is zero, \var{f} can also be returned as a number.)
- \var{Df} is a dense or sparse real matrix of size $(m+1, n)$ 
+ domain of \tm{f}.  \var{x0} is a dense real matrix of size (\tm{n}, 1).
+
+\item \code{F(x)}, with \var{x} a dense real matrix of size (\tm{n},1), 
+ returns a tuple (\var{f}, \var{Df}).  
+ \var{f} is a dense real matrix of size \tm{(m+1,1)}, with 
+ \code{\var{f}[\var{k}]} equal to \tm{\s{f}{k}(x)}. 
+ (If \tm{m} is zero, \var{f} can also be returned as a number.)
+ \var{Df} is a dense or sparse real matrix of size \tm{(m+1, n)} 
  with \code{\var{Df}[\var{k},:]} equal to the transpose of the gradient
- $\nabla f_k(x)$.
- If \var{x} is not in the domain of $f$, \code{F(x)} returns 
- \None\ or a tuple (\None,\None).
+ $\nabla f_k(x)$.  If \var{x} is not in the domain of \tm{f}, \code{F(x)} 
+ returns \None\ or a tuple (\None,\None).
 
 \item \code{F(x,z)}, with \var{x} a dense real matrix of size 
- $(n,1)$ and \var{z} a positive dense real matrix of size 
- $(m+1,1)$ returns a tuple (\var{f}, \var{Df}, \var{H}).  
+ (\tm{n},1) and \var{z} a positive dense real matrix of size 
+ (\tm{m}+1,1) returns a tuple (\var{f}, \var{Df}, \var{H}).  
  \var{f} and \var{Df} are defined as above.  
- \var{H} is a square dense or sparse real matrix of size 
- $(n, n)$, whose lower triangular part contains the lower 
- triangular part of
+ \var{H} is a square dense or sparse real matrix of size (\tm{n}, \tm{n}), 
+ whose lower triangular part contains the lower triangular part of
  \[
   z_0 \nabla^2f_0(x) + z_1 \nabla^2f_1(x) + \cdots + z_m \nabla^2f_m(x).
  \]
  If \var{F} is called with two arguments, it can be assumed that 
- $x$ is in the domain of $f$.
+ \tm{x} is in the domain of \tm{f}.
 \end{itemize}
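The sketch below (not part of the CVXOPT distribution; the problem and
data are hypothetical) illustrates these calling sequences for the
problem of minimizing $-\sum_i \log x_i$ over $\reals^3$, with no
nonlinear constraints (\tm{m} is zero).  The linear constraint
$x \preceq \ones$ is supplied when the solver is called further below.
\begin{verbatim}
from cvxopt.base import matrix, spdiag, log
from cvxopt import solvers

n = 3
def F(x = None, z = None):
    if x is None:  return 0, matrix(0.5, (n,1))  # m = 0, x0 in the domain of f
    if min(x) <= 0.0:  return None               # x is outside the domain of f
    f = -sum(log(x))                             # f0(x), returned as a number
    Df = -(x**-1).T                              # 1 by n matrix of derivatives
    if z is None:  return f, Df
    H = spdiag(z[0] * x**-2)                     # z[0] * nabla^2 f0(x)
    return f, Df, H
\end{verbatim}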
 
-The linear inequalities are with respect to a cone $C$ defined as a 
+The linear inequalities are with respect to a cone \tm{C} defined as a 
 Cartesian product of a nonnegative orthant, a number of second-order 
 cones, and a number of positive semidefinite cones:
 \[
@@ -85,12 +80,12 @@ with
 \[
 C_0 = 
  \{ u \in \reals^l \;| \; u_k \geq 0, \; k=1, \ldots,l\}, \qquad 
-C_{k+1} = \{ (u_0, u_1) \in \reals \times \reals^{q_{k}-1} \; | \;
+C_{k+1} = \{ (u_0, u_1) \in \reals \times \reals^{r_{k}-1} \; | \;
    u_0 \geq \|u_1\|_2 \},  \quad k=0,\ldots, M-1, \qquad 
 C_{k+M+1} = \left\{ \svec(u) \; | \;
-  u \in \symm^{p_k}_+ \right\}, \quad k=0,\ldots,N-1.
+  u \in \symm^{t_k}_+ \right\}, \quad k=0,\ldots,N-1.
 \]
-Here $\svec(u)$ denotes a symmetric matrix $u$ stored as a vector 
+Here $\svec(u)$ denotes a symmetric matrix \tm{u} stored as a vector 
 in column major order.  
 
 The arguments \var{h} and \var{b} are real single-column dense 
@@ -99,15 +94,15 @@ The default values for \var{A} and \var{b} are sparse matrices with
 zero rows, meaning that there are no equality constraints.  
 The number of rows of \var{G} and \var{h} is equal to
 \[
- K = l + \sum_{k=0}^{M-1} q_k + \sum_{k=0}^{N-1} p_k^2.
+ K = l + \sum_{k=0}^{M-1} r_k + \sum_{k=0}^{N-1} t_k^2.
 \]
 The columns of \var{G} and \var{h} are vectors in
 \[
-\reals^l \times \reals^{q_0} \times \cdots \times 
-\reals^{q_{M-1}} \times \reals^{p_0^2}  \times \cdots \times 
-\reals^{p_{N-1}^2},
+\reals^l \times \reals^{r_0} \times \cdots \times 
+\reals^{r_{M-1}} \times \reals^{t_0^2}  \times \cdots \times 
+\reals^{t_{N-1}^2},
 \]
-where the last $N$ components represent symmetric matrices stored in 
+where the last \tm{N} components represent symmetric matrices stored in 
 column major order.  The strictly upper triangular entries of these 
 matrices are not accessed (i.e.,  the symmetric matrices are stored
 in the 'L'-type column major order used in the \module{blas} and
@@ -116,11 +111,11 @@ in the 'L'-type column major order used in the \module{blas} and
 The argument \var{dims} is a dictionary with the dimensions of the 
 cones.  It has three fields. 
 \begin{description}
-\item[\var{dims['l']}:] $l$, the dimension of the nonnegative orthant
+\item[\var{dims['l']}:] \tm{l}, the dimension of the nonnegative orthant
  (a nonnegative integer).
-\item[\var{dims['q']}:] $[q_0, \ldots, q_{M-1}]$, 
+\item[\var{dims['q']}:] $[r_0, \ldots, r_{M-1}]$, 
 a list with the dimensions of the second-order cones (positive integers).
-\item[\var{dims['s']}:] $[p_0, \ldots, p_{N-1}]$, 
+\item[\var{dims['s']}:] $[t_0, \ldots, t_{N-1}]$, 
 a list with the dimensions of the positive semidefinite cones
 (nonnegative integers).
 \end{description}
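For example, a cone consisting of a nonnegative orthant of dimension 2,
two second-order cones of dimension 4, and one positive semidefinite
cone of order 3 is specified by the dictionary
\begin{verbatim}
dims = {'l': 2, 'q': [4, 4], 's': [3]}
\end{verbatim}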
@@ -142,10 +137,10 @@ The possible values of the \code{'status'} key are:
   the \code{'snl'} and \code{'sl'} entries are the corresponding
  slacks in the nonlinear and linear inequality constraints, and the 
 \code{'znl'}, \code{'zl'} and \code{'y'} entries are the optimal 
-values of the dual variables associated with the nonlinear 
-inequalities, the linear inequalities, and the linear equality 
-constraints.  These vectors approximately satisfy the 
-Karush-Kuhn-Tucker (KKT) conditions
+ values of the dual variables associated with the nonlinear 
+ inequalities, the linear inequalities, and the linear equality 
+ constraints.  These vectors approximately satisfy the 
+ Karush-Kuhn-Tucker (KKT) conditions
 \[
  \nabla f_0(x) +  D\tilde f(x)^T z_\mathrm{nl} + 
  G^T z_\mathrm{l} + A^T y = 0, \qquad
@@ -162,8 +157,8 @@ s_\mathrm{nl}^T z_\mathrm{nl} +  s_\mathrm{l}^T z_\mathrm{l} = 0.
 
 \item[\code{'unknown'}] This indicates that the algorithm reached
 the maximum number of iterations before a solution was found.
-The \code{'x'}, \code{'snl'}, \code{'sl'}, 
-\code{'y'}, \code{'znl'} and \code{'zl'} entries are \None. 
+The \code{'x'}, \code{'snl'}, \code{'sl'}, \code{'y'}, \code{'znl'} and 
+\code{'zl'} entries are \None. 
 \end{description}
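Continuing the hypothetical sketch started earlier in this section, the
call below adds the constraint $x \preceq \ones$ and examines the
entries of the returned dictionary.
\begin{verbatim}
G, h = spdiag(matrix(1.0, (n,1))), matrix(1.0, (n,1))   # x <= 1
sol = solvers.cp(F, G, h)
if sol['status'] == 'optimal':
    x, snl, sl = sol['x'], sol['snl'], sol['sl']   # solution and slacks
    znl, zl, y = sol['znl'], sol['zl'], sol['y']   # dual variables
\end{verbatim}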
 
 \function{cp()} requires that the problem is solvable and that 
@@ -173,7 +168,7 @@ The \code{'x'}, \code{'snl'}, \code{'sl'},
 \sum_{k=0}^m z_k \nabla^2 f_k(x) & A^T &
  \nabla f_1(x) & \cdots \nabla f_m(x) & G^T \end{array}\right]\right) = n,
 \]
-for all $x$ and all positive $z$. 
+for all \tm{x} and all positive \tm{z}. 
 \end{funcdesc}
 
 
@@ -289,7 +284,8 @@ is simpler to use than \function{cp()}.  (In fact, \function{cp()}
 converts the problem to one with a linear objective, and then 
 calls~\function{cpl()}.) 
 
-\begin{funcdesc}{cpl}{c, F\optional{, G, h\optional{, dims\optional{, A, b\optional{, kktsolver}}}}}
+\begin{funcdesc}{cpl}{c, F\optional{, G, h\optional{, dims\optional{, 
+A, b\optional{, kktsolver}}}}}
 Solves a convex optimization problem with a linear objective
 \[
  \begin{array}{ll}
@@ -306,33 +302,32 @@ Solves a convex optimization problem with a linear objective
 It must handle the following calling sequences.
 
 \begin{itemize}
-\item \code{F()} returns a tuple (\var{m}, \var{x0}), where \var{m} is 
+\item \code{F()} returns a tuple (\var{m}, \var{x0}), where \tm{m} is 
  the number of nonlinear constraints and \var{x0} is a point in the 
- domain of $f$.  \var{x0} is a dense real matrix of size $(n, 1)$.
+ domain of \tm{f}.  \var{x0} is a dense real matrix of size (\tm{n}, 1).
 
-\item \code{F(x)}, with \var{x} a dense real matrix of size 
- $(n, 1)$, returns a tuple (\var{f}, \var{Df}).  
- \var{f} is a dense real matrix of size $(m, 1)$, with 
- \code{\var{f}[\var{k}]} equal to $f_k(x)$. 
- \var{Df} is a dense or sparse real matrix of size $(m,n$ 
+\item \code{F(x)}, with \var{x} a dense real matrix of size \tm{(n, 1)}, 
+ returns a tuple (\var{f}, \var{Df}).  \var{f} is a dense real matrix of 
+ size (\tm{m}, 1), with \code{\var{f}[\var{k}]} equal to \tm{\s{f}{k}(x)}. 
+ \var{Df} is a dense or sparse real matrix of size \tm{(m,n)} 
  with \code{\var{Df}[\var{k},:]} equal to the transpose of the gradient
  $\nabla f_k(x)$.
- If \var{x} is not in the domain of $f$, \code{F(x)} returns 
+ If \var{x} is not in the domain of \tm{f}, \code{F(x)} returns 
  \None\ or a tuple (\None,\None).
 
 \item \code{F(x,z)}, with \var{x} a dense real matrix of size 
- $(n,1)$ and \var{z} a positive dense real matrix of size 
- $(m,1)$ returns a tuple (\var{f}, \var{Df}, \var{H}).  
+ (\tm{n},1) and \var{z} a positive dense real matrix of size 
+ (\tm{m},1) returns a tuple (\var{f}, \var{Df}, \var{H}).  
  \var{f} and \var{Df} are defined as above.  
  \var{H} is a square dense or sparse real matrix of size 
- $(n,n)$, whose lower triangular part contains the lower 
+ (\tm{n},\tm{n}), whose lower triangular part contains the lower 
  triangular part of
  \[
   z_0 \nabla^2f_0(x) + z_1 \nabla^2f_1(x) + \cdots + 
  z_{m-1} \nabla^2f_{m-1}(x).
  \]
  If \var{F} is called with two arguments, it can be assumed that 
- $x$ is in the domain of $f$.
+ \tm{x} is in the domain of \tm{f}.
 \end{itemize}
 
 The other arguments have the same meaning as in~\function{cp()}.
@@ -345,7 +340,7 @@ The other arguments have the same meaning as in~\function{cp()}.
  \nabla f_0(x) & \cdots \nabla f_{m-1}(x) & G^T \end{array}\right]\right) 
  = n,
 \]
-for all $x$ and all positive $z$. 
+for all \tm{x} and all positive \tm{z}. 
 \end{funcdesc}
 
 
@@ -516,144 +511,6 @@ pylab.show()
 \end{center}
 \end{description}
 
-\section{Quadratic Programming} \label{s-qp}
-\begin{funcdesc}{qp}{P, q, \optional{, G, h \optional{, A, b\optional{,
-solver}}}}
-Solves a convex quadratic program  
-\[
-\begin{array}{ll}
-\mbox{minimize} & (1/2) x^TPx + q^T x \\
-\mbox{subject to} & Gx \preceq h \\ & Ax = b.
-\end{array}
-\]
-
-\var{P} is a square dense or sparse real matrix, representing a 
-symmetric matrix in \code{'L'} storage, \ie, only the lower 
-triangular part of \var{P} is referenced.
-\var{G} and \var{A} are dense or sparse real matrices.
-Their default values are sparse matrices with zero columns.
-\var{q}, \var{h} and \var{b} are single-column real dense matrices.
-The default values of \var{h} and \var{b} are matrices of size (0,1).
-
-The default CVXOPT solver is used when the \var{solver} argument
-is absent or \None.  The MOSEK solver (if installed) can be 
-selected by setting \var{solver}=\code{'mosek'}.
-
-\function{qp()} returns a dictionary with keys 
-\code{'status'}, \code{'x'}, \code{'s'}, \code{'y'}, \code{'z'}.
-The possible values of the \code{'status'} key are as follows.
-\begin{description}
-\item[\code{'optimal'}]  In this case the 
-\code{'x'} entry is the primal optimal solution,
-the \code{'s'} entry is the corresponding slack in the inequality
-constraints, the \code{'z'} and \code{'y'} entries are the optimal 
-values of the dual variables associated with the linear inequality 
-and linear equality constraints.
-These values (approximately) satisfy the optimality conditions
-\[
- Px + q + G^T z + A^T y = 0, \qquad Gx + s = h, \qquad
- Ax = b, \qquad s \succeq 0, \qquad z \succeq 0, \qquad s^T z = 0. 
-\]
-
-\item [\code{'primal infeasible'}]  This only applies when
-\var{solver} is \code{'mosek'}, and means that a certificate of
-primal infeasibility has been found.   The \code{'x'} and \code{'s'} 
-entries are \None, and the
-\code{'z'} and \code{'y'} entries are vectors that approximately satisfy
-\[
- G^Tz + A^T y = 0, \qquad h^Tz + b^Ty = -1, \qquad z \succeq 0.
-\]
-
-\item [\code{'dual infeasible'}]  This only applies when
-\var{solver} is \code{'mosek'}, and means that a certificate of
-dual infeasibility has been found.   The \code{'z'} and \code{'y'}
-entries are \None, and the \code{'x'} and \code{'s'} entries are
-vectors that approximately satisfy
-\[
- Px = 0, \qquad q^Tx = -1, \qquad Gx + s = 0, \qquad Ax=0, \qquad
- s \succeq  0.
-\]
-
-\item[\code{'unknown'}] This means that the algorithm reached
-the maximum number of iterations before a solution was found.
-The \code{'x'}, \code{'s'}, \code{'y'}, \code{'z'} entries are \None. 
-\end{description}
-\end{funcdesc}
-
-As an example we compute the trade-off curve on page 187
-of the book \citetitle{http://www.stanford.edu/\~{}boyd/cvxbook}{Convex 
-Optimization}, by solving the quadratic program 
-\[
-\begin{array}{ll}
-\mbox{minimize} & -\bar p^T x + \mu x^T S x \\
-\mbox{subject to} & \ones^T x = 1, \quad x \succeq 0
-\end{array}
-\]
-for a sequence of positive values of {\it mu}. 
-The code below computes the trade-off curve and produces two figures 
-using the \ulink{Matplotlib}{http://matplotlib.sourceforge.net} package.
-\begin{center}
-\includegraphics[width=10cm]{figures/portfolio1.eps}
-\hspace*{\fill}
-\includegraphics[width=10cm]{figures/portfolio2.eps}
-\end{center}
-
-\begin{verbatim}
-from math import sqrt
-from cvxopt.base import matrix
-from cvxopt.blas import dot 
-from cvxopt.solvers import qp
-import pylab
-
-# Problem data.
-n = 4
-S = matrix([[ 4e-2,  6e-3, -4e-3,    0.0 ], 
-            [ 6e-3,  1e-2,  0.0,     0.0 ],
-            [-4e-3,  0.0,   2.5e-3,  0.0 ],
-            [ 0.0,   0.0,   0.0,     0.0 ]])
-pbar = matrix([.12, .10, .07, .03])
-G = matrix(0.0, (n,n))
-G[::n+1] = -1.0
-h = matrix(0.0, (n,1))
-A = matrix(1.0, (1,n))
-b = matrix(1.0)
-
-# Compute trade-off.
-N = 100
-mus = [ 10**(5.0*t/N-1.0) for t in xrange(N) ]
-portfolios = [ qp(mu*S, -pbar, G, h, A, b)['x'] for mu in mus ]
-returns = [ dot(pbar,x) for x in portfolios ]
-risks = [ sqrt(dot(x, S*x)) for x in portfolios ]
-
-# Plot trade-off curve and optimal allocations.
-pylab.figure(1, facecolor='w')
-pylab.plot(risks, returns)
-pylab.xlabel('standard deviation')
-pylab.ylabel('expected return')
-pylab.axis([0, 0.2, 0, 0.15])
-pylab.title('Risk-return trade-off curve (fig 4.12)')
-pylab.yticks([0.00, 0.05, 0.10, 0.15])
-
-pylab.figure(2, facecolor='w')
-c1 = [ x[0] for x in portfolios ] 
-c2 = [ x[0] + x[1] for x in portfolios ]
-c3 = [ x[0] + x[1] + x[2] for x in portfolios ] 
-c4 = [ x[0] + x[1] + x[2] + x[3] for x in portfolios ]
-pylab.fill(risks + [.20], c1 + [0.0], '#F0F0F0') 
-pylab.fill(risks[-1::-1] + risks, c2[-1::-1] + c1, facecolor = '#D0D0D0') 
-pylab.fill(risks[-1::-1] + risks, c3[-1::-1] + c2, facecolor = '#F0F0F0') 
-pylab.fill(risks[-1::-1] + risks, c4[-1::-1] + c3, facecolor = '#D0D0D0') 
-pylab.axis([0.0, 0.2, 0.0, 1.0])
-pylab.xlabel('standard deviation')
-pylab.ylabel('allocation')
-pylab.text(.15,.5,'x1')
-pylab.text(.10,.7,'x2')
-pylab.text(.05,.7,'x3')
-pylab.text(.01,.7,'x4')
-pylab.title('Optimal allocations (fig 4.12)')
-pylab.show()
-\end{verbatim}
-
 \section{Geometric Programming} \label{s-gp}
 \begin{funcdesc}{gp}{K, F, g \optional{, G, h \optional{, A, b}}}
 Solves a geometric program in convex form
@@ -673,9 +530,9 @@ where
  g = \left[ \begin{array}{cccc}
  g_0^T & g_1^T & \cdots & g_m^T \end{array}\right]^T. 
 \]
-\var{K} is a list of $m+1$ positive integers with 
+\var{K} is a list of \tm{m+1} positive integers with 
 \code{\var{K}[\var i]}
-equal to the number of rows in $F_i$.
+equal to the number of rows in \tm{\s{F}{i}}.
 \var{F} is a dense or sparse real matrix of 
 size \code{(sum(\var K),\var n)}.
 \var{g} is a dense real matrix with one column and the same number of
@@ -733,7 +590,7 @@ The  posynomial form of the problem is
   &   (1/\delta)dw^{-1} \leq 1
  \end{array}
 \]
-with variables $h$, $w$, $d$.
+with variables \tm{h}, \tm{w}, \tm{d}.
 
 \begin{verbatim}
 from cvxopt.base import matrix, log, exp
@@ -780,7 +637,7 @@ where
  \nabla f_1(x) & \cdots & \nabla f_m(x) & G^T \end{array}\right]^T.
 \]
 
-The matrix $W$ depends on the current iterates and is defined as follows.
+The matrix \tm{W} depends on the current iterates and is defined as follows.
 Suppose 
 \[
  u = \left(u_\mathrm{nl}, \; u_\mathrm{l}, \; u_{\mathrm{q},0}, \; 
@@ -788,10 +645,10 @@ Suppose
  u_{\mathrm{q},M-1}, \; \svec{(u_{\mathrm{s},0})}, \; \ldots, \; 
   \svec{(u_{\mathrm{s},N-1})}\right), \qquad
  u_\mathrm{l} \in\reals^l, \qquad 
- u_{\mathrm{q},k} \in\reals^{q_k}, \quad k = 0,\ldots,M-1, \qquad 
- u_{\mathrm{s},k} \in\symm^{p_k},  \quad k = 0,\ldots,N-1.
+ u_{\mathrm{q},k} \in\reals^{r_k}, \quad k = 0,\ldots,M-1, \qquad 
+ u_{\mathrm{s},k} \in\symm^{t_k},  \quad k = 0,\ldots,N-1.
 \]
-Then $W$ is a block-diagonal matrix, 
+Then \tm{W} is a block-diagonal matrix, 
 \[
  Wu = \left( W_\mathrm{nl} u_\mathrm{nl}, \; 
  W_\mathrm{l} u_\mathrm{l}, \;
@@ -824,7 +681,7 @@ This transformation is symmetric:
   W_\mathrm{l}^T = W_\mathrm{l}. 
 \]
 
-\item The next $M$ blocks are positive multiples of \emph{hyperbolic 
+\item The next \tm{M} blocks are positive multiples of \emph{hyperbolic 
  Householder transformations}:
 \[
   W_{\mathrm{q},k} = \beta_k ( 2 v_k v_k^T - J),
@@ -843,7 +700,7 @@ These transformations are also symmetric:
   W_{\mathrm{q},k}^T = W_{\mathrm{q},k}. 
 \]
 
-\item The last $N$ blocks are \emph{congruence transformations} with 
+\item The last \tm{N} blocks are \emph{congruence transformations} with 
  nonsingular matrices:
 \[
   W_{\mathrm{s},k} \svec{(u_{\mathrm{s},k})} = 
@@ -871,22 +728,22 @@ solving the KKT equations.
 This function will be called as \samp{f = kktsolver(x, z, W)}. 
 The argument \var{x} is the point at which the derivatives in the
 KKT matrix are evaluated.  \var{z} is a positive vector of 
-length it $m + 1$, containing the coefficients in the 1,1 block $H$.
+length \tm{m + 1}, containing the coefficients in the 1,1 block \tm{H}.
 \var{W} is a dictionary that contains the parameters of the scaling:
 
 \BIT
 \item \code{W['dnl']} is the positive vector that defines the diagonal
  scaling for the nonlinear inequalities.   
  \code{W['dnli']} is its componentwise inverse.
-\item \code{W['dl']} is the positive vector that defines the diagonal
+\item \code{W['d']} is the positive vector that defines the diagonal
  scaling for the componentwise linear inequalities.   
- \code{W['dli']} is its componentwise inverse.
-\item \code{W['beta']} and \code{W['v']} are lists of length $M$ with 
+ \code{W['di']} is its componentwise inverse.
+\item \code{W['beta']} and \code{W['v']} are lists of length \tm{M} with 
  the coefficients and vectors that define the hyperbolic Householder 
  transformations.
-\item \code{W['r']} is a list of length $N$ with the matrices that
+\item \code{W['r']} is a list of length \tm{N} with the matrices that
 define the congruence transformations.  
- \code{W['rti']} is  a list of length $N$ with the transposes of the 
+ \code{W['rti']} is a list of length \tm{N} with the transposes of the 
  inverses of the matrices in \code{W['r']}.
 \EIT
 
@@ -973,153 +830,97 @@ If \var{G}, \var{A}, \var{Df}, or \var{H} are Python functions, then
 the argument \var{kktsolver} must also be provided.
 \end{description}
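The sketch below shows only this calling convention; the factorization
step is problem dependent and omitted here, and the argument names are
illustrative (a complete implementation appears in the example below).
\begin{verbatim}
def kktsolver(x, z, W):
    # Factor the KKT matrix for the current x, z and scaling W, using
    # the entries W['dnl'], W['d'], W['beta'], W['v'], W['r'], W['rti'].
    def f(bx, by, bz):
        # Solve the factored KKT system; the right-hand sides bx, by, bz
        # are overwritten in place with the solution.
        pass
    return f
\end{verbatim}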
 
-As an example, we consider the 1-norm regularized least-squares problem
-\[
-\begin{array}{ll}
-\mbox{minimize} & \|Ax - y\|_2^2 + \|x\|_1
-\end{array}
-\]
-with variable {\it x}.  The problem is equivalent to the quadratic 
-program
+As an example, we consider the unconstrained problem
 \[
  \begin{array}{ll}
- \mbox{minimize} & \|Ax - y\|_2^2 + \ones^T u \\
- \mbox{subject to} & -u \preceq x \preceq u
+ \mbox{minimize} & (1/2)\|Ax-b\|_2^2 - \sum_{i=1}^n \log(1-x_i^2)
  \end{array}
 \]
-with variables $x$ and $u$.  The implementation below is 
-efficient when $A$ has many more columns than rows. 
+where \tm{A} is an \tm{m} by \tm{n} matrix with \tm{m} less than \tm{n}.  
+The Hessian of the objective is diagonal plus a low-rank term:
+\[
+  H = A^TA + \diag(d), \qquad d_i = \frac{2(1+x_i^2)}{(1-x_i^2)^2}.
+\]
+We can exploit this property when solving~(\ref{e-cp-kkt}) by applying
+the matrix inversion lemma. We first solve  
+\[
+ (A \diag(d)^{-1}A^T + I) v = (1/z_0) A \diag(d)^{-1}b_x,
+\]
+and then obtain
+\[
+ u_x = \diag(d)^{-1}(b_x/z_0 - A^T v).
+\]
+The code below implements this method.  It also uses BLAS functions
+for matrix-matrix and matrix-vector products.
 
 \begin{verbatim}
-from cvxopt.base import matrix, spdiag, mul, div
-from cvxopt import blas, lapack, solvers
+from cvxopt.base import matrix, spdiag, mul, div, log 
+from cvxopt import base, blas, lapack, solvers
 
-m, n = A.size
-def F(x=None):
+def l2ac(A, b):
     """
-    Function and gradient evaluation of
+    Solves
 
-        f = || A*x[:n] - y ||_2^2 +  sum(x[n:])
-    """
+        minimize  (1/2) * ||A*x-b||_2^2 - sum log (1-xi^2)
 
-    nvars = 2*n
-    if x is None: return 0, matrix(0.0, (nvars,1))
-    r = A*x[:n] - y
-    f = blas.nrm2(r)**2 + sum(x[n:])
-    gradf = matrix(1.0, (1,2*n))
-    blas.gemv(A, r, gradf, alpha=2.0, trans='T')  
-    if z is None:
-        return f, gradf
-    else:
-        def Hf(u, v, alpha = 1.0, beta = 0.0):
-            # A function that evaluates 
-            #
-            #     v := alpha * [2.0 * z[0] * A'*A, 0; 0, 0] * u + beta * v
-            #
-            blas.scal(beta, v)
-            w = matrix(0.0, (m, 1))
-            blas.gemv(A, u, w)
-            blas.gemv(A, w, v, alpha = 2.0 * z[0] * alpha, beta = 1.0, trans = 'T')
-        return f, gradf, Hf
-
-
-
-def G(u, v, alpha=1.0, beta=0.0, trans='N'):
-    """
-        v := alpha*[I, -I; -I, -I] * u + beta * v  (trans = 'N' or 'T')
+    assuming A is m x n with m << n.
     """
 
-    v *= beta
-    v[:n] += alpha*(u[:n] - u[n:])
-    v[n:] += alpha*(-u[:n] - u[n:])
-
-h = matrix(0.0, (2*n,1))
-
-
-# Customized solver for the KKT system 
-#
-#     [  2.0*z[0]*A'*A  0    I      -I     ] [x[:n] ]     [bx[:n] ]
-#     [  0              0   -I      -I     ] [x[n:] ]  =  [bx[n:] ].
-#     [  I             -I   -D1^-1   0     ] [zl[:n]]     [bzl[:n]]
-#     [ -I             -I    0      -D2^-1 ] [zl[n:]]     [bzl[n:]]
-#
-# where D1 = W['dli'][:n]**2, D2 = W['dli'][n:]**2, 
-#    
-# We first eliminate zl and x[n:]:
-#
-#     ( 2*z[0]*A'*A + 4*D1*D2*(D1+D2)^-1 ) * x[:n] = bx[:n] - (D2-D1)*(D1+D2)^-1 * bx[n:] 
-#         + D1 * ( I + (D2-D1)*(D1+D2)^-1 ) * bzl[:n] - D2 * ( I - (D2-D1)*(D1+D2)^-1 ) * bzl[n:]           
-#
-#     x[n:] = (D1+D2)^-1 * ( bx[n:] - D1*bzl[:n]  - D2*bzl[n:] ) - (D2-D1)*(D1+D2)^-1 * x[:n]         
-#     zl[:n] = D1 * ( x[:n] - x[n:] - bzl[:n] )
-#     zl[n:] = D2 * (-x[:n] - x[n:] - bzl[n:] ).
-#
-# The first equation has the form
-#
-#     (z[0]*A'*A + D)*x[:n]  =  rhs
-#
-# and is equivalent to
-#
-#     [ D    A'       ] [ x:n] ]  = [ rhs ]
-#     [ A   -1/z[0]*I ] [ v    ]    [ 0   ].
-#
-# It can be solved as 
-#
-#     ( A*D^-1*A' + 1/z[0]*I ) * v = A * D^-1 * rhs
-#     x[:n] = D^-1 * ( rhs - A'*v ).
-
-S = matrix(0.0, (m,m))
-Asc = matrix(0.0, (m,n))
-v = matrix(0.0, (m,1))
-def Fkkt(x, z, W):
-
-    # Factor 
+    m, n = A.size
+    def F(x = None, z = None):
+        if x is None: 
+            return 0, matrix(0.0, (n,1))
+        if max(abs(x)) >= 1.0: 
+            return None 
+        # r = A*x - b
+        r = +b
+        blas.gemv(A, x, r, beta = -1.0)
+        w = x**2
+        f = 0.5 * blas.nrm2(r)**2  - sum(log(1-w))
+        # gradf = A'*r + 2.0 * x ./ (1-w)
+        gradf = div(x, 1.0 - w)
+        blas.gemv(A, r, gradf, trans = 'T', beta = 2.0)
+        if z is None:
+            return f, gradf.T
+        else:
+            def Hf(u, v, alpha = 1.0, beta = 0.0):
+               # v := alpha * (A'*A*u + 2*((1+w)./(1-w).^2).*u) + beta * v
+               v *= beta
+               v += 2.0 * alpha * mul(div(1.0+w, (1.0-w)**2), u)
+               blas.gemv(A, u, r)
+               blas.gemv(A, r, v, alpha = alpha, beta = 1.0, trans = 'T')
+            return f, gradf.T, Hf
+
+
+    # Custom solver for the Newton system
     #
-    #     S = A*D^-1*A' + 1/z[0]*I 
+    #     z[0]*(A'*A + D)*x = bx
     #
-    # where D = 2*D1*D2*(D1+D2)^-1, D1 = dl[:n]**2, D2 = dl[n:]**2.
-
-    d1, d2 = W['dli'][:n]**2, W['dli'][n:]**2   # d1 = diag(D1), d2 = diag(D2)
-    # ds is square root of diagonal of D
-    ds = sqrt(2.0) * div( mul(dl[:n], dl[n:]), sqrt(d1+d2) )
-    d3 =  div(d2 - d1, d1 + d2)
- 
-    # Asc = A*diag(d)^-1/2
-    Asc = A * spdiag(ds**-1)
-
-    # S = 1/z[0]*I + A * D^-1 * A'
-    blas.syrk(Asc, S)
-    S[::m+1] += 1.0 / z[0] 
-    lapack.potrf(S)
-
-    def g(x, y, z):
-
-        x[:n] = 0.5 * ( x[:n] - mul(d3, x[n:]) + mul(d1, z[:n] + mul(d3, z[:n])) - mul(d2, z[n:] - mul(d3, z[n:])) )
-        x[:n] = div( x[:n], ds) 
-
-        # Solve
-        #
-        #     S * v = 0.5 * A * D^-1 * ( bx[:n] - (D2-D1)*(D1+D2)^-1 * bx[n:] 
-        #             + D1 * ( I + (D2-D1)*(D1+D2)^-1 ) * bz[:n] - D2 * ( I - (D2-D1)*(D1+D2)^-1 ) * bz[n:] )
-	  
-        blas.gemv(Asc, x, v)
-        lapack.potrs(S, v)
-	
-        # x[:n] = D^-1 * ( rhs - A'*v ).
-        blas.gemv(Asc, v, x, alpha=-1.0, beta=1.0, trans='T')
-        x[:n] = div(x[:n], ds)
-
-        # x[n:] = (D1+D2)^-1 * ( bx[n:] - D1*bz[:n]  - D2*bz[n:] ) - (D2-D1)*(D1+D2)^-1 * x[:n]         
-        x[n:] = div( x[n:] - mul(d1, z[:n]) - mul(d2, z[n:]), d1+d2 ) - mul( d3, x[:n] )
-	    
-        # z[:n] = D1^1/2 * (  x[:n] - x[n:] - bz[:n] )
-        # z[n:] = D2^1/2 * ( -x[:n] - x[n:] - bz[n:] ).
-        z[:n] = mul( W['dli'][:n],  x[:n] - x[n:] - z[:n] )
-        z[n:] = mul( W['dli'][n:], -x[:n] - x[n:] - z[n:] )
-
-    return g
-
-x = solvers.cp(F, G, h, kktsolver = Fkkt)['x'][:n]
+    # where D = 2 * (1+x.^2) ./ (1-x.^2).^2.  We apply the matrix inversion
+    # lemma and solve this as
+    #    
+    #     (A * D^-1 *A' + I) * v = A * D^-1 * bx / z[0]
+    #     D * x = bx / z[0] - A'*v.
+
+    S = matrix(0.0, (m,m))
+    Asc = matrix(0.0, (m,n))
+    v = matrix(0.0, (m,1))
+    def Fkkt(x, z, W):
+        ds = (2.0 * div(1 + x**2, (1 - x**2)**2))**-0.5
+        base.gemm(A, spdiag(ds), Asc)
+        blas.syrk(Asc, S)
+        S[::m+1] += 1.0 
+        lapack.potrf(S)
+        a = z[0]
+        def g(x, y, z):
+            x[:] = mul(x, ds) / a
+            blas.gemv(Asc, x, v)
+            lapack.potrs(S, v)
+            blas.gemv(Asc, v, x, alpha = -1.0, beta = 1.0, trans = 'T')
+            x[:] = mul(x, ds)  
+        return g
+
+    return solvers.cp(F, kktsolver = Fkkt)['x']
 \end{verbatim}
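The accompanying example script exercises the function on randomly
generated data along the following lines.
\begin{verbatim}
m, n = 200, 2000
base.setseed()
A = base.normal(m, n)
b = A * base.uniform(n, 1)
x = l2ac(A, b)
\end{verbatim}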
 
 
@@ -1139,6 +940,8 @@ adding entries with the following key values.
 \item[\code{'reltol'}] relative accuracy (default: \code{1e-6}).
 \item[\code{'feastol'}] tolerance for feasibility conditions (default:
 \code{1e-7}).
+\item[\code{'refinement'}] number of iterative refinement steps when
+ solving KKT equations (default: 1).
 \end{description}
 For example the command
 \begin{verbatim}
@@ -1161,7 +964,7 @@ following meaning in \function{nlcp()}.
 {\max\{1, \| ( f(x_0) + \ones,  
 Gx_0 + \ones-h, Ax_0-b) \|_2 \}} \leq \epsilon_\mathrm{feas}  
 \]
-where $x_0$ is the point returned by \code{F()}, and
+where \tm{\s{x}{0}} is the point returned by \code{F()}, and
 \[
 \mathrm{gap} \leq \epsilon_\mathrm{abs}
 \qquad \mbox{or} \qquad \left( c^Tx < 0, \quad
@@ -1182,7 +985,7 @@ L(x,y,z) = c^Tx + z_\mathrm{nl}^T f(x) + z_\mathrm{l}^T (Gx-h) + y^T(Ax-b).
 \]
 The functions \function{cp()}, \function{qp()}, and \function{gp()} 
 call \function{cpl()} and hence use the same stopping criteria
-(with $x_0=0$ for \function{qp()} and \function{gp()}).
+(with \tm{\s{x}{0}=0} for \function{qp()} and \function{gp()}).
 
 The MOSEK interior-point algorithm parameters are set to their default 
 values.  They can be modified by adding an entry 
diff --git a/doc/spsolvers.tex b/doc/spsolvers.tex
index b535c54..cada4df 100644
--- a/doc/spsolvers.tex
+++ b/doc/spsolvers.tex
@@ -1,9 +1,9 @@
-\chapter{Sparse Linear Equation Solvers} \label{c-spsolvers}
+\chapter{Sparse Linear Equations} \label{c-spsolvers}
 In this section we describe routines for solving sparse sets of linear 
 equations.
 
 A real symmetric or complex Hermitian sparse matrix is stored as
-an \spmtrx\ object \var{X}  of size ($n$, $n$) and an 
+an \spmtrx\ object \var{X}  of size (\tm{n}, \tm{n}) and an 
 additional character argument \code{uplo} with possible values 
 \code{'L'} and \code{'U'}.  
 If \code{uplo} is \code{'L'}, the lower triangular part
@@ -14,8 +14,8 @@ If \code{uplo} is \code{'U'}, the upper triangular part
 of \var{X} contains the upper triangular part of the
 matrix, and the lower triangular part of \var{X} is ignored.
 
-A general sparse square matrix of order $n$ is represented by an
-\spmtrx\ object of size ($n$, $n$).
+A general sparse square matrix of order \tm{n} is represented by an
+\spmtrx\ object of size (\tm{n}, \tm{n}).
 
 Dense matrices, which appear as righthand sides of equations, are 
 stored using the same conventions as in the BLAS and LAPACK modules.
@@ -36,7 +36,7 @@ ACM Transactions on Mathematical Software, 30(3), 381-388, 2004.}
 
 \begin{funcdesc}{order}{A\optional{, uplo='L'}}
 Computes the approximate minimum degree ordering of a symmetric sparse
-matrix $A$.  
+matrix \tm{A}.  
 The ordering is returned as an integer dense matrix with length equal 
 to the order of \var{A}.  Its entries specify a permutation that 
 reduces fill-in during the Cholesky factorization.
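A small sketch of the calling sequence, using a hypothetical symmetric
pattern stored by its lower triangle (the default \code{uplo = 'L'}):
\begin{verbatim}
from cvxopt.base import spmatrix
from cvxopt import amd

A = spmatrix([10., 3., 5., 4., 5., 2.], [0, 2, 3, 1, 2, 3],
    [0, 0, 0, 1, 1, 2], (4,4))
p = amd.order(A)
print p
\end{verbatim}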
@@ -165,7 +165,7 @@ coefficients.
 The numerical factorization on the other hand depends on the sparsity 
 pattern of the matrix and on its numerical values.
 
-As an example, suppose $A$ is the matrix~(\ref{e-sp-Adef}) and 
+As an example, suppose \tm{A} is the matrix~(\ref{e-sp-Adef}) and 
 \[
 B = \left[\begin{array}{rrrrr}
  4 & 3 & 0 & 0 & 0 \\
@@ -175,7 +175,7 @@ B = \left[\begin{array}{rrrrr}
  0 & 4 & 2 & 0 & 2 
  \end{array}\right],
 \]
-which differs from $A$ in its first and last entries.
+which differs from \tm{A} in its first and last entries.
 The following code computes
 \[
  x = A^{-T}B^{-1}A^{-1}\ones.
@@ -212,8 +212,8 @@ routines of the CHOLMOD package.
 It includes functions for Cholesky factorization of sparse positive
 definite matrices, and for solving sparse sets of linear equations with 
 positive definite matrices. 
-The routines can also be used for computing {LDL$\mathrm{{}^T}$} 
-(or {LDL$\mathrm{{}^H}$}) factorizations of symmetric indefinite matrices 
+The routines can also be used for computing $\mathrm{LDL^T}$ 
+(or $\mathrm{LDL^H}$) factorizations of symmetric indefinite matrices 
 (with L unit lower-triangular and D diagonal and nonsingular) if 
 such a factorization exists.  
 
@@ -226,7 +226,7 @@ Solves
 \[
  AX = B 
 \]
-with $A$ sparse and real symmetric or complex Hermitian.
+with \tm{A} sparse and real symmetric or complex Hermitian.
 \var{B} is a dense matrix of the same type as \var{A}.  On exit it 
 is overwritten with the solution.
 The argument \var{p} is an integer matrix with length equal to the 
@@ -292,9 +292,9 @@ and
 \BEQ \label{e-chol-ldl}
    PAP^T = LDL^T, \qquad PAP^T = LDL^H,
 \EEQ
-where $P$ is a permutation matrix, $L$ is lower triangular 
+where \tm{P} is a permutation matrix, \tm{L} is lower triangular 
 (unit lower triangular in the second factorization), and 
-$D$ is nonsingular diagonal.  The type of factorization depends 
+\tm{D} is nonsingular diagonal.  The type of factorization depends 
 on the value of \code{options['supernodal']} (see below).
 
 If \var{uplo} is \code{'L'}, only the lower triangular part of \var{A} 
@@ -351,7 +351,7 @@ factorization~(\ref{e-chol-ll}) or~(\ref{e-chol-ldl}) computed by
 \end{center}
 
 (If \var{F} is a Cholesky factorization of the form~(\ref{e-chol-ll}), 
-$D$ is an identity matrix in this table. 
+\tm{D} is an identity matrix in this table. 
 If \var{A} is complex, $L^T$ should be replaced by $L^H$.)
 
 The matrix \var{B} is a dense \dtc\ or \ztc\ matrix, with the same type
@@ -378,10 +378,10 @@ For the same example as above:
 \end{verbatim}
 
 \begin{funcdesc}{diag}{F}
-Returns the diagonal elements of the Cholesky factor $L$ 
+Returns the diagonal elements of the Cholesky factor \tm{L} 
 in~(\ref{e-chol-ll}), as a dense matrix of the same type as \var{A}.
 Note that this only applies to Cholesky factorizations.
-The matrix $D$ in an LDL$\mathrm{{}^T}$ factorization can be 
+The matrix \tm{D} in an $\mathrm{LDL^T}$ factorization can be 
 retrieved via \function{cholmod.solve()} with \var{sys} equal to 6.
 \end{funcdesc}
 
@@ -438,20 +438,20 @@ factorization.  We consider the problem
  \mbox{subject to} & K_{ij}=0,\quad (i,j) \not \in S.
  \end{array}
 \EEQ
-The optimization variable is a symmetric matrix $K$ of order $n$
+The optimization variable is a symmetric matrix \tm{K} of order \tm{n}
 and the domain of the problem is the set of positive definite matrices.
-The matrix $Y$ and the index set  $S$ are given.  We assume that all 
-the diagonal positions are included in $S$.
+The matrix \tm{Y} and the index set  \tm{S} are given.  We assume that all 
+the diagonal positions are included in \tm{S}.
 This problem arises in maximum likelihood estimation of the covariance
 matrix of a zero-mean normal distribution, with constraints 
 that specify that pairs of variables are conditionally independent.
 
-We can express $K$ as
+We can express \tm{K} as
 \[
    K(x) = E_1\diag(x)E_2^T+E_2\diag(x)E_1^T
 \]
-where $x$ are the nonzero elements in the lower triangular part
-of $K$, with the diagonal elements scaled by 1/2,
+where the vector \tm{x} contains the nonzero elements in the lower triangular part
+of \tm{K}, with the diagonal elements scaled by 1/2,
 and
 \[
  E_1 = \left[ \begin{array}{cccc}
@@ -459,8 +459,8 @@ and
  E_2 = \left[ \begin{array}{cccc}
   e_{j_1} & e_{j_2} & \cdots & e_{j_q} \end{array}\right], 
 \]
-where ($i_k$, $j_k$) are the positions of the nonzero 
-entries in the lower-triangular part of $K$.
+where (\tm{\s{i}{k}}, \tm{\s{j}{k}}) are the positions of the nonzero 
+entries in the lower-triangular part of \tm{K}.
 With this notation, we can solve problem~(\ref{e-covsel}) by solving
 the unconstrained problem
 \[
diff --git a/examples/book/chap6/basispursuit b/examples/book/chap6/basispursuit
index b6b2a48..c98ce93 100755
--- a/examples/book/chap6/basispursuit
+++ b/examples/book/chap6/basispursuit
@@ -64,40 +64,27 @@ y = mul( 1.0 + 0.5 * sin(11*ts), sin(30 * sin(5*ts)))
 #
 #     minimize    ||A*x - y||_2^2 + ||x||_1
 #
-#     minimize    ||A*x - y||_2^2 + 1'*u
+#     minimize    x'*A'*A*x - 2.0*y'*A*x + 1'*u
 #     subject to  -u <= x <= u
 #
 # Variables x (n),  u (n).
 
 m, n = A.size
 r = matrix(0.0, (m,1))
-gradf = matrix(1.0, (1,2*n))
-w = matrix(0.0, (m, 1))
 
-def F(x = None, z = None):
+q = matrix(1.0, (2*n,1))
+blas.gemv(A, y, q, alpha = -2.0, trans = 'T')
+
+
+def P(u, v, alpha = 1.0, beta = 0.0):
     """
-    Function and gradient evaluation of
+    Matrix-vector product
 
-	f = || A*x[:n] - y ||_2^2 +  sum(x[n:])
+	v := alpha * 2*A'*A * u + beta * v
     """
 
-    nvars = 2*n
-    if x is None: 
-        return 0, matrix(0.0, (nvars,1))
-    blas.copy(y, r)
-    blas.gemv(A, x, r, beta=-1.0)      # r = A*x[:n] - y
-    f = blas.nrm2(r)**2 + blas.asum(x, offset=n)
-    blas.gemv(A, r, gradf, alpha = 2.0, trans = 'T') # gradf = [2*A'*r; 1.0]
-    if z is None:
-        return f, +gradf
-    else:
-        def H(u, v, alpha = 1.0, beta = 0.0):
-            # u := alpha * [2.0 * z[0] * A'*A, 0; 0, 0] * u + beta * v 
-            blas.scal(beta, v)
-            blas.gemv(A, u, w)
-            blas.gemv(A, w, v, alpha = 2.0 * z[0] * alpha, beta = 1.0, 
-                trans = 'T')
-        return f, +gradf, H
+    blas.gemv(A, u, r)      
+    blas.gemv(A, r, v, alpha = 2.0*alpha, beta = beta, trans = 'T') 
         
 
 def G(u, v, alpha = 1.0, beta = 0.0, trans = 'N'):
@@ -116,16 +103,16 @@ h = matrix(0.0, (2*n,1))
 
 # Customized solver for the KKT system 
 #
-#     [  2.0*z[0]*A'*A  0    I      -I     ] [x[:n] ]     [bx[:n] ]
-#     [  0              0   -I      -I     ] [x[n:] ]  =  [bx[n:] ].
-#     [  I             -I   -D1^-1   0     ] [z[:n] ]     [bz[:n] ]
-#     [ -I             -I    0      -D2^-1 ] [z[n:] ]     [bz[n:] ]
+#     [  2.0*A'*A   0    I      -I     ] [x[:n] ]     [bx[:n] ]
+#     [  0          0   -I      -I     ] [x[n:] ]  =  [bx[n:] ].
+#     [  I         -I   -D1^-1   0     ] [z[:n] ]     [bz[:n] ]
+#     [ -I         -I    0      -D2^-1 ] [z[n:] ]     [bz[n:] ]
 #
-# where D1 = W['dli'][:n]**2,  D2 = W['dli'][:n]**2.
+# where D1 = W['di'][:n]**2,  D2 = W['di'][n:]**2.
 #    
 # We first eliminate z and x[n:]:
 #
-#     ( 2*z[0]*A'*A + 4*D1*D2*(D1+D2)^-1 ) * x[:n] = 
+#     ( 2*A'*A + 4*D1*D2*(D1+D2)^-1 ) * x[:n] = 
 #         bx[:n] - (D2-D1)*(D1+D2)^-1 * bx[n:] 
 #         + D1 * ( I + (D2-D1)*(D1+D2)^-1 ) * bz[:n]
 #         - D2 * ( I - (D2-D1)*(D1+D2)^-1 ) * bz[n:]           
@@ -139,34 +126,34 @@ h = matrix(0.0, (2*n,1))
 #
 # The first equation has the form
 #
-#     (z[0]*A'*A + D)*x[:n]  =  rhs
+#     (A'*A + D)*x[:n]  =  rhs
 #
 # and is equivalent to
 #
-#     [ D    A'       ] [ x:n] ]  = [ rhs ]
-#     [ A   -1/z[0]*I ] [ v    ]    [ 0   ].
+#     [ D    A' ] [ x:n] ]  = [ rhs ]
+#     [ A   -I  ] [ v    ]    [ 0   ].
 #
 # It can be solved as 
 #
-#     ( A*D^-1*A' + 1/z[0]*I ) * v = A * D^-1 * rhs
+#     ( A*D^-1*A' + I ) * v = A * D^-1 * rhs
 #     x[:n] = D^-1 * ( rhs - A'*v ).
 
 S = matrix(0.0, (m,m))
 Asc = matrix(0.0, (m,n))
 v = matrix(0.0, (m,1))
 
-def Fkkt(x, z, W):
+def Fkkt(W):
 
     # Factor 
     #
-    #     S = A*D^-1*A' + 1/z[0]*I 
+    #     S = A*D^-1*A' + I 
     #
-    # where D = 2*D1*D2*(D1+D2)^-1, D1 = dl[:n]**2, D2 = dl[n:]**2.
+    # where D = 2*D1*D2*(D1+D2)^-1, D1 = d[:n]**2, D2 = d[n:]**2.
 
-    d1, d2 = W['dli'][:n]**2, W['dli'][n:]**2    
+    d1, d2 = W['di'][:n]**2, W['di'][n:]**2    
 
     # ds is square root of diagonal of D
-    ds = sqrt(2.0) * div( mul( W['dli'][:n], W['dli'][n:]), sqrt(d1+d2) )
+    ds = sqrt(2.0) * div( mul( W['di'][:n], W['di'][n:]), sqrt(d1+d2) )
     d3 =  div(d2 - d1, d1 + d2)
  
     # Asc = A*diag(d)^-1/2
@@ -174,9 +161,9 @@ def Fkkt(x, z, W):
     for k in xrange(m):
         blas.tbsv(ds, Asc, n=n, k=0, ldA=1, incx=m, offsetx=k)
 
-    # S = 1/z[0]*I + A * D^-1 * A'
+    # S = I + A * D^-1 * A'
     blas.syrk(Asc, S)
-    S[::m+1] += 1.0 / z[0] 
+    S[::m+1] += 1.0 
     lapack.potrf(S)
 
     def g(x, y, z):
@@ -207,12 +194,12 @@ def Fkkt(x, z, W):
 	    
         # z[:n] = D1^1/2 * (  x[:n] - x[n:] - bz[:n] )
         # z[n:] = D2^1/2 * ( -x[:n] - x[n:] - bz[n:] ).
-        z[:n] = mul( W['dli'][:n],  x[:n] - x[n:] - z[:n] ) 
-        z[n:] = mul( W['dli'][n:], -x[:n] - x[n:] - z[n:] ) 
+        z[:n] = mul( W['di'][:n],  x[:n] - x[n:] - z[:n] ) 
+        z[n:] = mul( W['di'][n:], -x[:n] - x[n:] - z[n:] ) 
 
     return g
 
-x = solvers.cp(F, G, h, kktsolver = Fkkt)['x'][:n]
+x = solvers.coneqp(P, q, G, h, kktsolver = Fkkt)['x'][:n]
 
 I = [ k for k in xrange(n) if abs(x[k]) > 1e-2 ]
 xls = +y
diff --git a/examples/book/chap6/cvxfit b/examples/book/chap6/cvxfit
index 6220be2..6d1958a 100755
--- a/examples/book/chap6/cvxfit
+++ b/examples/book/chap6/cvxfit
@@ -13,9 +13,8 @@ data = load(open('cvxfit.bin','r'))
 u, y = data['u'], data['y']
 m = len(u)
 
-# minimize     (1/2) * || yhat - y ||_2^2
-# subject to   yhat[j] >= yhat[i] + g[i]' * (u[j] - u[i]), 
-#                  j, i = 0,...,m-1  
+# minimize    (1/2) * || yhat - y ||_2^2
+# subject to  yhat[j] >= yhat[i] + g[i]' * (u[j] - u[i]), j, i = 0,...,m-1
 #
 # Variables  yhat (m), g (m).
 
diff --git a/examples/book/chap6/tv b/examples/book/chap6/tv
index 83c6a4b..aae9a11 100755
--- a/examples/book/chap6/tv
+++ b/examples/book/chap6/tv
@@ -106,28 +106,20 @@ def tv(delta):
     Variables x (n), y (n-1).
     """
 
-    def F(x = None, z = None):
-        """
-        Function and gradient evaluation of 
+    q = matrix(0.0, (2*n-1,1))
+    q[:n] = -corr  
+    q[n:] = delta
 
-            f = 1/2 * || x[:n]-corr ||_2^2 + delta * sum(x[n:]).
+    def P(u, v, alpha = 1.0, beta = 0.0):
+        """
+            v := alpha*u + beta*v
         """
-    
-        nvars = 2*n - 1
-        if x is None: 
-            return 0, matrix(0.0, (nvars,1))
-        f = 0.5 * blas.nrm2( x[:n]-corr )**2 + delta * sum(x[n:])
-        gradf = matrix(0.0, (1,nvars))
-        gradf[:n] = x[:n]-corr
-        gradf[n:] = delta
-        if z is None:
-            return f, gradf 
-        else:
-            H = spmatrix(1.0, range(n), range(n), (nvars, nvars))
-            return f, gradf, H
+
+        v *= beta
+        v[:n] += alpha*u[:n]
 
 
-    def G(u, v, alpha=1.0, beta=0.0, trans='N'):
+    def G(u, v, alpha = 1.0, beta = 0.0, trans = 'N'):
         """
            v := alpha*[D, -I;  -D, -I] * u + beta * v  (trans = 'N')
            v := alpha*[D, -I;  -D, -I]' * u + beta * v  (trans = 'T')
@@ -152,42 +144,43 @@ def tv(delta):
 
     # Customized solver for KKT system with coefficient
     #
-    #     [  z[0]*I  0    D'   -D' ] 
-    #     [  0       0   -I    -I  ] 
-    #     [  D      -I   -D1    0  ] 
-    #     [ -D      -I    0    -D2 ].
+    #     [  I    0    D'   -D' ] 
+    #     [  0    0   -I    -I  ] 
+    #     [  D   -I   -D1    0  ] 
+    #     [ -D   -I    0    -D2 ].
      
     # Diagonal and subdiagonal.
     Sd = matrix(0.0, (n,1))
     Se = matrix(0.0, (n-1,1))
 
-    def Fkkt(x, z, W):
+    def Fkkt(W):
         """
         Factor the tridiagonal matrix
 
-             S = z[0]*I + 4.0 * D' * diag( d1.*d2./(d1+d2) ) * D 
+             S = I + 4.0 * D' * diag( d1.*d2./(d1+d2) ) * D 
 
-        with d1 = W['dli'][:n-1]**2 = diag(D1^-1) 
-        d2 = W['dli'][n-1:]**2 = diag(D2^-1).
+        with d1 = W['di'][:n-1]**2 = diag(D1^-1) 
+        d2 = W['di'][n-1:]**2 = diag(D2^-1).
         """
 
-        d1 = W['dli'][:n-1]**2
-        d2 = W['dli'][n-1:]**2
+        d1 = W['di'][:n-1]**2
+        d2 = W['di'][n-1:]**2
         d = 4.0*div( mul(d1,d2), d1+d2) 
-        Sd[:] = z[0]
+        Sd[:] = 1.0
         Sd[:n-1] += d
         Sd[1:] += d
         Se[:] = -d
         lapack.pttrf(Sd, Se)
+
         def g(x, y, z):
 
             """
             Solve 
 
-                [  z[0]*I  0   D'  -D' ] [x[:n]   ]    [bx[:n]   ]
-                [  0       0  -I   -I  ] [x[n:]   ] =  [bx[n:]   ]
-                [  D      -I  -D1   0  ] [z[:n-1] ]    [bz[:n-1] ]
-                [ -D      -I   0   -D2 ] [z[n-1:] ]    [bz[n-1:] ].
+                [  I   0   D'  -D' ] [x[:n]   ]    [bx[:n]   ]
+                [  0   0  -I   -I  ] [x[n:]   ] =  [bx[n:]   ]
+                [  D  -I  -D1   0  ] [z[:n-1] ]    [bz[:n-1] ]
+                [ -D  -I   0   -D2 ] [z[n-1:] ]    [bz[n-1:] ].
 
             First solve
                  
@@ -224,13 +217,12 @@ def tv(delta):
 
             # z[:n-1] = d1 .* (D*x[:n] - x[n:] - bz[:n-1])
             # z[n-1:] = d2 .* (-D*x[:n] - x[n:] - bz[n-1:])
-            z[:n-1] = mul(W['dli'][:n-1], u - x[n:] - z[:n-1])
-            z[n-1:] = mul(W['dli'][n-1:], -u - x[n:] - z[n-1:])
+            z[:n-1] = mul(W['di'][:n-1], u - x[n:] - z[:n-1])
+            z[n-1:] = mul(W['di'][n-1:], -u - x[n:] - z[n-1:])
 
         return g
 
-    solvers.options['feastol'] = 1e-5
-    return solvers.cp(F, G, h, kktsolver = Fkkt)['x'][:n]
+    return solvers.coneqp(P, q, G, h, kktsolver = Fkkt)['x'][:n]
 
 
 nopts = 15
diff --git a/examples/doc/chap4/acent b/examples/doc/chap4/acent
index 839bd85..9923441 100755
--- a/examples/doc/chap4/acent
+++ b/examples/doc/chap4/acent
@@ -21,7 +21,6 @@ def acent(A,b):
     m, n = A.size
     x = matrix(0.0, (n,1))
     H = matrix(0.0, (n,n))
-    g = matrix(0.0, (n,1))
 
     for iter in xrange(MAXITERS):
         
diff --git a/examples/doc/chap8/conelp b/examples/doc/chap8/conelp
new file mode 100755
index 0000000..7caa300
--- /dev/null
+++ b/examples/doc/chap8/conelp
@@ -0,0 +1,21 @@
+#!/usr/bin/python
+# 
+# The small linear cone program of section 8.1
+
+from cvxopt.base import matrix
+from cvxopt import solvers
+
+c = matrix([-6., -4., -5.])
+G = matrix([[ 16., 7.,  24.,  -8.,   8.,  -1.,  0., -1.,  0.,  0.,   7.,  
+    -5.,   1.,  -5.,   1.,  -7.,   1.,   -7.,  -4.],
+            [-14., 2.,   7., -13., -18.,   3.,  0.,  0., -1.,  0.,   3.,  
+    13.,  -6.,  13.,  12., -10.,  -6.,  -10., -28.],
+            [  5., 0., -15.,  12.,  -6.,  17.,  0.,  0.,  0., -1.,   9.,   
+     6.,  -6.,   6.,  -7.,  -7.,  -6.,   -7., -11.]])
+h = matrix( [ -3., 5.,  12.,  -2., -14., -13., 10.,  0.,  0.,  0.,  68., 
+    -30., -19., -30.,  99.,  23., -19.,   23.,  10.] )
+dims = {'l': 2, 'q': [4, 4], 's': [3]}
+sol = solvers.conelp(c, G, h, dims)
+print "\nStatus:", sol['status']
+print "\nx =\n\n", sol['x']
+print "\nz =\n\n", sol['z']
diff --git a/examples/doc/chap8/coneqp b/examples/doc/chap8/coneqp
new file mode 100755
index 0000000..81f9bf6
--- /dev/null
+++ b/examples/doc/chap8/coneqp
@@ -0,0 +1,23 @@
+#!/usr/bin/python
+# 
+# The quadratic cone program of section 8.2
+# 
+# minimize   (1/2)*x'*A'*A*x - b'*A*x
+# subject to x >= 0
+#            ||x||_2 <= 1
+
+from cvxopt import base, solvers
+from cvxopt.base import matrix
+A = matrix([ [ .3, -.4,  -.2,  -.4,  1.3 ], 
+             [ .6, 1.2, -1.7,   .3,  -.3 ],
+             [-.3,  .0,   .6, -1.2, -2.0 ] ])
+b = matrix([ 1.5, .0, -1.2, -.7, .0])
+m, n = A.size
+
+I = matrix(0.0, (n,n))
+I[::n+1] = 1.0
+G = matrix([-I, matrix(0.0, (1,n)), I])
+h = matrix(n*[0.0] + [1.0] + n*[0.0])
+dims = {'l': n, 'q': [n+1], 's': []}
+x = solvers.coneqp(A.T*A, -A.T*b, G, h, dims)['x']
+print "\nx = \n\n", x
diff --git a/examples/doc/chap8/l1 b/examples/doc/chap8/l1
index 08932cb..e882728 100755
--- a/examples/doc/chap8/l1
+++ b/examples/doc/chap8/l1
@@ -1,11 +1,10 @@
 #!/usr/bin/python
 
-# The 1-norm approximation example of section 8.5.  
+# The 1-norm approximation example of section 8.7.  
 
-from cvxopt import base, base, blas, lapack, solvers
+from cvxopt import base, blas, lapack, solvers
 from cvxopt.base import matrix, spdiag, mul, div
 from math import sqrt
-solvers.options['refinement'] = False
 
 def l1(P, q):
 
@@ -124,6 +123,6 @@ def l1(P, q):
     return sol['x'][:n],  sol['z'][m:] - sol['z'][:m]    
 
 base.setseed()
-m, n = 500, 100
+m, n = 1000, 100
 P, q = base.normal(m,n), base.normal(m,1)
 x, y = l1(P,q)
diff --git a/examples/doc/chap9/l1regls b/examples/doc/chap8/l1regls
similarity index 56%
rename from examples/doc/chap9/l1regls
rename to examples/doc/chap8/l1regls
index 05b2d86..41a60a6 100755
--- a/examples/doc/chap9/l1regls
+++ b/examples/doc/chap8/l1regls
@@ -1,41 +1,30 @@
 #!/usr/bin/python
 
-# The 1-norm regularized least-squares example of section 9.5.
+# The 1-norm regularized least-squares example of section 8.7.
 
 from cvxopt.base import matrix, spdiag, mul, div
-from cvxopt import base, blas, lapack, solvers, base
+from cvxopt import base, blas, lapack, solvers 
 import math
 
 def l1regls(A, y):
+    """
+    
+    Returns the solution of l1-norm regularized least-squares problem
+  
+        minimize || A*x - y ||_2^2  + || x ||_1.
+
+    """
 
     m, n = A.size
-    def F(x = None, z = None):
-        """
-        Function and gradient evaluation of
+    q = matrix(1.0, (2*n,1))
+    q[:n] = -2.0 * A.T * y
 
-            f = || A*x[:n] - y ||_2^2 +  sum(x[n:])
+    def P(u, v, alpha = 1.0, beta = 0.0 ):
         """
-
-        nvars = 2*n
-        if x is None: return 0, matrix(0.0, (nvars,1))
-        r = A*x[:n] - y
-        f = blas.nrm2(r)**2 + sum(x[n:])
-        gradf = matrix(1.0, (1,2*n))
-        blas.gemv(A, r, gradf, alpha=2.0, trans='T')  
-        if z is None:
-            return f, gradf
-        else:
-            def Hf(u, v, alpha = 1.0, beta = 0.0):
-               # Hf evaluates
-               #
-               #     v := alpha * [2.0*z[0] * A'*A, 0; 0, 0] * u + beta * v 
-               #
-               blas.scal(beta, v)
-               w = matrix(0.0, (m, 1))    
-               blas.gemv(A, u, w)
-               blas.gemv(A, w, v, alpha = 2.0 * z[0] * alpha, beta = 1.0, 
-                   trans = 'T')
-            return f, gradf, Hf
+            v := alpha * 2.0 * [ A'*A, 0; 0, 0 ] * u + beta * v 
+        """
+        v *= beta
+        v[:n] += alpha * 2.0 * A.T * (A * u[:n])
 
 
     def G(u, v, alpha=1.0, beta=0.0, trans='N'):
@@ -52,16 +41,16 @@ def l1regls(A, y):
 
     # Customized solver for the KKT system 
     #
-    #     [  2.0*z[0]*A'*A  0    I      -I     ] [x[:n] ]     [bx[:n] ]
-    #     [  0              0   -I      -I     ] [x[n:] ]  =  [bx[n:] ].
-    #     [  I             -I   -D1^-1   0     ] [zl[:n]]     [bzl[:n]]
-    #     [ -I             -I    0      -D2^-1 ] [zl[n:]]     [bzl[n:]]
+    #     [  2.0*A'*A  0    I      -I     ] [x[:n] ]     [bx[:n] ]
+    #     [  0         0   -I      -I     ] [x[n:] ]  =  [bx[n:] ].
+    #     [  I        -I   -D1^-1   0     ] [zl[:n]]     [bzl[:n]]
+    #     [ -I        -I    0      -D2^-1 ] [zl[n:]]     [bzl[n:]]
     #
-    # where D1 = W['dli'][:n]**2, D2 = W['dli'][:n]**2.
+    # where D1 = W['di'][:n]**2, D2 = W['di'][n:]**2.
     #    
     # We first eliminate zl and x[n:]:
     #
-    #     ( 2*z[0]*A'*A + 4*D1*D2*(D1+D2)^-1 ) * x[:n] = 
+    #     ( 2*A'*A + 4*D1*D2*(D1+D2)^-1 ) * x[:n] = 
     #         bx[:n] - (D2-D1)*(D1+D2)^-1 * bx[n:] + 
     #         D1 * ( I + (D2-D1)*(D1+D2)^-1 ) * bzl[:n] - 
     #         D2 * ( I - (D2-D1)*(D1+D2)^-1 ) * bzl[n:]           
@@ -74,43 +63,43 @@ def l1regls(A, y):
     #
     # The first equation has the form
     #
-    #     (z[0]*A'*A + D)*x[:n]  =  rhs
+    #     (A'*A + D)*x[:n]  =  rhs
     #
     # and is equivalent to
     #
-    #     [ D    A'       ] [ x:n] ]  = [ rhs ]
-    #     [ A   -1/z[0]*I ] [ v    ]    [ 0   ].
+    #     [ D    A' ] [ x:n] ]  = [ rhs ]
+    #     [ A   -I  ] [ v    ]    [ 0   ].
     #
     # It can be solved as 
     #
-    #     ( A*D^-1*A' + 1/z[0]*I ) * v = A * D^-1 * rhs
+    #     ( A*D^-1*A' + I ) * v = A * D^-1 * rhs
     #     x[:n] = D^-1 * ( rhs - A'*v ).
 
     S = matrix(0.0, (m,m))
     Asc = matrix(0.0, (m,n))
     v = matrix(0.0, (m,1))
 
-    def Fkkt(x, z, W):
+    def Fkkt(W):
 
         # Factor 
         #
-        #     S = A*D^-1*A' + 1/z[0]*I 
+        #     S = A*D^-1*A' + I 
         #
-        # where D = 2*D1*D2*(D1+D2)^-1, D1 = dl[:n]**-2, D2 = dl[n:]**-2.
+        # where D = 2*D1*D2*(D1+D2)^-1, D1 = d[:n]**-2, D2 = d[n:]**-2.
 
-        d1, d2 = W['dli'][:n]**2, W['dli'][n:]**2
+        d1, d2 = W['di'][:n]**2, W['di'][n:]**2
 
         # ds is square root of diagonal of D
-        ds = math.sqrt(2.0) * div( mul( W['dli'][:n], W['dli'][n:]), 
+        ds = math.sqrt(2.0) * div( mul( W['di'][:n], W['di'][n:]), 
             base.sqrt(d1+d2) )
         d3 =  div(d2 - d1, d1 + d2)
      
         # Asc = A*diag(d)^-1/2
         Asc = A * spdiag(ds**-1)
 
-        # S = 1/z[0]*I + A * D^-1 * A'
+        # S = I + A * D^-1 * A'
         blas.syrk(Asc, S)
-        S[::m+1] += 1.0 / z[0] 
+        S[::m+1] += 1.0 
         lapack.potrf(S)
 
         def g(x, y, z):
@@ -141,12 +130,12 @@ def l1regls(A, y):
                 
             # zl[:n] = D1^1/2 * (  x[:n] - x[n:] - bzl[:n] )
             # zl[n:] = D2^1/2 * ( -x[:n] - x[n:] - bzl[n:] ).
-            z[:n] = mul( W['dli'][:n],  x[:n] - x[n:] - z[:n] ) 
-            z[n:] = mul( W['dli'][n:], -x[:n] - x[n:] - z[n:] ) 
+            z[:n] = mul( W['di'][:n],  x[:n] - x[n:] - z[:n] ) 
+            z[n:] = mul( W['di'][n:], -x[:n] - x[n:] - z[n:] ) 
 
         return g
 
-    return solvers.cp(F, G, h, kktsolver = Fkkt)['x'][:n]
+    return solvers.coneqp(P, q, G, h, kktsolver = Fkkt)['x'][:n]
 
 m, n = 100, 1000
 base.setseed()
diff --git a/examples/doc/chap8/mcsdp b/examples/doc/chap8/mcsdp
index 5051d7c..e9cdbb7 100755
--- a/examples/doc/chap8/mcsdp
+++ b/examples/doc/chap8/mcsdp
@@ -1,8 +1,8 @@
 #!/usr/bin/python
 
-# The SDP example of section 8.5.
+# The SDP example of section 8.7.
 
-from cvxopt import base, blas, lapack, base, solvers
+from cvxopt import base, blas, lapack, solvers
 from cvxopt.base import matrix
 
 def mcsdp(w):
diff --git a/examples/doc/chap9/portfolio b/examples/doc/chap8/portfolio
similarity index 97%
rename from examples/doc/chap9/portfolio
rename to examples/doc/chap8/portfolio
index 1b936f4..9f14cec 100755
--- a/examples/doc/chap9/portfolio
+++ b/examples/doc/chap8/portfolio
@@ -1,6 +1,6 @@
 #!/usr/bin/python
  
-# The risk-return trade-off of section 9.3.
+# The risk-return trade-off of section 8.4.
 
 from math import sqrt
 from cvxopt.base import matrix
diff --git a/examples/doc/chap8/qcl1 b/examples/doc/chap8/qcl1
index dcbbd75..ce35ba8 100755
--- a/examples/doc/chap8/qcl1
+++ b/examples/doc/chap8/qcl1
@@ -1,6 +1,6 @@
 #!/usr/bin/python
 
-# The quadratically constrained 1-norm minimization example of section 8.5.
+# The quadratically constrained 1-norm minimization example of section 8.7.
 
 from cvxopt import base, blas, lapack, solvers
 from cvxopt.base import matrix, mul, div
diff --git a/examples/doc/chap8/sdp b/examples/doc/chap8/sdp
index a1c3751..64b9153 100755
--- a/examples/doc/chap8/sdp
+++ b/examples/doc/chap8/sdp
@@ -1,6 +1,6 @@
 #!/usr/bin/python
 
-# The small SDP of section 8.4.  
+# The small SDP of section 8.6.  
 
 from cvxopt.base import matrix  
 from cvxopt import solvers  
diff --git a/examples/doc/chap8/socp b/examples/doc/chap8/socp
index 41a1357..eb1fd50 100755
--- a/examples/doc/chap8/socp
+++ b/examples/doc/chap8/socp
@@ -1,6 +1,6 @@
 #!/usr/bin/python
 
-# The small SOCP of section 8.3.  
+# The small SOCP of section 8.5.  
 
 from cvxopt.base import matrix  
 from cvxopt import solvers  
diff --git a/examples/doc/chap9/gp b/examples/doc/chap9/gp
index a0b4fee..229dc64 100755
--- a/examples/doc/chap9/gp
+++ b/examples/doc/chap9/gp
@@ -1,6 +1,6 @@
 #!/usr/bin/python
 
-# The small GP of section 9.4
+# The small GP of section 9.3
 
 from cvxopt.base import matrix, log, exp  
 from cvxopt import solvers  
diff --git a/examples/doc/chap9/l2ac b/examples/doc/chap9/l2ac
new file mode 100755
index 0000000..6ada427
--- /dev/null
+++ b/examples/doc/chap9/l2ac
@@ -0,0 +1,79 @@
+#!/usr/bin/python
+
+# The example of section 9.4.  
+
+from cvxopt.base import matrix, spdiag, mul, div, log 
+from cvxopt import base, blas, lapack, solvers
+
+
+def l2ac(A, b):
+    """
+    Solves
+
+        minimize  (1/2) * ||A*x-b||_2^2 - sum log (1-xi^2)
+
+    assuming A is m x n with m << n.
+    """
+
+    m, n = A.size
+    def F(x = None, z = None):
+        if x is None: 
+            return 0, matrix(0.0, (n,1))
+        if max(abs(x)) >= 1.0: 
+            return None 
+        r = + b
+        blas.gemv(A, x, r, beta = -1.0)
+        w = x**2
+        f = 0.5 * blas.nrm2(r)**2  - sum(log(1-w))
+        gradf = div(x, 1.0 - w)
+        blas.gemv(A, r, gradf, trans = 'T', beta = 2.0)
+        if z is None:
+            return f, gradf.T
+        else:
+            def Hf(u, v, alpha = 1.0, beta = 0.0):
+               """
+                   v := alpha * (A'*A*u + 2*((1+w)./(1-w).^2).*u) + beta * v
+               """
+               v *= beta
+               v += 2.0 * alpha * mul(div(1.0+w, (1.0-w)**2), u)
+               blas.gemv(A, u, r)
+               blas.gemv(A, r, v, alpha = alpha, beta = 1.0, trans = 'T')
+            return f, gradf.T, Hf
+
+
+    # Custom solver for the Newton system
+    #
+    #     z[0]*(A'*A + D)*x = bx
+    #
+    # where D = 2 * (1+x.^2) ./ (1-x.^2).^2.  We apply the matrix inversion
+    # lemma and solve this as
+    #    
+    #     (A * D^-1 *A' + I) * v = A * D^-1 * bx / z[0]
+    #     D * x = bx / z[0] - A'*v.
+
+    S = matrix(0.0, (m,m))
+    Asc = matrix(0.0, (m,n))
+    v = matrix(0.0, (m,1))
+    def Fkkt(x, z, W):
+        ds = (2.0 * div(1 + x**2, (1 - x**2)**2))**-0.5
+        base.gemm(A, spdiag(ds), Asc)
+        blas.syrk(Asc, S)
+        S[::m+1] += 1.0 
+        lapack.potrf(S)
+        a = z[0]
+        def g(x, y, z):
+            x[:] = mul(x, ds) / a
+            blas.gemv(Asc, x, v)
+            lapack.potrs(S, v)
+            blas.gemv(Asc, v, x, alpha = -1.0, beta = 1.0, trans = 'T')
+            x[:] = mul(x, ds)  
+        return g
+
+    return solvers.cp(F, kktsolver = Fkkt)['x']
+
+m, n = 200, 2000
+base.setseed()
+A = base.normal(m,n)
+x = base.uniform(n,1)
+b = A*x
+x = l2ac(A, b)
diff --git a/src/C/amd.c b/src/C/amd.c
index 5298d8f..b7fddea 100644
--- a/src/C/amd.c
+++ b/src/C/amd.c
@@ -1,7 +1,7 @@
 /*
- * Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+ * Copyright 2004-2008 J. Dahl and L. Vandenberghe.
  *
- * This file is part of CVXOPT version 0.9.2.
+ * This file is part of CVXOPT version 0.9.3.
  *
  * CVXOPT is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/src/C/base.c b/src/C/base.c
index 4de92bd..25318cf 100644
--- a/src/C/base.c
+++ b/src/C/base.c
@@ -1,7 +1,7 @@
 /*
- * Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+ * Copyright 2004-2008 J. Dahl and L. Vandenberghe.
  *
- * This file is part of CVXOPT version 0.9.2.
+ * This file is part of CVXOPT version 0.9.3.
  *
  * CVXOPT is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/src/C/blas.c b/src/C/blas.c
index 31ce43a..8798cc0 100644
--- a/src/C/blas.c
+++ b/src/C/blas.c
@@ -1,7 +1,7 @@
 /*
- * Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+ * Copyright 2004-2008 J. Dahl and L. Vandenberghe.
  *
- * This file is part of CVXOPT version 0.9.2.
+ * This file is part of CVXOPT version 0.9.3.
  *
  * CVXOPT is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/src/C/cholmod.c b/src/C/cholmod.c
index b2a2e39..9b60615 100644
--- a/src/C/cholmod.c
+++ b/src/C/cholmod.c
@@ -1,7 +1,7 @@
 /*
- * Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+ * Copyright 2004-2008 J. Dahl and L. Vandenberghe.
  *
- * This file is part of CVXOPT version 0.9.2.
+ * This file is part of CVXOPT version 0.9.3.
  *
  * CVXOPT is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/src/C/cvxopt.h b/src/C/cvxopt.h
index d5748c2..7d85a07 100644
--- a/src/C/cvxopt.h
+++ b/src/C/cvxopt.h
@@ -1,7 +1,7 @@
 /*
- * Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+ * Copyright 2004-2008 J. Dahl and L. Vandenberghe.
  *
- * This file is part of CVXOPT version 0.9.2.
+ * This file is part of CVXOPT version 0.9.3.
  *
  * CVXOPT is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -84,7 +84,7 @@ static void **cvxopt_API;
 #define SpMatrix_NewFromSpMatrix \
   (*(spmatrix * (*)(spmatrix *, int)) cvxopt_API[5])
 #define SpMatrix_NewFromIJV \
-  (*(spmatrix * (*)(matrix *, matrix *, matrix *, int, int, int, int)) \
+  (*(spmatrix * (*)(matrix *, matrix *, matrix *, int, int, int)) \
       cvxopt_API[6])
 #define SpMatrix_Check (*(int * (*)(void *)) cvxopt_API[7])
 
diff --git a/src/C/dense.c b/src/C/dense.c
index f5147cd..49d9bb7 100644
--- a/src/C/dense.c
+++ b/src/C/dense.c
@@ -1,7 +1,7 @@
 /*
- * Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+ * Copyright 2004-2008 J. Dahl and L. Vandenberghe.
  *
- * This file is part of CVXOPT version 0.9.2.
+ * This file is part of CVXOPT version 0.9.3.
  *
  * CVXOPT is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -468,6 +468,8 @@ matrix_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
   if (tc && !(VALID_TC_MAT(tc))) PY_ERR_TYPE("tc must be 'i', 'd' or 'z'");  
   int id = (tc ? TC2ID(tc) : -1);
 
+  if (!Objx && size) PY_ERR_TYPE("invalid arguments");
+
   if (!Objx) return (PyObject *)Matrix_New(0, 0, (id == -1 ? INT : id));
 
   matrix *ret = NULL;
diff --git a/src/C/dsdp.c b/src/C/dsdp.c
index 72c2de4..10f8200 100644
--- a/src/C/dsdp.c
+++ b/src/C/dsdp.c
@@ -1,7 +1,7 @@
 /*
- * Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+ * Copyright 2004-2008 J. Dahl and L. Vandenberghe.
  *
- * This file is part of CVXOPT version 0.9.2.
+ * This file is part of CVXOPT version 0.9.3.
  *
  * CVXOPT is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/src/C/fftw.c b/src/C/fftw.c
index 3d89b1a..5080acf 100644
--- a/src/C/fftw.c
+++ b/src/C/fftw.c
@@ -1,7 +1,7 @@
 /*
- * Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+ * Copyright 2004-2008 J. Dahl and L. Vandenberghe.
  *
- * This file is part of CVXOPT version 0.9.2.
+ * This file is part of CVXOPT version 0.9.3.
  *
  * CVXOPT is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/src/C/glpk.c b/src/C/glpk.c
index 9226175..b052c40 100644
--- a/src/C/glpk.c
+++ b/src/C/glpk.c
@@ -1,7 +1,7 @@
 /*
- * Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+ * Copyright 2004-2008 J. Dahl and L. Vandenberghe.
  *
- * This file is part of CVXOPT version 0.9.2.
+ * This file is part of CVXOPT version 0.9.3.
  *
  * CVXOPT is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/src/C/gsl.c b/src/C/gsl.c
index ca779a0..c1e9edb 100644
--- a/src/C/gsl.c
+++ b/src/C/gsl.c
@@ -1,7 +1,7 @@
 /*
- * Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+ * Copyright 2004-2008 J. Dahl and L. Vandenberghe.
  *
- * This file is part of CVXOPT version 0.9.2.
+ * This file is part of CVXOPT version 0.9.3.
  *
  * CVXOPT is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/src/C/lapack.c b/src/C/lapack.c
index 09ead80..2025695 100644
--- a/src/C/lapack.c
+++ b/src/C/lapack.c
@@ -1,7 +1,7 @@
 /*
- * Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+ * Copyright 2004-2008 J. Dahl and L. Vandenberghe.
  *
- * This file is part of CVXOPT version 0.9.2.
+ * This file is part of CVXOPT version 0.9.3.
  *
  * CVXOPT is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/src/C/misc.h b/src/C/misc.h
index e8c5477..a0e7501 100644
--- a/src/C/misc.h
+++ b/src/C/misc.h
@@ -1,7 +1,7 @@
 /*
- * Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+ * Copyright 2004-2008 J. Dahl and L. Vandenberghe.
  *
- * This file is part of CVXOPT version 0.9.2.
+ * This file is part of CVXOPT version 0.9.3.
  *
  * CVXOPT is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/src/C/sparse.c b/src/C/sparse.c
index 22ea582..10b00c0 100644
--- a/src/C/sparse.c
+++ b/src/C/sparse.c
@@ -1,7 +1,7 @@
 /*
- * Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+ * Copyright 2004-2008 J. Dahl and L. Vandenberghe.
  *
- * This file is part of CVXOPT version 0.9.2.
+ * This file is part of CVXOPT version 0.9.3.
  *
  * CVXOPT is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1249,7 +1249,7 @@ static int sp_dgemm(char tA, char tB, number alpha, void *a, void *b,
     }
 
     int mn = m*n; 
-    scal[A->id](&mn, &beta, C, (int *)&One[DOUBLE]);
+    scal[A->id](&mn, &beta, C, (int *)&One[INT]);
 
     int j, l;    
     for (j=0; j<n; j++) {
@@ -1273,7 +1273,7 @@ static int sp_dgemm(char tA, char tB, number alpha, void *a, void *b,
     ccs *B = (tB == 'N' ? b : transpose(b, 0));
 
     int j, l, mn_ = m*n;
-    scal[DOUBLE](&mn_, &beta, C, (int *)&One[DOUBLE]);
+    scal[DOUBLE](&mn_, &beta, C, (int *)&One[INT]);
  
     for (j=0; j<n; j++) {
       for (l=B->colptr[j]; l<B->colptr[j+1]; l++) {	
diff --git a/src/C/umfpack.c b/src/C/umfpack.c
index 5708fa4..4edc535 100644
--- a/src/C/umfpack.c
+++ b/src/C/umfpack.c
@@ -1,7 +1,7 @@
 /*
- * Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+ * Copyright 2004-2008 J. Dahl and L. Vandenberghe.
  *
- * This file is part of CVXOPT version 0.9.2.
+ * This file is part of CVXOPT version 0.9.3.
  *
  * CVXOPT is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/src/python/__init__.py b/src/python/__init__.py
index 4db19f6..dbd9b75 100644
--- a/src/python/__init__.py
+++ b/src/python/__init__.py
@@ -9,7 +9,8 @@ base.spmatrix_str  = printing.spmatrix_str_default
 base.spmatrix_repr = printing.spmatrix_repr_default
 
 def normal(nrows, ncols=1, mean=0.0, std=1.0):
-    '''Randomly generates a matrix with normally distributed entries.
+    '''
+    Randomly generates a matrix with normally distributed entries.
 
     normal(nrows, ncols=1, mean=0, std=1)
   
@@ -24,7 +25,8 @@ def normal(nrows, ncols=1, mean=0.0, std=1.0):
     ncols     number of columns
 
     mean      approximate mean of the distribution
-    std       standard deviation of the distribution'''
+    std       standard deviation of the distribution
+    '''
 
     try:    
         from cvxopt import gsl
@@ -37,7 +39,8 @@ def normal(nrows, ncols=1, mean=0.0, std=1.0):
     return gsl.normal(nrows, ncols, mean, std)
 
 def uniform(nrows, ncols=1, a=0, b=1):
-    '''Randomly generates a matrix with uniformly distributed entries.
+    '''
+    Randomly generates a matrix with uniformly distributed entries.
     
     uniform(nrows, ncols=1, a=0, b=1)
 
@@ -53,7 +56,8 @@ def uniform(nrows, ncols=1, a=0, b=1):
 
     a         lower bound
 
-    b         upper bound'''
+    b         upper bound
+    '''
 
     try:    
         from cvxopt import gsl
@@ -66,13 +70,15 @@ def uniform(nrows, ncols=1, a=0, b=1):
     return gsl.uniform(nrows, ncols, a, b)
 
 def setseed(val = 0):
-    '''Sets the seed value for the random number generator.
+    ''' 
+    Sets the seed value for the random number generator.
 
     setseed(val = 0)
     
     ARGUMENTS
     value     integer seed.  If the value is 0, the current system time  
-              is used. '''    
+              is used. 
+    '''    
 
     try:    
         from cvxopt import gsl
@@ -84,9 +90,11 @@ def setseed(val = 0):
         
  
 def getseed():
-    '''Returns the seed value for the random number generator.
+    '''
+    Returns the seed value for the random number generator.
     
-    getseed()'''
+    getseed()
+    '''
 
     try:    
         from cvxopt import gsl
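
Aside (illustration only, not part of the upstream patch): the reformatted
docstrings above describe the GSL-backed random matrix helpers.  A minimal
usage sketch, assuming CVXOPT was built with the GSL extension that the
try/except blocks require (the seed and sizes are arbitrary):

    from cvxopt import setseed, normal, uniform

    setseed(1)          # a value of 0 would seed from the system time
    A = normal(4, 2)    # 4x2 matrix with N(0, 1) entries
    u = uniform(4)      # 4x1 matrix with entries uniform on [0, 1]
    print(A)
    print(u)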
diff --git a/src/python/coneprog.py b/src/python/coneprog.py
index 7e20119..9d10f9f 100644
--- a/src/python/coneprog.py
+++ b/src/python/coneprog.py
@@ -2,9 +2,9 @@
 Solver for linear, second-order cone and semidefinite programming.
 """
 
-# Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+# Copyright 2004-2008 J. Dahl and L. Vandenberghe.
 # 
-# This file is part of CVXOPT version 0.9.2.
+# This file is part of CVXOPT version 0.9.3.
 #
 # CVXOPT is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -20,536 +20,28 @@ Solver for linear, second-order cone and semidefinite programming.
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 
-import math
-from cvxopt import base, blas, lapack, cholmod
-from cvxopt.base import matrix, spmatrix
-
 __all__ = []
 options = {}
 
 
-def scale(x, W, trans = 'N', inverse = 'N'):  
-
-    # Computes 
-    #
-    #     x := W*x        (trans is 'N', inverse = 'N')  
-    #     x := W^T*x      (trans is 'T', inverse = 'N')  
-    #     x := W^{-1}*x   (trans is 'N', inverse = 'I')  
-    #     x := W^{-T}*x   (trans is 'T', inverse = 'I'). 
-    #
-    # x is a dense 'd' matrix.
-    #
-    # W is a dictionary with entries:
-    #
-    # - W['d']: positive vector
-    # - W['di']: componentwise inverse of W['d']
-    # - W['v']: lists of second order cone vectors with unit hyperbolic 
-    #   norms
-    # - W['beta']: list of positive numbers
-    # - W['r']: list of square matrices 
-    # - W['rti']: list of square matrices.  rti[k] is the inverse  
-    #   transpose of r[k].
-
-
-    # Scaling for 'l' component xk is xk := d .* xk; inverse scaling is 
-    # xk ./ d = di .* xk, where d = W['d'], di = W['di'].
-
-    if inverse == 'N': w = W['d']
-    else: w = W['di']
-    m = w.size[0]
-    for k in xrange(x.size[1]):
-        blas.tbmv(w, x, n = m, k = 0, ldA = 1, offsetx = k*x.size[0])
-
-
-    # Scaling for 'q' component is 
-    #
-    #     xk := beta * (2*v*v' - J) * xk
-    #         = beta * (2*v*(xk'*v)' - J*xk)
-    #
-    # where beta = W['beta'][k], v = W['v'][k], J = [1, 0; 0, -I].
-    #
-    # Inverse scaling is
-    #
-    #     xk := 1/beta * (2*J*v*v'*J - J) * xk
-    #         = 1/beta * (-J) * (2*v*((-J*xk)'*v)' + xk). 
-
-    w = matrix(0.0, (x.size[1], 1))
-    ind = m
-    for k in xrange(len(W['v'])):
-        v = W['v'][k]
-        m = v.size[0]
-        if inverse == 'I':  
-            blas.scal(-1.0, x, offset = ind, inc = x.size[0])
-        blas.gemv(x, v, w, trans = 'T', m = m, n = x.size[1], offsetA = 
-            ind, ldA = x.size[0])
-        blas.scal(-1.0, x, offset = ind, inc = x.size[0])
-        blas.ger(v, w, x, alpha = 2.0, m = m, n = x.size[1], ldA = 
-            x.size[0], offsetA = ind)
-        if inverse == 'I': 
-            blas.scal(-1.0, x, offset = ind, inc = x.size[0])
-            a = 1.0 / W['beta'][k] 
-        else:
-            a = W['beta'][k] 
-        for i in xrange(x.size[1]):
-            blas.scal(a, x, n = m, offset = ind + i*x.size[0])
-        ind += m
-
-
-    # Scaling for 's' component xk is
-    #
-    #     xk := vec( r' * mat(xk) * r )  if trans = 'N'
-    #     xk := vec( r * mat(xk) * r' )  if trans = 'T'.
-    #
-    # r is kth element of W['r'].
-    #
-    # Inverse scaling is
-    #
-    #     xk := vec( rti * mat(xk) * rti' )  if trans = 'N'
-    #     xk := vec( rti' * mat(xk) * rti )  if trans = 'T'.
-    #
-    # rti is kth element of W['rti'].
-
-    maxn = max( [0] + [ r.size[0] for r in W['r'] ] )
-    a = matrix(0.0, (maxn, maxn))
-    for k in xrange(len(W['r'])):
-
-        if inverse == 'N':
-            r = W['r'][k]
-            if trans == 'N': t = 'T'
-            else: t = 'N'
-        else:
-            r = W['rti'][k]
-            t = trans
-
-        n = r.size[0]
-        for i in xrange(x.size[1]):
-
-            # scale diagonal of xk by 0.5
-            blas.scal(0.5, x, offset = ind + i*x.size[0], inc = n+1, n = n)
-
-            # a = r*tril(x) (t is 'N') or a = tril(x)*r  (t is 'T')
-            blas.copy(r, a)
-            if t == 'N':   
-                blas.trmm(x, a, side = 'R', m = n, n = n, ldA = n, ldB = n,
-                    offsetA = ind + i*x.size[0])
-            else:    
-                blas.trmm(x, a, side = 'L', m = n, n = n, ldA = n, ldB = n,
-                    offsetA = ind + i*x.size[0])
- 
-            # x := (r*a' + a*r')  if t is 'N'
-            # x := (r'*a + a'*r)  if t is 'T'
-            blas.syr2k(r, a, x, trans = t, n = n, k = n, ldB = n, ldC = n,
-                offsetC = ind + i*x.size[0])
- 
-        ind += n**2
-
-
-def scale2(lmbda, x, dims, inverse = 'N'):
-
-    # x := H(lambda^{1/2}) * x   (inverse is 'N')
-    # x := H(lambda^{-1/2}) * x  (inverse is 'I')
-    #
-    # H is the Hessian of the logarithmic barrier.
-      
-
-    # For the 'l' block, 
-    #
-    #     xk := xk ./ l   (inverse is 'N')
-    #     xk := xk .* l   (inverse is 'I')
-    #
-    # where l is lmbda[:dims['l']].
-
-    if inverse == 'N':
-        blas.tbsv(lmbda, x, n = dims['l'], k = 0, ldA = 1)
-    else:
-        blas.tbmv(lmbda, x, n = dims['l'], k = 0, ldA = 1)
-   
-  
-    # For 'q' blocks, if inverse is 'N',
-    #
-    #     xk := 1/a * [ l'*J*xk;  
-    #         xk[1:] - (xk[0] + l'*J*xk) / (l[0] + 1) * l[1:] ].
-    #
-    # If inverse is 'I',
-    #
-    #     xk := a * [ l'*xk; 
-    #         xk[1:] + (xk[0] + l'*xk) / (l[0] + 1) * l[1:] ].
-    #
-    # a = sqrt(lambda_k' * J * lambda_k), l = lambda_k / a.
-
-    ind = dims['l']
-    for m in dims['q']:
-        a = jnrm2(lmbda, n = m, offset = ind)
-        if inverse == 'N':
-            lx = jdot(lmbda, x, n = m, offsetx = ind, offsety = ind)/a
-        else:
-            lx = blas.dot(lmbda, x, n = m, offsetx = ind, offsety = ind)/a
-        x0 = x[ind]
-        x[ind] = lx
-        c = (lx + x0) / (lmbda[ind]/a + 1) / a 
-        if inverse == 'N':  c *= -1.0
-        blas.axpy(lmbda, x, alpha = c, n = m-1, offsetx = ind+1, offsety =
-            ind+1)
-        if inverse == 'N': a = 1.0/a 
-        blas.scal(a, x, offset = ind, n = m)
-        ind += m
-        
-
-    # For the 's' blocks, if inverse is 'N',
-    #
-    #     xk := vec( diag(l)^{-1/2} * mat(xk) * diag(k)^{-1/2}).
-    #
-    # If inverse is 'I',
-    #
-    #     xk := vec( diag(l)^{1/2} * mat(xk) * diag(k)^{1/2}).
-    #
-    # where l is kth block of lambda.
-    # 
-    # We scale upper and lower triangular part of mat(xk) because the
-    # inverse operation will be applied to nonsymmetric matrices.
-
-    ind2 = ind
-    for k in xrange(len(dims['s'])):
-        m = dims['s'][k]
-        for j in xrange(m):
-            c = math.sqrt(lmbda[ind2+j]) * base.sqrt(lmbda[ind2:ind2+m])
-            if inverse == 'N':  
-                blas.tbsv(c, x, n = m, k = 0, ldA = 1, offsetx = ind + j*m)
-            else:
-                blas.tbmv(c, x, n = m, k = 0, ldA = 1, offsetx = ind + j*m)
-        ind += m*m
-        ind2 += m
-
-
-def pack(x, y, dims, offsetx = 0, offsety = 0):
-
-     # The vector x is an element of S, with the 's' components stored 
-     # in unpacked storage.  On return, x is copied to y with the 's' 
-     # components matrices stored in packed storage and the off-diagonal 
-     # entries scaled by sqrt(2).
-
-     nlq = dims['l'] + sum(dims['q'])
-     np = sum([ n*(n+1)/2 for n in dims['s'] ])
-     blas.copy(x, y, n = nlq, offsetx = offsetx, offsety = offsety)
-     iu, ip = offsetx + nlq, offsety + nlq
-     for n in dims['s']:
-         for k in xrange(n):
-             blas.copy(x, y, n = n-k, offsetx = iu + k*(n+1), offsety = ip)
-             y[ip] /= math.sqrt(2)
-             ip += n-k
-         iu += n**2 
-     blas.scal(math.sqrt(2.0), y, n = np, offset = offsety+nlq)
-     
-
-def unpack(x, y, dims, offsetx = 0, offsety = 0):
-
-     # The vector x is an element of S, with the 's' components stored
-     # in unpacked storage and off-diagonal entries scaled by sqrt(2).
-     # On return, x is copied to y with the 's' components stored in 
-     # unpacked storage and off-diagonal entries scaled by sqrt(2).
-
-     nlq = dims['l'] + sum(dims['q'])
-     nu = sum([ n**2 for n in dims['s'] ])
-     blas.copy(x, y, n = nlq, offsetx = offsetx, offsety = offsety)
-     iu, ip = offsety+nlq, offsetx+nlq
-     for n in dims['s']:
-         for k in xrange(n):
-             blas.copy(x, y, n = n-k, offsetx = ip, offsety = iu+k*(n+1))
-             y[iu+k*(n+1)] *= math.sqrt(2)
-             ip += n-k
-         iu += n**2 
-     blas.scal(1.0/math.sqrt(2.0), y, n = nu, offset = offsety+nlq)
-
-
-def sdot(x, y, dims):
-
-    # Returns the inner product of two vectors in S
-    
-    ind = dims['l'] + sum(dims['q'])
-    a = blas.dot(x, y, n = ind)
-    for m in dims['s']:
-        a += blas.dot(x, y, offsetx = ind, offsety = ind, incx = m+1, 
-            incy = m+1, n = m)
-        for j in xrange(1, m):
-            a += 2.0 * blas.dot(x, y, incx = m+1, incy = m+1, 
-                offsetx = ind+j, offsety = ind+j, n = m-j)
-        ind += m**2
-    return a
-
-
-def sdot2(x, y):
-    """
-    Inner product of two block-diagonal symmetric dense 'd' matrices.
-
-    x and y are square dense 'd' matrices, or lists of N square dense
-    'd' matrices.
-    """
-
-    a = 0.0
-    if type(x) is matrix:
-	n = x.size[0]
-	a += blas.dot(x, y, incx=n+1, incy=n+1, n=n)
-	for j in xrange(1,n):
-	    a += 2.0 * blas.dot(x, y, incx=n+1, incy=n+1, offsetx=j,
-		offsety=j, n=n-j)
-
-    else:
-	for k in xrange(len(x)):
-	    n = x[k].size[0]
-	    a += blas.dot(x[k], y[k], incx=n+1, incy=n+1, n=n)
-	    for j in xrange(1,n):
-		a += 2.0 * blas.dot(x[k], y[k], incx=n+1, incy=n+1, 
-		    offsetx=j, offsety=j, n=n-j)
-    return a
-
-
-def snrm2(x, dims): 
-
-    # Returns the norm of a vector in S
-
-    return math.sqrt(sdot(x, x, dims))
-
-
-def sgemv(A, x, y, dims, trans = 'N', alpha = 1.0, beta = 0.0, m = None, 
-    n = None, offsetA = 0, offsety = 0): 
-
-    # A is a matrix or spmatrix of size (N, n) where 
-    #
-    #     N = dims['l'] + sum(dims['q']) + sum( k**2 for k in dims['s'] ). 
-    #
-    # If trans is 'N': 
-    #
-    #     y := alpha*A*x + beta * y   (trans = 'N').
-    #
-    # x is a vector of length n.  y is a vector of length N.
-    #
-    # If trans is 'T':
-    #
-    #     y := alpha*A'*x + beta * y  (trans = 'T').
-    #
-    # x is a vector of length N.  y is a vector of length n.
-    #
-    # The 's' components in S are stored in unpacked 'L' storage.
-
-    if m is None: m = A.size[0]
-    if n is None: n = A.size[1]
-
-    if trans == 'T' and alpha:
-        ind = dims['l'] + sum(dims['q'])
-        for mk in dims['s']:
-            # Set upper triangular part of x to zero and scale strict 
-            # lower triangular part by 2.
-            for j in xrange(1, mk):  
-                blas.scal(0.0, x, n = mk-j, inc = mk, offset = 
-                    ind + j*(mk + 1) - 1) 
-                blas.scal(2.0, x, offset = ind + mk*(j-1) + j, n = mk-j) 
-            ind += mk**2
-
-    base.gemv(A, x, y, trans = trans, alpha = alpha, beta = beta, m = m,
-        n = n, offsetA = offsetA, offsety = offsety)
-
-    if trans == 'T' and alpha:
-        ind = dims['l'] + sum(dims['q'])
-        for mk in dims['s']:
-            # Scale strict lower triangular part of x by 0.5.
-            for j in xrange(1, mk):  
-                blas.scal(0.5, x, offset = ind + mk*(j-1) + j, n = mk-j) 
-            ind += mk**2
-
-
-def jdot(x, y, n = None, offsetx = 0, offsety = 0):
-
-    # Returns x' * J * y, where J = [1, 0; 0, -I].
-
-    if n is None: 
-         if len(x) != len(y): raise ValueError, "x and y must have the "\
-             "same length"
-         n = len(x)
-    return x[offsetx] * y[offsety] - blas.dot(x, y, n = n-1, 
-        offsetx = offsetx + 1, offsety = offsety + 1) 
-
-
-def jnrm2(x, n = None, offset = 0):
-
-    # Returns sqrt(x' * J * x) where J = [1, 0; 0, -I], for a vector
-    # x in a second order cone. 
-
-    if n is None:  n = len(x)
-    a = blas.nrm2(x, n = n-1, offset = offset+1)
-    return math.sqrt(x[offset] - a) * math.sqrt(x[offset] + a)
-
-
-def symm(x, n, offset = 0):
-
-    # Fills in the upper triangular part of the symmetric matrix stored in
-    # x[offset : offset+n*n] using 'L' storage.
-
-    if n <= 1:  pass
-    for i in xrange(n-1):
-        blas.copy(x, x, offsetx = offset + i*(n+1) + 1, offsety = 
-            offset + (i+1)*(n+1) - 1, incy = n, n = n-i-1)
-
-
-def sprod(x, y, dims, diag = 'N'):   
-
-    # The product x := (y o x).  If diag is 'D', the 's' part of y is 
-    # diagonal and only the diagonal is stored.
-
-
-    # For the 'l' block:  
-    #
-    #     yk o xk = yk .* xk.
-
-    blas.tbmv(y, x, n = dims['l'], k = 0, ldA = 1) 
-
-
-    # For 'q' blocks: 
-    #
-    #               [ lo   l1'  ]
-    #     yk o xk = [           ] * xk
-    #               [ lo   l0*I ] 
-    #
-    # where yk = (l0, l1).
-    
-    ind = dims['l']
-    for m in dims['q']:
-        dd = blas.dot(x, y, offsetx = ind, offsety = ind, n = m)
-        blas.scal(y[ind], x, offset = ind+1, n = m-1)
-        blas.axpy(y, x, alpha = x[ind], n = m-1, offsetx = ind+1, offsety 
-            = ind+1)
-        x[ind] = dd
-        ind += m
-
-
-    # For the 's' blocks:
-    #
-    #    yk o sk = .5 * ( Yk * mat(xk) + mat(xk) * Yk )
-    # 
-    # where Yk = mat(yk) if diag is 'N' and Yk = diag(yk) if diag is 'D'.
-
-    if diag is 'N':
-        maxm = max([0] + dims['s'])
-        A = matrix(0.0, (maxm, maxm))
-
-        for m in dims['s']:
-            blas.copy(x, A, offsetx = ind, n = m*m)
-
-            # Write upper triangular part of A and yk.
-            for i in xrange(m-1):
-                symm(A, m)
-                symm(y, m, offset = ind)
-
-            # xk = 0.5 * (A*yk + yk*A)
-            blas.syr2k(A, y, x, alpha = 0.5, n = m, k = m, ldA = m,  ldB = 
-                m, ldC = m, offsetB = ind, offsetC = ind)
-
-            ind += m*m
-
-    else:
-        ind2 = ind
-        for m in dims['s']:
-            for j in xrange(m):
-                u = 0.5 * ( y[ind2+j:ind2+m] + y[ind2+j] )
-                blas.tbmv(u, x, n = m-j, k = 0, ldA = 1, offsetx = 
-                    ind + j*(m+1))  
-            ind += m*m
-            ind2 += m
-
-
-def sinv(x, y, dims):   
-
-    # The inverse product x := (y o\ x), when the 's' components of y are 
-    # diagonal.
-    
-    # For the 'l' block:  
-    # 
-    #     yk o\ xk = yk .\ xk.
-
-    blas.tbsv(y, x, n = dims['l'], k = 0, ldA = 1)
-
-
-    # For the 'q' blocks: 
-    #
-    #                        [ l0   -l1'              ]  
-    #     yk o\ xk = 1/a^2 * [                        ] * xk
-    #                        [ -l1  (a*I + l1*l1')/l0 ]
-    #
-    # where yk = (l0, l1) and a = l0^2 - l1'*l1.
-
-    ind = dims['l']
-    for m in dims['q']:
-        aa = jnrm2(y, n = m, offset = ind)**2
-        cc = x[ind]
-        dd = blas.dot(y, x, offsetx = ind+1, offsety = ind+1, n = m-1)
-        x[ind] = cc * y[ind] - dd
-        blas.scal(aa / y[ind], x, n = m-1, offset = ind+1)
-        blas.axpy(y, x, alpha = dd/y[ind] - cc, n = m-1, offsetx = ind+1, 
-            offsety = ind+1)
-        blas.scal(1.0/aa, x, n = m, offset = ind)
-        ind += m
-
-
-    # For the 's' blocks:
-    #
-    #     yk o\ xk =  xk ./ gamma
-    #
-    # where gammaij = .5 * (yk_i + yk_j).
-
-    ind2 = ind
-    for m in dims['s']:
-        for j in xrange(m):
-            u = 0.5 * ( y[ind2+j:ind2+m] + y[ind2+j] )
-            blas.tbsv(u, x, n = m-j, k = 0, ldA = 1, offsetx = ind + 
-                j*(m+1))  
-        ind += m*m
-        ind2 += m
-        
-
-def max_step(x, dims, sigma = None):
-
-    # Returns min {t | x + t*e >= 0}.
-    # When called with the argument sigma, also returns the eigenvalues 
-    # (in sigma) and the eigenvectors (in x) of the 's' components of x.
-
-    t = []
-    ind = dims['l']
-    if ind: t += [ -min(x[:ind]) ] 
-    for m in dims['q']:
-        if m: t += [ blas.nrm2(x, offset = ind+1, n = m-1) - x[ind] ]
-        ind += m
-    if sigma is None and dims['s']:  
-        Q = matrix(0.0, (max(dims['s']), max(dims['s'])))
-        w = matrix(0.0, (max(dims['s']),1))
-    ind2 = 0
-    for m in dims['s']:
-        if sigma is None:
-            blas.copy(x, Q, offsetx = ind, n = m**2)
-            lapack.syevr(Q, w, range = 'I', il = 1, iu = 1, n = m, ldA = m)
-            if m:  t += [ -w[0] ]
-        else:            
-            lapack.syevd(x, sigma, jobz = 'V', n = m, ldA = m, offsetA = 
-                ind, offsetW = ind2)
-            if m:  t += [ -sigma[ind2] ] 
-        ind += m*m
-        ind2 += m
-    if t: return max(t)
-    else: return 0.0
-
-
-
-def conelp(c, G, h, dims, A = None, b = None, primalstart = None, 
-    dualstart = None, kktsolver = None, xnewcopy = matrix, xdot = 
-    blas.dot,  xaxpy = blas.axpy, xscal = blas.scal, ynewcopy = matrix, 
-    ydot = blas.dot, yaxpy = blas.axpy, yscal = blas.scal):
+def conelp(c, G, h, dims = None, A = None, b = None, primalstart = None, 
+    dualstart = None, kktsolver = None, xnewcopy = None, xdot = None,
+    xaxpy = None, xscal = None, ynewcopy = None, ydot = None, yaxpy = None,
+    yscal = None):
 
     """
     Solves a pair of primal and dual cone programs
 
-        minimize    c'*x              maximize    -h'*z - b'*y 
-        subject to  G*x + s = h       subject to  G'*z + A'*y + c = 0
-                    A*x = b                       z >= 0.
+        minimize    c'*x
+        subject to  G*x + s = h
+                    A*x = b
                     s >= 0
 
+        maximize    -h'*z - b'*y 
+        subject to  G'*z + A'*y + c = 0
+                    z >= 0.
+
+
     The inequalities are with respect to a cone C defined as the Cartesian
     product of N + M + 1 cones:
     
@@ -567,8 +59,7 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
 
     Input arguments (basic usage).
    
-        c is a dense 'd' matrix of size (n,1), where n is the dimension of
-        the primal variable x.
+        c is a dense 'd' matrix of size (n,1).
 
         dims is a dictionary with the dimensions of the components of C.  
         It has three fields.
@@ -580,6 +71,7 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
         - dims['s'] = ms = [ ms[0], ms[1], ..., ms[M-1] ], a list of M  
           integers with the orders of the semidefinite cones C_{N+1}, ...,
           C_{N+M}.  (M >= 0 and ms[k] >= 0.)
+        The default value of dims is {'l': G.size[0], 'q': [], 's': []}.
 
         G is a dense or sparse 'd' matrix of size (K,n), where
 
@@ -603,8 +95,8 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
         h is a dense 'd' matrix of size (K,1), representing a vector in V,
         in the same format as the columns of G.
     
-        A is a dense or sparse 'd' matrix of size (p,n).   The default
-        value is a sparse 'd' matrix of size (0,n).
+        A is a dense or sparse 'd' matrix of size (p,n).  The default value
+        is a sparse 'd' matrix of size (0,n).
 
         b is a dense 'd' matrix of size (p,1).   The default value is a 
         dense 'd' matrix of size (0,1).
@@ -621,15 +113,16 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
         - dualstart['y'] is a dense 'd' matrix of size (p,1).  
         - dualstart['z'] is a dense 'd' matrix of size (K,1), representing
           a vector that is strictly positive with respect to the cone C.
+
+        It is assumed that rank(A) = p and rank([A; G]) = n.
  
-        The other arguments are normally not needed.  They allow one to 
-        exploit certain types of structure in cone LPs, as described below.
+        The other arguments are normally not needed.  They make it possible
+        to exploit certain types of structure, as described below.
 
 
-    Output.
+    Output arguments.
 
-        conelp() returns a dictionary with keys 'status', 'x', 's', 'z', 
-        'y'.
+        Returns a dictionary with keys 'status', 'x', 's', 'z', 'y'.
 
         If status is 'optimal', x, s, y, z are approximate primal and 
         dual optimal solutions.
@@ -648,48 +141,22 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
 
 
 
-    Exploiting problem structure.
-
-        Three mechanisms are provided to express problem structure in 
-        cone LPs.  
-
-        First, instead of matrices, G and A are allowed to be Python 
-        functions that evaluate the linear mappings G*x, A*x and their 
-        adjoints.  If G is a function, the call G(x, y, alpha, beta, trans)
-        should evaluate the matrix-vector products
-
-            y := alpha * G * x + beta * y  if trans is 'N' 
-            y := alpha * G' * x + beta * y  if trans is 'T'.
-
-        The arguments x and y are required.  The other arguments have 
-        default values alpha = 1.0, beta = 0.0, trans = 'N'.
-
-        If A is a function, the call A(x, y, alpha, beta, trans) should 
-        evaluate the matrix-vectors products
-
-            y := alpha * A * x + beta * y if trans is 'N'
-            y := alpha * A' * x + beta * y if trans is 'T'.
-
-        The arguments x and y are required.  The other arguments 
-        have default values alpha = 1.0, beta = 0.0, trans = 'N'.
+    Advanced usage.
 
-        If G and/or A are functions, then the argument kktsolver is 
-        required. 
+        Three mechanisms are provided to express problem structure.  
 
-  
-        Second, the user can provide a customized routine for solving the 
-        linear equations (`KKT systems')
+        1.  The user can provide a customized routine for solving linear 
+        equations (`KKT systems')
         
-            [ 0  A'  G'   ] [ x ]   [ bx ]
-            [ A  0   0    ] [ y ] = [ by ]
-            [ G  0  -W'*W ] [ z ]   [ bz ]
+            [ 0  A'  G'   ] [ ux ]   [ bx ]
+            [ A  0   0    ] [ uy ] = [ by ].
+            [ G  0  -W'*W ] [ uz ]   [ bz ]
 
-        that form the most expensive step of the algorithm.  Here W is a 
-        scaling matrix, a block diagonal mapping 
+        W is a scaling matrix, a block diagonal mapping 
 
            W*z = ( W0*z_0, ..., W_{N+M}*z_{N+M} ) 
 
-        from V to V, defined as follows.  
+        defined as follows.  
 
         - For the 'l' block (W_0): 
 
@@ -715,7 +182,7 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
         called as f = kktsolver(W), where W is a dictionary that contains 
         the parameters of the scaling:
 
-        - W['d'] is a positive 'd' matrix of size (ml, 1).
+        - W['d'] is a positive 'd' matrix of size (ml,1).
         - W['di'] is a positive 'd' matrix with the elementwise inverse of
           W['d'].
         - W['beta'] is a list [ beta_0, ..., beta_{N-1} ]
@@ -724,19 +191,50 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
         - W['rti'] is a list [ rti_0, ..., rti_{M-1} ], with rti_k the
           inverse of the transpose of r_k.
 
-        The call f = kktsolver(W) should return a function f for solving 
-        the KKT system.  The KKT system is solved by f(x, y, z).  On 
-        entry, x, y, z contain the righthand side bx, by, bz.  On exit, 
-        they contain the solution, with z scaled: W*z is returned instead 
-        of z.
+        The call f = kktsolver(W) should return a function f that solves 
+        the KKT system by f(x, y, z).  On entry, x, y, z contain the 
+        righthand side bx, by, bz.  On exit, they contain the solution, 
+        with uz scaled: the argument z contains W*uz.  In other words,
+        on exit, x, y, z are the solution of
+
+            [ 0  A'  G'*W^{-1} ] [ ux ]   [ bx ]
+            [ A  0   0         ] [ uy ] = [ by ].
+            [ G  0  -W'        ] [ uz ]   [ bz ]
+
+
+        2.  The linear operators G*u and A*u can be specified by providing
+        Python functions instead of matrices.  This can only be done in 
+        combination with 1. above, i.e., it requires the kktsolver 
+        argument.
+
+        If G is a function, the call G(u, v, alpha, beta, trans)
+        should evaluate the matrix-vector products
+
+            v := alpha * G * u + beta * v  if trans is 'N' 
+            v := alpha * G' * u + beta * v  if trans is 'T'.
+
+        The arguments u and v are required.  The other arguments have 
+        default values alpha = 1.0, beta = 0.0, trans = 'N'.
+
+        If A is a function, the call A(u, v, alpha, beta, trans) should 
+        evaluate the matrix-vector products
+
+            v := alpha * A * u + beta * v if trans is 'N'
+            v := alpha * A' * u + beta * v if trans is 'T'.
+
+        The arguments u and v are required.  The other arguments 
+        have default values alpha = 1.0, beta = 0.0, trans = 'N'.
 
 
-        Finally, instead of using the default representation of the primal 
+        3.  Instead of using the default representation of the primal 
         variable x and the dual variable y as one-column 'd' matrices, 
-        we can represent these variables (and the corresponding parameters
-        c and b) by arbitrary Python objects (matrices, lists, 
-        dictionaries, etc), provided the user supplies the functions 
-        xnewcopy, xdot, xscal, xaxpy, ynewcopy, ydot, yscal, yaxpy.  
+        we can represent these variables and the corresponding parameters
+        c and b by arbitrary Python objects (matrices, lists, dictionaries,
+        etc.).  This can only be done in combination with 1. and 2. above,
+        i.e., it requires a user-provided KKT solver and an operator 
+        description of the linear mappings.  It also requires the arguments
+        xnewcopy, xdot, xscal, xaxpy, ynewcopy, ydot, yscal, yaxpy.  These
+        arguments are functions defined as follows.
 
         If X is the vector space of primal variables x, then:
         - xnewcopy(u) creates a new copy of the vector u in X.
@@ -745,9 +243,6 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
           and u is a vector in X.
         - xaxpy(u, v, alpha = 1.0, beta = 0.0) computes v := alpha*u + v 
           for a scalar alpha and two vectors u and v in X.
-        If this option is used, the argument c must be in the same format
-        as x, the arguments G and A must be Python functions, and the
-        argument kktsolver is required.
 
         If Y is the vector space of primal variables y:
         - ynewcopy(u) creates a new copy of the vector u in Y.
@@ -766,12 +261,19 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
        The following control parameters can be modified by adding an 
        entry to the dictionary options.  
 
-       options['show_progress'] True/False (default: True)
-       options['maxiters'] positive integer (default: 100)
-       options['abstol'] scalar (default: 1e-7)
-       options['reltol'] scalar (default: 1e-6)
-       options['feastol'] scalar (default: 1e-7).
+       options['show_progress'].  True/False (default: True).
+       options['maxiters'].  Positive integer (default: 100).
+       options['refinement'].  Nonnegative integer (default: 0 for problems
+           with no second-order cone or matrix inequality constraints; 
+           1 otherwise).
+       options['abstol'].  Scalar (default: 1e-7).
+       options['reltol'].  Scalar (default: 1e-6).
+       options['feastol'].  Scalar (default: 1e-7).
+
     """
+    import math
+    from cvxopt import base, blas, misc
+    from cvxopt.base import matrix, spmatrix
 
     EXPON = 3
     STEP = 0.99
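
Aside (illustration only, not part of the upstream patch): the control
parameters documented in the hunk above can be exercised on a small LP over
the nonnegative orthant, using the default dims and hence the default 'chol2'
KKT solver.  The data below are arbitrary; conelp is reached through the
solvers module, as in the chapter 8 examples:

    from cvxopt.base import matrix
    from cvxopt import solvers

    solvers.options['show_progress'] = False   # silence the iteration log
    # minimize -4*x1 - 5*x2  s.t.  2*x1 + x2 <= 3,  x1 + 2*x2 <= 3,  x >= 0
    c = matrix([-4., -5.])
    G = matrix([[2., 1., -1., 0.], [1., 2., 0., -1.]])
    h = matrix([3., 3., 0., 0.])
    sol = solvers.conelp(c, G, h)   # dims defaults to {'l': 4, 'q': [], 's': []}
    print(sol['status'])            # 'optimal'
    print(sol['x'])                 # approximately [1.0; 1.0]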
@@ -804,6 +306,44 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
     try: show_progress = options['show_progress']
     except KeyError: show_progress = True
 
+
+    if kktsolver is None: 
+        if dims and (dims['q'] or dims['s']):  
+            kktsolver = 'qr'            
+        else:
+            kktsolver = 'chol2'
+    defaultsolvers = ('ldl', 'ldl2', 'qr', 'chol', 'chol2')
+    if type(kktsolver) is str and kktsolver not in defaultsolvers:
+        raise ValueError, "'%s' is not a valid value for kktsolver" \
+            %kktsolver
+
+    # Argument error checking depends on level of customization.
+    customkkt = type(kktsolver) is not str
+    operatorG = type(G) not in (matrix, spmatrix)
+    operatorA = A is not None and type(A) not in (matrix, spmatrix)
+    if (operatorG or operatorA) and not customkkt:
+        raise ValueError, "use of function valued G, A requires a "\
+            "user-provided kktsolver"
+    customx = ( xnewcopy != None or xdot != None or xaxpy != None or 
+        xscal != None )
+    if customx and (not operatorG or not operatorA or not customkkt):
+        raise ValueError, "use of non-vector type for x requires "\
+            "function valued G, A and user-provided kktsolver"
+    customy = (ynewcopy != None or ydot != None or yaxpy != None or 
+        yscal != None)  
+    if customy and (not operatorA or not customkkt):
+        raise ValueError, "use of non-vector type for y requires "\
+            "function valued A and user-provided kktsolver"
+
+
+    if not customx and (type(c) is not matrix or c.typecode != 'd' or 
+        c.size[1] != 1):
+        raise TypeError, "'c' must be a 'd' matrix with one column" 
+
+    if type(h) is not matrix or h.typecode != 'd' or h.size[1] != 1:
+        raise TypeError, "'h' must be a 'd' matrix with 1 column" 
+
+    if not dims: dims = {'l': h.size[0], 'q': [], 's': []}
     if type(dims['l']) is not int or dims['l'] < 0: 
         raise TypeError, "'dims['l']' must be a nonnegative integer"
     if [ k for k in dims['q'] if type(k) is not int or k < 1 ]:
@@ -813,24 +353,23 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
             "integers"
 
     try: refinement = options['refinement']
-    except KeyError: refinement = True
-
-    # Number of second-order and positive semidefinite cones.
-    Nq, Ns = len(dims['q']), len(dims['s'])
+    except KeyError: 
+        if dims['q'] or dims['s']: refinement = 1
+        else: refinement = 0
+    else:
+        if type(refinement) is not int or refinement < 0: 
+            raise ValueError, "options['refinement'] must be a "\
+                "nonnegative integer"
 
-    # Logarithmic degree of the product cone.
-    cdeg = dims['l'] + Nq + sum(dims['s'])  
 
-    # Dimension of the product cone, with 's' components unpacked.  
     cdim = dims['l'] + sum(dims['q']) + sum([k**2 for k in dims['s']])
-
-    # Dimension of the product cone, with 's' components packed.  
-    cdim_pckd = dims['l'] + sum(dims['q']) + sum([k*(k+1)/2 for k 
-        in dims['s']])
-
-    # Dimension of the product cone, with diagonal 's' components.
+    cdim_pckd = dims['l'] + sum(dims['q']) + sum([k*(k+1)/2 for k in 
+        dims['s']])
     cdim_diag = dims['l'] + sum(dims['q']) + sum(dims['s'])
 
+    if h.size[0] != cdim:
+        raise TypeError, "'h' must be a 'd' matrix of size (%d,1)" %cdim
+
     # Data for kth 'q' constraint are found in rows indq[k]:indq[k+1] of G.
     indq = [ dims['l'] ]  
     for k in dims['q']:  indq = indq + [ indq[-1] + k ] 
@@ -839,589 +378,186 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
     inds = [ indq[-1] ]
     for k in dims['s']:  inds = inds + [ inds[-1] + k**2 ] 
 
-    if type(h) is not matrix or h.typecode != 'd' or h.size[1] != 1:
-        raise TypeError, "'h' must be a 'd' matrix with 1 column" 
-    if type(G) is matrix or type(G) is spmatrix:
-        n = G.size[1]
-        if G.typecode != 'd' or G.size[0] != cdim:
-            raise TypeError, "'G' must be a 'd' matrix with %d rows " %cdim
-        if h.size[0] != cdim:
-            raise TypeError, "'h' must have %d rows" %cdim 
+    if not operatorG:
+        if G.typecode != 'd' or G.size != (cdim, c.size[0]):
+            raise TypeError, "'G' must be a 'd' matrix of size (%d, %d)"\
+                %(cdim, c.size[0])
         def Gf(x, y, trans = 'N', alpha = 1.0, beta = 0.0): 
-            sgemv(G, x, y, dims, trans = trans, alpha = alpha, beta = beta)
+            misc.sgemv(G, x, y, dims, trans = trans, alpha = alpha, 
+                beta = beta)
     else: 
-        if kktsolver is None:
-            raise ValueError, "argument 'kktsolver' must be provided if "\
-                "'G' is a function"
         Gf = G
 
-    if b is None: b = matrix(0.0, (0,1))
-    if type(b) is not matrix or b.typecode != 'd' or b.size[1] != 1:
-        raise TypeError, "'b' must be a 'd' matrix with 1 column" 
     if A is None: 
-        if type(G) is matrix or type(G) is spmatrix:
-            A = spmatrix([], [], [], (0,n))
-        else:
+        if customx or customy: 
             def A(x, y, trans = 'N', alpha = 1.0, beta = 0.0):
                 if trans == 'N': pass
-                else: xscal(beta, y)
-    if type(A) is matrix or type(A) is spmatrix:
-        p = A.size[0] 
-        if A.typecode != 'd' or A.size[1] != n:
-            raise TypeError, "'A' must be a 'd' matrix with %d columns " %n
-        if b.size[0] != p:
-            raise TypeError, "'b' must have %d rows" %p 
+                else: yscal(beta, y)
+        else: 
+            A = spmatrix([], [], [], (0, c.size[0]))
+    if not operatorA:
+        if A.typecode != 'd' or A.size[1] != c.size[0]:
+            raise TypeError, "'A' must be a 'd' matrix with %d columns "\
+                %c.size[0]
         def Af(x, y, trans = 'N', alpha = 1.0, beta = 0.0): 
             base.gemv(A, x, y, trans = trans, alpha = alpha, beta = beta)
     else: 
-        if kktsolver is None:
-            raise ValueError, "argument 'kktsolver' must be provided if "\
-                "'A' is a function"
         Af = A
 
+    if not customy:
+        if b is None: b = matrix(0.0, (0,1))
+        if type(b) is not matrix or b.typecode != 'd' or b.size[1] != 1:
+            raise TypeError, "'b' must be a 'd' matrix with one column" 
+        if not operatorA and b.size[0] != A.size[0]:
+            raise TypeError, "'b' must have length %d" %A.size[0]
+    else:
+        if b is None: 
+            raise ValueError, "use of non vector type for y requires b"
+
+
+    # kktsolver(W) returns a routine for solving 3x3 block KKT system 
+    #
+    #     [ 0   A'  G'*W^{-1} ] [ ux ]   [ bx ]
+    #     [ A   0   0         ] [ uy ] = [ by ].
+    #     [ G   0   -W'       ] [ uz ]   [ bz ]
+
+    if kktsolver in defaultsolvers:
+        if b.size[0] > c.size[0] or b.size[0] + cdim_pckd < c.size[0]:
+            raise ValueError, "Rank(A) < p or Rank([G; A]) < n"
+        if kktsolver == 'ldl': 
+            factor = misc.kkt_ldl(G, dims, A)
+        elif kktsolver == 'ldl2':
+            factor = misc.kkt_ldl2(G, dims, A)
+        elif kktsolver == 'qr':
+            factor = misc.kkt_qr(G, dims, A)
+        elif kktsolver == 'chol':
+            factor = misc.kkt_chol(G, dims, A)
+        else:
+            factor = misc.kkt_chol2(G, dims, A)
+        def kktsolver(W):
+            return factor(W)
+
+
+    # res() evaluates residual in 5x5 block KKT system
+    #
+    # [ vx   ]    [ 0         ]   [ 0   A'  G'  c ] [ ux        ]
+    # [ vy   ]    [ 0         ]   [-A   0   0   b ] [ uy        ]
+    # [ vz   ] += [ W'*us     ] - [-G   0   0   h ] [ W^{-1}*uz ]
+    # [ vtau ]    [ dg*ukappa ]   [-c' -b' -h'  0 ] [ utau/dg   ]
+    # 
+    # vs := vs + lmbda o (uz + us) 
+    # vkappa := vkappa + lmbda[-1] * (utau + ukappa).
+
+    ws3, wz3 = matrix(0.0, (cdim,1)), matrix(0.0, (cdim,1))
+    def res(ux, uy, uz, utau, us, ukappa, vx, vy, vz, vtau, vs, vkappa, W,
+        dg, lmbda):
+
+        # vx := vx - A'*uy - G'*W^{-1}*uz - c*utau/dg
+        Af(uy, vx, alpha = -1.0, beta = 1.0, trans = 'T')
+        blas.copy(uz, wz3)
+        misc.scale(wz3, W, inverse = 'I')
+        Gf(wz3, vx, alpha = -1.0, beta = 1.0, trans = 'T')
+        xaxpy(c, vx, alpha = -utau[0]/dg)
+
+        # vy := vy + A*ux - b*utau/dg
+        Af(ux, vy, alpha = 1.0, beta = 1.0)
+        yaxpy(b, vy, alpha = -utau[0]/dg)
+ 
+        # vz := vz + G*ux - h*utau/dg + W'*us
+        Gf(ux, vz, alpha = 1.0, beta = 1.0)
+        blas.axpy(h, vz, alpha = -utau[0]/dg)
+        blas.copy(us, ws3)
+        misc.scale(ws3, W, trans = 'T')
+        blas.axpy(ws3, vz)
+
+        # vtau := vtau + c'*ux + b'*uy + h'*W^{-1}*uz + dg*ukappa
+        vtau[0] += dg*ukappa[0] + xdot(c,ux) + ydot(b,uy) + \
+            misc.sdot(h, wz3, dims) 
+
+        # vs := vs + lmbda o (uz + us)
+        blas.copy(us, ws3)
+        blas.axpy(uz, ws3)
+        misc.sprod(ws3, lmbda, dims, diag = 'D')
+        blas.axpy(ws3, vs)
+
+        # vkappa := vkappa + lmbda[-1] * (utau + ukappa)
+        vkappa[0] += lmbda[-1] * (utau[0] + ukappa[0])
+
+
+    if xnewcopy is None: xnewcopy = matrix 
+    if xdot is None: xdot = blas.dot
+    if xaxpy is None: xaxpy = blas.axpy 
+    if xscal is None: xscal = blas.scal 
     def xcopy(x, y): 
         xscal(0.0, y) 
         xaxpy(x, y)
-
+    if ynewcopy is None: ynewcopy = matrix 
+    if ydot is None: ydot = blas.dot 
+    if yaxpy is None: yaxpy = blas.axpy 
+    if yscal is None: yscal = blas.scal
     def ycopy(x, y): 
         yscal(0.0, y) 
         yaxpy(x, y)
 
-    if kktsolver is None: 
-        if dims['q'] or dims['s']: 
-            kktsolver = 'qr'
-        else: 
-            kktsolver = 'chol'
-    if kktsolver in ('qr', 'ldl', 'ldl2', 'chol'):
-        if type(A) is not matrix and type(A) is not spmatrix:
-            raise TypeError, "A must be a matrix or spmatrix if " \
-                "kktsolver is '" + kktsolver + "'"
-        if type(G) is not matrix and type(G) is not spmatrix:
-            raise TypeError, "G must be a matrix or spmatrix if " \
-                "kktsolver is '" + kktsolver + "'"
-        if p > n or cdim_pckd + p < n:
-            raise ValueError, "Rank(A) < p or Rank([G; A]) < n"
 
-    if kktsolver == 'qr':
+    # Select initial points.
 
-        # The default kktsolver, except for LPs.
-        #
-        # Two QR factorizations
-        #
-        #     A' = [Q1, Q2] * [R1; 0],   W^{-T} * G * Q1 = Q3*R3
-        # 
-        # (with columns of W^{-T}*G in packed storage).
+    x = xnewcopy(c);  xscal(0.0, x)
+    y = ynewcopy(b);  yscal(0.0, y)
+    s, z = matrix(0.0, (cdim,1)), matrix(0.0, (cdim,1))
+    dx, dy = xnewcopy(c), ynewcopy(b)
+    ds, dz = matrix(0.0, (cdim,1)), matrix(0.0, (cdim,1))
+    dkappa, dtau = matrix(0.0, (1,1)), matrix(0.0, (1,1))
 
-        # A' = [Q1, Q2] * [R1; 0]
-        if type(A) is matrix:
-            QA = +A.T
-        else:
-            QA = matrix(A.T)
-        tauA = matrix(0.0, (p,1))
-        lapack.geqrf(QA, tauA)
+    if primalstart is None or dualstart is None:
 
-        Gs = matrix(0.0, (cdim, n))
-        tauG = matrix(0.0, (n-p,1))
-        g = matrix(0.0, (cdim, 1))
-        u = matrix(0.0, (cdim_pckd, 1))
-        vv = matrix(0.0, (n,1))
-        w = matrix(0.0, (cdim_pckd, 1))
+        # Factor
+        #
+        #     [ 0   A'  G' ] 
+        #     [ A   0   0  ].
+        #     [ G   0  -I  ]
+    
+        W = {}
+        W['d'] = matrix(1.0, (dims['l'], 1)) 
+        W['di'] = matrix(1.0, (dims['l'], 1)) 
+        W['v'] = [ matrix(0.0, (m,1)) for m in dims['q'] ]
+        W['beta'] = len(dims['q']) * [ 1.0 ] 
+        for v in W['v']: v[0] = 1.0
+        W['r'] = [ matrix(0.0, (m,m)) for m in dims['s'] ]
+        W['rti'] = [ matrix(0.0, (m,m)) for m in dims['s'] ]
+        for r in W['r']: r[::r.size[0]+1 ] = 1.0
+        for rti in W['rti']: rti[::rti.size[0]+1 ] = 1.0
+        try: f = kktsolver(W)
+        except ArithmeticError:  
+            raise ValueError, "Rank(A) < p or Rank([G; A]) < n"
 
-        def kktsolver(W):
+    if primalstart is None:
 
-            # Gs = W^{-T}*G, in packed storage.
-            Gs[:,:] = G
-            scale(Gs, W, trans = 'T', inverse = 'I')
-            for k in xrange(n):
-                g[:] = Gs[:, k]
-                pack(g, Gs, dims, offsety = k*Gs.size[0])
- 
-            # Gs := [ Gs1, Gs2 ] 
-            #     = Gs * [ Q1, Q2 ]
-            lapack.ormqr(QA, tauA, Gs, side = 'R', m = cdim_pckd)
-
-            # QR factorization Gs2 := [ Q3, Q4 ] * [ R3; 0 ] 
-            lapack.geqrf(Gs, tauG, n = n-p, m = cdim_pckd, offsetA = 
-                Gs.size[0]*p)
-
-            def solve_kkt(x, y, z):
-
-                # On entry, x, y, z contain bx, by, bz.  On exit, they 
-                # contain the solution x, y, W*z of
-                #
-                #     [ 0         A'  G'*W^{-1} ]   [ x   ]   [bx       ]
-                #     [ A         0   0         ] * [ y   ] = [by       ].
-                #     [ W^{-T}*G  0   -I        ]   [ W*z ]   [W^{-T}*bz]
-                #
-                # The system is solved in five steps:
-                #
-                #       w := W^{-T}*bz - Gs1*R1^{-T}*by 
-                #       u := R3^{-T}*Q2'*bx + Q3'*w
-                #     W*z := Q3*u - w
-                #       y := R1^{-1} * (Q1'*bx - Gs1'*(W*z))
-                #       x := [ Q1, Q2 ] * [ R1^{-T}*by;  R3^{-1}*u ]
-
-                # w := W^{-T} * bz in packed storage 
-                scale(z, W, trans = 'T', inverse = 'I')
-                pack(z, w, dims)
-
-                # vv := [ Q1'*bx;  R3^{-T}*Q2'*bx ]
-                blas.copy(x, vv)
-                lapack.ormqr(QA, tauA, vv, trans='T') 
-                lapack.trtrs(Gs, vv, uplo = 'U', trans = 'T', n = n-p,
-                    offsetA = Gs.size[0]*p, offsetB = p)
-
-                # x[:p] := R1^{-T} * by 
-                blas.copy(y, x)
-                lapack.trtrs(QA, x, uplo = 'U', trans = 'T', n = p)
-
-                # w := w - Gs1 * x[:p] 
-                #    = W^{-T}*bz - Gs1*by 
-                blas.gemv(Gs, x, w, alpha = -1.0, beta = 1.0, n = p,
-                    m = cdim_pckd)
-
-                # u := [ Q3'*w + v[p:];  0 ]
-                #    = [ Q3'*w + R3^{-T}*Q2'*bx; 0 ]
-                blas.copy(w, u)
-                lapack.ormqr(Gs, tauG, u, trans = 'T', k = n-p, offsetA = 
-                    Gs.size[0]*p, m = cdim_pckd)
-                blas.axpy(vv, u, offsetx = p, n = n-p)
-                blas.scal(0.0, u, offset = n-p)
-
-                # x[p:] := R3^{-1} * u[:n-p]  
-                blas.copy(u, x, offsety = p, n = n-p)
-                lapack.trtrs(Gs, x, uplo='U', n = n-p, offsetA = 
-                    Gs.size[0]*p, offsetB = p)
-
-                # x is now [ R1^{-T}*by;  R3^{-1}*u[:n-p] ]
-                # x := [Q1 Q2]*x
-                lapack.ormqr(QA, tauA, x) 
- 
-                # u := [Q3, Q4] * u - w 
-                #    = Q3 * u[:n-p] - w
-                lapack.ormqr(Gs, tauG, u, k = n-p, m = cdim_pckd,
-                    offsetA = Gs.size[0]*p)
-                blas.axpy(w, u, alpha = -1.0)  
+        # minimize    || G * x - h ||^2
+        # subject to  A * x = b
+        #
+        # by solving
+        #
+        #     [ 0   A'  G' ]   [ x  ]   [ 0 ]
+        #     [ A   0   0  ] * [ dy ] = [ b ].
+        #     [ G   0  -I  ]   [ -s ]   [ h ]
 
-                # y := R1^{-1} * ( v[:p] - Gs1'*u )
-                #    = R1^{-1} * ( Q1'*bx - Gs1'*u )
-                blas.copy(vv, y, n = p)
-                blas.gemv(Gs, u, y, m = cdim_pckd, n = p, trans = 'T', 
-                    alpha = -1.0, beta = 1.0)
-                lapack.trtrs(QA, y, uplo = 'U', n=p) 
+        xscal(0.0, x)
+        ycopy(b, dy)  
+        blas.copy(h, s)
+        try: f(x, dy, s) 
+        except ArithmeticError:  
+            raise ValueError, "Rank(A) < p or Rank([G; A]) < n"
+        blas.scal(-1.0, s)  
 
-                unpack(u, z, dims)
+    else:
+        xcopy(primalstart['x'], x)
+        blas.copy(primalstart['s'], s)
 
-            return solve_kkt
-
-
-    elif kktsolver == 'ldl':   
-
-        # LDL factorization of 
-        #
-        #         [ 0         A'   G'*W^{-1} ]
-        #     K = [ A         0    0         ].
-        #         [ W^{-T}*G  0   -I         ]
-
-        ldK = n + p + cdim_pckd 
-        K = matrix(0.0, (ldK, ldK))
-        ipiv = matrix(0, (ldK, 1))
-        u = matrix(0.0, (ldK, 1))
-        g = matrix(0.0, (G.size[0], 1))
-
-        def kktsolver(W):
-
-            blas.scal(0.0, K)
-            K[(ldK+1)*(p+n) :: ldK+1]  = -1.0
-            K[n:n+p, :n] = A
-            for k in xrange(n):
-                g[:] = G[:,k]
-                scale(g, W, trans = 'T', inverse = 'I')
-                pack(g, K, dims, offsety = k*ldK+n+p)
-            lapack.sytrf(K, ipiv)
-
-            def solve_kkt(x, y, z):
-
-                # Solve
-                #
-                #         [ x   ]   [ bx        ]
-                #     K * [ y   ] = [ by        ]
-                #         [ W*z ]   [ W^{-T}*bz ] 
-                #
-                # and return x, y, W*z.
-                #
-                # On entry, x, y, z contain bx, by, bz.  On exit, they 
-                # contain the solution.
-
-                blas.copy(x, u)
-                blas.copy(y, u, offsety=n)
-                scale(z, W, trans='T', inverse='I') 
-                pack(z, u, dims, offsety = n+p)
-                lapack.sytrs(K, ipiv, u)
-                blas.copy(u, x, n=n)
-                blas.copy(u, y, offsetx = n, n = p)
-                unpack(u, z, dims, offsetx = n+p)
-	    
-            return solve_kkt
-
-
-    elif kktsolver == 'ldl2':
-
-        # LDL or Cholesky factorization of 
-        #
-        #         [ G' * W^{-1} * W^{-T} * G   A' ]
-        #     K = [                               ]
-        #         [ A                          0  ].
-
-        ldK = n + p 
-        K = matrix(0.0, (ldK, ldK))
-        if p: ipiv = matrix(0, (ldK, 1))
-        g = matrix(0.0, (G.size[0], 1))
-        u = matrix(0.0, (ldK, 1))
-
-        def kktsolver(W):
-
-            blas.scal(0.0, K)
-            K[n:,:n] = A
-            for k in xrange(n):
-                g[:] = G[:,k]
-                scale(g, W, trans = 'T', inverse = 'I')
-                scale(g, W, inverse = 'I')
-                sgemv(G, g, K, dims, trans = 'T', beta = 1.0, n = n-k, 
-                    offsetA = G.size[0]*k, offsety = (ldK + 1)*k)
-            if p: lapack.sytrf(K, ipiv)
-            else: lapack.potrf(K)
-
-            def solve_kkt(x, y, z):
-
-                # Solve
-                #
-                #         [ x   ]   [ bx + G' * W^{-1} * W^{-T} * bz ]
-                #     K * [     ] = [                                ] 
-                #         [ y   ]   [ by                             ]
-                #
-                # and return x, y, W*z = W^{-T} * (G*x - bz).
-
-                blas.copy(z, g)
-                scale(g, W, trans = 'T', inverse = 'I')
-                scale(g, W, inverse = 'I')
-                sgemv(G, g, u, dims, trans = 'T')
-                blas.axpy(x, u)
-                blas.copy(y, u, offsety = n)
-                if p: lapack.sytrs(K, ipiv, u)
-                else: lapack.potrs(K, u)
-                blas.copy(u, x, n = n)
-                blas.copy(u, y, offsetx = n, n = p)
-                sgemv(G, x, z, dims, alpha = 1.0, beta = -1.0)
-                scale(z, W, trans = 'T', inverse = 'I')
-	    
-            return solve_kkt
-
-    elif kktsolver == 'chol' and (dims['q'] or dims['s']):
-
-        # Dense Cholesky factorizations of 
-        #
-        #     S = G' * W^{-1} * W^{-T} * G  +  A' * A 
-        #     K = A * S^{-1} * A'.
-
-        S, K = matrix(0.0, (n,n)), matrix(0.0, (p,p))
-        g = matrix(0.0, (G.size[0], 1))
-
-        def kktsolver(W):
-
-            # S = G' * W^{-1} * W^{-T} * G  +  A' * A 
-            blas.scal(0.0, S)
-            for k in xrange(n):
-                g[:] = G[:,k]
-                scale(g, W, trans = 'T', inverse = 'I')
-                scale(g, W, inverse = 'I')
-                sgemv(G, g, S, dims, trans = 'T', beta = 1.0, n = n-k, 
-                    offsetA = G.size[0]*k, offsety = (n+1)*k)
-            base.syrk(A, S, trans='T', beta=1.0)
-            lapack.potrf(S) 
-
-            # Asct := L^{-1}*A'.  Factor K = Asct'*Asct.
-            if type(A) is matrix:
-                Asct = A.T
-            else:
-                Asct = matrix(A.T)
-            blas.trsm(S, Asct)
-            blas.syrk(Asct, K, trans='T')
-            lapack.potrf(K)
-
-            def solve_kkt(x, y, z):
-        
-                # Solve for y, x:
-                #
-                #     K * y = A * S^{-1} * ( bx + G'*W^{-1}*W^{-T}*bz + 
-                #         A'*by ) - by
-                #     S*x = bx + G'*W^{-1}*W^{-T}*bz + A'*by - A'*y.
-                #     
-                #     Wz = W^{-T} * ( G*x - bz ).
-
-                # x := L^{-1} * ( bx + G'*W^{-1}*W^{-T}*bz + A' * by ).
-                blas.copy(z, g)
-                scale(g, W, trans = 'T', inverse = 'I')
-                scale(g, W, inverse = 'I')
-                sgemv(G, g, x, dims, beta = 1.0, trans = 'T')
-                base.gemv(A, y, x, trans='T', beta=1.0)
-                blas.trsv(S, x)
-
-                # y := K^{-1} * (A * L^{-T} * x - by)
-                base.gemv(Asct, x, y, trans = 'T', beta = -1.0)
-                lapack.potrs(K, y)
-
-                # x := L^{-T} * (x - Asc'*y)
-                base.gemv(Asct, y, x, alpha = -1.0, beta = 1.0)
-                blas.trsv(S, x, trans='T')
-
-                #     Wz = W^{-T} * ( G*x - bz ).
-                sgemv(G, x, z, dims, alpha = 1.0, beta = -1.0)
-                scale(z, W, trans = 'T', inverse = 'I')
-
-            return solve_kkt 
-
-
-    elif kktsolver == 'chol' and not dims['q'] and not dims['s']:
-
-        # This is the default kktsolver for LPs.  It exploits sparsity to 
-        # some extent.
-        #
-        # Factor
-        #
-        #     S = G' * W^{-2} * G  where W = diag( W['di'] )^{-1} 
-        #     K = A * S^{-1} * A',
-        #
-        # using dense (L*L') or sparse (P'*L*L'*P) Cholesky factorizations.
-        # If S turns out to be singular in the first factorization, then 
-        # switch to factoring 
-        # 
-        #     S = G' * W^{-2} * G  +  A' * A 
-        #     K = A * S^{-1} * A'.
-
-        F = {'firstcall': True, 'singular': False}
-        if type(G) is matrix: 
-            Gs = matrix(0.0, G.size) 
-            F['S'] = matrix(0.0, (n,n))
-            K = matrix(0.0, (p,p))
-        else:
-            Gs = spmatrix(0.0, G.I, G.J, G.size) 
-            F['S'] = spmatrix([], [], [], (n,n), 'd')
-            F['Sf'] = None
-            if type(A) is matrix:
-                K = matrix(0.0, (p,p))
-            else:
-                K = spmatrix([], [], [], (p,p), 'd')
-        m = dims['l']
-
-        def kktsolver(W):
-
-            # Gs = W^{-1} * G
-            base.gemm( spmatrix(W['di'], range(m), range(m)), G, Gs, 
-                partial = True)
-
-            if F['firstcall']:
-                F['firstcall'] = False
-                base.syrk(Gs, F['S'], trans = 'T') 
-                try:
-                    if type(F['S']) is matrix: 
-                        lapack.potrf(F['S']) 
-                    else:
-                        F['Sf'] = cholmod.symbolic(F['S'])
-                        cholmod.numeric(F['S'], F['Sf'])
-                except ArithmeticError:
-                    F['singular'] = True 
-                    if type(A) is matrix and type(F['S']) is spmatrix:
-                        F['S'] = matrix(0.0, (n,n))
-                    base.syrk(Gs, F['S'], trans = 'T') 
-                    base.syrk(A, F['S'], trans = 'T', beta = 1.0) 
-                    if type(F['S']) is matrix: 
-                        lapack.potrf(F['S']) 
-                    else:
-                        F['Sf'] = cholmod.symbolic(F['S'])
-                        cholmod.numeric(F['S'], F['Sf'])
-
-            else:
-                base.syrk(Gs, F['S'], trans = 'T', partial = True)
-                if F['singular']:
-                    base.syrk(A, F['S'], trans = 'T', beta = 1.0, partial 
-                        = True) 
-                if type(F['S']) is matrix: 
-                    lapack.potrf(F['S']) 
-                else:
-                    cholmod.numeric(F['S'], F['Sf'])
-
-            if type(F['S']) is matrix: 
-                # Asct := L^{-1}*A'.  Factor K = Asct'*Asct.
-                if type(A) is matrix: 
-                    Asct = A.T
-                else: 
-                    Asct = matrix(A.T)
-                blas.trsm(F['S'], Asct)
-                blas.syrk(Asct, K, trans='T')
-                lapack.potrf(K)
-
-            else:
-                # Asct := L^{-1}*P*A'.  Factor K = Asct'*Asct.
-                if type(A) is matrix:
-                    Asct = A.T
-                    cholmod.solve(F['Sf'], Asct, sys = 7)
-                    cholmod.solve(F['Sf'], Asct, sys = 4)
-                    blas.syrk(Asct, K, trans = 'T')
-                    lapack.potrf(K) 
-                else:
-                    Asct = cholmod.spsolve(F['Sf'], A.T, sys = 7)
-                    Asct = cholmod.spsolve(F['Sf'], Asct, sys = 4)
-                    base.syrk(Asct, K, trans = 'T')
-                    Kf = cholmod.symbolic(K)
-                    cholmod.numeric(K, Kf)
-
-            def solve_kkt(x, y, z):
-
-                # If not F['singular']:
-                #
-                #     K*y = A * S^{-1} * ( bx + G'*W^{-2}*bz ) - by
-                #     S*x = bx + G'*W^{-2}*bz - A'*y
-                #     W*z = W^{-1} * ( G*x - bz ).
-                #    
-                # If F['singular']:
-                #
-                #     K*y = A * S^{-1} * ( bx + G'*W^{-1}*W^{-T}*bz + 
-                #         A'*by ) - by
-                #     S*x = bx + G'*W^{-1}*W^{-T}*bz + A'*by - A'*y.
-                #     W*z = W^{-T} * ( G*x - bz ).
-
-
-                # z := W^{-1} * z = W^{-1} * bz
-                blas.tbmv(W['di'], z, n = m, k = 0, ldA = 1)
-
-                # If not F['singular']:
-                #     x := L^{-1} * P * (x + Gs'*z)
-                #        = L^{-1} * P * (x + G'*W^{-2}*bz)
-                #
-                # If F['singular']:
-                #     x := L^{-1} * P * (x + Gs'*z + A'*y))
-                #        = L^{-1} * P * (x + G'*W^{-2}*bz + A'*y)
-
-                base.gemv(Gs, z, x, trans = 'T', beta = 1.0)
-                if F['singular']:
-                    base.gemv(A, y, x, trans = 'T', beta = 1.0)
-                if type(F['S']) is matrix:
-                    blas.trsv(F['S'], x)
-                else:
-                    cholmod.solve(F['Sf'], x, sys = 7)
-                    cholmod.solve(F['Sf'], x, sys = 4)
-
-                # y := K^{-1} * (Asc*x - y)
-                #    = K^{-1} * (A * S^{-1} * (bx + G'*W^{-2}*bz) - by)  
-                #      (if not F['singular'])
-                #    = K^{-1} * (A * S^{-1} * (bx + G'*W^{-2}*bz + A'*by) 
-                #      - by)  
-                #      (if F['singular']).
-
-                base.gemv(Asct, x, y, trans = 'T', beta = -1.0)
-                if type(K) is matrix:
-                    lapack.potrs(K, y)
-                else:
-                    cholmod.solve(Kf, y)
-
-                # x := P' * L^{-T} * (x - Asc'*y)
-                #    = S^{-1} * (bx + G'*W^{-2}*bz - A'*y) 
-                #      (if not F['singular'])  
-                #    = S^{-1} * (bx + G'*W^{-2}*bz + A'*by - A'*y) 
-                #      (if F['singular'])
-
-                base.gemv(Asct, y, x, alpha = -1.0, beta = 1.0)
-                if type(F['S']) is matrix:
-                    blas.trsv(F['S'], x, trans='T')
-                else:
-                    cholmod.solve(F['Sf'], x, sys = 5)
-                    cholmod.solve(F['Sf'], x, sys = 8)
-
-                # W*z := Gs*x - z = W^{-1} * (G*x - bz)
-                base.gemv(Gs, x, z, beta = -1.0)
-
-            return solve_kkt
-
-    else: 
-        # User provided kktsolver.
-
-        pass
-
-    x = xnewcopy(c);  xscal(0.0, x)
-    y = ynewcopy(b);  yscal(0.0, y)
-    s, z = matrix(0.0, (cdim,1)), matrix(0.0, (cdim,1))
-
-    resx, hresx = xnewcopy(c), xnewcopy(c)
-    resy, hresy = ynewcopy(b), ynewcopy(b)
-    resz, hresz = matrix(0.0, (cdim,1)), matrix(0.0, (cdim,1))
-    dx, dy = xnewcopy(c), ynewcopy(b)
-    ds, dz = matrix(0.0, (cdim,1)), matrix(0.0, (cdim,1))
-    dx1, dy1 = xnewcopy(c), ynewcopy(b)
-    ds1, dz1 = matrix(0.0, (cdim,1)), matrix(0.0, (cdim,1))
-    dkappa, dtau = matrix(0.0, (1,1)), matrix(0.0, (1,1))
-    dx2, dy2 = xnewcopy(c), ynewcopy(b)
-    ds2, dz2 = matrix(0.0, (cdim,1)), matrix(0.0, (cdim,1))
-    dkappa2, dtau2 = matrix(0.0, (1,1)), matrix(0.0, (1,1))
-    th = matrix(0.0, (cdim,1))
-    sigs = matrix(0.0, (sum(dims['s']), 1))
-    sigz = matrix(0.0, (sum(dims['s']), 1))
-    lmbda = matrix(0.0, (cdim_diag + 1, 1))
-    lmbdasq = matrix(0.0, (cdim_diag + 1, 1))
-    work = matrix(0.0, (max( [0] + dims['s'] )**2, 1))
-
-
-    # Select initial points.
-
-    if primalstart is None or dualstart is None:
-
-        # Factor
-        #
-        #     [ 0   A'  G' ] 
-        #     [ A   0   0  ].
-        #     [ G   0  -I  ]
-    
-        W = {}
-        W['d'] = matrix(1.0, (dims['l'], 1)) 
-        W['di'] = matrix(1.0, (dims['l'], 1)) 
-        W['v'] = [ matrix(0.0, (m,1)) for m in dims['q'] ]
-        W['beta'] = Nq * [ 1.0 ] 
-        for v in W['v']: v[0] = 1.0
-        W['r'] = [ matrix(0.0, (m,m)) for m in dims['s'] ]
-        W['rti'] = [ matrix(0.0, (m,m)) for m in dims['s'] ]
-        for r in W['r']: r[::r.size[0]+1 ] = 1.0
-        for rti in W['rti']: rti[::rti.size[0]+1 ] = 1.0
-        try: f = kktsolver(W)
-        except ArithmeticError:  
-            raise ValueError, "Rank(A) < p or Rank([G; A]) < n"
-
-    if primalstart is None:
-
-	# minimize    || G * x - h ||^2
-	# subject to  A * x = b
-	#
-	# by solving
-	#
-	#     [ 0   A'  G' ]   [ x  ]   [ 0 ]
-	#     [ A   0   0  ] * [ dy ] = [ b ].
-	#     [ G   0  -I  ]   [ -s ]   [ h ]
-
-        xscal(0.0, x)
-        ycopy(b, dy)  
-        blas.copy(h, s)
-        try: f(x, dy, s) 
-        except ArithmeticError:  
-            raise ValueError, "Rank(A) < p or Rank([G; A]) < n"
-        blas.scal(-1.0, s)  
-
-    else:
-        xcopy(primalstart['x'], x)
-        blas.copy(primalstart['s'], s)
-
-    # ts = min{ t | s + t*e >= 0 }
-    ts = max_step(s, dims)
-    if ts >= 0 and primalstart: 
-        raise ValueError, "initial s is not positive"
+    # ts = min{ t | s + t*e >= 0 }
+    ts = misc.max_step(s, dims)
+    if ts >= 0 and primalstart: 
+        raise ValueError, "initial s is not positive"
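+
+    # Illustration (comment only, not part of the algorithm): for an 'l'
+    # block, max_step returns -min(s), so s = (2.0, 3.0) gives ts = -2.0
+    # and is accepted, while s = (2.0, -1.0) gives ts = 1.0 >= 0 and a
+    # user-supplied primalstart with such an s is rejected here.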
 
 
     if dualstart is None:
@@ -1448,7 +584,7 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
         blas.copy(dualstart['z'], z)
 
     # tz = min{ t | z + t*e >= 0 }
-    tz = max_step(z, dims)
+    tz = misc.max_step(z, dims)
     if tz >= 0 and dualstart: 
         raise ValueError, "initial z is not positive"
 
@@ -1458,14 +594,14 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
     trs = sum(s[:dims['l']]) + sum(s[indq[:-1]]) + sum([ 
         sum(s[inds[k] : inds[k+1] : dims['s'][k]+1]) for k in 
         xrange(len(dims['s'])) ])
-    nrms = snrm2(s, dims)
-    nrmz = snrm2(z, dims)
+    nrms = misc.snrm2(s, dims)
+    nrmz = misc.snrm2(z, dims)
 
     if primalstart is None and dualstart is None: 
 
-        gap = sdot(s, z, dims) 
+        gap = misc.sdot(s, z, dims) 
         pcost = xdot(c,x)
-        dcost = -ydot(b,y) - sdot(h,z,dims) 
+        dcost = -ydot(b,y) - misc.sdot(h, z, dims) 
 
         if ts <= 0 and tz <= 0 and (gap <= ABSTOL or ( pcost < 0.0 and 
             gap / -pcost <= RELTOL ) or (dcost > 0.0 and gap / dcost 
@@ -1475,8 +611,8 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
             # optimal.  
             ind = dims['l'] + sum(dims['q'])
             for m in dims['s']:
-                symm(s, m, ind)
-                symm(z, m, ind)
+                misc.symm(s, m, ind)
+                misc.symm(z, m, ind)
                 ind += m**2
             return {'status': 'optimal', 'x': x, 'y': y, 's': s, 'z': z}
 
@@ -1520,119 +656,79 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
                 z[ind : ind+m*m : m+1] += a
                 ind += m**2
 
-
-    if 0:  # random starting points for debugging
-
-        from cvxopt import random
-        n = len(c)
-        x = random.normal(n,1)
-        y = random.normal(p,1)
-	s, z = matrix(0.0, (cdim,1)), matrix(0.0, (cdim,1))
-	s[:dims['l']] = random.uniform(dims['l'], 1) 
-	z[:dims['l']] = random.uniform(dims['l'], 1) 
-        for k in xrange(Nq):
-            mk = dims['q'][k]
-            sk = random.normal(mk, 1)
-            sk[0] = blas.nrm2(sk[1:]) + random.uniform(1)
-            s[indq[k]:indq[k+1]] = sk
-            zk = random.normal(mk, 1)
-            zk[0] = blas.nrm2(zk[1:]) + random.uniform(1)
-            z[indq[k]:indq[k+1]] = zk
-        for k in xrange(Ns):
-            mk = dims['s'][k]
-            sk = random.normal(mk, mk)
-            sk = sk*sk.T
-            s[inds[k]:inds[k+1]] = sk[:]
-            zk = random.normal(mk, mk)
-            zk = zk*zk.T
-            z[inds[k]:inds[k+1]] = zk[:]
-
-    if 0:  # starting point = e for debugging
-
-        from cvxopt import random
-        n = len(c)
-        x = matrix(0.0, (n,1))
-        y = matrix(0.0, (p,1))
-	s, z = matrix(0.0, (cdim,1)), matrix(0.0, (cdim,1))
-	s[:dims['l']] = 1.0
-        s[indq[:-1]] = 1.0
-        ind = dims['l'] + sum(dims['q'])
-        for m in dims['s']:
-            s[ind : ind+m*m : m+1] = 1.0
-            ind += m**2
-	z[:dims['l']] = 1.0
-        z[indq[:-1]] = 1.0
-        ind = dims['l'] + sum(dims['q'])
-        for m in dims['s']:
-            z[ind : ind+m*m : m+1] = 1.0
-            ind += m**2
-
     tau, kappa = 1.0, 1.0
-    gap = sdot(s, z, dims) 
-
-    for iters in xrange(MAXITERS):
-
-        # hresx = -A'(y) - G'(z) 
-        Af(y, hresx, alpha = -1.0, trans = 'T') 
-        Gf(z, hresx, alpha = -1.0, beta = 1.0, trans = 'T') 
-
-        # resx = hresx - c*tau 
-        #      = -A'(y) - G'(z) - c*tau
-        xcopy(hresx, resx)
-        xaxpy(c, resx, alpha = -tau)
-
-        # hresy = A(x)  
-        Af(x, hresy)
 
-        # resy = hresy - b*tau 
-        #      = A(x) - b*tau
-        ycopy(hresy, resy)
-        yaxpy(b, resy, alpha = -tau)
+    rx, hrx = xnewcopy(c), xnewcopy(c)
+    ry, hry = ynewcopy(b), ynewcopy(b)
+    rz, hrz = matrix(0.0, (cdim,1)), matrix(0.0, (cdim,1))
+    sigs = matrix(0.0, (sum(dims['s']), 1))
+    sigz = matrix(0.0, (sum(dims['s']), 1))
+    lmbda = matrix(0.0, (cdim_diag + 1, 1))
+    lmbdasq = matrix(0.0, (cdim_diag + 1, 1)) 
 
-        # hresz = s + G(x)  
-        Gf(x, hresz)
-        blas.axpy(s, hresz)
+    resx0 = max(1.0, math.sqrt(xdot(c,c)))
+    resy0 = max(1.0, math.sqrt(ydot(b,b)))
+    resz0 = max(1.0, misc.snrm2(h, dims))
+    gap = misc.sdot(s, z, dims) 
 
-        # resz = hresz - h*tau 
-        #      = s + G(x) - h*tau
-        blas.scal(0, resz)
-        blas.axpy(hresz, resz)
-        blas.axpy(h, resz, alpha = -tau)
+    for iters in xrange(MAXITERS):
 
-        # rest = kappa + <c,x> + <b,y> + <h,z> 
-        cx, by, hz = xdot(c,x), ydot(b,y), sdot(h, z, dims) 
-        rest = kappa + cx + by + hz 
+        # hrx = -A'*y - G'*z 
+        Af(y, hrx, alpha = -1.0, trans = 'T') 
+        Gf(z, hrx, alpha = -1.0, beta = 1.0, trans = 'T') 
+        hresx = math.sqrt( xdot(hrx, hrx) ) 
+
+        # rx = hrx - c*tau 
+        #    = -A'*y - G'*z - c*tau
+        xcopy(hrx, rx)
+        xaxpy(c, rx, alpha = -tau)
+        resx = math.sqrt( xdot(rx, rx) ) / tau
+
+        # hry = A*x  
+        Af(x, hry)
+        hresy = math.sqrt( ydot(hry, hry) )
+
+        # ry = hry - b*tau 
+        #    = A*x - b*tau
+        ycopy(hry, ry)
+        yaxpy(b, ry, alpha = -tau)
+        resy = math.sqrt( ydot(ry, ry) ) / tau
+
+        # hrz = s + G*x  
+        Gf(x, hrz)
+        blas.axpy(s, hrz)
+        hresz = misc.snrm2(hrz, dims) 
+
+        # rz = hrz - h*tau 
+        #    = s + G*x - h*tau
+        blas.scal(0, rz)
+        blas.axpy(hrz, rz)
+        blas.axpy(h, rz, alpha = -tau)
+        resz = misc.snrm2(rz, dims) / tau 
+
+        # rt = kappa + c'*x + b'*y + h'*z 
+        cx, by, hz = xdot(c,x), ydot(b,y), misc.sdot(h, z, dims) 
+        rt = kappa + cx + by + hz 
 
         # stopping criteria
         pcost, dcost = cx/tau, -(by + hz) / tau        
-        nrmhresx = math.sqrt( xdot(hresx, hresx) ) 
-        nrmresx = math.sqrt( xdot(resx, resx) ) / tau
-        nrmhresy = math.sqrt( ydot(hresy, hresy) )
-        nrmresy = math.sqrt( ydot(resy, resy) ) / tau
-        nrmhresz = snrm2(hresz, dims) 
-        nrmresz = snrm2(resz, dims) / tau 
         if pcost < 0.0:
             relgap = gap / -pcost
         elif dcost > 0.0:
             relgap = gap / dcost
         else: 
             relgap = None
-
-        if iters == 0: 
-            nrmresx0 = max(1.0, math.sqrt(xdot(c,c)))
-            nrmresy0 = max(1.0, math.sqrt(ydot(b,b)))
-            nrmresz0 = max(1.0, snrm2(h, dims))
+        pres = max(resy/resy0, resz/resz0)
+        dres = resx/resx0
 
         if show_progress:
             if iters==0:
                 print "% 10s% 12s% 10s% 8s% 7s % 5s" %("pcost", "dcost",
                     "gap", "pres", "dres", "k/t")
             print "%2d: % 8.4e % 8.4e % 4.0e% 7.0e% 7.0e% 7.0e" \
-                %(iters, pcost, dcost, gap, max(nrmresz/nrmresz0,
-                nrmresy/nrmresy0), nrmresx/nrmresx0, kappa/tau)
+                %(iters, pcost, dcost, gap, pres, dres, kappa/tau)
 
-        if max(nrmresz/nrmresz0, nrmresy/nrmresy0) <= FEASTOL and \
-            nrmresx/nrmresx0 <= FEASTOL and ( gap <= ABSTOL or 
+        if pres <= FEASTOL and dres <= FEASTOL and ( gap <= ABSTOL or 
             (relgap is not None and relgap <= RELTOL) ):
             xscal(1.0/tau, x)
             yscal(1.0/tau, y)
@@ -1640,45 +736,41 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
             blas.scal(1.0/tau, z)
             ind = dims['l'] + sum(dims['q'])
             for m in dims['s']:
-                symm(s, m, ind)
-                symm(z, m, ind)
+                misc.symm(s, m, ind)
+                misc.symm(z, m, ind)
                 ind += m**2
             return {'status': 'optimal', 'x': x, 'y': y, 's': s, 'z': z}
 
-        elif hz + by < 0.0 and nrmhresx/nrmresx0 / (-hz - by) <= FEASTOL:
+        elif hz + by < 0.0 and hresx/resx0 / (-hz - by) <= FEASTOL:
             yscal(1.0/(-hz - by), y)
             blas.scal(1.0/(-hz - by), z)
             ind = dims['l'] + sum(dims['q'])
             for m in dims['s']:
-                symm(z, m, ind)
+                misc.symm(z, m, ind)
                 ind += m**2
             return {'status': 'primal infeasible', 'x': None, 's': None, 
                 'y': y, 'z': z }
 
-        elif cx < 0.0 and max(nrmhresy/nrmresy0, nrmhresz/nrmresz0) \
-            / (-cx) <= FEASTOL:
+        elif cx < 0.0 and max(hresy/resy0, hresz/resz0) / (-cx) <= FEASTOL:
             xscal(1.0/(-cx), x)
             blas.scal(1.0/(-cx), s)
             ind = dims['l'] + sum(dims['q'])
             for m in dims['s']:
-                symm(s, m, ind)
+                misc.symm(s, m, ind)
                 ind += m**2
             return {'status': 'dual infeasible', 'x': x, 's': s, 'y': None,
                 'z': None}
 
 
-        if iters == 0:
-
-            # Compute initial scaling W:
-            # 
-            #     W * z = W^{-T} * s = lambda
-            #     dg * tau = 1/dg * kappa = lambdag.
+        # Compute initial scaling W:
+        # 
+        #     W * z = W^{-T} * s = lambda
+        #     dg * tau = 1/dg * kappa = lambdag.
 
-            W = {}
+        if iters == 0:
 
+            W = misc.compute_scaling(s, z, lmbda, dims, mnl = 0)
 
-            # For kappa, tau block: 
-            #
             #     dg = sqrt( kappa / tau )
             #     dgi = sqrt( tau / kappa )
             #     lambda_g = sqrt( tau * kappa )  
@@ -1689,703 +781,1216 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
             dgi = math.sqrt( tau / kappa )
             lmbda[-1] = math.sqrt( tau * kappa )
 
+        # lmbdasq := lmbda o lmbda 
+        misc.ssqr(lmbdasq, lmbda, dims)
+        lmbdasq[-1] = lmbda[-1]**2
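+
+        # Worked example (comment only): for a single 'l' component with
+        # s_k = 4.0 and z_k = 1.0, the Nesterov-Todd scaling gives
+        # W['d'][k] = sqrt(s_k/z_k) = 2.0 and lambda_k = sqrt(s_k*z_k) = 2.0,
+        # so that s_k/W['d'][k] = W['d'][k]*z_k = lambda_k, i.e.
+        # W^{-T}*s = W*z = lambda on this block.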
 
-            # For the 'l' block: 
-            #
-            #     W['d'] = sqrt( sk ./ zk )
-            #     W['di'] = sqrt( zk ./ sk )
-            #     lambdak = sqrt( sk .* zk )
-            #
-            # where sk and zk are the first dims['l'] entries of s and z.
-            # lambda_k is stored in the first dims['l'] positions of lmbda.
-             
-            m = dims['l']
-            W['d'] = base.sqrt( base.div( s[:m], z[:m] ))
-            W['di'] = W['d']**-1
-            lmbda[:m] = base.sqrt( base.mul( s[:m], z[:m] ) ) 
 
+        # f3(x, y, z) solves    
+        #
+        #     [ 0  A'  G'   ] [ ux        ]   [ bx ]
+        #     [ A  0   0    ] [ uy        ] = [ by ].
+        #     [ G  0  -W'*W ] [ W^{-1}*uz ]   [ bz ]
+        #
+        # On entry, x, y, z contain bx, by, bz.
+        # On exit, they contain ux, uy, uz.
+
+        try: f3 = kktsolver(W)
+        except ArithmeticError:
+            if iters == 0 and primalstart and dualstart: 
+                raise ValueError, "Rank(A) < p or Rank([G; A]) < n"
+            else:
+                raise ArithmeticError, "singular KKT matrix"
+
+
+        # Solve
+        #
+        #     [ 0   A'  G'    ] [ x1        ]          [ c ]
+        #     [-A   0   0     ]*[ y1        ] = -dgi * [ b ].
+        #     [-G   0   W'*W  ] [ W^{-1}*z1 ]          [ h ]
+         
+        if iters == 0:
+            x1, y1 = xnewcopy(c), ynewcopy(b)
+            z1 = matrix(0.0, (cdim,1))
+        xcopy(c, x1);  xscal(-1, x1)
+        ycopy(b, y1)
+        blas.copy(h, z1)
+        try: f3(x1, y1, z1)
+        except ArithmeticError:
+            if iters == 0 and primalstart and dualstart: 
+                raise ValueError, "Rank(A) < p or Rank([G; A]) < n"
+            else:
+                raise ArithmeticError, "singular KKT matrix"
+        xscal(dgi, x1)
+        yscal(dgi, y1)
+        blas.scal(dgi, z1)
+
+
+        # f6_no_ir(x, y, z, tau, s, kappa) solves
+        #
+        #     [ 0         ]   [  0   A'  G'  c ] [ ux        ]    [ bx   ]
+        #     [ 0         ]   [ -A   0   0   b ] [ uy        ]    [ by   ]
+        #     [ W'*us     ] - [ -G   0   0   h ] [ W^{-1}*uz ] = -[ bz   ]
+        #     [ dg*ukappa ]   [ -c' -b' -h'  0 ] [ utau/dg   ]    [ btau ]
+        # 
+        #     lmbda o (uz + us) = -bs
+        #     lmbdag * (utau + ukappa) = -bkappa.
+        #
+        # On entry, x, y, z, tau, s, kappa contain bx, by, bz, btau, 
+        # bkappa.  On exit, they contain ux, uy, uz, utau, ukappa.
+
+        # th = W^{-T} * h
+        if iters == 0: th = matrix(0.0, (cdim,1))
+        blas.copy(h, th)
+        misc.scale(th, W, trans = 'T', inverse = 'I')
+
+        def f6_no_ir(x, y, z, tau, s, kappa):
 
-            # For the 'q' blocks, compute lists 'v', 'beta' of length Nq. 
+            # Solve 
             #
-            # The vector v[k] has unit hyperbolic norm: 
-            # 
-            #     (sqrt( v[k]' * J * v[k] ) = 1 with J = [1, 0; 0, -I]).
-            # 
-            # beta[k] is a positive scalar.
+            #     [  0   A'  G'    0   ] [ ux        ]   
+            #     [ -A   0   0     b   ] [ uy        ]  
+            #     [ -G   0   W'*W  h   ] [ W^{-1}*uz ] 
+            #     [ -c' -b' -h'    k/t ] [ utau/dg   ]
             #
-            # The hyperbolic Householder matrix H = 2*v[k]*v[k]' - J
-            # defined by v[k] satisfies 
-            # 
-            #     (beta[k] * H) * zk  = (beta[k] * H) \ sk = lambda_k
+            #           [ bx                    ]
+            #           [ by                    ]
+            #         = [ bz - W'*(lmbda o\ bs) ]
+            #           [ btau - bkappa/tau     ]
             #
-            # where sk = s[indq[k]:indq[k+1]], zk = z[indq[k]:indq[k+1]].
+            #     us = -lmbda o\ bs - uz
+            #     ukappa = -bkappa/lmbdag - utau.
+
+
+            # First solve 
             #
-            # lambda_k is stored in lmbda[indq[k]:indq[k+1]].
-           
-            ind = dims['l']
-            W['v'] = [ matrix(0.0, (k,1)) for k in dims['q'] ]
-            W['beta'] = Nq * [ 0.0 ] 
+            #     [ 0  A' G'   ] [ ux        ]   [  bx                    ]
+            #     [ A  0  0    ] [ uy        ] = [ -by                    ]
+            #     [ G  0 -W'*W ] [ W^{-1}*uz ]   [ -bz + W'*(lmbda o\ bs) ]
+
+            # y := -y = -by
+            yscal(-1.0, y) 
 
-            for k in xrange(Nq):
-                m = dims['q'][k]
-                v = W['v'][k]
+            # s := -lmbda o\ s = -lmbda o\ bs
+            misc.sinv(s, lmbda, dims)
+            blas.scal(-1.0, s)
 
-                # a = sqrt( sk' * J * sk )  where J = [1, 0; 0, -I]
-                aa = jnrm2(s, offset = ind, n = m)
+            # z := -(z + W'*s) = -bz + W'*(lambda o\ bs)
+            blas.copy(s, ws3)  
+            misc.scale(ws3, W, trans = 'T')
+            blas.axpy(ws3, z)
+            blas.scal(-1.0, z)
 
-                # b = sqrt( zk' * J * zk )
-                bb = jnrm2(z, offset = ind, n = m) 
+            # Solve system.
+            f3(x, y, z)
 
-                # beta[k] = ( a / b )**1/2
-                W['beta'][k] = math.sqrt( aa / bb )
+            # Combine with solution of 
+            #
+            #     [ 0   A'  G'    ] [ x1         ]          [ c ]
+            #     [-A   0   0     ] [ y1         ] = -dgi * [ b ]
+            #     [-G   0   W'*W  ] [ W^{-1}*dzl ]          [ h ]
+            # 
+            # to satisfy
+            #
+            #     -c'*x - b'*y - h'*W^{-1}*z + dg*tau = btau - bkappa/tau.
 
-                # c = sqrt( (sk/a)' * (zk/b) + 1 ) / sqrt(2)    
-                cc = math.sqrt( ( blas.dot(s, z, n = m, offsetx = ind, 
-                    offsety = ind) / aa / bb + 1.0 ) / 2.0 )
+            # kappa[0] := -kappa[0] / lmbda[-1] = -bkappa / lmbdag
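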
+            kappa[0] = -kappa[0] / lmbda[-1]
 
-                # vk = 1/(2*c) * ( (sk/a) + J * (zk/b) )
-                blas.copy(z, v, offsetx = ind, n = m)
-                blas.scal(-1.0/bb, v)
-                v[0] *= -1.0 
-                blas.axpy(s, v, 1.0/aa, offsetx = ind, n = m)
-                blas.scal(1.0/2.0/cc, v)
+            # tau[0] = tau[0] + kappa[0] / dgi = btau[0] - bkappa / tau
+            tau[0] += kappa[0] / dgi
+ 
+            tau[0] = dgi * ( tau[0] + xdot(c,x) + ydot(b,y) + 
+                misc.sdot(th, z, dims) ) / (1.0 + misc.sdot(z1, z1, dims))
+            xaxpy(x1, x, alpha = tau[0])
+            yaxpy(y1, y, alpha = tau[0])
+            blas.axpy(z1, z, alpha = tau[0])
 
-                # v[k] = 1/sqrt(2*(vk0 + 1)) * ( vk + e ),  e = [1; 0]
-                v[0] += 1.0
-                blas.scal(1.0/math.sqrt(2.0 * v[0]), v)
-            
+            # s := s - z = - lambda o\ bs - z 
+            blas.axpy(z, s, alpha = -1)
 
-                # To get the scaled variable lambda_k
-                # 
-                #     d =  sk0/a + zk0/b + 2*c
-                #     lambda_k = [ c;  (c + zk0/b)/d * sk1/a + 
-                #         (c + sk0/a)/d * zk1/b ]
-                #     lambda_k *= sqrt(a * b)
-
-                lmbda[ind] = cc
-                dd = 2*cc + s[ind]/aa + z[ind]/bb
-                blas.copy(s, lmbda, offsetx = ind+1, offsety = ind+1,
-                    n = m-1) 
-                blas.scal((cc + z[ind]/bb)/dd/aa, lmbda, n = m-1, offset 
-                    = ind+1)
-                blas.axpy(z, lmbda, (cc + s[ind]/aa)/dd/bb, n = m-1, 
-                    offsetx = ind+1, offsety = ind+1)
-                blas.scal(math.sqrt(aa*bb), lmbda, offset = ind, n = m)
+            kappa[0] -= tau[0]
 
-                ind += m
 
+        # f6(x, y, z, tau, s, kappa) solves the same system as f6_no_ir, 
+        # but applies iterative refinement.
 
-            # For the 's' blocks: compute two lists 'r' and 'rti' of 
-            # length Ns.
-            #
-            #     r[k]' * sk^{-1} * r[k] = diag(lambda_k)^{-1}
-            #     r[k]' * zk * r[k] = diag(lambda_k)
-            #
-            # where sk and zk are the entries inds[k] : inds[k+1] of
-            # s and z, reshaped into symmetric matrices.
+        if iters == 0:
+            if refinement:
+                wx, wy = xnewcopy(c), ynewcopy(b)
+                wz, ws = matrix(0.0, (cdim, 1)), matrix(0.0, (cdim, 1))
+                wtau, wkappa = matrix(0.0), matrix(0.0)
+            if refinement:
+                wx2, wy2 = xnewcopy(c), ynewcopy(b)
+                wz2, ws2 = matrix(0.0, (cdim, 1)), matrix(0.0, (cdim, 1))
+                wtau2, wkappa2 = matrix(0.0), matrix(0.0)
+
+        def f6(x, y, z, tau, s, kappa):
+            if refinement:
+                xcopy(x, wx)
+                ycopy(y, wy)
+                blas.copy(z, wz)
+                wtau[0] = tau[0]
+                blas.copy(s, ws)
+                wkappa[0] = kappa[0]
+            f6_no_ir(x, y, z, tau, s, kappa)
+            for i in xrange(refinement):
+                xcopy(wx, wx2)
+                ycopy(wy, wy2)
+                blas.copy(wz, wz2)
+                wtau2[0] = wtau[0]
+                blas.copy(ws, ws2)
+                wkappa2[0] = wkappa[0]
+                res(x, y, z, tau, s, kappa, wx2, wy2, wz2, wtau2, ws2, 
+                    wkappa2, W, dg, lmbda)
+                f6_no_ir(wx2, wy2, wz2, wtau2, ws2, wkappa2)
+                xaxpy(wx2, x)
+                yaxpy(wy2, y)
+                blas.axpy(wz2, z)
+                tau[0] += wtau2[0]
+                blas.axpy(ws2, s)
+                kappa[0] += wkappa2[0]
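+
+        # The loop above is plain iterative refinement: solve the system
+        # approximately, form the residual with res(), solve again for a
+        # correction with the same factorization, and add it.  A generic
+        # sketch of the idea (illustrative only, not used by this solver;
+        # solve and apply_K are hypothetical helpers):
+        #
+        #     u = solve(b)
+        #     for k in xrange(refinement):
+        #         r = b - apply_K(u)     # residual of the current iterate
+        #         u += solve(r)          # correction, same factorization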
+
+        mu = blas.nrm2(lmbda)**2 / (1 + cdim_diag) 
+        sigma = 0.0
+        for i in [0,1]:
+
+            # Solve
             #
-            # rti[k] is the inverse of r[k]', so 
+            #     [ 0         ]   [  0   A'  G'  c ] [ dx        ]
+            #     [ 0         ]   [ -A   0   0   b ] [ dy        ]
+            #     [ W'*ds     ] - [ -G   0   0   h ] [ W^{-1}*dz ]
+            #     [ dg*dkappa ]   [ -c' -b' -h'  0 ] [ dtau/dg   ]
             #
-            #     rti[k]' * sk * rti[k] = diag(lambda_k)^{-1}
-            #     rti[k]' * zk^{-1} * rti[k] = diag(lambda_k).
+            #                       [ rx   ]
+            #                       [ ry   ]
+            #         = - (1-sigma) [ rz   ]
+            #                       [ rtau ]
             #
-            # The vectors lambda_k are stored in 
-            # 
-            #     lmbda[ dims['l'] + sum(dims['q']) : -1 ]
+            #     lmbda o (dz + ds) = -lmbda o lmbda + sigma*mu*e
+            #     lmbdag * (dtau + dkappa) = - kappa * tau + sigma*mu
+
             
-            W['r'] = [ matrix(0.0, (m,m)) for m in dims['s'] ]
-            W['rti'] = [ matrix(0.0, (m,m)) for m in dims['s'] ]
+            # ds = -lmbdasq if i is 0
+            #    = -lmbdasq - dsa o dza + sigma*mu*e if i is 1
+            # dkappa = -lambdasq[-1] if i is 0 
+            #        = -lambdasq[-1] - dkappaa*dtaua + sigma*mu if i is 1.
 
+            blas.copy(lmbdasq, ds, n = dims['l'] + sum(dims['q']))
+            ind = dims['l'] + sum(dims['q'])
             ind2 = ind
-            for k in xrange(Ns):
-                m = dims['s'][k]
-                r, rti = W['r'][k], W['rti'][k]
-
-                # Factor sk = L1*L1'; store L1 in ds[inds[k]:inds[k+1]].
-                blas.copy(s, ds, offsetx = ind2, offsety = ind2, n = m**2) 
-                lapack.potrf(ds, n = m, ldA = m, offsetA = ind2)
-
-                # Factor zs[k] = L2*L2'; store L2 in dz[inds[k]:inds[k+1]].
-                blas.copy(z, dz, offsetx = ind2, offsety = ind2, n = m**2) 
-                lapack.potrf(dz, n = m, ldA = m, offsetA = ind2)
-	 
-                # SVD L2'*L1 = U*diag(lambda_k)*V'.  Keep U in work. 
-                for i in xrange(m): 
-                    blas.scal(0.0, ds, offset = ind2 + i*m, n = i)
-                blas.copy(ds, work, offsetx = ind2, n = m**2)
-                blas.trmm(dz, work, transA = 'T', ldA = m, ldB = m, n = m,
-                    m = m, offsetA = ind2)
-                lapack.gesvd(work, lmbda, jobu = 'O', ldA = m, m = m, 
-                    n = m, offsetS = ind)
-	       
-                # r = L2^{-T} * U 
-                blas.copy(work, r, n = m*m)
-                blas.trsm(dz, r, transA = 'T', m = m, n = m, ldA = m,
-                    offsetA = ind2)
-
-                # rti = L2 * U 
-                blas.copy(work, rti, n = m*m)
-                blas.trmm(dz, rti, m = m, n = m, ldA = m, offsetA = ind2)
-
-                # r := r * diag(sqrt(lambda_k))
-                # rti := rti * diag(1 ./ sqrt(lambda_k))
-                for i in xrange(m):
-                    a = math.sqrt( lmbda[ind+i] )
-                    blas.scal(a, r, offset = m*i, n = m)
-                    blas.scal(1.0/a, rti, offset = m*i, n = m)
+            blas.scal(0.0, ds, offset = ind)
+            for m in dims['s']:
+                blas.copy(lmbdasq, ds, n = m, offsetx = ind2, 
+                    offsety = ind, incy = m+1)
+                ind += m*m
+                ind2 += m
+            dkappa[0] = lmbdasq[-1]
+            if i == 1:
+                blas.axpy(ws3, ds)
+                ds[:dims['l']] -= sigma*mu 
+                ds[indq[:-1]] -= sigma*mu
+                ind = dims['l'] + sum(dims['q'])
+                ind2 = ind
+                for m in dims['s']:
+                    ds[ind : ind+m*m : m+1] -= sigma*mu
+                    ind += m*m
+                dkappa[0] += wkappa3 - sigma*mu
+ 
+            # (dx, dy, dz, dtau) = (1-sigma)*(rx, ry, rz, rt)
+            xcopy(rx, dx);  xscal(1.0 - sigma, dx)
+            ycopy(ry, dy);  yscal(1.0 - sigma, dy)
+            blas.copy(rz, dz);  blas.scal(1.0 - sigma, dz)
+            dtau[0] = (1.0 - sigma) * rt 
 
-                ind += m
-                ind2 += m*m
+            f6(dx, dy, dz, dtau, ds, dkappa)
 
+            # Save ds o dz and dkappa * dtau for Mehrotra correction
+            if i == 0:
+                blas.copy(ds, ws3)
+                misc.sprod(ws3, dz, dims)
+                wkappa3 = dtau[0] * dkappa[0]
 
-        # Define a function 
+            # Maximum step to boundary.
+            #
+            # If i is 1, also compute eigenvalue decomposition of the 's' 
+            # blocks in ds, dz.  The eigenvectors Qs, Qz are stored in 
+            # dsk, dzk.  The eigenvalues are stored in sigs, sigz. 
+
+            misc.scale2(lmbda, ds, dims)
+            misc.scale2(lmbda, dz, dims)
+            if i == 0:
+                ts = misc.max_step(ds, dims)
+                tz = misc.max_step(dz, dims)
+            else:
+                ts = misc.max_step(ds, dims, sigma = sigs)
+                tz = misc.max_step(dz, dims, sigma = sigz)
+            tt = -dtau[0] / lmbda[-1]
+            tk = -dkappa[0] / lmbda[-1]
+            t = max([ 0.0, ts, tz, tt, tk ])
+            if t == 0.0:
+                step = 1.0
+            else:
+                if i == 0:
+                    step = min(1.0, 1.0 / t)
+                else:
+                    step = min(1.0, STEP / t)
+            if i == 0:
+                sigma = (1.0 - step)**EXPON
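+
+            # Numerical illustration (comment only): if the predictor pass
+            # (i == 0) finds t = 2.0, then step = min(1, 1/t) = 0.5 and,
+            # with EXPON = 3, sigma = (1 - 0.5)**3 = 0.125; the corrector
+            # pass (i == 1) then takes step = min(1, STEP/t).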
+
+
+        # Update x, y.
+        xaxpy(dx, x, alpha = step)
+        yaxpy(dy, y, alpha = step)
+
+
+        # Replace 'l' and 'q' blocks of ds and dz with the updated 
+        # variables in the current scaling.
+        # Replace 's' blocks of ds and dz with the factors Ls, Lz in a 
+        # factorization Ls*Ls', Lz*Lz' of the updated variables in the 
+        # current scaling.
+
+        # ds := e + step*ds for 'l' and 'q' blocks.
+        # dz := e + step*dz for 'l' and 'q' blocks.
+        blas.scal(step, ds, n = dims['l'] + sum(dims['q']))
+        blas.scal(step, dz, n = dims['l'] + sum(dims['q']))
+        ds[:dims['l']] += 1.0
+        dz[:dims['l']] += 1.0
+        ds[indq[:-1]] += 1.0
+        dz[indq[:-1]] += 1.0
+
+        # ds := H(lambda)^{-1/2} * ds and dz := H(lambda)^{-1/2} * dz.
         #
-        # solve_newton(dx, dy, dz, dtau, ds, dkappa)
+        # This replaces the 'l' and 'q' components of ds and dz with the
+        # updated variables in the current scaling.  
+        # The 's' components of ds and dz are replaced with 
         #
-        # for solving 
+        #     diag(lmbda_k)^{1/2} * Qs * diag(lmbda_k)^{1/2} 
+        #     diag(lmbda_k)^{1/2} * Qz * diag(lmbda_k)^{1/2} 
         #
-        #     [ 0      ]   [  0   A'  G'  c ] [ dx   ]    [ rhsx   ]
-        #     [ 0      ]   [ -A   0   0   b ] [ dy   ]    [ rhsy   ]
-        #     [ ds     ] - [ -G   0   0   h ] [ dz   ] = -[ rhsz   ]
-        #     [ dkappa ]   [ -c' -b' -h'  0 ] [ dtau ]    [ rhstau ]
-        # 
-        #     s o dz + z o ds = -rhss
-        #     kappa*dtau + tau*dkappa = -rhskappa.
+        misc.scale2(lmbda, ds, dims, inverse = 'I')
+        misc.scale2(lmbda, dz, dims, inverse = 'I')
 
-        try: f = kktsolver(W)
-        except ArithmeticError:
-            if iters == 0 and primalstart and dualstart: 
-                raise ValueError, "Rank(A) < p or Rank([G; A]) < n"
-            else:
-                raise ArithmeticError, "singular KKT matrix"
+        # sigs := ( e + step*sigs ) ./ lambda for 's' blocks.
+        # sigz := ( e + step*sigz ) ./ lambda for 's' blocks.
+        blas.scal(step, sigs)
+        blas.scal(step, sigz)
+        sigs += 1.0
+        sigz += 1.0
+        blas.tbsv(lmbda, sigs, n = sum(dims['s']), k = 0, ldA = 1, 
+            offsetA = dims['l'] + sum(dims['q']))
+        blas.tbsv(lmbda, sigz, n = sum(dims['s']), k = 0, ldA = 1, 
+            offsetA = dims['l'] + sum(dims['q']))
 
-        # th = W^{-T} * h
-        blas.copy(h, th)
-        scale(th, W, trans = 'T', inverse = 'I')
+        # dsk := Ls = dsk * sqrt(sigs).  
+        # dzk := Lz = dzk * sqrt(sigz).
+        ind2, ind3 = dims['l'] + sum(dims['q']), 0
+        for k in xrange(len(dims['s'])):
+            m = dims['s'][k]
+            for i in xrange(m):
+                blas.scal(math.sqrt(sigs[ind3+i]), ds, offset = ind2 + m*i,
+                    n = m)
+                blas.scal(math.sqrt(sigz[ind3+i]), dz, offset = ind2 + m*i,
+                    n = m)
+            ind2 += m*m
+            ind3 += m
 
-        # Solve
+
+        # Update lambda and scaling.
+
+        misc.update_scaling(W, lmbda, ds, dz)
+
+        # For kappa, tau block: 
         #
-        #     [ 0   A'  G'    ] [ dx1 ]          [ c ]
-        #     [-A   0   0     ]*[ dy1 ] = -dgi * [ b ].
-        #     [-G   0   W'*W  ] [ dzl ]          [ h ]
-         
-        xcopy(c, dx1);  xscal(-1, dx1)
-        ycopy(b, dy1)
-        blas.copy(h, dz1)
-        try: f(dx1, dy1, dz1)
-        except ArithmeticError:
-            if iters == 0 and primalstart and dualstart: 
-                raise ValueError, "Rank(A) < p or Rank([G; A]) < n"
-            else:
-                raise ArithmeticError, "singular KKT matrix"
-        xscal(dgi, dx1)
-        yscal(dgi, dy1)
-        blas.scal(dgi, dz1)
+        #     dg := sqrt( (kappa + step*dkappa) / (tau + step*dtau) ) 
+        #         = dg * sqrt( (1 - step*tk) / (1 - step*tt) )
+        #
+        #     lmbda[-1] := sqrt((tau + step*dtau) * (kappa + step*dkappa))
+        #                = lmbda[-1] * sqrt(( 1 - step*tt) * (1 - step*tk))
+
+        dg *= math.sqrt(1.0 - step*tk) / math.sqrt(1.0 - step*tt) 
+        dgi = 1.0 / dg
+        lmbda[-1] *= math.sqrt(1.0 - step*tt) * math.sqrt(1.0 - step*tk) 
+
+
+        # Unscale s, z, tau, kappa (unscaled variables are used only to 
+        # compute feasibility residuals).
+
+        blas.copy(lmbda, s, n = dims['l'] + sum(dims['q']))
+        ind = dims['l'] + sum(dims['q'])
+        ind2 = ind
+        for m in dims['s']:
+            blas.scal(0.0, s, offset = ind2)
+            blas.copy(lmbda, s, offsetx = ind, offsety = ind2, n = m, 
+                incy = m+1)
+            ind += m
+            ind2 += m*m
+        misc.scale(s, W, trans = 'T')
+
+        blas.copy(lmbda, z, n = dims['l'] + sum(dims['q']))
+        ind = dims['l'] + sum(dims['q'])
+        ind2 = ind
+        for m in dims['s']:
+            blas.scal(0.0, z, offset = ind2)
+            blas.copy(lmbda, z, offsetx = ind, offsety = ind2, n = m, 
+                    incy = m+1)
+            ind += m
+            ind2 += m*m
+        misc.scale(z, W, inverse = 'I')
+
+        kappa, tau = lmbda[-1]/dgi, lmbda[-1]*dgi
+        gap = blas.dot(lmbda, lmbda, n = lmbda.size[0]-1) / tau**2
+
+
+    return {'status': 'unknown', 'x': None, 'y': None, 's': None, 
+        'z': None}
+
+
+def coneqp(P, q, G = None, h = None, dims = None, A = None, b = None,
+    initvals = None, kktsolver = None, xnewcopy = None, xdot = None,
+    xaxpy = None, xscal = None, ynewcopy = None, ydot = None, yaxpy = None,
+    yscal = None):
+    """
+
+    Solves a pair of primal and dual convex quadratic cone programs
+
+        minimize    (1/2)*x'*P*x + q'*x    
+        subject to  G*x + s = h      
+                    A*x = b
+                    s >= 0
+
+        maximize    -(1/2)*(q + G'*z + A'*y)' * pinv(P) * (q + G'*z + A'*y)
+                    - h'*z - b'*y 
+        subject to  q + G'*z + A'*y in range(P)
+                    z >= 0.
+
+    The inequalities are with respect to a cone C defined as the Cartesian
+    product of N + M + 1 cones:
+    
+        C = C_0 x C_1 x .... x C_N x C_{N+1} x ... x C_{N+M}.
+
+    The first cone C_0 is the nonnegative orthant of dimension ml.  
+    The next N cones are 2nd order cones of dimension mq[0], ..., mq[N-1].
+    The second order cone of dimension m is defined as
+    
+        { (u0, u1) in R x R^{m-1} | u0 >= ||u1||_2 }.
+
+    The next M cones are positive semidefinite cones of order ms[0], ...,
+    ms[M-1] >= 0.  
+
+
+    Input arguments (basic usage).
+
+        P is a dense or sparse 'd' matrix of size (n,n); only the lower
+        triangular part (the lower triangle of the Hessian of the
+        objective) is referenced.  P must be positive semidefinite.
+
+        q is a dense 'd' matrix of size (n,1).
+
+        dims is a dictionary with the dimensions of the components of C.  
+        It has three fields.
+        - dims['l'] = ml, the dimension of the nonnegative orthant C_0.
+          (ml >= 0.)
+        - dims['q'] = mq = [ mq[0], mq[1], ..., mq[N-1] ], a list of N 
+          integers with the dimensions of the second order cones 
+          C_1, ..., C_N.  (N >= 0 and mq[k] >= 1.)
+        - dims['s'] = ms = [ ms[0], ms[1], ..., ms[M-1] ], a list of M  
+          integers with the orders of the semidefinite cones 
+          C_{N+1}, ..., C_{N+M}.  (M >= 0 and ms[k] >= 0.)
+        The default value of dims = {'l': G.size[0], 'q': [], 's': []}.
+
+        G is a dense or sparse 'd' matrix of size (K,n), where
+
+            K = ml + mq[0] + ... + mq[N-1] + ms[0]**2 + ... + ms[M-1]**2.
+
+        Each column of G describes a vector 
+
+            v = ( v_0, v_1, ..., v_N, vec(v_{N+1}), ..., vec(v_{N+M}) ) 
+
+        in V = R^ml x R^mq[0] x ... x R^mq[N-1] x S^ms[0] x ... x S^ms[M-1]
+        stored as a column vector
+
+            [ v_0; v_1; ...; v_N; vec(v_{N+1}); ...; vec(v_{N+M}) ].
+
+        Here, if u is a symmetric matrix of order m, then vec(u) is the 
+        matrix u stored in column major order as a vector of length m**2.
+        We use BLAS unpacked 'L' storage, i.e., the entries in vec(u) 
+        corresponding to the strictly upper triangular entries of u are 
+        not referenced.
+
+        h is a dense 'd' matrix of size (K,1), representing a vector in V,
+        in the same format as the columns of G.
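+
+        For example (illustration only), dims = {'l': 2, 'q': [4], 's': [3]}
+        describes a cone with a nonnegative orthant of dimension 2, one 
+        second order cone of dimension 4, and one semidefinite cone of 
+        order 3; G and h then have K = 2 + 4 + 3**2 = 15 rows.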
+    
+        A is a dense or sparse 'd' matrix of size (p,n).   The default
+        value is a sparse 'd' matrix of size (0,n).
+
+        b is a dense 'd' matrix of size (p,1).  The default value is a 
+        dense 'd' matrix of size (0,1).
+
+        initvals is a dictionary with optional primal and dual starting 
+        points initvals['x'], initvals['s'], initvals['y'], initvals['z'].
+        - initvals['x'] is a dense 'd' matrix of size (n,1).   
+        - initvals['s'] is a dense 'd' matrix of size (K,1), representing
+          a vector that is strictly positive with respect to the cone C.  
+        - initvals['y'] is a dense 'd' matrix of size (p,1).  
+        - initvals['z'] is a dense 'd' matrix of size (K,1), representing
+          a vector that is strictly positive with respect to the cone C.
+        A default initialization is used for the variables that are not
+        specified in initvals.
+
+        It is assumed that rank(A) = p and rank([P; A; G]) = n.
+
+        The other arguments are normally not needed.  They make it possible
+        to exploit certain types of structure, as described below.
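+
+        A minimal usage sketch (assuming the function is exposed as
+        cvxopt.solvers.coneqp; adjust the import to match the installed
+        release):
+
+            from cvxopt.base import matrix
+            from cvxopt import solvers
+
+            # minimize (1/2)*(x1**2 + x2**2) subject to x1 + x2 >= 1
+            P = matrix([[1.0, 0.0], [0.0, 1.0]])
+            q = matrix([0.0, 0.0])
+            G = matrix([[-1.0], [-1.0]])    # one row: -x1 - x2 <= -1
+            h = matrix([-1.0])
+            sol = solvers.coneqp(P, q, G, h)
+            if sol['status'] == 'optimal': print sol['x']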
+
+
+    Output arguments.
+
+        Returns a dictionary with keys 'status', 'x', 's', 'y', 'z'.
+
+        If status is 'optimal', x, s, y, z are approximate solutions of 
+        the primal and dual problems.
+
+        If status is 'unknown', x, s, y, z are None.
+
+
+    Advanced usage.
+
+        Three mechanisms are provided to express problem structure.
+
+        1.  The user can provide a customized routine for solving linear 
+        equations (`KKT systems')
+
+            [ P   A'  G'    ] [ ux ]   [ bx ]
+            [ A   0   0     ] [ uy ] = [ by ].
+            [ G   0   -W'*W ] [ uz ]   [ bz ]
+
+        W is a scaling matrix, a block diagonal mapping
+
+           W*u = ( W_0*u_0, ..., W_{N+M}*u_{N+M} )
+
+        defined as follows.
+
+        - For the 'l' block (W_0):
+
+              W_0 = diag(d),
+
+          with d a positive vector of length ml.
+
+        - For the 'q' blocks (W_{k+1}, k = 0, ..., N-1):
+
+              W_{k+1} = beta_k * ( 2 * v_k * v_k' - J )
+
+          where beta_k is a positive scalar, v_k is a vector in R^mq[k]
+          with v_k[0] > 0 and v_k'*J*v_k = 1, and J = [1, 0; 0, -I].
+
+        - For the 's' blocks (W_{k+N}, k = 0, ..., M-1):
+
+              W_{k+N} * u = vec(r_k' * mat(u) * r_k)
+
+          where r_k is a nonsingular matrix of order ms[k], and mat(x) is
+          the inverse of the vec operation.
+
+        The optional argument kktsolver is a Python function that will be
+        called as g = kktsolver(W).  W is a dictionary that contains
+        the parameters of the scaling:
+
+        - W['d'] is a positive 'd' matrix of size (ml,1).
+        - W['di'] is a positive 'd' matrix with the elementwise inverse of
+          W['d'].
+        - W['beta'] is a list [ beta_0, ..., beta_{N-1} ]
+        - W['v'] is a list [ v_0, ..., v_{N-1} ]
+        - W['r'] is a list [ r_0, ..., r_{M-1} ]
+        - W['rti'] is a list [ rti_0, ..., rti_{M-1} ], with rti_k the
+          inverse of the transpose of r_k.
+
+        The call g = kktsolver(W) should return a function g that solves
+        the KKT system via g(x, y, z).  On entry, x, y, z contain the
+        righthand sides bx, by, bz.  On exit, they contain the solution,
+        with uz scaled: the argument z contains W*uz.  In other words,
+        on exit x, y, z are the solution of
+
+            [ P   A'  G'*W^{-1} ] [ ux ]   [ bx ]
+            [ A   0   0         ] [ uy ] = [ by ].
+            [ G   0   -W'       ] [ uz ]   [ bz ]
+
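+        A structural sketch of such a solver (illustration only; factor_kkt
+        and solve_factored are hypothetical placeholders for the user's own
+        factor and solve steps):
+
+            def kktsolver(W):
+                # form and factor the scaled KKT matrix for this W
+                F = factor_kkt(P, G, A, W)
+                def g(x, y, z):
+                    # overwrite x, y, z (= bx, by, bz) with ux, uy, W*uz
+                    solve_factored(F, x, y, z)
+                return g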
+
+        2.  The linear operators P*u, G*u and A*u can be specified 
+        by providing Python functions instead of matrices.  This can only 
+        be done in combination with 1. above, i.e., it requires the 
+        kktsolver argument.
+
+        If P is a function, the call P(u, v, alpha, beta) should evaluate 
+        the matrix-vector product
+
+            v := alpha * P * u + beta * v.
+
+        The arguments u and v are required.  The other arguments have 
+        default values alpha = 1.0, beta = 0.0. 
+        
+        If G is a function, the call G(u, v, alpha, beta, trans) should 
+        evaluate the matrix-vector products
+
+            v := alpha * G * u + beta * v  if trans is 'N'
+            v := alpha * G' * u + beta * v  if trans is 'T'.
+
+        The arguments u and v are required.  The other arguments have
+        default values alpha = 1.0, beta = 0.0, trans = 'N'.
+
+        If A is a function, the call A(u, v, alpha, beta, trans) should
+        evaluate the matrix-vector products
+
+            v := alpha * A * u + beta * v if trans is 'N'
+            v := alpha * A' * u + beta * v if trans is 'T'.
+
+        The arguments u and v are required.  The other arguments
+        have default values alpha = 1.0, beta = 0.0, trans = 'N'.
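+
+        For example (a sketch, not part of this module), a diagonal
+        Hessian P = diag(d), with d a dense 'd' matrix of size (n,1)
+        defined in the enclosing scope, could be supplied as
+
+            from cvxopt import base
+
+            def fP(u, v, alpha = 1.0, beta = 0.0):
+                # v := alpha * diag(d) * u + beta * v
+                v[:] = alpha * base.mul(d, u) + beta * v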
+
+
+        3.  Instead of using the default representation of the primal 
+        variable x and the dual variable y as one-column 'd' matrices, 
+        we can represent these variables and the corresponding parameters 
+        q and b by arbitrary Python objects (matrices, lists, dictionaries,
+        etc).  This can only be done in combination with 1. and 2. above,
+        i.e., it requires a user-provided KKT solver and an operator 
+        description of the linear mappings.   It also requires the 
+        arguments xnewcopy, xdot, xscal, xaxpy, ynewcopy, ydot, yscal, 
+        yaxpy.  These arguments are functions defined as follows.
+   
+        If X is the vector space of primal variables x, then:
+        - xnewcopy(u) creates a new copy of the vector u in X.
+        - xdot(u, v) returns the inner product of two vectors u and v in X.
+        - xscal(alpha, u) computes u := alpha*u, where alpha is a scalar
+          and u is a vector in X.
+        - xaxpy(u, v, alpha = 1.0) computes v := alpha*u + v
+          for a scalar alpha and two vectors u and v in X.
+
+        If Y is the vector space of dual variables y, then:
+        - ynewcopy(u) creates a new copy of the vector u in Y.
+        - ydot(u, v) returns the inner product of two vectors u and v in Y.
+        - yscal(alpha, u) computes u := alpha*u, where alpha is a scalar
+          and u is a vector in Y.
+        - yaxpy(u, v, alpha = 1.0) computes v := alpha*u + v
+          for a scalar alpha and two vectors u and v in Y.
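+
+        For the default representation of x and y as dense 'd' matrices
+        these operations reduce to (a sketch; blas is cvxopt.blas):
+
+            xnewcopy = matrix      # matrix(u) returns a copy of u
+            xdot     = blas.dot
+            xscal    = blas.scal
+            xaxpy    = blas.axpy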
+
+
+    Control parameters.
+
+       The following control parameters can be modified by adding an
+       entry to the dictionary options.
+
+       options['show_progress'] True/False (default: True)
+       options['maxiters'] positive integer (default: 100)
+       options['refinement'] nonnegative integer (default: 0 for problems
+           with no second-order cone and matrix inequality constraints;
+           1 otherwise)
+       options['abstol'] scalar (default: 1e-7)
+       options['reltol'] scalar (default: 1e-6)
+       options['feastol'] scalar (default: 1e-7).
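+
+       For example (assuming the options dictionary is exposed as
+       cvxopt.solvers.options, as in the documented interface):
+
+           from cvxopt import solvers
+           solvers.options['show_progress'] = False
+           solvers.options['maxiters'] = 200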
+
+    """
+    import math
+    from cvxopt import base, blas, misc
+    from cvxopt.base import matrix, spmatrix
+
+    STEP = 0.99
+    EXPON = 3
+
+    try: MAXITERS = options['maxiters']
+    except KeyError: MAXITERS = 100
+    else: 
+        if type(MAXITERS) is not int or MAXITERS < 1: 
+            raise ValueError, "options['maxiters'] must be a positive "\
+                "integer"
+
+    try: ABSTOL = options['abstol']
+    except KeyError: ABSTOL = 1e-7
+    else: 
+        if type(ABSTOL) is not float and type(ABSTOL) is not int: 
+            raise ValueError, "options['abstol'] must be a scalar"
+
+    try: RELTOL = options['reltol']
+    except KeyError: RELTOL = 1e-6
+    else: 
+        if type(RELTOL) is not float and type(RELTOL) is not int: 
+            raise ValueError, "options['reltol'] must be a scalar"
+
+    try: FEASTOL = options['feastol']
+    except KeyError: FEASTOL = 1e-7
+    else: 
+        if type(FEASTOL) is not float and type(FEASTOL) is not int: 
+            raise ValueError, "options['feastol'] must be a scalar"
 
-        def solve_newton1(dx, dy, dz, dtau, ds, dkappa):
+    try: show_progress = options['show_progress']
+    except KeyError: show_progress = True
 
-            # Solve without refinement
-            #
-            #     [0     ]   [ 0   A'  G'  0 ] [dx  ]    [rhsx  ]
-            #     [0     ]   [-A   0   0   b ] [dy  ]    [rhsy  ]
-            #     [ds    ] - [-G   0   0   h ] [dz  ] = -[rhsz  ]
-            #     [dkappa]   [-c' -b' -h'  0 ] [dtau]    [rhstau]
-            #
-            #     s o dz + z o ds = -rhss
-            #     kappa*dtau + tau*dkappa = -rhskappa.
-            #
-            # Last two equations in scaled variables: 
-            #
-            #     lmbda o (W*dz + W^{-T}*ds) = -rhss
-            #     lmbdg * (w*dtau + dkappa/w) = -rhskappa.
-            #
-            # On entry, the righthand sides are stored in dx, dy, dz, dtau,
-            # ds, dkappa.  On exit, scaled quantities are returned for ds,
-            # dz, dtau, dkappa.
-
-            # dy := -dy = -rhsy
-            yscal(-1.0, dy) 
-
-            # ds := -lambda o\ ds (\o is inverse of o)
-            sinv(ds, lmbda, dims)
-            blas.scal(-1.0, ds)
-
-            # dz := -(dz + W'*ds) = -rhsz + W'*(lambda o\ rhss)
-            blas.copy(ds, ds1)
-            scale(ds1, W, trans = 'T')
-            blas.axpy(ds1, dz)
-            blas.scal(-1.0, dz)
-
-            # dkappa[0] := -dkappa[0]/lmbd[-1] 
-            #            = -rhskappa / sqrt(kappa*tau)
-            dkappa[0] = -dkappa[0] / lmbda[-1]
-
-            # dtau[0] = dtau[0] + dkappa[0] / dgi
-            #         = rhstau[0] - rhskappa / tau
-            dtau[0] += dkappa[0] / dgi
- 
 
-            # Solve 
-            #
-            #  [  0  A'  G'   0   ] [dx  ]   [ rhsx                     ]
-            #  [ -A  0   0    b   ] [dy  ]   [ rhsy                     ]
-            #  [ -G  0   W'*W h   ] [dz  ] = [ rhsz - W'*(lmbda o\ rhss)]
-            #  [ -c' -b' -h'  k/t ] [dtau]   [ rhst - rhsk/tau          ].
-
-            f(dx, dy, dz)
-
-            dtau[0] = dgi * ( dtau[0] + xdot(c, dx) + ydot(b, dy) + 
-                sdot(th, dz, dims) ) / ( 1.0 + sdot(dz1, dz1, dims) )
-            xaxpy(dx1, dx, alpha = dtau[0])
-            yaxpy(dy1, dy, alpha = dtau[0])
-            blas.axpy(dz1, dz, alpha = dtau[0])
- 
-            # ds := ds - dz = - lambda o\ rhs - dz 
-            blas.axpy(dz, ds, alpha = -1)
+    if kktsolver is None: 
+        if dims and (dims['q'] or dims['s']):  
+            kktsolver = 'chol'            
+        else:
+            kktsolver = 'chol2'            
+    defaultsolvers = ('ldl', 'ldl2', 'chol', 'chol2')
+    if type(kktsolver) is str and kktsolver not in defaultsolvers:
+        raise ValueError, "'%s' is not a valid value for kktsolver" \
+            %kktsolver
+
+
+    # Argument error checking depends on level of customization.
+    customkkt = type(kktsolver) is not str
+    operatorP = type(P) not in (matrix, spmatrix)
+    operatorG = G is not None and type(G) not in (matrix, spmatrix)
+    operatorA = A is not None and type(A) not in (matrix, spmatrix)
+    if (operatorP or operatorG or operatorA) and not customkkt:
+        raise ValueError, "use of function valued P, G, A requires a "\
+            "user-provided kktsolver"
+    customx = (xnewcopy != None or xdot != None or xaxpy != None or 
+        xscal != None) 
+    if customx and (not operatorP or not operatorG or not operatorA or 
+        not customkkt):
+        raise ValueError, "use of non-vector type for x requires "\
+            "function valued P, G, A and user-provided kktsolver"
+    customy = (ynewcopy != None or ydot != None or yaxpy != None or 
+        yscal != None) 
+    if customy and (not operatorP or not operatorA or not customkkt):
+        raise ValueError, "use of non vector type for y requires "\
+            "function valued P, A and user-provided kktsolver"
+
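+    # Note: a function valued P must be callable as
+    # P(x, y, alpha = 1.0, beta = 0.0), evaluating y := alpha*P*x + beta*y.
+    # Function valued G and A must be callable as
+    # G(x, y, trans = 'N', alpha = 1.0, beta = 0.0), evaluating
+    # y := alpha*G*x + beta*y for trans = 'N' and y := alpha*G'*x + beta*y
+    # for trans = 'T' (cf. the wrappers fP, fG, fA defined below).
+    # For example (illustrative only), a P equal to the identity could be
+    # supplied as
+    #
+    #     def P(x, y, alpha = 1.0, beta = 0.0):
+    #         # y := alpha*x + beta*y
+    #         blas.scal(beta, y)
+    #         blas.axpy(x, y, alpha = alpha)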
+
+    if not customx and (type(q) is not matrix or q.typecode != 'd' or
+        q.size[1] != 1):
+        raise TypeError, "'q' must be a 'd' matrix with one column"
+
+    if not operatorP:
+        if P.typecode != 'd' or P.size != (q.size[0], q.size[0]):
+            raise TypeError, "'P' must be a 'd' matrix of size (%d, %d)"\
+                %(q.size[0], q.size[0])
+        def fP(x, y, alpha = 1.0, beta = 0.0):
+            base.symv(P, x, y, alpha = alpha, beta = beta)
+    else:
+        fP = P
 
-            dkappa[0] -= dtau[0]
 
+    if h is None: h = matrix(0.0, (0,1))
+    if type(h) is not matrix or h.typecode != 'd' or h.size[1] != 1:
+        raise TypeError, "'h' must be a 'd' matrix with one column" 
 
-        if refinement:  
-            def solve_newton(dx, dy, dz, dtau, ds, dkappa):
-            
-                # copy righthand sides to dx2 etc
-                xcopy(dx, dx2)
-                ycopy(dy, dy2)
-                blas.copy(dz, dz2)
-                dtau2[0] = dtau[0]
-                blas.copy(ds, ds2)
-                dkappa2[0] = dkappa[0]
- 
-                solve_newton1(dx, dy, dz, dtau, ds, dkappa)
-
-                # store residuals in dx2, dy2, etc
-                #
-                #    [0     ]   [ 0  A'  G'  c ] [dx  ]   [rhsx  ]
-                #    [0     ]   [-A  0   0   b ] [dy  ]   [rhsy  ]
-                #    [ds    ] - [-Gl 0   0   h ] [dz  ] + [rhsz  ]
-                #    [dkappa]   [-c' -b' -h' 0 ] [dtau]   [rhstau]
-                # 
-                #    s o dz + z o dz = -rhss
-                #    kappa*dtau + tau*dkappa = -rhskappa.
-                #
-                # Last two equations in scaled variables: 
-                #
-                #     lmbda o (W*dz + W^{-T}*ds) = -rhss
-                #     lmbdg * (w*dtau + dkappa/w) = -rhskappa.
- 
-                # Store unscaled steps in s, z.
-                blas.copy(dz, z)
-                scale(z, W, inverse = 'I')
-                blas.copy(ds, s)
-                scale(s, W, trans = 'T')
-                dutau = dtau[0] * dgi
-                dukappa = dkappa[0] / dgi
-
-                Af(dy, dx2, alpha = -1.0, beta = 1.0, trans = 'T')
-                Gf(z, dx2, alpha = -1.0, beta = 1.0, trans = 'T')
-                xaxpy(c, dx2, alpha =- dutau)
-
-                Af(dx, dy2, alpha = 1.0, beta = 1.0)
-                yaxpy(b, dy2, alpha = -dutau)
-
-                Gf(dx, dz2, alpha = 1.0, beta = 1.0)
-                blas.axpy(h, dz2, alpha = -dutau)
-                blas.axpy(s, dz2)
-
-                dtau2[0] += dukappa + xdot(c,dx) + ydot(b,dy) + sdot(h, z, 
-                    dims) 
-
-                # s := lmbda o (W*dz + W^{-T}*ds) 
-                blas.copy(ds, s)
-                blas.axpy(dz, s)
-                sprod(s, lmbda, dims, diag = 'D')
- 
-                blas.axpy(s, ds2)
+    if not dims: dims = {'l': h.size[0], 'q': [], 's': []}
+    if type(dims['l']) is not int or dims['l'] < 0: 
+        raise TypeError, "'dims['l']' must be a nonnegative integer"
+    if [ k for k in dims['q'] if type(k) is not int or k < 1 ]:
+        raise TypeError, "'dims['q']' must be a list of positive integers"
+    if [ k for k in dims['s'] if type(k) is not int or k < 0 ]:
+        raise TypeError, "'dims['s']' must be a list of nonnegative " \
+            "integers"
 
-                dkappa2[0] += lmbda[-1] * (dtau[0] + dkappa[0])
+    try: refinement = options['refinement']
+    except KeyError: 
+        if dims['q'] or dims['s']: refinement = 1
+        else: refinement = 0
+    else:
+        if type(refinement) is not int or refinement < 0: 
+            raise ValueError, "options['refinement'] must be a "\
+                "nonnegative integer"
 
-                solve_newton1(dx2, dy2, dz2, dtau2, ds2, dkappa2)
 
-                xaxpy(dx2, dx)
-                yaxpy(dy2, dy)
-                blas.axpy(dz2, dz)
-                dtau[0] += dtau2[0]
-                blas.axpy(ds2, ds)
-                dkappa[0] += dkappa2[0]
+    cdim = dims['l'] + sum(dims['q']) + sum([ k**2 for k in dims['s'] ])
+    if h.size[0] != cdim:
+        raise TypeError, "'h' must be a 'd' matrix of size (%d,1)" %cdim
 
-        else:  # no iterative refinement
+    if G is None:
+        if customx:
+            def G(x, y, trans = 'N', alpha = 1.0, beta = 0.0):
+                if trans == 'N': pass
+                else: xscal(beta, y)
+        else:
+            G = spmatrix([], [], [], (0, q.size[0]))
+    if not operatorG:
+        if G.typecode != 'd' or G.size != (cdim, q.size[0]):
+            raise TypeError, "'G' must be a 'd' matrix of size (%d, %d)"\
+                %(cdim, q.size[0])
+        def fG(x, y, trans = 'N', alpha = 1.0, beta = 0.0):
+            misc.sgemv(G, x, y, dims, trans = trans, alpha = alpha, 
+                beta = beta)
+    else:
+        fG = G
 
-            solve_newton = solve_newton1
 
-        mu = blas.nrm2(lmbda)**2 / (cdeg + 1)
+    if A is None:
+        if customx or customy:
+            def A(x, y, trans = 'N', alpha = 1.0, beta = 0.0):
+                if trans == 'N': pass
+                else: yscal(beta, y)
+        else:
+            A = spmatrix([], [], [], (0, q.size[0]))
+    if not operatorA:
+        if A.typecode != 'd' or A.size[1] != q.size[0]:
+            raise TypeError, "'A' must be a 'd' matrix with %d columns" \
+                %q.size[0]
+        def fA(x, y, trans = 'N', alpha = 1.0, beta = 0.0):
+            base.gemv(A, x, y, trans = trans, alpha = alpha, beta = beta)
+    else:
+        fA = A
+    if not customy:
+        if b is None: b = matrix(0.0, (0,1))
+        if type(b) is not matrix or b.typecode != 'd' or b.size[1] != 1:
+            raise TypeError, "'b' must be a 'd' matrix with one column"
+        if not operatorA and b.size[0] != A.size[0]:
+            raise TypeError, "'b' must have length %d" %A.size[0]
+    if b is None and customy:  
+        raise ValueError, "use of non-vector type for y requires b"
 
-        
-        # Affine scaling step is solution with right hand side
-        #
-        #     rhsx = resx  
-        #     rhsy = resy
-        #     rhsz = resz 
-        #     rhstau = rest
-        #     rhss = lambda o lambda
-        #     rhskappa = kappa * tau.
-
-        # dx := resx, dy := resy, dz := resz, dtau := rest
-        xcopy(resx, dx)
-        ycopy(resy, dy)
-        blas.copy(resz, dz)
-        dtau[0] = rest
 
-        # lmbdasq := lmbda o lmbda 
-        blas.copy(lmbda, lmbdasq)
-        blas.tbmv(lmbda, lmbdasq, n = dims['l'], k = 0, ldA = 1) 
-        ind = dims['l']
-        for m in dims['q']:
-            lmbdasq[ind] = blas.nrm2(lmbda, offset = ind, n = m)**2
-            blas.scal(2.0*lmbda[ind], lmbdasq, n = m-1, offset = ind+1)
-            ind += m
-        # Diagonal symmetric matrices are stored as vectors in lmbdasq.
-        blas.tbmv(lmbda, lmbdasq, n = sum(dims['s']) + 1, k = 0, ldA = 1, 
-            offsetA = ind, offsetx = ind) 
+    ws3, wz3 = matrix(0.0, (cdim, 1)), matrix(0.0, (cdim, 1))
+    def res(ux, uy, uz, us, vx, vy, vz, vs, W, lmbda):
 
-        # ds := lambda o lambda 
-        blas.copy(lmbdasq, ds, n = dims['l'] + sum(dims['q']))
-        ind = dims['l'] + sum(dims['q'])
-        ind2 = ind
-        blas.scal(0.0, ds, offset = ind) 
-        for m in dims['s']:
-            blas.copy(lmbdasq, ds, n = m, offsetx = ind2, offsety = ind,
-                incy = m+1)
-            ind += m*m
-            ind2 += m
+        # Evaluates residual in Newton equations:
+        # 
+        #      [ vx ]    [ vx ]   [ 0     ]   [ P  A'  G' ]   [ ux        ]
+        #      [ vy ] := [ vy ] - [ 0     ] - [ A  0   0  ] * [ uy        ]
+        #      [ vz ]    [ vz ]   [ W'*us ]   [ G  0   0  ]   [ W^{-1}*uz ]
+        #
+        #      vs := vs - lmbda o (uz + us).
+
+        # vx := vx - P*ux - A'*uy - G'*W^{-1}*uz
+        fP(ux, vx, alpha = -1.0, beta = 1.0)
+        fA(uy, vx, alpha = -1.0, beta = 1.0, trans = 'T') 
+        blas.copy(uz, wz3)
+        misc.scale(wz3, W, inverse = 'I')
+        fG(wz3, vx, alpha = -1.0, beta = 1.0, trans = 'T') 
+
+        # vy := vy - A*ux
+        fA(ux, vy, alpha = -1.0, beta = 1.0)
+
+        # vz := vz - G*ux - W'*us
+        fG(ux, vz, alpha = -1.0, beta = 1.0)
+        blas.copy(us, ws3)
+        misc.scale(ws3, W, trans = 'T')
+        blas.axpy(ws3, vz, alpha = -1.0)
+ 
+        # vs := vs - lmbda o (uz + us)
+        blas.copy(us, ws3)
+        blas.axpy(uz, ws3)
+        misc.sprod(ws3, lmbda, dims, diag = 'D')
+        blas.axpy(ws3, vs, alpha = -1.0)
 
-        # dkappa = kappa*tau
-        dkappa[0] = lmbdasq[-1]
 
-        solve_newton(dx, dy, dz, dtau, ds, dkappa)
+    # kktsolver(W) returns a routine for solving 
+    #
+    #     [ P   A'  G'*W^{-1} ] [ ux ]   [ bx ]
+    #     [ A   0   0         ] [ uy ] = [ by ].
+    #     [ G   0   -W'       ] [ uz ]   [ bz ]
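+    #
+    # A user supplied kktsolver must follow the same protocol: it is
+    # called as kktsolver(W) with the scaling dictionary W and must return
+    # a function f(x, y, z) that solves the system above, overwriting
+    # x, y, z (which contain bx, by, bz on entry) with the solution.
+    # A minimal sketch (illustrative only):
+    #
+    #     def mykktsolver(W):
+    #         ...  # factor the KKT matrix for this scaling W
+    #         def solve(x, y, z):
+    #             ...  # overwrite x, y, z with the solution
+    #         return solve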
+
+    if kktsolver in defaultsolvers:
+         if b.size[0] > q.size[0]:
+             raise ValueError, "Rank(A) < p or Rank([P; G; A]) < n"
+         if kktsolver == 'ldl': 
+             factor = misc.kkt_ldl(G, dims, A)
+         elif kktsolver == 'ldl2': 
+             factor = misc.kkt_ldl2(G, dims, A)
+         elif kktsolver == 'chol':
+             factor = misc.kkt_chol(G, dims, A)
+         else:
+             factor = misc.kkt_chol2(G, dims, A)
+         def kktsolver(W):
+             return factor(W, P)
+
+    if xnewcopy is None: xnewcopy = matrix 
+    if xdot is None: xdot = blas.dot
+    if xaxpy is None: xaxpy = blas.axpy 
+    if xscal is None: xscal = blas.scal 
+    def xcopy(x, y): 
+        xscal(0.0, y) 
+        xaxpy(x, y)
+    if ynewcopy is None: ynewcopy = matrix 
+    if ydot is None: ydot = blas.dot 
+    if yaxpy is None: yaxpy = blas.axpy 
+    if yscal is None: yscal = blas.scal
+    def ycopy(x, y): 
+        yscal(0.0, y) 
+        yaxpy(x, y)
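+
+    # For a non-matrix x or y, the caller supplies these operations; they
+    # must behave like the BLAS defaults used above:
+    #
+    #     xnewcopy(u)               returns an independent copy of u
+    #     xdot(u, v)                returns the inner product of u and v
+    #     xaxpy(u, v, alpha = 1.0)  computes v := alpha*u + v
+    #     xscal(alpha, u)           computes u := alpha*u
+    #
+    # and similarly for ynewcopy, ydot, yaxpy, yscal.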
 
-        # Maximum step to boundary
-        blas.copy(ds, ds2)
-        scale2(lmbda, ds2, dims)
-        ts = max_step(ds2, dims)
-        blas.copy(dz, dz2)
-        scale2(lmbda, dz2, dims)
-        tz = max_step(dz2, dims)
-        tt = -dtau[0]/lmbda[-1]
-        tk = -dkappa[0]/lmbda[-1]
-        step = min(1.0, 1.0 / max( [ 0.0, ts, tz, tt, tk ] ))
-        sigma = (1.0 - step)**EXPON
 
+    if cdim == 0: 
 
-        # Centering-corrector step is solution with right hand side
+        # Solve
         #
-        #     rhsx = (1 - sigma) * resx 
-        #     rhsy = (1 - sigma) * resy
-        #     rhsz = (1 - sigma) * resz 
-        #     rhstau = (1 - sigma) * rest
-        #     rhss = lambda o lambda + ds o dz - sigma*mu*e
-        #     rhskappa = lambdag**2 + dkappa * dtau - sigma*mu.
-
-        # ds := dz o ds  + lambda o lambda - sigma*mu*e 
-        sprod(ds, dz, dims)
-        blas.axpy(lmbdasq, ds, n = dims['l'] + sum(dims['q']))
-        ds[:dims['l']] -= sigma*mu 
-        ds[indq[:-1]] -= sigma*mu
-        ind = dims['l'] + sum(dims['q'])
-        ind2 = ind
+        #     [ P  A' ] [ x ]   [ -q ]
+        #     [       ] [   ] = [    ].
+        #     [ A  0  ] [ y ]   [  b ]
+
+        try: f3 = kktsolver({'d': matrix(0.0, (0,1)), 'di': 
+            matrix(0.0, (0,1)), 'beta': [], 'v': [], 'r': [], 'rti': []})
+        except ArithmeticError: 
+            raise ValueError, "Rank(A) < p or Rank([P; A; G]) < n"
+        x = xnewcopy(q)  
+        xscal(-1.0, x)
+        y = ynewcopy(b)
+        f3(x, y, matrix(0.0, (0,1)))
+        return {'status': 'optimal', 'x': x,  'y': y, 'z': 
+            matrix(0.0, (0,1)), 's': matrix(0.0, (0,1))}
+
+
+    # Default initial points are x = 0, y = 0, s = e, z = e. 
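+    # A warm start can be passed through 'initvals', a dictionary with
+    # (any subset of) the keys 'x', 's', 'y', 'z', for example
+    # (illustrative):
+    #
+    #     initvals = {'x': x0, 's': s0, 'y': y0, 'z': z0}
+    #
+    # where s0 and z0 must be strictly positive with respect to the cone.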
+
+    x, y = xnewcopy(q), ynewcopy(b)  
+    s, z = matrix(0.0, (cdim, 1)), matrix(0.0, (cdim, 1))
+
+    if initvals is None: 
+        initvals = {}
+    if 'x' in initvals: 
+        xcopy(initvals['x'], x)
+    else: 
+        xscal(0.0, x)
+
+    if 's' in initvals:
+        blas.copy(initvals['s'], s)
+        # ts = min{ t | s + t*e >= 0 }
+        if misc.max_step(s, dims) >= 0:
+            raise ValueError, "initial s is not positive"
+    else: 
+        s[: dims['l']] = 1.0 
+        ind = dims['l']
+        for m in dims['q']:
+            s[ind] = 1.0
+            ind += m
         for m in dims['s']:
-            blas.axpy(lmbdasq, ds, n = m, offsetx = ind2, offsety = ind,
-                incy = m+1)
-            ds[ind : ind+m*m : m+1] -= sigma*mu
-            ind += m*m
-            ind2 += m
-        dkappa[0] = lmbdasq[-1] + dkappa[0]*dtau[0] - sigma*mu
-        xcopy(resx, dx);  xscal(1.0 - sigma, dx)
-        ycopy(resy, dy);  yscal(1.0 - sigma, dy)
-        blas.copy(resz, dz);  blas.scal(1.0 - sigma, dz)
-        dtau[0] = (1.0 - sigma) * rest 
-
-        solve_newton(dx, dy, dz, dtau, ds, dkappa)
-
-
-        # Maximum step to boundary.
-        # Compute eigenvalue decomposition of symmetric matrices in ds, dz.
-        # The eigenvectors of dsk, dzk are stored in dsk, dzk.  
-        # The eigenvalues are stored in sigs, sigz. 
-
-        scale2(lmbda, ds, dims)
-        ts = max_step(ds, dims, sigs)
-        scale2(lmbda, dz, dims)
-        tz = max_step(dz, dims, sigz)
-        tt = -dtau[0]/lmbda[-1]
-        tk = -dkappa[0]/lmbda[-1]
-        step = min(1.0, STEP / max([ 0.0, ts, tz, tt, tk ]))
+            s[ind : ind + m*m : m+1] = 1.0
+            ind += m**2
 
-        # ds := e + step*ds for 'l' and 'q' blocks.
-        blas.scal(step, ds, n = dims['l'] + sum(dims['q']))
-        ds[:dims['l']] += 1.0
-        ds[indq[:-1]] += 1.0
+    if 'y' in initvals:
+        ycopy(initvals['y'], y)
+    else:
+        yscal(0.0, y)
 
-        # sigs := e + step*sigs for 's' blocks.
-        blas.scal(step, sigs)
-        sigs += 1.0
+    if 'z' in initvals:
+        blas.copy(initvals['z'], z)
+        # tz = min{ t | z + t*e >= 0 }
+        if misc.max_step(z, dims) >= 0:
+            raise ValueError, "initial z is not positive"
+    else:
+        z[: dims['l']] = 1.0 
+        ind = dims['l']
+        for m in dims['q']:
+            z[ind] = 1.0
+            ind += m
+        for m in dims['s']:
+            z[ind : ind + m*m : m+1] = 1.0
+            ind += m**2
+    
 
-        # dz := e + step*dz for 'l' and 'q' blocks.
-        blas.scal(step, dz, n = dims['l'] + sum(dims['q']))
-        dz[:dims['l']] += 1.0
-        dz[indq[:-1]] += 1.0
+    rx, ry, rz = xnewcopy(q), ynewcopy(b), matrix(0.0, (cdim, 1)) 
+    dx, dy = xnewcopy(x), ynewcopy(y)   
+    dz, ds = matrix(0.0, (cdim, 1)), matrix(0.0, (cdim, 1))
+    lmbda = matrix(0.0, (dims['l'] + sum(dims['q']) + sum(dims['s']), 1))
+    lmbdasq = matrix(0.0, (dims['l'] + sum(dims['q']) + sum(dims['s']), 1))
+    sigs = matrix(0.0, (sum(dims['s']), 1))
+    sigz = matrix(0.0, (sum(dims['s']), 1))
 
-        # sigz := e + step*sigz for 's' blocks.
-        blas.scal(step, sigz)
-        sigz += 1.0
 
-        # ds := H(lambda)^{-1/2} * ds and dz := H(lambda)^{-1/2} * dz.
-        scale2(lmbda, ds, dims, inverse = 'I')
-        scale2(lmbda, dz, dims, inverse = 'I')
+    if show_progress: 
+        print "% 10s% 12s% 10s% 8s% 7s" %("pcost", "dcost", "gap", "pres",
+            "dres")
 
-        # The 'l' and 'q' components of ds and dz now contain the updated
-        # variables in the current scaling.  The 's' components of ds 
-        # and dz contain 
-        #
-        #     Lambda^1/2 * Qs * Lambda^1/2
-        #     Lambda^1/2 * Qz * Lambda^1/2
-        #
-        # where Lambda^1/2 * (Qs * diag(sigs) * Qs') * Lambda^1/2 and 
-        # Lambda^1/2 * (Qz * diag(sigs) * Qz') * Lambda^1/2 are the 
-        # updated variablaes in the current scaling.
+    resx0 = max(1.0, math.sqrt(xdot(q,q)))
+    resy0 = max(1.0, math.sqrt(ydot(b,b)))
+    resz0 = max(1.0, misc.snrm2(h, dims))
+    gap = misc.sdot(s, z, dims) 
 
+    for iters in xrange(MAXITERS):  
 
-        # Update lambda and scaling.
+        # f0 = (1/2)*x'*P*x + q'*x + r and  rx = P*x + q + A'*y + G'*z.
+        xcopy(q, rx)
+        fP(x, rx, beta = 1.0)
+        f0 = 0.5 * (xdot(x, rx) + xdot(x, q))
+        fA(y, rx, beta = 1.0, trans = 'T')
+        fG(z, rx, beta = 1.0, trans = 'T')
+        resx = math.sqrt(xdot(rx, rx))
+           
+        # ry = A*x - b
+        ycopy(b, ry)
+        fA(x, ry, alpha = 1.0, beta = -1.0)
+        resy = math.sqrt(ydot(ry, ry))
+
+        # rz = s + G*x - h
+        blas.copy(s, rz)
+        blas.axpy(h, rz, alpha = -1.0)
+        fG(x, rz, beta = 1.0)
+        resz = misc.snrm2(rz, dims)
+
+        # pcost = (1/2)*x'*P*x + q'*x 
+        # dcost = (1/2)*x'*P*x + q'*x + y'*(A*x-b) + z'*(G*x-h)
+        #       = (1/2)*x'*P*x + q'*x + y'*(A*x-b) + z'*(G*x-h+s) - z'*s
+        #       = (1/2)*x'*P*x + q'*x + y'*ry + z'*rz - gap
+        pcost = f0
+        dcost = f0 + ydot(y, ry) + misc.sdot(z, rz, dims) - gap
+        pres = max(resy/resy0, resz/resz0)
+        dres = resx/resx0 
 
-        # For kappa, tau block: 
-        #
-        #     dg := sqrt( (kappa + step*dkappa) / (tau + step*dtau) ) 
-        #         = dg * sqrt( (1 - step*tk) / (1 - step*tt) )
-        #
-        #     lmbda[-1] := sqrt((tau + step*dtau) * (kappa + step*dkappa))
-        #                = lmbda[-1] * sqrt(( 1 - step*tt) * (1 - step*tk))
+        if show_progress:
+            print "%2d: % 8.4e % 8.4e % 4.0e% 7.0e% 7.0e" \
+                %(iters, pcost, dcost, gap, pres, dres) 
 
-        dg *= math.sqrt(1.0 - step*tk) / math.sqrt(1.0 - step*tt) 
-        dgi = 1.0 / dg
-        lmbda[-1] *= math.sqrt(1.0 - step*tt) * math.sqrt(1.0 - step*tk) 
+        # Stopping criteria.    
+        if pres <= FEASTOL and dres <= FEASTOL and ( gap <= ABSTOL or 
+            (dcost > 0 and gap/dcost <= RELTOL) or (pcost < 0 and 
+            -gap/pcost <= RELTOL) ):
+            ind = dims['l'] + sum(dims['q'])
+            for m in dims['s']:
+                misc.symm(s, m, ind)
+                misc.symm(z, m, ind)
+                ind += m**2
+            return {'status': 'optimal', 'x': x,  'y': y, 'z': z, 's': s}
 
 
-        # 'l' blocks
+        # Compute initial scaling W and scaled iterates:  
         #
-        #    d :=  d .* sqrt( ds ./ dz )
-        #    lmbda := lmbda .* sqrt(ds) .* sqrt(dz)
-
-        m = dims['l']
-        ds[:m] = base.sqrt( ds[:m] )
-        dz[:m] = base.sqrt( dz[:m] )
- 
-        # d := d .* ds .* dz 
-        blas.tbmv(ds, W['d'], n = m, k = 0, ldA = 1)
-        blas.tbsv(dz, W['d'], n = m, k = 0, ldA = 1)
-        W['di'][:m] = W['d'][:m] ** -1
-         
-        # lmbda := ds .* dz
-        blas.copy(ds, lmbda, n = m)
-        blas.tbmv(dz, lmbda, n = m, k = 0, ldA = 1)
+        #     W * z = W^{-T} * s = lambda.
+        # 
+        # lmbdasq = lambda o lambda.
+        
+        if iters == 0:  W = misc.compute_scaling(s, z, lmbda, dims)
+        misc.ssqr(lmbdasq, lmbda, dims)
 
 
-        # 'q' blocks.
-        # 
-        # Let st and zt be the new variables in the old scaling:
-        #
-        #     st = ds_k,   zt = dz_k
-        #
-        # and a = sqrt(st' * J * st),  b = sqrt(zt' * J * zt).
-        #
-        # 1. Compute the hyperbolic Householder transformation 2*q*q' - J 
-        #    that maps st/a to zt/b.
-        # 
-        #        c = sqrt( (1 + st'*zt/(a*b)) / 2 ) 
-        #        q = (st/a + J*zt/b) / (2*c). 
-        #
-        #    The new scaling point is 
+        # f3(x, y, z) solves
         #
-        #        wk := betak * sqrt(a/b) * (2*v[k]*v[k]' - J) * q 
+        #    [ P   A'  G'    ] [ ux        ]   [ bx ]
+        #    [ A   0   0     ] [ uy        ] = [ by ].
+        #    [ G   0   -W'*W ] [ W^{-1}*uz ]   [ bz ]
         #
-        #    with betak = W['beta'][k].
+        # On entry, x, y, z contain bx, by, bz.
+        # On exit, they contain ux, uy, uz.
+
+        try: f3 = kktsolver(W)
+        except ArithmeticError: 
+            if iters == 0:
+                raise ValueError, "Rank(A) < p or Rank([P; A; G]) < n"
+            else:  
+                raise ArithmeticError, "singular KKT matrix"
+
+
+        # f4_no_ir(x, y, z, s) solves
         # 
-        # 3. The scaled variable:
-        #
-        #        lambda_k0 = sqrt(a*b) * c
-        #        lambda_k1 = sqrt(a*b) * ( (2vk*vk' - J) * (-d*q + u/2) )_1
+        #     [ 0     ]   [ P  A'  G' ]   [ ux        ]   [ bx ]
+        #     [ 0     ] + [ A  0   0  ] * [ uy        ] = [ by ]
+        #     [ W'*us ]   [ G  0   0  ]   [ W^{-1}*uz ]   [ bz ]
         #
-        #    where 
+        #     lmbda o (uz + us) = bs.
         #
-        #        u = st/a - J*zt/b 
-        #        d = ( vk0 * (vk'*u) + u0/2 ) / (2*vk0 *(vk'*q) - q0 + 1).
-        #
-        # 4. Update scaling
-        #   
-        #        v[k] := wk^1/2 
-        #              = 1 / sqrt(2*(wk0 + 1)) * (wk + e).
-        #        beta[k] *=  sqrt(a/b)
+        # On entry, x, y, z, s contain bx, by, bz, bs.
+        # On exit, they contain ux, uy, uz, us.
 
+        def f4_no_ir(x, y, z, s):
 
-        ind = dims['l']
-        for k in xrange(Nq):
+            # Solve 
+            #
+            #     [ P A' G'   ] [ ux        ]    [ bx                    ]
+            #     [ A 0  0    ] [ uy        ] =  [ by                    ]
+            #     [ G 0 -W'*W ] [ W^{-1}*uz ]    [ bz - W'*(lmbda o\ bs) ]
+            #
+            #     us = lmbda o\ bs - uz.
+            #
+            # On entry, x, y, z, s contain bx, by, bz, bs. 
+            # On exit, they contain ux, uy, uz, us.
+            
+            # s := lmbda o\ s 
+            #    = lmbda o\ bs
+            misc.sinv(s, lmbda, dims)
 
-            m = dims['q'][k]
-            v = W['v'][k]
+            # z := z - W'*s 
+            #    = bz - W'*(lambda o\ bs)
+            blas.copy(s, ws3)
+            misc.scale(ws3, W, trans = 'T')
+            blas.axpy(ws3, z, alpha = -1.0)
 
-            # ln = sqrt( lambda_k' * J * lambda_k )
-            ln = jnrm2(lmbda, n = m, offset = ind) 
+            # Solve for ux, uy, uz
+            f3(x, y, z)
 
-            # a = sqrt( dsk' * J * dsk ) = sqrt( st' * J * st ) 
-            # ds := ds / a = st / a
-            aa = jnrm2(ds, offset = ind, n = m)
-            blas.scal(1.0/aa, ds, offset = ind, n = m)
+            # s := s - z 
+            #    = lambda o\ bs - uz.
+            blas.axpy(z, s, alpha = -1.0)
 
-            # b = sqrt( dzk' * J * dzk ) = sqrt( zt' * J * zt )
-            # dz := dz / a = zt / b
-            bb = jnrm2(dz, offset = ind, n = m) 
-            blas.scal(1.0/bb, dz, offset = ind, n = m)
 
-            # c = sqrt( ( 1 + (st'*zt) / (a*b) ) / 2 )
-            cc = math.sqrt( ( 1.0 + blas.dot(ds, dz, offsetx = ind,
-                offsety = ind, n = m) ) / 2.0 )
+        # f4(x, y, z, s) solves the same system as f4_no_ir, but applies
+        # iterative refinement.
 
-            # vs = v' * st / a 
-            vs = blas.dot(v, ds, offsety = ind, n = m) 
+        if iters == 0:
+            if refinement:
+                wx, wy = xnewcopy(q), ynewcopy(b) 
+                wz, ws = matrix(0.0, (cdim,1)), matrix(0.0, (cdim,1)) 
+                wx2, wy2 = xnewcopy(q), ynewcopy(b) 
+                wz2, ws2 = matrix(0.0, (cdim,1)), matrix(0.0, (cdim,1)) 
+
+        def f4(x, y, z, s):
+            if refinement: 
+                xcopy(x, wx)        
+                ycopy(y, wy)        
+                blas.copy(z, wz)        
+                blas.copy(s, ws)        
+            f4_no_ir(x, y, z, s)        
+            for i in xrange(refinement):
+                xcopy(wx, wx2)        
+                ycopy(wy, wy2)        
+                blas.copy(wz, wz2)        
+                blas.copy(ws, ws2)        
+                res(x, y, z, s, wx2, wy2, wz2, ws2, W, lmbda) 
+                f4_no_ir(wx2, wy2, wz2, ws2)
+                xaxpy(wx2, x)
+                yaxpy(wy2, y)
+                blas.axpy(wz2, z)
+                blas.axpy(ws2, s)
+
+        mu = gap / (dims['l'] + len(dims['q']) + sum(dims['s']))
+        sigma, eta = 0.0, 0.0
+
+        for i in [0, 1]:
+
+            # Solve
+            #
+            #     [ 0     ]   [ P  A' G' ]   [ dx        ]
+            #     [ 0     ] + [ A  0  0  ] * [ dy        ] = -(1 - eta) * r
+            #     [ W'*ds ]   [ G  0  0  ]   [ W^{-1}*dz ]
+            #
+            #     lmbda o (dz + ds) = -lmbda o lmbda + sigma*mu*e (i=0)
+            #     lmbda o (dz + ds) = -lmbda o lmbda - dsa o dza 
+            #                         + sigma*mu*e (i=1) where dsa, dza
+            #                         are the solution for i=0. 
+ 
+            # ds = -lmbdasq + sigma * mu * e  (if i is 0)
+            #    = -lmbdasq - dsa o dza + sigma * mu * e  (if i is 1), 
+            #      where dsa, dza are the solution for i = 0.
+            blas.scal(0.0, ds)
+            if i == 1:  
+                blas.axpy(ws3, ds, alpha = -1.0)
+            blas.axpy(lmbdasq, ds, n = dims['l'] + sum(dims['q']), 
+                alpha = -1.0)
+            ds[:dims['l']] += sigma*mu
+            ind = dims['l']
+            for m in dims['q']:
+                ds[ind] += sigma*mu
+                ind += m
+            ind2 = ind
+            for m in dims['s']:
+                blas.axpy(lmbdasq, ds, n = m, offsetx = ind2, offsety =  
+                    ind, incy = m + 1, alpha = -1.0)
+                ds[ind : ind + m*m : m+1] += sigma*mu
+                ind += m*m
+                ind2 += m
 
-            # vz = v' * J *zt / b
-            vz = jdot(v, dz, offsety = ind, n = m) 
+       
+            # (dx, dy, dz) := -(1 - eta) * (rx, ry, rz)
+            xscal(0.0, dx);  xaxpy(rx, dx, alpha = -1.0 + eta)
+            yscal(0.0, dy);  yaxpy(ry, dy, alpha = -1.0 + eta)
+            blas.scal(0.0, dz) 
+            blas.axpy(rz, dz, alpha = -1.0 + eta)
+            
+            try: f4(dx, dy, dz, ds)
+            except ArithmeticError: 
+                if iters == 0:
+                    raise ValueError, "Rank(A) < p or Rank([P; A; G]) < n"
+                else:
+                    raise ArithmeticError, "singular KKT matrix"
 
-            # vq = v' * q where q = (st/a + J * zt/b) / (2 * c)
-            vq = (vs + vz ) / 2.0 / cc
+            # Save ds o dz for Mehrotra correction
+            if i == 0:
+                blas.copy(ds, ws3)
+                misc.sprod(ws3, dz, dims)
 
-            # vu = v' * u  where u =  st/a - J * zt/b 
-            vu = vs - vz  
 
-            # lambda_k0 = c
-            lmbda[ind] = cc
+            # Maximum steps to boundary.  
+            # 
+            # If i is 1, also compute eigenvalue decomposition of the 
+            # 's' blocks in ds,dz.  The eigenvectors Qs, Qz are stored in 
+            # dsk, dzk.  The eigenvalues are stored in sigs, sigz.
+
+            misc.scale2(lmbda, ds, dims)
+            misc.scale2(lmbda, dz, dims)
+            if i == 0: 
+                ts = misc.max_step(ds, dims)
+                tz = misc.max_step(dz, dims)
+            else:
+                ts = misc.max_step(ds, dims, sigma = sigs)
+                tz = misc.max_step(dz, dims, sigma = sigz)
+            t = max([ 0.0, ts, tz ])
+            if t == 0:
+                step = 1.0
+            else:
+                if i == 0:
+                    step = min(1.0, 1.0 / t)
+                else:
+                    step = min(1.0, STEP / t)
+            if i == 0: 
+                sigma = (1.0 - step)**EXPON
+                eta = 0.0
 
-            # wk0 = 2 * vk0 * (vk' * q) - q0 
-            wk0 = 2 * v[0] * vq - ( ds[ind] + dz[ind] ) / 2.0 / cc 
 
-            # d = (v[0] * (vk' * u) - u0/2) / (wk0 + 1)
-            dd = (v[0] * vu - ds[ind]/2.0 + dz[ind]/2.0) / (wk0 + 1.0)
+        xaxpy(dx, x, alpha = step)
+        yaxpy(dy, y, alpha = step)
 
-            # lambda_k1 = 2 * v_k1 * vk' * (-d*q + u/2) - d*q1 + u1/2
-            blas.copy(v, lmbda, offsetx = 1, offsety = ind+1, n = m-1)
-            blas.scal(2.0 * (-dd * vq + 0.5 * vu), lmbda, offset = ind+1, 
-                n = m-1)
-            blas.axpy(ds, lmbda, 0.5 * (1.0 - dd/cc), offsetx = ind+1,
-                offsety = ind+1, n = m-1)
-            blas.axpy(dz, lmbda, 0.5 * (1.0 + dd/cc), offsetx = ind+1,
-                offsety = ind+1, n = m-1)
 
-            # Scale so that sqrt(lambda_k' * J * lambda_k) = sqrt(aa*bb).
-            blas.scal(math.sqrt(aa*bb), lmbda, offset = ind, n = m)
-            
-            # v := (2*v*v' - J) * q 
-            #    = 2 * (v'*q) * v' - (J* st/a + zt/b) / (2*c)
-            blas.scal(2.0 * vq, v)
-            v[0] -= ds[ind] / 2.0 / cc
-            blas.axpy(ds, v,  0.5/cc, offsetx = ind+1, offsety = 1,
-                n = m-1)
-            blas.axpy(dz, v, -0.5/cc, offsetx = ind, n = m)
-
-            # v := v^{1/2} = 1/sqrt(2 * (v0 + 1)) * (v + e)
-            v[0] += 1.0
-            blas.scal(1.0 / math.sqrt(2.0 * v[0]), v)
-
-            # beta[k] *= ( aa / bb )**1/2
-            W['beta'][k] *= math.sqrt( aa / bb )
-            
-            ind += m
+        # We will now replace the 'l' and 'q' blocks of ds and dz with 
+        # the updated iterates in the current scaling.
+        # We also replace the 's' blocks of ds and dz with the factors 
+        # Ls, Lz in a factorization Ls*Ls', Lz*Lz' of the updated variables
+        # in the current scaling.
 
+        # ds := e + step*ds for 'l' and 'q' blocks.
+        # dz := e + step*dz for 'l' and 'q' blocks.
+        blas.scal(step, ds, n = dims['l'] + sum(dims['q']))
+        blas.scal(step, dz, n = dims['l'] + sum(dims['q']))
+        ind = dims['l']
+        ds[:ind] += 1.0
+        dz[:ind] += 1.0
+        for m in dims['q']:
+            ds[ind] += 1.0
+            dz[ind] += 1.0
+            ind += m
 
-        # 's' blocks
-        # 
-        # Let st, zt be the updated variables in the old scaling:
-        # 
-        #     st = ds * diag(sigs ./ lambda) * ds'
-        #     zt = dz * diag(sigs ./ lambda) * dz'.
-        #
-        # 1.  Compute 
-        #
-        #         L1 = dsk * diag(sigs_k ./ lambda_k)^{1/2}
-        #         L2 = dzk * diag(sigz_k ./ lambda_k)^{1/2}.
-        #
-        #     We have 
-        #
-        #         L1 * L1' = st,  L2 * L2' = zt.
-        #
-        # 2.  SVD L2'*L1 = Uk * lambda_k^+ * Vk'.
+        # ds := H(lambda)^{-1/2} * ds and dz := H(lambda)^{-1/2} * dz.
         #
-        # 3.  New scaling is 
+        # This replaces the 'l' and 'q' components of ds and dz with the
+        # updated iterates in the current scaling.
+        # The 's' components of ds and dz are replaced with
         #
-        #         r[k] := r[k] * L1 * Vk * diag(lambda_k^+)^{-1/2}
-        #         rti[k] := r[k] * L2 * Uk * diag(lambda_k^+)^{-1/2}.
-
-        ind = dims['l'] + sum(dims['q'])
-        ind2, ind3 = ind, 0
-
-        # sigs := sigs./lambda.  sigz := sigz./lambda.
-        blas.tbsv(lmbda, sigs, n = sum(dims['s']), k = 0, ldA = 1, 
-            offsetA = ind)
-        blas.tbsv(lmbda, sigz, n = sum(dims['s']), k = 0, ldA = 1, 
-            offsetA = ind)
+        #     diag(lmbda_k)^{1/2} * Qs * diag(lmbda_k)^{1/2}
+        #     diag(lmbda_k)^{1/2} * Qz * diag(lmbda_k)^{1/2}
+        # 
+        misc.scale2(lmbda, ds, dims, inverse = 'I')
+        misc.scale2(lmbda, dz, dims, inverse = 'I')
 
-        for k in xrange(Ns):
+        # sigs := ( e + step*sigs ) ./ lambda for 's' blocks.
+        # sigz := ( e + step*sigz ) ./ lambda for 's' blocks.
+        blas.scal(step, sigs)
+        blas.scal(step, sigz)
+        sigs += 1.0
+        sigz += 1.0
+        blas.tbsv(lmbda, sigs, n = sum(dims['s']), k = 0, ldA = 1, offsetA
+            = dims['l'] + sum(dims['q']))
+        blas.tbsv(lmbda, sigz, n = sum(dims['s']), k = 0, ldA = 1, offsetA
+            = dims['l'] + sum(dims['q']))
+
+        # dsk := Ls = dsk * sqrt(sigs).
+        # dzk := Lz = dzk * sqrt(sigz).
+        ind2, ind3 = dims['l'] + sum(dims['q']), 0
+        for k in xrange(len(dims['s'])):
             m = dims['s'][k]
-            r, rti = W['r'][k], W['rti'][k]
-
-            # dsk := L1 = dsk * sqrt(sigs).  dzk := L2 = dzk * sqrt(sigz).
             for i in xrange(m):
                 blas.scal(math.sqrt(sigs[ind3+i]), ds, offset = ind2 + m*i,
                     n = m)
                 blas.scal(math.sqrt(sigz[ind3+i]), dz, offset = ind2 + m*i,
                     n = m)
-
-            # r := r*dsk = r*L1
-            blas.gemm(r, ds, work, m = m, n = m, k = m, ldB = m, ldC = m,
-                offsetB = ind2)
-            blas.copy(work, r, n = m**2)
-
-            # rti := rti*dzk = rti*L2
-            blas.gemm(rti, dz, work, m = m, n = m, k = m, ldB = m, ldC = m,
-                offsetB = ind2)
-            blas.copy(work, rti, n = m**2)
-
-            # SVD L2'*L1 = U * lmbds^+ * V'; store U in dsk and V' in dzk.
-            blas.gemm(dz, ds, work, transA = 'T', m = m, n = m, k = m,
-                ldA = m, ldB = m, ldC = m, offsetA = ind2, offsetB = ind2)
-            lapack.gesvd(work, lmbda, jobu = 'A', jobvt = 'A', m = m,
-                n = m, ldA = m, U = ds, Vt = dz, ldU = m, ldVt = m,
-                offsetS = ind, offsetU = ind2, offsetVt = ind2)
-
-            # r := r*V
-            blas.gemm(r, dz, work, transB = 'T', m = m, n = m, k = m, 
-                ldB = m, ldC = m, offsetB = ind2)
-            blas.copy(work, r, n = m**2)
-
-            # rti := rti*U
-            blas.gemm(rti, ds, work, n = m, m = m, k = m, ldB = m, ldC = m,
-                offsetB = ind2)
-            blas.copy(work, rti, n = m**2)
-
-            # r := r*lambda^{-1/2}; rti := rti*lambda^{-1/2}
-            for i in xrange(m):    
-                a = 1.0 / math.sqrt(lmbda[ind+i])
-                blas.scal(a, r, offset = m*i, n = m)
-                blas.scal(a, rti, offset = m*i, n = m)
-
-            ind += m
             ind2 += m*m
             ind3 += m
 
-        xaxpy(dx, x, alpha = step)
-        yaxpy(dy, y, alpha = step)
 
+        # Update lambda and scaling.
+        misc.update_scaling(W, lmbda, ds, dz)
 
-        # Unscale s, z, tau, kappa (unscaled variables are used only to 
-        # compute feasibility residuals).
+
+        # Unscale s, z (unscaled variables are used only to compute 
+        # feasibility residuals).
 
         blas.copy(lmbda, s, n = dims['l'] + sum(dims['q']))
         ind = dims['l'] + sum(dims['q'])
@@ -2396,7 +2001,7 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
                 incy = m+1)
             ind += m
             ind2 += m*m
-        scale(s, W, trans = 'T')
+        misc.scale(s, W, trans = 'T')
 
         blas.copy(lmbda, z, n = dims['l'] + sum(dims['q']))
         ind = dims['l'] + sum(dims['q'])
@@ -2404,18 +2009,15 @@ def conelp(c, G, h, dims, A = None, b = None, primalstart = None,
         for m in dims['s']:
             blas.scal(0.0, z, offset = ind2)
             blas.copy(lmbda, z, offsetx = ind, offsety = ind2, n = m, 
-                    incy = m+1)
+                incy = m+1)
             ind += m
             ind2 += m*m
-        scale(z, W, inverse = 'I')
+        misc.scale(z, W, inverse = 'I')
 
-        kappa, tau = lmbda[-1]/dgi, lmbda[-1]*dgi
-        gap = blas.dot(lmbda, lmbda, n = cdeg) / tau**2
-
-
-    return {'status': 'unknown', 'x': None, 'y': None, 's': None, 
-        'z': None}
+        gap = blas.dot(lmbda, lmbda) 
 
+    return {'status': 'unknown', 'x': None,  'y': None, 'z': None, 
+        's': None}
 
 
 def lp(c, G, h, A = None, b = None, solver = None, primalstart = None,
@@ -2500,6 +2102,9 @@ def lp(c, G, h, A = None, b = None, solver = None, primalstart = None,
     GLPK and MOSEK documentation.  Options that are not recognized are 
     replaced by their default values.
     """
+    import math
+    from cvxopt import base, blas, misc
+    from cvxopt.base import matrix, spmatrix
 
     if type(c) is not matrix or c.typecode != 'd' or c.size[1] != 1: 
         raise TypeError, "'c' must be a dense column matrix"
@@ -2571,8 +2176,8 @@ def lp(c, G, h, A = None, b = None, solver = None, primalstart = None,
             x, s, y, z = None, None, None, None
         return {'status': status, 'x': x, 's': s, 'y': y, 'z': z}
 
-    return conelp(c, G, h, {'l': m, 'q': [], 's': []}, A,  b, primalstart 
-        = primalstart, dualstart = dualstart)
+    return conelp(c, G, h, {'l': m, 'q': [], 's': []}, A,  b, primalstart,
+        dualstart )
 
 
 
@@ -2669,6 +2274,9 @@ def socp(c, Gl = None, hl = None, Gq = None, hq = None, A = None, b = None,
         options['feastol'] scalar (default: 1e-7).
     """
 
+    from cvxopt import base, blas
+    from cvxopt.base import matrix, spmatrix
+
     if type(c) is not matrix or c.typecode != 'd' or c.size[1] != 1: 
         raise TypeError, "'c' must be a dense column matrix"
     n = c.size[0]
@@ -2945,6 +2553,10 @@ def sdp(c, Gl = None, hl = None, Gs = None, hs = None, A = None, b = None,
         options['rgaptol'] scalar (default: 1e-5).
     """
 
+    import math
+    from cvxopt import base, blas, misc
+    from cvxopt.base import matrix, spmatrix
+
     if type(c) is not matrix or c.typecode != 'd' or c.size[1] != 1: 
         raise TypeError, "'c' must be a dense column matrix"
     n = c.size[0]
@@ -3005,8 +2617,8 @@ def sdp(c, Gl = None, hl = None, Gs = None, hs = None, A = None, b = None,
         ss = [ hs[k] - matrix(Gs[k]*x, (ms[k], ms[k])) for k in 
             xrange(len(ms)) ]
         for k in xrange(len(ms)):  
-            symm(ss[k], ms[k])
-            symm(zs[k], ms[k])
+            misc.symm(ss[k], ms[k])
+            misc.symm(zs[k], ms[k])
         if dsdpstatus == 'DSDP_PDFEASIBLE':
             y = matrix(0.0, (0,1))
             status = 'optimal'
@@ -3018,7 +2630,7 @@ def sdp(c, Gl = None, hl = None, Gs = None, hs = None, A = None, b = None,
             zl, zs = None, None
             status = 'dual infeasible'
         elif dsdpstatus == 'DSDP_INFEASIBLE':
-            dcost = -blas.dot(hl,zl) - sdot2(hs, zs)
+            dcost = -blas.dot(hl,zl) - misc.sdot2(hs, zs)
             zl /= -dcost 
             zs = [zk / -dcost for zk in zs]
             y = matrix(0.0, (0,1))
@@ -3101,3 +2713,105 @@ def sdp(c, Gl = None, hl = None, Gs = None, hs = None, A = None, b = None,
              val['zs'][k][:] = sol['z'][ind:ind+m*m]
              ind += m**2
     return val
+
+
+def qp(P, q, G = None, h = None, A = None, b = None, solver = None, 
+    initvals = None):
+
+    """
+    Solves a quadratic program
+
+        minimize    (1/2)*x'*P*x + q'*x 
+        subject to  G*x <= h      
+                    A*x = b.
+
+
+    Input arguments 
+
+        P is an nxn dense or sparse 'd' matrix; only the lower 
+        triangular part of P is referenced.  P must be positive 
+        semidefinite.
+
+        q is an nx1 dense 'd' matrix.
+
+        G is an mxn dense or sparse 'd' matrix.
+
+        h is an mx1 dense 'd' matrix.
+
+        A is a pxn dense or sparse 'd' matrix.
+
+        b is a px1 dense 'd' matrix or None.
+
+        solver is None or 'mosek'.
+
+        The default values for G, h, A and b are empty matrices with 
+        zero rows.
+
+
+    Returns a dictionary with keys 'status', 'x', 's', 'y', 'z'.
+
+        The default solver returns with status 'optimal' or 'unknown'.
+        The MOSEK solver can also return with status 'primal infeasible'
+        or 'dual infeasible'.
+
+        If status is 'optimal', x, s, y, z are the primal and dual 
+        optimal solutions.
+
+        If status is 'primal infeasible', x = s = None and z, y are 
+        a proof of primal infeasibility:
+
+            G'*z + A'*y = 0,  h'*z + b'*y = -1,  z >= 0.
+
+        If status is 'dual infeasible', z = y = None, and x, s are 
+        a proof of dual infeasibility:
+
+            P*x = 0,  q'*x = -1,  G*x + s = 0,  A*x = 0,  s >= 0.
+
+        If status is 'unknown', x, y, s, z are None.  
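+
+        Example (illustrative sketch; assumes the documented 
+        cvxopt.solvers.qp interface to this function):
+
+            from cvxopt.base import matrix
+            from cvxopt import solvers
+            P = matrix([[2.0, 0.0], [0.0, 2.0]])
+            q = matrix([1.0, 1.0])
+            G = matrix([[-1.0, 0.0], [0.0, -1.0]])   # -x <= 0, i.e. x >= 0
+            h = matrix([0.0, 0.0])
+            sol = solvers.qp(P, q, G, h)
+            print sol['status'], sol['x']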
+    """
+
+    from cvxopt import base, blas
+    from cvxopt.base import matrix, spmatrix
+
+    if solver == 'mosek':
+        try: 
+            from cvxopt import mosek
+            import pymosek 
+        except ImportError: raise ValueError, "invalid option "\
+            "(solver='mosek'): cvxopt.mosek is not installed" 
+
+        if 'MOSEK' in options:
+            mosek.options = options['MOSEK']
+        else:
+            mosek.options = {}
+        solsta, x, z, y = mosek.solveqp(P, q, G, h, A, b)
+        m = G.size[0]
+
+        if solsta == pymosek.solsta.optimal:
+            s = matrix(0.0, (m,1))
+            blas.copy(h, s)    
+            base.gemv(G, x, s, alpha = -1.0, beta = 1.0)
+            status = 'optimal'
+
+        elif solsta == pymosek.solsta.prim_infeas_cer:
+            status = 'primal infeasible'
+            ducost = -blas.dot(h,z) - blas.dot(b,y)
+            blas.scal(1.0/ducost, y);
+            blas.scal(1.0/ducost, z);
+            x, s = None, None
+
+        elif solsta == pymosek.solsta.dual_infeas_cer:
+            status = 'dual infeasible'
+            qx = blas.dot(q,x)
+            if qx:  x /= (-qx)
+            s = matrix(0.0, (m,1))
+            base.gemv(G, x, s, alpha=-1.0)
+            z, y = None, None
+
+        else: 
+            status = 'unknown'
+            x, s, y, z = None, None, None, None
+
+        return {'status': status, 'x': x, 's': s, 'y': y, 'z': z}
+
+    return coneqp(P, q, G, h, None, A,  b, initvals)
diff --git a/src/python/cvxprog.py b/src/python/cvxprog.py
index 758117f..857973c 100644
--- a/src/python/cvxprog.py
+++ b/src/python/cvxprog.py
@@ -6,9 +6,9 @@ for quadratic and geometric programming.  Also includes an interface
 to the quadratic programming solver from MOSEK.
 """
 
-# Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+# Copyright 2004-2008 J. Dahl and L. Vandenberghe.
 # 
-# This file is part of CVXOPT version 0.9.2.
+# This file is part of CVXOPT version 0.9.3.
 #
 # CVXOPT is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -23,430 +23,18 @@ to the quadratic programming solver from MOSEK.
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import math 
-from cvxopt import base, blas, lapack, misc
-from cvxopt.base import matrix, spmatrix 
 
 __all__ = []
 options = {}
 
 
-def kkt_ldl(mnl, G, dims, A):
-
-    # Returns a function that (1) computes the LDL factorization of
-    #
-    #     [ H           A'   GG'*W^{-1} ] 
-    #     [ A           0    0          ],
-    #     [ W^{-T}*GG   0   -I          ] 
-    #
-    # given H, Df, W, where GG = [Df; G], and (2) returns a function for 
-    # solving 
-    #
-    #     [ H     A'   GG'   ]   [ ux ]   [ bx ]
-    #     [ A     0    0     ] * [ uy ] = [ by ].
-    #     [ GG    0   -W'*W  ]   [ uz ]   [ bz ]
-    
-    p, n = A.size
-    cdim = mnl + dims['l'] + sum(dims['q']) + sum([ k**2 for k 
-        in dims['s'] ])
-    cdim_pckd = mnl + dims['l'] + sum(dims['q']) + sum([ k*(k+1)/2 for k 
-        in dims['s'] ])
-    ldK = n + p + cdim_pckd 
-    K = matrix(0.0, (ldK, ldK))
-    ipiv = matrix(0, (ldK, 1))
-    u = matrix(0.0, (ldK, 1))
-    g = matrix(0.0, (cdim, 1))
-
-    def factor(H, Df, W):
-        blas.scal(0.0, K)
-        K[:n, :n] = H
-        K[n:n+p, :n] = A
-        for k in xrange(n):
-            g[:mnl] = Df[:,k]
-            g[mnl:] = G[:,k]
-            misc.scale(g, W, trans = 'T', inverse = 'I')
-            misc.pack(g, K, mnl, dims, offsety = k*ldK + n + p)
-        K[(ldK+1)*(p+n) :: ldK+1]  = -1.0
-        lapack.sytrf(K, ipiv)
-
-        def solve(x, y, z):
-
-            # Solve
-            #
-            #     [ H          A'   GG'*W^{-1} ]   [ ux   ]   [ bx        ]
-            #     [ A          0    0          ] * [ uy   [ = [ by        ]
-            #     [ W^{-T}*GG  0   -I          ]   [ W*uz ]   [ W^{-T}*bz ]
-            #
-            # and return ux, uy, W*uz.
-            #
-            # On entry, x, y, z contain bx, by, bz.  On exit, they contain
-            # the solution ux, uy, W*uz.
-
-            blas.copy(x, u)
-            blas.copy(y, u, offsety = n)
-            misc.scale(z, W, trans = 'T', inverse = 'I') 
-            misc.pack(z, u, mnl, dims, offsety = n + p)
-            lapack.sytrs(K, ipiv, u)
-            blas.copy(u, x, n = n)
-            blas.copy(u, y, offsetx = n, n = p)
-            misc.unpack(u, z, mnl, dims, offsetx = n + p)
-    
-        return solve
-
-    return factor
-
-
-def kkt_qr(mnl, G, dims, A):
-
-    # Computes the QR factorization
-    #
-    #     A' = [Q1, Q2] * [R; 0]
-    #
-    # and returns a function that (1) computes the Cholesky factorization 
-    #
-    #     Q_2^T * (H + GG^T * W^{-1} * W^{-T} * GG) * Q2 = L * L^T, 
-    #
-    # given H, Df, W, where GG = [Df; G], and (2) returns a function for 
-    # solving 
-    #
-    #     [ H    A'   GG'    ]   [ ux ]   [ bx ]
-    #     [ A    0    0      ] * [ uy ] = [ by ].
-    #     [ GG   0    -W'*W  ]   [ uz ]   [ bz ]
-
-    p, n = A.size
-    cdim = mnl + dims['l'] + sum(dims['q']) + sum([ k**2 for k in 
-        dims['s'] ])
-    cdim_pckd = mnl + dims['l'] + sum(dims['q']) + sum([ k*(k+1)/2 for k 
-        in dims['s'] ])
-
-    # A' = [Q1, Q2] * [R; 0]  (Q1 is n x p, Q2 is n x n-p).
-    if type(A) is matrix: 
-        QA = A.T
-    else: 
-        QA = matrix(A.T)
-    tauA = matrix(0.0, (p,1))
-    lapack.geqrf(QA, tauA)
-
-    Gs = matrix(0.0, (cdim, n))
-    g = matrix(0.0, (cdim, 1))
-    K = matrix(0.0, (n,n)) 
-    bzp = matrix(0.0, (cdim_pckd, 1))
-    yy = matrix(0.0, (p,1))
-
-    def factor(H, Df, W):
-
-        # Compute 
-        #
-        #     K = [Q1, Q2]' * (H + GG' * W^{-1} * W^{-T} * GG) * [Q1, Q2]
-        #
-        # and take the Cholesky factorization of the 2,2 block
-        #
-        #     Q_2' * (H + GG^T * W^{-1} * W^{-T} * GG) * Q2.
-
-        # Gs = W^{-T} * GG in packed storage.
-        Gs[:mnl, :] = Df
-        Gs[mnl:, :] = G
-        misc.scale(Gs, W, trans = 'T', inverse = 'I')
-        for k in xrange(n):
-            g[:] = Gs[:, k]
-            misc.pack(g, Gs, mnl, dims, offsety = k*Gs.size[0])
-
-        # K = [Q1, Q2]' * (H + Gs' * Gs) * [Q1, Q2].
-        K[:,:] = H
-        blas.syrk(Gs, K, beta = 1.0, k = cdim_pckd, trans = 'T')
-        misc.symm(K, n)
-        lapack.ormqr(QA, tauA, K, side = 'L', trans = 'T')
-        lapack.ormqr(QA, tauA, K, side = 'R')
-
-        # Cholesky factorization of 2,2 block of K.
-        lapack.potrf(K, n = n-p, offsetA = p*(n+1))
-
-        def solve(x, y, z):
-
-            # Solve
-            #
-            #     [ 0          A'  GG'*W^{-1} ]   [ ux   ]   [ bx        ]
-            #     [ A          0   0          ] * [ uy   ] = [ by        ]
-            #     [ W^{-T}*GG  0   -I         ]   [ W*uz ]   [ W^{-T}*bz ]
-            #
-            # and return ux, uy, W*uz.
-            #
-            # On entry, x, y, z contain bx, by, bz.  On exit, they contain
-            # the solution ux, uy, W*uz.
-            #
-            # If we change variables ux = Q1*v + Q2*w, the system becomes 
-            # 
-            #     [ K11 K12 R ]   [ v  ]   [Q1'*(bx+GG'*W^{-1}*W^{-T}*bz)]
-            #     [ K21 K22 0 ] * [ w  ] = [Q2'*(bx+GG'*W^{-1}*W^{-T}*bz)]
-            #     [ R^T 0   0 ]   [ uy ]   [by                           ]
-            # 
-            #     W*uz = W^{-T} * ( GG*ux - bz ).
-
-            # bzp := W^{-T} * bz in packed storage 
-            misc.scale(z, W, trans = 'T', inverse = 'I')
-            misc.pack(z, bzp, mnl, dims)
-
-            # x := [Q1, Q2]' * (x + Gs' * bzp)
-            #    = [Q1, Q2]' * (bx + Gs' * W^{-T} * bz)
-            blas.gemv(Gs, bzp, x, beta = 1.0, trans = 'T', m = cdim_pckd)
-            lapack.ormqr(QA, tauA, x, side = 'L', trans = 'T')
-
-            # y := x[:p] 
-            #    = Q1' * (bx + Gs' * W^{-T} * bz)
-            blas.copy(y, yy)
-            blas.copy(x, y, n = p)
-
-            # x[:p] := v = R^{-T} * by 
-            blas.copy(yy, x)
-            lapack.trtrs(QA, x, uplo = 'U', trans = 'T', n = p)
-
-            # x[p:] := K22^{-1} * (x[p:] - K21*x[:p])
-            #        = K22^{-1} * (Q2' * (bx + Gs' * W^{-T} * bz) - K21*v)
-            blas.gemv(K, x, x, alpha = -1.0, beta = 1.0, m = n-p, n = p,
-                offsetA = p, offsety = p)
-            lapack.potrs(K, x, n = n-p, offsetA = p*(n+1), offsetB = p)
-
-            # y := y - [K11, K12] * x
-            #    = Q1' * (bx + Gs' * W^{-T} * bz) - K11*v - K12*w
-            blas.gemv(K, x, y, alpha = -1.0, beta = 1.0, m = p, n = n)
-
-            # y := R^{-1}*y
-            #    = R^{-1} * (Q1' * (bx + Gs' * W^{-T} * bz) - K11*v 
-            #      - K12*w)
-            lapack.trtrs(QA, y, uplo = 'U', n = p)
-           
-            # x := [Q1, Q2] * x
-            lapack.ormqr(QA, tauA, x, side = 'L')
-
-            # bzp := Gs * x - bzp.
-            #      = W^{-T} * ( GG*ux - bz ) in packed storage.
-            # Unpack and copy to z.
-            blas.gemv(Gs, x, bzp, alpha = 1.0, beta = -1.0, m = cdim_pckd)
-            misc.unpack(bzp, z, mnl, dims)
-
-        return solve
-
-    return factor
-
-
-def kkt_chol(mnl, G, dims, A):
-
-    # This works only for problems with no second-order or semidefinite
-    # cone constraints.
-    #
-    # Returns a function that (1) computes Cholesky factorizations of
-    # the matrices 
-    #
-    #     S = H + GG' * W^{-1} * W^{-T} * GG,  
-    #     K = A * S^{-1} *A'
-    #
-    # or (if K is singular in the first call to the function), the matrices
-    #
-    #     S = H + GG' * W^{-1} * W^{-T} * GG + A' * A,  
-    #     K = A * S^{-1} * A',
-    #
-    # given H, Df, W, where GG = [Df; G], and (2) returns a function for 
-    # solving 
-    #
-    #     [ H     A'   GG'   ]   [ ux ]   [ bx ]
-    #     [ A     0    0     ] * [ uy ] = [ by ].
-    #     [ GG    0   -W'*W  ]   [ uz ]   [ bz ]
-
-    p, n = A.size
-    ml = dims['l']
-    F = {'firstcall': True, 'singular': False}
-    if dims['q'] or dims['s']:
-        raise ValueError, "kktsolver option 'chol' is only allowed for "\
-            "problems with no second-order or semidefinite cone "\
-            "constraints"
-
-    def factor(H, Df, W):
-
-        if F['firstcall']:
-            if type(G) is matrix: 
-                F['Gs'] = matrix(0.0, G.size) 
-            else:
-                F['Gs'] = spmatrix(0.0, G.I, G.J, G.size) 
-            if type(Df) is matrix:
-                F['Dfs'] = matrix(0.0, Df.size) 
-            else:
-                F['Dfs'] = spmatrix(0.0, Df.I, Df.J, Df.size) 
-            if type(Df) is matrix or type(G) is matrix or type(H) is \
-                matrix:
-                F['S'] = matrix(0.0, (n,n))
-                F['K'] = matrix(0.0, (p,p))
-            else:
-                F['S'] = spmatrix([], [], [], (n,n), 'd')
-                F['Sf'] = None
-                if type(A) is matrix:
-                    F['K'] = matrix(0.0, (p,p))
-                else:
-                    F['K'] = spmatrix([], [], [], (p,p), 'd')
-
-        # Dfs = Wnl^{-1} * Df and Gs = Wl^{-1} * G.
-        base.gemm(spmatrix(W['dnli'], range(mnl), range(mnl)), Df, 
-            F['Dfs'], partial = True)
-        base.gemm(spmatrix(W['dli'], range(ml), range(ml)), G, F['Gs'], 
-            partial = True)
-
-        if F['firstcall']:
-            base.syrk(F['Dfs'], F['S'], trans = 'T')
-            base.syrk(F['Gs'], F['S'], trans = 'T', beta = 1.0) 
-            F['S'] += H
-            try:
-                if type(F['S']) is matrix: 
-                    lapack.potrf(F['S']) 
-                else:
-                    F['Sf'] = cholmod.symbolic(F['S'])
-                    cholmod.numeric(F['S'], F['Sf'])
-            except ArithmeticError:
-                F['singular'] = True 
-                if type(A) is matrix and type(F['S']) is spmatrix:
-                    F['S'] = matrix(0.0, (n,n))
-                base.syrk(F['Dfs'], F['S'], trans = 'T')
-                base.syrk(F['Gs'], F['S'], trans = 'T', beta = 1.0) 
-                base.syrk(A, F['S'], trans = 'T', beta = 1.0) 
-                F['S'] += H
-                if type(F['S']) is matrix: 
-                    lapack.potrf(F['S']) 
-                else:
-                    F['Sf'] = cholmod.symbolic(F['S'])
-                    cholmod.numeric(F['S'], F['Sf'])
-            F['firstcall'] = False
-
-        else:
-            # S := H but do not remove nonzeros from sparsity pattern if S
-            # is sparse.
-            if type(F['S']) is spmatrix and type(H) is spmatrix:
-                F['S'] *= 0.0
-                F['S'] += H
-            else:
-                F['S'][:,:] = H
-            base.syrk(F['Dfs'], F['S'], trans = 'T', beta = 1.0, 
-                partial = True)
-            base.syrk(F['Gs'], F['S'], trans = 'T', beta = 1.0, 
-                partial = True)
-            if F['singular']:
-                base.syrk(A, F['S'], trans = 'T', beta = 1.0, partial = 
-                    True) 
-            if type(F['S']) is matrix: 
-                lapack.potrf(F['S']) 
-            else:
-                cholmod.numeric(F['S'], F['Sf'])
-
-        if type(F['S']) is matrix: 
-            # Asct := L^{-1}*A'.  Factor K = Asct'*Asct.
-            if type(A) is matrix: 
-                Asct = A.T
-            else: 
-                Asct = matrix(A.T)
-            blas.trsm(F['S'], Asct)
-            blas.syrk(Asct, F['K'], trans = 'T')
-            lapack.potrf(F['K'])
-
-        else:
-            # Asct := L^{-1}*P*A'.  Factor K = Asct'*Asct.
-            if type(A) is matrix:
-                Asct = A.T
-                cholmod.solve(F['Sf'], Asct, sys = 7)
-                cholmod.solve(F['Sf'], Asct, sys = 4)
-                blas.syrk(Asct, F['K'], trans = 'T')
-                lapack.potrf(F['K']) 
-            else:
-                Asct = cholmod.spsolve(F['Sf'], A.T, sys = 7)
-                Asct = cholmod.spsolve(F['Sf'], Asct, sys = 4)
-                base.syrk(Asct, F['K'], trans = 'T')
-                Kf = cholmod.symbolic(F['K'])
-                cholmod.numeric(K, Kf)
-
-        def solve(x, y, z):
-
-            # Solve
-            #
-            #     [ H          A'  GG'*W^{-1} ]   [ ux   ]   [ bx        ]
-            #     [ A          0   0          ] * [ uy   ] = [ by        ]
-            #     [ W^{-T}*GG  0   -I         ]   [ W*uz ]   [ W^{-T}*bz ]
-            #
-            # and return ux, uy, W*uz.
-            #
-            # If not F['singular']:
-            #
-            #     K*uy = A * S^{-1} * ( bx + GG'*W^{-1}*W^{-T}*bz ) - by
-            #     S*ux = bx + GG'*W^{-1}*W^{-T}*bz - A'*uy
-            #     W*uz = W^{-T} * ( GG*ux - bz ).
-            #    
-            # If F['singular']:
-            #
-            #     K*uy = A * S^{-1} * ( bx + GG'*W^{-1}*W^{-T}*bz + A'*by )
-            #            - by
-            #     S*ux = bx + GG'*W^{-1}*W^{-T}*bz + A'*by - A'*y.
-            #     W*uz = W^{-T} * ( GG*ux - bz ).
-
-            # z := W^{-1} * z = W^{-1} * bz
-            misc.scale(z, W, trans = 'T', inverse = 'I') 
-
-            # If not F['singular']:
-            #     x := L^{-1} * P * (x + GGs'*z)
-            #        = L^{-1} * P * (x + GG'*W^{-1}*W^{-T}*bz)
-            #
-            # If F['singular']:
-            #     x := L^{-1} * P * (x + GGs'*z + A'*y))
-            #        = L^{-1} * P * (x + GG'*W^{-1}*W^{-T}*bz + A'*y)
-
-            base.gemv(F['Dfs'], z, x, trans = 'T', beta = 1.0)
-            base.gemv(F['Gs'], z, x, offsetx = mnl, trans = 'T', 
-                beta = 1.0)
-            if F['singular']:
-                base.gemv(A, y, x, trans = 'T', beta = 1.0)
-            if type(F['S']) is matrix:
-                blas.trsv(F['S'], x)
-            else:
-                cholmod.solve(F['Sf'], x, sys = 7)
-                cholmod.solve(F['Sf'], x, sys = 4)
-
-            # y := K^{-1} * (Asc*x - y)
-            #    = K^{-1} * (A * S^{-1} * (bx + GG'*W^{-1}*W^{-T}*bz) - by)
-            #      (if not F['singular'])
-            #    = K^{-1} * (A * S^{-1} * (bx + GG'*W^{-1}*W^{-T}*bz + 
-            #      A'*by) - by)  
-            #      (if F['singular']).
-
-            base.gemv(Asct, x, y, trans = 'T', beta = -1.0)
-            if type(F['K']) is matrix:
-                lapack.potrs(F['K'], y)
-            else:
-                cholmod.solve(Kf, y)
-
-            # x := P' * L^{-T} * (x - Asc'*y)
-            #    = S^{-1} * (bx + GG'*W^{-1}*W^{-T}*bz - A'*y) 
-            #      (if not F['singular'])  
-            #    = S^{-1} * (bx + GG'*W^{-1}*W^{-T}*bz + A'*by - A'*y) 
-            #      (if F['singular'])
-
-            base.gemv(Asct, y, x, alpha = -1.0, beta = 1.0)
-            if type(F['S']) is matrix:
-                blas.trsv(F['S'], x, trans='T')
-            else:
-                cholmod.solve(F['Sf'], x, sys = 5)
-                cholmod.solve(F['Sf'], x, sys = 8)
-
-            # W*z := GGs*x - z = W^{-T} * (GG*x - bz)
-            base.gemv(F['Dfs'], x, z, beta = -1.0)
-            base.gemv(F['Gs'], x, z, beta = -1.0, offsety = mnl)
-
-        return solve
-
-    return factor
-
-
 def cpl(c, F, G = None, h = None, dims = None, A = None, b = None, 
-    kktsolver = None, xnewcopy = matrix, xdot = blas.dot, xaxpy = 
-    blas.axpy, xscal = blas.scal, ynewcopy = matrix, ydot = blas.dot, 
-    yaxpy = blas.axpy, yscal = blas.scal):
+    kktsolver = None, xnewcopy = None, xdot = None, xaxpy = None,
+    xscal = None, ynewcopy = None, ydot = None, yaxpy = None, 
+    yscal = None):
 
     """
-    Solves a nonlinearly constrained convex optimization problem with a
-    linear objective
+    Solves a convex optimization problem with a linear objective
 
         minimize    c'*x 
         subject to  f(x) <= 0
@@ -471,8 +59,7 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
 
     Input arguments (basic usage).
 
-        c is a dense 'd' matrix of size (n,1), where n is the dimension 
-        of the primal variable x.
+        c is a dense 'd' matrix of size (n,1). 
 
         F is a function that handles the following arguments.
 
@@ -513,7 +100,7 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
         - dims['s'] = ms = [ ms[0], ms[1], ..., ms[M-1] ], a list of M  
           integers with the orders of the semidefinite cones 
           C_{N+1}, ..., C_{N+M}.  (M >= 0 and ms[k] >= 0.)
-        The default value of dims = {'l': G.size[0], 'q': [], 's': []}.
+        The default value of dims is {'l': G.size[0], 'q': [], 's': []}.
 
         G is a dense or sparse 'd' matrix of size (K,n), where
 
@@ -600,9 +187,9 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
 
         - For the 'l' block (W_0):
 
-              W_0 = diag(dl),
+              W_0 = diag(d),
 
-          with dl a positive vector of length ml.
+          with d a positive vector of length ml.
 
         - For the 'q' blocks (W_{k+1}, k = 0, ..., N-1):
 
@@ -625,9 +212,9 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
         - W['dnl'] is a positive 'd' matrix of size (mnl, 1).
         - W['dnli'] is a positive 'd' matrix with the elementwise inverse 
           of W['dnl'].
-        - W['dl'] is a positive 'd' matrix of size (ml, 1).
-        - W['dli'] is a positive 'd' matrix with the elementwise inverse of
-          W['dl'].
+        - W['d'] is a positive 'd' matrix of size (ml, 1).
+        - W['di'] is a positive 'd' matrix with the elementwise inverse of
+          W['d'].
         - W['beta'] is a list [ beta_0, ..., beta_{N-1} ]
         - W['v'] is a list [ v_0, ..., v_{N-1} ]
         - W['r'] is a list [ r_0, ..., r_{M-1} ]
@@ -635,9 +222,14 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
           inverse of the transpose of r_k.
 
         The call g = kktsolver(x, z, W) should return a function g that
-        solves the KKT system by g(ux, uy, uz).  On entry, ux, uy, uz 
-        contain the righthand side bx, by, bz.  On exit, they contain the 
-        solution, with uz scaled: W*uz is returned instead of uz.
+        solves the KKT system by g(x, y, z).  On entry, x, y, z contain 
+        the righthand side bx, by, bz.  On exit, they contain the 
+        solution, with uz scaled: W*uz is returned instead of uz.  In other
+        words, on exit x, y, z are the solution of
+
+            [ sum_k zk*Hk(x)  A'   GG'*W^{-1} ] [ ux ]   [ bx ]
+            [ A               0    0          ] [ uy ] = [ by ].
+            [ GG              0   -W'         ] [ uz ]   [ bz ]
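
        Schematically, a user-supplied kktsolver has the shape sketched
        below.  The names mykktsolver and g are placeholders and the bodies
        are stubs; the sketch only illustrates the calling convention just
        described, not a working factorization.

            def mykktsolver(x, z, W):
                # Form and factor the KKT matrix for the current x, z and
                # scaling W (problem specific).
                def g(bx, by, bz):
                    # Overwrite bx, by, bz in place with ux, uy, W*uz.
                    pass
                return g

            sol = cpl(c, F, G, h, dims, A, b, kktsolver = mykktsolver)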
 
 
         2.  The linear operators Df*u, H*u, G*u and A*u can be specified 
@@ -709,12 +301,17 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
 
        options['show_progress'] True/False (default: True)
        options['maxiters'] positive integer (default: 100)
+       options['refinement'] nonnegative integer (default: 1)
        options['abstol'] scalar (default: 1e-7)
        options['reltol'] scalar (default: 1e-6)
        options['feastol'] scalar (default: 1e-7).
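
    As a minimal, self-contained illustration of the basic usage above
    (the data below is made up): minimize x1 + x2 subject to the single
    nonlinear constraint x1**2 + x2**2 <= 1, with no G, h, A, b.

        from cvxopt import matrix, solvers

        c = matrix([1.0, 1.0])

        def F(x = None, z = None):
            if x is None:
                return 1, matrix([0.0, 0.0])    # one constraint, x0 = (0,0)
            f = matrix(x[0]**2 + x[1]**2 - 1.0, (1, 1))
            Df = matrix([2.0*x[0], 2.0*x[1]], (1, 2))
            if z is None:
                return f, Df
            H = matrix([[2.0*z[0], 0.0], [0.0, 2.0*z[0]]])
            return f, Df, H

        solvers.options['show_progress'] = False
        sol = solvers.cpl(c, F)
        print(sol['x'])    # approximately [-0.7071; -0.7071]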
 
     """
 
+    import math 
+    from cvxopt import base, blas, misc
+    from cvxopt.base import matrix, spmatrix 
+
     STEP = 0.99
     BETA = 0.5
     ALPHA = 0.01
@@ -750,14 +347,21 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
     except KeyError: show_progress = True
 
     try: refinement = options['refinement']
-    except KeyError: refinement = True
+    except KeyError: refinement = 1
+    else:
+        if type(refinement) is not int or refinement < 0:
+            raise ValueError, "options['refinement'] must be a "\
+                "nonnegative integer"
 
     if kktsolver is None: 
         if dims and (dims['q'] or dims['s']):  
-            kktsolver = 'qr'            
-        else:
             kktsolver = 'chol'            
-    defaultsolvers = ('ldl', 'qr', 'chol')
+        else:
+            kktsolver = 'chol2'            
+    defaultsolvers = ('ldl', 'ldl2', 'chol', 'chol2')
+    if type(kktsolver) is str and kktsolver not in defaultsolvers:
+        raise ValueError, "'%s' is not a valid value for kktsolver" \
+            %kktsolver
 
     try: mnl, x0 = F()   
     except: raise ValueError, "function call 'F()' failed"
@@ -769,13 +373,13 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
     if (operatorG or operatorA) and not customkkt:
         raise ValueError, "use of function valued G, A requires a "\
             "user-provided kktsolver"
-    customx = xnewcopy != matrix or xdot != blas.dot or xaxpy != blas.axpy\
-        or xscal != blas.scal 
+    customx = (xnewcopy != None or xdot != None or xaxpy != None or 
+        xscal != None)
     if customx and (not operatorG or not operatorA or not customkkt):
         raise ValueError, "use of non-vector type for x requires "\
             "function valued G, A and user-provided kktsolver"
-    customy = ynewcopy != matrix or ydot != blas.dot or yaxpy != blas.axpy\
-        or yscal != blas.scal  
+    customy = (ynewcopy != None or ydot != None or yaxpy != None or 
+        yscal != None) 
     if customy and (not operatorA or not customkkt):
         raise ValueError, "use of non vector type for y requires "\
             "function valued A and user-provided kktsolver"
@@ -790,10 +394,8 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
     if h is None: h = matrix(0.0, (0,1))
     if type(h) is not matrix or h.typecode != 'd' or h.size[1] != 1:
         raise TypeError, "'h' must be a 'd' matrix with 1 column" 
-    if not dims:  dims = {'l': h.size[0], 'q': [], 's': []}
 
-    # Logarithmic degree of the product cone.
-    cdeg = mnl + dims['l'] + len(dims['q']) + sum(dims['s'])
+    if not dims:  dims = {'l': h.size[0], 'q': [], 's': []}
 
     # Dimension of the product cone of the linear inequalities, with 's'
     # components unpacked.
@@ -801,15 +403,6 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
     if h.size[0] != cdim:
         raise TypeError, "'h' must be a 'd' matrix of size (%d,1)" %cdim
 
-    # Dimension of the product cone of the linear inequalities, with 's' 
-    # components packed.
-    cdim_pckd = dims['l'] + sum(dims['q']) + sum([ k*(k+1)/2 for k in 
-        dims['s'] ])
-
-    # Dimension of the product cone of the linear inequalities, with 
-    # diagonal 's' components.
-    cdim_diag = dims['l'] + sum(dims['q']) + sum(dims['s'])
-
     if G is None:
         if customx:
             def G(x, y, trans = 'N', alpha = 1.0, beta = 0.0):
@@ -817,7 +410,7 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
                 else: xscal(beta, y)
         else:
             G = spmatrix([], [], [], (0, c.size[0]))
-    if type(G) is matrix or type(G) is spmatrix:
+    if not operatorG:
         if G.typecode != 'd' or G.size != (cdim, c.size[0]):
             raise TypeError, "'G' must be a 'd' matrix with size (%d, %d)"\
                 %(cdim, c.size[0])
@@ -828,13 +421,13 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
         fG = G
 
     if A is None:
-        if customy:
+        if customx or customy:
             def A(x, y, trans = 'N', alpha = 1.0, beta = 0.0):
                 if trans == 'N': pass
-                else: xscal(beta, y)
+                else: yscal(beta, y)
         else:
             A = spmatrix([], [], [], (0, c.size[0]))
-    if type(A) is matrix or type(A) is spmatrix:
+    if not operatorA:
         if A.typecode != 'd' or A.size[1] != c.size[0]:
             raise TypeError, "'A' must be a 'd' matrix with %d columns" \
                 %c.size[0]
@@ -851,24 +444,43 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
     if b is None and customy:  
         raise ValueEror, "use of non vector type for y requires b"
 
+   
+    # kktsolver(x, z, W) returns a routine for solving
+    #
+    #     [ sum_k zk*Hk(x)  A'   GG'*W^{-1} ] [ ux ]   [ bx ]
+    #     [ A               0    0          ] [ uy ] = [ by ]
+    #     [ GG              0   -W'         ] [ uz ]   [ bz ]
+    #
+    # where G = [Df(x); G].
 
-    def xcopy(x, y):  
+    if kktsolver in defaultsolvers:
+        if kktsolver == 'ldl':
+            factor = misc.kkt_ldl(G, dims, A, mnl)
+        elif kktsolver == 'ldl2':
+            factor = misc.kkt_ldl2(G, dims, A, mnl)
+        elif kktsolver == 'chol':
+            factor = misc.kkt_chol(G, dims, A, mnl)
+        else:
+            factor = misc.kkt_chol2(G, dims, A, mnl)
+        def kktsolver(x, z, W):
+            f, Df, H = F(x, z)
+            return factor(W, H, Df)
+
+
+    if xnewcopy is None: xnewcopy = matrix 
+    if xdot is None: xdot = blas.dot
+    if xaxpy is None: xaxpy = blas.axpy 
+    if xscal is None: xscal = blas.scal 
+    def xcopy(x, y): 
         xscal(0.0, y) 
         xaxpy(x, y)
-    def ycopy(x, y):  
-        yscal(0.0, y)  
+    if ynewcopy is None: ynewcopy = matrix 
+    if ydot is None: ydot = blas.dot 
+    if yaxpy is None: yaxpy = blas.axpy 
+    if yscal is None: yscal = blas.scal
+    def ycopy(x, y): 
+        yscal(0.0, y) 
         yaxpy(x, y)
-
-    if kktsolver in defaultsolvers:
-         if kktsolver == 'ldl': 
-             factor = kkt_ldl(mnl, G, dims, A)
-         elif kktsolver == 'qr':
-             factor = kkt_qr(mnl, G, dims, A)
-         else:
-             factor = kkt_chol(mnl, G, dims, A)
-         def kktsolver(x, znl, W):
-             f, Df, H = F(x, znl)
-             return factor(H, Df, W)             
              
 
     # Initial points
@@ -886,25 +498,26 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
         z[ind : ind + m*m : m+1] = 1.0
         s[ind : ind + m*m : m+1] = 1.0
         ind += m**2
-    
+
+
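
    As an aside, the strided assignments above write an identity matrix into
    each column-major 's' block of s and z; a standalone illustration of the
    idiom:

        from cvxopt import matrix

        m = 3
        z = matrix(0.0, (m*m, 1))
        z[0 : m*m : m+1] = 1.0       # diagonal entries of an m-by-m block
        print(matrix(z, (m, m)))     # the 3-by-3 identity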
     rx, ry = xnewcopy(x0), ynewcopy(b)
     rznl, rzl = matrix(0.0, (mnl, 1)), matrix(0.0, (cdim, 1)), 
     dx, dy = xnewcopy(x), ynewcopy(y)   
     dz, ds = matrix(0.0, (mnl + cdim, 1)), matrix(0.0, (mnl + cdim, 1))
-    ds1 = matrix(0.0, (mnl + cdim, 1))
-    dx2, dy2 = xnewcopy(x), ynewcopy(y)   
+
+    lmbda = matrix(0.0, (mnl + dims['l'] + sum(dims['q']) + 
+        sum(dims['s']), 1))
+    lmbdasq = matrix(0.0, (mnl + dims['l'] + sum(dims['q']) + 
+        sum(dims['s']), 1))
+    sigs = matrix(0.0, (sum(dims['s']), 1))
+    sigz = matrix(0.0, (sum(dims['s']), 1))
+
     dz2, ds2 = matrix(0.0, (mnl + cdim, 1)), matrix(0.0, (mnl + cdim, 1))
-    dz2nl, dz2l = matrix(0.0, (mnl,1)), matrix(0.0, (cdim, 1))
-    duz, dus = matrix(0.0, (mnl + cdim, 1)), matrix(0.0, (mnl + cdim, 1))
+
     newx, newy = xnewcopy(x),  ynewcopy(y)
     newz, news = matrix(0.0, (mnl + cdim, 1)), matrix(0.0, (mnl + cdim, 1))
     newrx = xnewcopy(x0)
     newrznl = matrix(0.0, (mnl, 1))
-    lmbda = matrix(0.0, (mnl + cdim_diag, 1))
-    lmbdasq = matrix(0.0, (mnl + cdim_diag, 1))
-    sigs = matrix(0.0, (sum(dims['s']), 1))
-    sigz = matrix(0.0, (sum(dims['s']), 1))
-    work = matrix(0.0, (max( [0] + dims['s'] )**2, 1))
 
     rx0, ry0 = xnewcopy(x0), ynewcopy(b)
     rznl0, rzl0 = matrix(0.0, (mnl, 1)), matrix(0.0, (cdim, 1)), 
@@ -919,21 +532,23 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
     W0 = {}
     W0['dnl'] = matrix(0.0, (mnl, 1))
     W0['dnli'] = matrix(0.0, (mnl, 1))
-    W0['dl'] = matrix(0.0, (dims['l'], 1))
-    W0['dli'] = matrix(0.0, (dims['l'], 1))
+    W0['d'] = matrix(0.0, (dims['l'], 1))
+    W0['di'] = matrix(0.0, (dims['l'], 1))
     W0['v'] = [ matrix(0.0, (m, 1)) for m in dims['q'] ]
     W0['beta'] = len(dims['q']) * [ 0.0 ]
     W0['r'] = [ matrix(0.0, (m, m)) for m in dims['s'] ]
     W0['rti'] = [ matrix(0.0, (m, m)) for m in dims['s'] ]
-    lmbda0 = matrix(0.0, (mnl + cdim_diag, 1))
-    lmbdasq0 = matrix(0.0, (mnl + cdim_diag, 1))
-    
+    lmbda0 = matrix(0.0, (mnl + dims['l'] + sum(dims['q']) + 
+        sum(dims['s']), 1))
+    lmbdasq0 = matrix(0.0, (mnl + dims['l'] + sum(dims['q']) + 
+        sum(dims['s']), 1))
     
 
     if show_progress: 
         print "% 10s% 12s% 10s% 8s% 7s" %("pcost", "dcost", "gap", "pres",
             "dres")
 
+
     relaxed_iters = 0
     for iters in xrange(MAXITERS):  
 
@@ -980,8 +595,7 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
                 fH = H
            
 
-        gap = misc.sdot(s, z, mnl, dims) 
-        mu = gap / cdeg
+        gap = misc.sdot(s, z, dims, mnl) 
 
         # rx = c + A'*y + Df'*z[:mnl] + G'*z[mnl:]
         xcopy(c, rx) 
@@ -1004,7 +618,7 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
         blas.copy(s[mnl:], rzl)
         blas.axpy(h, rzl, alpha = -1.0)
         fG(x, rzl, beta = 1.0)
-        reszl = misc.snrm2(rzl, 0, dims)
+        reszl = misc.snrm2(rzl, dims)
 
         # pcost = c'*x
         # dcost = c'*x + y'*(A*x-b) + znl'*f(x) + zl'*(G*x-h)
@@ -1013,7 +627,7 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
         #       = c'*x + y'*ry + znl'*rznl + zl'*rzl - gap
         pcost = xdot(c,x)
         dcost = pcost + ydot(y, ry) + blas.dot(z[:mnl], rznl) + \
-            misc.sdot(z[mnl:], rzl, 0, dims) - gap
+            misc.sdot(z[mnl:], rzl, dims) - gap
 
         pres = math.sqrt( resy**2 + resznl**2 + reszl**2 )
         dres = resx
@@ -1046,191 +660,27 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
                 'zl': zl, 'snl': s[:mnl], 'sl': sl}
 
 
-        if iters == 0:
-
-            # Compute initial scaling W:
-            # 
-            #     W * z = W^{-T} * s = lambda.
-
-            W = {}
-
-
-            # For the nonlinear and 'l' blocks: 
-            #
-            #     W['dnl'] = sqrt( s[:mnl] ./ z[:mnl] )
-            #     W['dnli'] = sqrt( z[:mnl] ./ s[:mnl] )
-            #     W['dl'] = sqrt( s[mnl:m] ./ z[mnl:m] )
-            #     W['dli'] = sqrt( z[mnl:m] ./ s[mnl:m] )
-            #     lambda[:m] = sqrt( s[:m] .* z[:m] )
-            # 
-            # where m = mnl + dims['l'].
-             
-            m = mnl + dims['l']
-            W['dnl'] = base.sqrt( base.div( s[:mnl], z[:mnl] ))
-            W['dnli'] = W['dnl']**-1
-            W['dl'] = base.sqrt( base.div( s[mnl:m], z[mnl:m] ))
-            W['dli'] = W['dl']**-1
-            lmbda[:m] = base.sqrt( base.mul( s[:m], z[:m] )) 
-
-
-            # For the 'q' blocks, compute lists 'v', 'beta' of the same
-            # length as dims['q'].
-            #
-            # The vector v[k] has unit hyperbolic norm: 
-            # 
-            #     (sqrt( v[k]' * J * v[k] ) = 1 with J = [1, 0; 0, -I]).
-            # 
-            # beta[k] is a positive scalar.
-            #
-            # The hyperbolic Householder matrix H = 2*v[k]*v[k]' - J
-            # defined by v[k] satisfies 
-            # 
-            #     (beta[k] * H) * zk  = (beta[k] * H) \ sk = lambda_k
-           
-            ind = mnl + dims['l']
-            W['v'] = [ matrix(0.0, (k,1)) for k in dims['q'] ]
-            W['beta'] = len(dims['q']) * [ 0.0 ] 
-
-            for k in xrange(len(dims['q'])):
-                m = dims['q'][k]
-                v = W['v'][k]
-
-                # a = sqrt( sk' * J * sk )  where J = [1, 0; 0, -I]
-                aa = misc.jnrm2(s, offset = ind, n = m)
-
-                # b = sqrt( zk' * J * zk )
-                bb = misc.jnrm2(z, offset = ind, n = m) 
-
-                # beta[k] = ( a / b )**1/2
-                W['beta'][k] = math.sqrt( aa / bb )
-
-                # c = sqrt( (sk/a)' * (zk/b) + 1 ) / sqrt(2)    
-                cc = math.sqrt( ( blas.dot(s, z, n = m, offsetx = ind, 
-                    offsety = ind) / aa / bb + 1.0 ) / 2.0 )
-
-                # vk = 1/(2*c) * ( (sk/a) + J * (zk/b) )
-                blas.copy(z, v, offsetx = ind, n = m)
-                blas.scal(-1.0/bb, v)
-                v[0] *= -1.0 
-                blas.axpy(s, v, 1.0/aa, offsetx = ind, n = m)
-                blas.scal(1.0/2.0/cc, v)
-
-                # v[k] = 1/sqrt(2*(vk0 + 1)) * ( vk + e ),  e = [1; 0]
-                v[0] += 1.0
-                blas.scal(1.0/math.sqrt(2.0 * v[0]), v)
-            
-
-                # To get the scaled variable lambda_k
-                # 
-                #     d =  sk0/a + zk0/b + 2*c
-                #     lambda_k = [ c;  (c + zk0/b)/d * sk1/a + 
-                #         (c + sk0/a)/d * zk1/b ]
-                #     lambda_k *= sqrt(a * b)
-
-                lmbda[ind] = cc
-                dd = 2*cc + s[ind]/aa + z[ind]/bb
-                blas.copy(s, lmbda, offsetx = ind+1, offsety = ind+1,
-                    n = m-1) 
-                blas.scal((cc + z[ind]/bb)/dd/aa, lmbda, n = m-1, offset 
-                    = ind+1)
-                blas.axpy(z, lmbda, (cc + s[ind]/aa)/dd/bb, n = m-1, 
-                    offsetx = ind+1, offsety = ind+1)
-                blas.scal(math.sqrt(aa*bb), lmbda, offset = ind, n = m)
-
-                ind += m
-
-
-            # For the 's' blocks: compute two lists 'r' and 'rti' of 
-            # the same length as dims['s'].
-            #
-            #     r[k]' * sk^{-1} * r[k] = diag(lambda_k)^{-1}
-            #     r[k]' * zk * r[k] = diag(lambda_k)
-            #
-            # where sk and zk are the corresponding blocks of s and z, 
-            # reshaped into symmetric matrices.
-            #
-            # rti[k] is the inverse of r[k]', so 
-            #
-            #     rti[k]' * sk * rti[k] = diag(lambda_k)^{-1}
-            #     rti[k]' * zk^{-1} * rti[k] = diag(lambda_k).
-            #
-            # The vectors lambda_k are stored in 
-            # 
-            #     lmbda[ dims['l'] + sum(dims['q']) : -1 ]
-            
-            W['r'] = [ matrix(0.0, (m,m)) for m in dims['s'] ]
-            W['rti'] = [ matrix(0.0, (m,m)) for m in dims['s'] ]
-
-            ind2 = ind
-            for k in xrange(len(dims['s'])):
-                m = dims['s'][k]
-                r, rti = W['r'][k], W['rti'][k]
-
-                # Factor sk = L1*L1'; store L1 in block k of ds.
-                blas.copy(s, ds, offsetx = ind2, offsety = ind2, n = m**2) 
-                lapack.potrf(ds, n = m, ldA = m, offsetA = ind2)
-
-                # Factor zs[k] = L2*L2'; store L2 in block k of dz.
-                blas.copy(z, dz, offsetx = ind2, offsety = ind2, n = m**2) 
-                lapack.potrf(dz, n = m, ldA = m, offsetA = ind2)
-	 
-                # SVD L2'*L1 = U*diag(lambda_k)*V'.  Keep U in work. 
-                for i in xrange(m): 
-                    blas.scal(0.0, ds, offset = ind2 + i*m, n = i)
-                blas.copy(ds, work, offsetx = ind2, n = m**2)
-                blas.trmm(dz, work, transA = 'T', ldA = m, ldB = m, n = m,
-                    m = m, offsetA = ind2)
-                lapack.gesvd(work, lmbda, jobu = 'O', ldA = m, m = m, 
-                    n = m, offsetS = ind)
-	       
-                # r = L2^{-T} * U 
-                blas.copy(work, r, n = m*m)
-                blas.trsm(dz, r, transA = 'T', m = m, n = m, ldA = m,
-                    offsetA = ind2)
-
-                # rti = L2 * U 
-                blas.copy(work, rti, n = m*m)
-                blas.trmm(dz, rti, m = m, n = m, ldA = m, offsetA = ind2)
-
-                # r := r * diag(sqrt(lambda_k))
-                # rti := rti * diag(1 ./ sqrt(lambda_k))
-                for i in xrange(m):
-                    a = math.sqrt( lmbda[ind+i] )
-                    blas.scal(a, r, offset = m*i, n = m)
-                    blas.scal(1.0/a, rti, offset = m*i, n = m)
-
-                ind += m
-                ind2 += m*m
-
-
+        # Compute initial scaling W: 
+        #
+        #     W * z = W^{-T} * s = lambda.
+        #
         # lmbdasq = lambda o lambda 
-        blas.copy(lmbda, lmbdasq)
-        blas.tbmv(lmbda, lmbdasq, n = mnl + dims['l'], k = 0, ldA = 1) 
-        ind = mnl + dims['l']
-        for m in dims['q']:
-            lmbdasq[ind] = blas.nrm2(lmbda, offset = ind, n = m)**2
-            blas.scal(2.0*lmbda[ind], lmbdasq, n = m-1, offset = ind+1)
-            ind += m
-        # Diagonal symmetric matrices are stored as vectors in lmbdasq.
-        blas.tbmv(lmbda, lmbdasq, n = sum(dims['s']), k = 0, ldA = 1, 
-            offsetA = ind, offsetx = ind) 
 
+        if iters == 0:  
+            W = misc.compute_scaling(s, z, lmbda, dims, mnl)
+        misc.ssqr(lmbdasq, lmbda, dims, mnl)
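
        For the nonlinear and 'l' blocks the scaling computed here reduces to
        the elementwise formulas spelled out by the code removed above,
        d = sqrt(s ./ z) and lambda = sqrt(s .* z); a standalone sketch with
        made-up values:

            from cvxopt import matrix, base

            s = matrix([4.0, 1.0])
            z = matrix([1.0, 4.0])
            print(base.sqrt(base.div(s, z)))   # scaling d:       [2.0, 0.5]
            print(base.sqrt(base.mul(s, z)))   # scaled variable: [2.0, 2.0]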
 
-        # Define a function solvenewton(dx, dy, dz, ds) for solving
-        # 
-        #     [ 0  ]   [ H   A'  GG' ]   [ dx ]   [ rhsx ]
-        #     [ 0  ] + [ A   0   0   ] * [ dy ] = [ rhsy ]
-        #     [ ds ]   [ GG  0   0   ]   [ dz ]   [ rhsz ]
-        #
-        #     s o dz + z o ds = rhss
-        #
-        # where GG = [ Df(x); G ].
+
+        # f3(x, y, z) solves
         #
-        # The last equation in scaled variables:
+        #     [ H   A'  GG'*W^{-1} ] [ ux ]   [ bx ]
+        #     [ A   0   0          ] [ uy ] = [ by ].
+        #     [ GG  0  -W'         ] [ uz ]   [ bz ]
         #
-        #     lmbda o (W*dz + W^{-T}*ds) = rhss.
-
-        try: g = kktsolver(x, z[:mnl], W)
+        # On entry, x, y, z contain bx, by, bz.
+        # On exit, they contain ux, uy, uz.
+        
+        try: f3 = kktsolver(x, z[:mnl], W)
         except ArithmeticError: 
             if iters == 0:
                 raise ValueError, "Rank(A) < p or "\
@@ -1240,20 +690,22 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
                 # The arithmetic error may be caused by a relaxed line 
                 # search in the previous iteration.  Therefore we restore 
                 # the last saved state and require a standard line search. 
+
                 phi, gap = phi0, gap0
-                mu = gap / cdeg
+                mu = gap / ( mnl + dims['l'] + len(dims['q']) + 
+                    sum(dims['s']) )
                 blas.copy(W0['dnl'], W['dnl'])
                 blas.copy(W0['dnli'], W['dnli'])
-                blas.copy(W0['dl'], W['dl'])
-                blas.copy(W0['dli'], W['dli'])
+                blas.copy(W0['d'], W['d'])
+                blas.copy(W0['di'], W['di'])
                 for k in xrange(len(dims['q'])):
                     blas.copy(W0['v'][k], W['v'][k])
                     W['beta'][k] = W0['beta'][k]
                 for k in xrange(len(dims['s'])):
                     blas.copy(W0['r'][k], W['r'][k])
                     blas.copy(W0['rti'][k], W['rti'][k])
-                xcopy(x0, x); xcopy(dx0, dx);
-                ycopy(y0, y); ycopy(dy0, dy);
+                xcopy(x0, x); 
+                ycopy(y0, y); 
                 blas.copy(s0, s); blas.copy(z0, z)
                 blas.copy(lmbda0, lmbda)
                 blas.copy(lmbdasq, lmbdasq0)
@@ -1264,120 +716,142 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
 
                 relaxed_iters = -1
 
-                try: g = kktsolver(x, z[:mnl], W)
+                try: f3 = kktsolver(x, z[:mnl], W)
                 except ArithmeticError: 
                     raise ArithmeticError, "singular KKT matrix"
 
             else:  
                 raise ArithmeticError, "singular KKT matrix"
 
-        def solve_newton1(dx, dy, dz, ds):
 
-            # Solve without refinement
-            #        
-            #     [ 0  ]   [ H   A'  GG' ]   [ dx ]   [ rhsx ]
-            #     [ 0  ] + [ A   0   0   ] * [ dy ] = [ rhsy ]
-            #     [ ds ]   [ GG  0   0   ]   [ dz ]   [ rhsz ]
-            #
-            #     lmbda o (W*dz + W^{-T}*ds) = rhss.
-            #
-            # On entry, the righthand sides are stored in dx, dy, dz.  
-            # On exit, scaled quantities are returned for ds and dz.
+        # f4_no_ir(x, y, z, s) solves
+        # 
+        #     [ 0     ]   [ H   A'  GG' ] [ ux        ]   [ bx ]
+        #     [ 0     ] + [ A   0   0   ] [ uy        ] = [ by ]
+        #     [ W'*us ]   [ GG  0   0   ] [ W^{-1}*uz ]   [ bz ]
+        #
+        #     lmbda o (uz + us) = bs.
+        #
+        # On entry, x, y, z, s contain bx, by, bz, bs.
+        # On exit, they contain ux, uy, uz, us.
+
+        if iters == 0:
+            ws3 = matrix(0.0, (mnl + cdim, 1))
+            wz3 = matrix(0.0, (mnl + cdim, 1))
 
+        def f4_no_ir(x, y, z, s):
 
             # Solve 
             #
-            #     [ H  A' GG'   ]   [ dx ]   [ rhsx                      ]
-            #     [ A  0  0     ] * [ dy ] = [ rhsy                      ]
-            #     [ GG 0  -W'*W ]   [ dz ]   [ rhsz - W'*(lmbda o\ rhss) ]
+            #     [ H  A'  GG'  ] [ ux        ]   [ bx                    ]
+            #     [ A  0   0    ] [ uy        ] = [ by                    ]
+            #     [ GG 0  -W'*W ] [ W^{-1}*uz ]   [ bz - W'*(lmbda o\ bs) ]
             #
-            #     ds = W' * (lmbda o\ rhss - W*dz).
+            #     us = lmbda o\ bs - uz.
             
-            # ds := lmbda o\ ds = lmbda o\ rhss
-            misc.sinv(ds, lmbda, mnl, dims)
+            # s := lmbda o\ s 
+            #    = lmbda o\ bs
+            misc.sinv(s, lmbda, dims, mnl)
 
-            # dz := dz - W'*ds = rhsz - W' * (lambda o\ rhss)
-            ds1 = +ds 
-            misc.scale(ds1, W, trans = 'T')
-            blas.axpy(ds1, dz, alpha = -1.0)
+            # z := z - W'*s 
+            #    = bz - W' * (lambda o\ bs)
+            blas.copy(s, ws3)
+            misc.scale(ws3, W, trans = 'T')
+            blas.axpy(ws3, z, alpha = -1.0)
 
-            g(dx, dy, dz)
+            # Solve for ux, uy, uz
+            f3(x, y, z)
 
-            # ds := ds - dz = lambda o\ rhss - dz.
-            blas.axpy(dz, ds, alpha = -1.0)
+            # s := s - z 
+            #    = lambda o\ bs - z.
+            blas.axpy(z, s, alpha = -1.0)
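
            For the nonlinear and 'l' blocks the Jordan operations used here
            are componentwise: lmbda o\ bs is bs ./ lmbda and lmbda o bs is
            lmbda .* bs (the 'q' and 's' blocks use the second-order cone and
            symmetric-matrix products instead).  A minimal illustration with
            made-up values:

                from cvxopt import matrix, base

                lmbda = matrix([2.0, 4.0])
                bs = matrix([6.0, 2.0])
                print(base.div(bs, lmbda))   # lmbda o\ bs:  [3.0, 0.5]
                print(base.mul(lmbda, bs))   # lmbda o bs:   [12.0, 8.0]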
 
 
-        if refinement:
-            def solve_newton(dx, dy, dz, ds):
-                
-                # Copy righthand sides to dx2, dy2, ds2, dz2.
-                xcopy(dx, dx2)
-                ycopy(dy, dy2)
-                blas.copy(ds, ds2)
-                blas.copy(dz, dz2)
-
-                solve_newton1(dx, dy, dz, ds)
-
-                # Store residuals in dx2, dy2, dz2, ds2
-                #        
-                #     [ 0  ]   [ H   A'  GG' ]   [ dx ]   [ rhsx ]
-                #     [ 0  ] + [ A   0   0   ] * [ dy ] = [ rhsy ]
-                #     [ ds ]   [ GG  0   0   ]   [ dz ]   [ rhsz ]
-                #
-                #     s o dz + z o ds = rhss
-                #                
-                # Last equation in scaled variables:
-                #
-                #     lmbda o (W*dz + W^{-T}*ds) = rhss.
-                
-                # Store unscaled steps in dus, duz
-                blas.copy(dz, duz)
-                misc.scale(duz, W, inverse = 'I')
-                blas.copy(ds, dus)
-                misc.scale(dus, W, trans = 'T')
-
-                # Store rhs minus lhs in in dx2, dy2, ds2, dz2.
-                fH(dx, dx2, alpha = -1.0, beta = 1.0)
-                fA(dy, dx2, alpha = -1.0, beta = 1.0, trans = 'T') 
-                fDf(duz[:mnl], dx2, alpha = -1.0, beta = 1.0, trans = 'T')
-                fG(duz[mnl:], dx2, alpha = -1.0, beta = 1.0, trans = 'T') 
-
-                fA(dx, dy2, alpha = -1.0, beta = 1.0)
-
-                fDf(dx, dz2nl)
-                blas.axpy(dz2nl, dz2, alpha = -1.0)
-                fG(dx, dz2l)
-                blas.axpy(dz2l, dz2, alpha = -1.0, offsety = mnl)
-                blas.axpy(dus, dz2, alpha = -1.0)
- 
-                # dus := lmbda o (W*dz + W^{-T}*ds)
-                blas.copy(ds, dus)
-                blas.axpy(dz, dus)
-                misc.sprod(dus, lmbda, mnl, dims, diag = 'D')
-                blas.axpy(dus, ds2, alpha = -1.0)
-            
-                solve_newton1(dx2, dy2, dz2, ds2)
+        if iters == 0:
+            wz2nl, wz2l = matrix(0.0, (mnl,1)), matrix(0.0, (cdim, 1))
 
-                xaxpy(dx2, dx, alpha = -1.0)
-                yaxpy(dy2, dy, alpha = -1.0)
-                blas.axpy(dz2, dz, alpha = -1.0)
-                blas.axpy(ds2, ds, alpha = -1.0)
+        def res(ux, uy, uz, us, vx, vy, vz, vs):
 
-        else:
-            solve_newton = solve_newton1
+            # Evaluates residuals in Newton equations:
+            #
+            #     [ vx ]     [ 0     ]   [ H  A' GG' ] [ ux        ]
+            #     [ vy ] -=  [ 0     ] + [ A  0  0   ] [ uy        ]
+            #     [ vz ]     [ W'*us ]   [ GG 0  0   ] [ W^{-1}*uz ]
+            #
+            #     vs -= lmbda o (uz + us).
+
+            # vx := vx - H*ux - A'*uy - GG'*W^{-1}*uz
+            fH(ux, vx, alpha = -1.0, beta = 1.0)
+            fA(uy, vx, alpha = -1.0, beta = 1.0, trans = 'T') 
+            blas.copy(uz, wz3)
+            misc.scale(wz3, W, inverse = 'I')
+            fDf(wz3[:mnl], vx, alpha = -1.0, beta = 1.0, trans = 'T')
+            fG(wz3[mnl:], vx, alpha = -1.0, beta = 1.0, trans = 'T') 
+
+            # vy := vy - A*ux 
+            fA(ux, vy, alpha = -1.0, beta = 1.0)
+
+            # vz := vz - W'*us - GG*ux 
+            fDf(ux, wz2nl)
+            blas.axpy(wz2nl, vz, alpha = -1.0)
+            fG(ux, wz2l)
+            blas.axpy(wz2l, vz, alpha = -1.0, offsety = mnl)
+            blas.copy(us, ws3) 
+            misc.scale(ws3, W, trans = 'T')
+            blas.axpy(ws3, vz, alpha = -1.0)
+
+            # vs -= lmbda o (uz + us)
+            blas.copy(us, ws3)
+            blas.axpy(uz, ws3)
+            misc.sprod(ws3, lmbda, dims, mnl, diag = 'D')
+            blas.axpy(ws3, vs, alpha = -1.0)
+
+
+        # f4(x, y, z, s) solves the same system as f4_no_ir, but applies
+        # iterative refinement.
+
+        if iters == 0:
+            if refinement:
+                wx, wy = xnewcopy(c), ynewcopy(b)
+                wz = matrix(0.0, (mnl + cdim, 1))
+                ws = matrix(0.0, (mnl + cdim, 1))
+                wx2, wy2 = xnewcopy(c), ynewcopy(b)
+                wz2 = matrix(0.0, (mnl + cdim, 1))
+                ws2 = matrix(0.0, (mnl + cdim, 1))
+
+        def f4(x, y, z, s):
+            if refinement: 
+                xcopy(x, wx)        
+                ycopy(y, wy)        
+                blas.copy(z, wz)        
+                blas.copy(s, ws)        
+            f4_no_ir(x, y, z, s)        
+            for i in xrange(refinement):
+                xcopy(wx, wx2)        
+                ycopy(wy, wy2)        
+                blas.copy(wz, wz2)        
+                blas.copy(ws, ws2)        
+                res(x, y, z, s, wx2, wy2, wz2, ws2) 
+                f4_no_ir(wx2, wy2, wz2, ws2)
+                xaxpy(wx2, x)
+                yaxpy(wy2, y)
+                blas.axpy(wz2, z)
+                blas.axpy(ws2, s)
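
        The loop above is ordinary iterative refinement: solve with the
        factored KKT system, evaluate the residual of the original equations,
        solve once more for a correction, and add it.  A self-contained
        sketch of the same idea on a plain 2-by-2 linear system (illustrative
        only, not the solver's own code):

            from cvxopt import matrix, blas, lapack

            A = matrix([[4.0, 1.0], [1.0, 3.0]])
            b = matrix([1.0, 2.0])

            LU, ipiv = +A, matrix(0, (2, 1))
            lapack.getrf(LU, ipiv)                        # factor once

            u = +b
            lapack.getrs(LU, ipiv, u)                     # u ~ A^{-1}*b
            r = +b
            blas.gemv(A, u, r, alpha = -1.0, beta = 1.0)  # r = b - A*u
            du = +r
            lapack.getrs(LU, ipiv, du)                    # correction
            blas.axpy(du, u)                              # refined solution
            print(u)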
 
         sigma, eta = 0.0, 0.0
         for i in [0, 1]:
 
             # Solve
             #
-            #     [ 0  ]   [ H   A'  GG' ]   [ dx ]
-            #     [ 0  ] + [ A   0   0   ] * [ dy ] = -(1 - eta) * r  
-            #     [ ds ]   [ GG  0   0   ]   [ dz ]
+            #     [ 0     ]   [ H  A' GG' ] [ dx        ]
+            #     [ 0     ] + [ A  0  0   ] [ dy        ] = -(1 - eta)*r  
+            #     [ W'*ds ]   [ GG 0  0   ] [ W^{-1}*dz ]
             #
-            #     lmbda o (W*dz + W^{-T}*ds) = -lmbda o lmbda + sigma*mu*e.
+            #     lmbda o (dz + ds) = -lmbda o lmbda + sigma*mu*e.
             #
-            # Compute scaled variables W*dz, W^{-T}*ds instead of dz, ds.
+
+            mu = gap / (mnl + dims['l'] + len(dims['q']) + sum(dims['s']))
 
             # ds = -lmbdasq + sigma * mu * e  
             blas.scal(0.0, ds)
@@ -1403,7 +877,7 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
             blas.axpy(rznl, dz, alpha = -1.0 + eta)
             blas.axpy(rzl, dz, alpha = -1.0 + eta, offsety = mnl)
             
-            try: solve_newton(dx, dy, dz, ds)
+            try: f4(dx, dy, dz, ds)
             except ArithmeticError: 
                 if iters == 0:
                     raise ValueError, "Rank(A) < p or "\
@@ -1413,17 +887,22 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
 
             # Inner product ds'*dz and unscaled steps are needed in the 
             # line search.
-            dsdz = misc.sdot(ds, dz, mnl, dims)
+            dsdz = misc.sdot(ds, dz, dims, mnl)
             blas.copy(dz, dz2)
             misc.scale(dz2, W, inverse = 'I')
             blas.copy(ds, ds2)
             misc.scale(ds2, W, trans = 'T')
 
-            # Maximum steps to boundary.  (This scales ds and dz.) 
-            misc.scale2(lmbda, ds, mnl, dims)
-            ts = misc.max_step(ds, mnl, dims, sigs)
-            misc.scale2(lmbda, dz, mnl, dims)
-            tz = misc.max_step(dz, mnl, dims, sigz)
+            # Maximum steps to boundary. 
+            # 
+            # Also compute the eigenvalue decomposition of 's' blocks in 
+            # ds, dz.  The eigenvectors Qs, Qz are stored in ds, dz.
+            # The eigenvalues are stored in sigs, sigz.
+
+            misc.scale2(lmbda, ds, dims, mnl)
+            ts = misc.max_step(ds, dims, mnl, sigs)
+            misc.scale2(lmbda, dz, dims, mnl)
+            tz = misc.max_step(dz, dims, mnl, sigz)
             t = max([ 0.0, ts, tz ])
             if t == 0:
                 step = 1.0
@@ -1572,8 +1051,8 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
                             step0 = step
                             blas.copy(W['dnl'], W0['dnl'])
                             blas.copy(W['dnli'], W0['dnli'])
-                            blas.copy(W['dl'], W0['dl'])
-                            blas.copy(W['dli'], W0['dli'])
+                            blas.copy(W['d'], W0['d'])
+                            blas.copy(W['di'], W0['di'])
                             for k in xrange(len(dims['q'])):
                                 blas.copy(W['v'][k], W0['v'][k])
                                 W0['beta'][k] = W['beta'][k]
@@ -1621,8 +1100,8 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
                             step = step0
                             blas.copy(W0['dnl'], W['dnl'])
                             blas.copy(W0['dnli'], W['dnli'])
-                            blas.copy(W0['dl'], W['dl'])
-                            blas.copy(W0['dli'], W['dli'])
+                            blas.copy(W0['d'], W['d'])
+                            blas.copy(W0['di'], W['di'])
                             for k in xrange(len(dims['q'])):
                                 blas.copy(W0['v'][k], W['v'][k])
                                 W['beta'][k] = W0['beta'][k]
@@ -1648,6 +1127,17 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
                             relaxed_iters = -1
 
 
+        # Update x, y.
+        xaxpy(dx, x, alpha = step)
+        yaxpy(dy, y, alpha = step)
+
+
+        # Replace nonlinear, 'l' and 'q' blocks of ds and dz with the 
+        # updated variables in the current scaling.
+        # Replace 's' blocks of ds and dz with the factors Ls, Lz in a
+        # factorization Ls*Ls', Lz*Lz' of the updated variables in the 
+        # current scaling.
+
         # ds := e + step*ds for nonlinear, 'l' and 'q' blocks.
         # dz := e + step*dz for nonlinear, 'l' and 'q' blocks.
         blas.scal(step, ds, n = mnl + dims['l'] + sum(dims['q']))
@@ -1660,249 +1150,47 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
             dz[ind] += 1.0
             ind += m
 
-        # sigs := e + step*sigs for 's' blocks.
-        blas.scal(step, sigs)
-        sigs += 1.0
-
-        # sigz := e + step*sigz for 's' blocks.
-        blas.scal(step, sigz)
-        sigz += 1.0
 
         # ds := H(lambda)^{-1/2} * ds and dz := H(lambda)^{-1/2} * dz.
-        misc.scale2(lmbda, ds, mnl, dims, inverse = 'I')
-        misc.scale2(lmbda, dz, mnl, dims, inverse = 'I')
-
-        # The nonlinear, 'l' and 'q' components of ds and dz now contain 
-        # the updated variables in the current scaling.  The 's' 
-        # components of ds and dz contain 
-        #
-        #     Lambda^1/2 * Qs * Lambda^1/2
-        #     Lambda^1/2 * Qz * Lambda^1/2
-        #
-        # where Lambda^1/2 * (Qs * diag(sigs) * Qs') * Lambda^1/2 and 
-        # Lambda^1/2 * (Qz * diag(sigs) * Qz') * Lambda^1/2 are the 
-        # updated variablaes in the current scaling.
-
-
-        # Update lambda and scaling.
-
-        # Nonlinear and 'l' blocks
-        #
-        #    d :=  d .* sqrt( ds ./ dz )
-        #    lmbda := lmbda .* sqrt(ds) .* sqrt(dz)
-
-        m = mnl + dims['l']
-        ds[:m] = base.sqrt( ds[:m] )
-        dz[:m] = base.sqrt( dz[:m] )
- 
-        # d := d .* ds .* dz 
-        blas.tbmv(ds, W['dnl'], n = mnl, k = 0, ldA = 1)
-        blas.tbsv(dz, W['dnl'], n = mnl, k = 0, ldA = 1)
-        W['dnli'][:] = W['dnl'] ** -1
-        blas.tbmv(ds, W['dl'], n = dims['l'], k = 0, ldA = 1, offsetA 
-            = mnl)
-        blas.tbsv(dz, W['dl'], n = dims['l'], k = 0, ldA = 1, offsetA 
-            = mnl)
-        W['dli'][:] = W['dl'] ** -1
-
-        # lmbda := ds .* dz
-        blas.copy(ds, lmbda, n = m)
-        blas.tbmv(dz, lmbda, n = m, k = 0, ldA = 1)
-
-
-        # 'q' blocks.
-        # 
-        # Let st and zt be the new variables in the old scaling:
-        #
-        #     st = ds_k,   zt = dz_k
-        #
-        # and a = sqrt(st' * J * st),  b = sqrt(zt' * J * zt).
-        #
-        # 1. Compute the hyperbolic Householder transformation 2*q*q' - J 
-        #    that maps st/a to zt/b.
-        # 
-        #        c = sqrt( (1 + st'*zt/(a*b)) / 2 ) 
-        #        q = (st/a + J*zt/b) / (2*c). 
-        #
-        #    The new scaling point is 
-        #
-        #        wk := betak * sqrt(a/b) * (2*v[k]*v[k]' - J) * q 
-        #
-        #    with betak = W['beta'][k].
-        # 
-        # 3. The scaled variable:
-        #
-        #        lambda_k0 = sqrt(a*b) * c
-        #        lambda_k1 = sqrt(a*b) * ( (2vk*vk' - J) * (-d*q + u/2) )_1
-        #
-        #    where 
-        #
-        #        u = st/a - J*zt/b 
-        #        d = ( vk0 * (vk'*u) + u0/2 ) / (2*vk0 *(vk'*q) - q0 + 1).
-        #
-        # 4. Update scaling
-        #   
-        #        v[k] := wk^1/2 
-        #              = 1 / sqrt(2*(wk0 + 1)) * (wk + e).
-        #        beta[k] *=  sqrt(a/b)
-
-
-        ind = mnl + dims['l']
-        for k in xrange(len(dims['q'])):
-
-            m = dims['q'][k]
-            v = W['v'][k]
-
-            # ln = sqrt( lambda_k' * J * lambda_k )
-            ln = misc.jnrm2(lmbda, n = m, offset = ind) 
-
-            # a = sqrt( dsk' * J * dsk ) = sqrt( st' * J * st ) 
-            # ds := ds / a = st / a
-            aa = misc.jnrm2(ds, offset = ind, n = m)
-            blas.scal(1.0/aa, ds, offset = ind, n = m)
-
-            # b = sqrt( dzk' * J * dzk ) = sqrt( zt' * J * zt )
-            # dz := dz / a = zt / b
-            bb = misc.jnrm2(dz, offset = ind, n = m) 
-            blas.scal(1.0/bb, dz, offset = ind, n = m)
-
-            # c = sqrt( ( 1 + (st'*zt) / (a*b) ) / 2 )
-            cc = math.sqrt( ( 1.0 + blas.dot(ds, dz, offsetx = ind,
-                offsety = ind, n = m) ) / 2.0 )
-
-            # vs = v' * st / a 
-            vs = blas.dot(v, ds, offsety = ind, n = m) 
-
-            # vz = v' * J *zt / b
-            vz = misc.jdot(v, dz, offsety = ind, n = m) 
-
-            # vq = v' * q where q = (st/a + J * zt/b) / (2 * c)
-            vq = (vs + vz ) / 2.0 / cc
-
-            # vu = v' * u  where u =  st/a - J * zt/b 
-            vu = vs - vz  
-
-            # lambda_k0 = c
-            lmbda[ind] = cc
-
-            # wk0 = 2 * vk0 * (vk' * q) - q0 
-            wk0 = 2 * v[0] * vq - ( ds[ind] + dz[ind] ) / 2.0 / cc 
-
-            # d = (v[0] * (vk' * u) - u0/2) / (wk0 + 1)
-            dd = (v[0] * vu - ds[ind]/2.0 + dz[ind]/2.0) / (wk0 + 1.0)
-
-            # lambda_k1 = 2 * v_k1 * vk' * (-d*q + u/2) - d*q1 + u1/2
-            blas.copy(v, lmbda, offsetx = 1, offsety = ind+1, n = m-1)
-            blas.scal(2.0 * (-dd * vq + 0.5 * vu), lmbda, offset = ind+1, 
-                n = m-1)
-            blas.axpy(ds, lmbda, 0.5 * (1.0 - dd/cc), offsetx = ind+1,
-                offsety = ind+1, n = m-1)
-            blas.axpy(dz, lmbda, 0.5 * (1.0 + dd/cc), offsetx = ind+1,
-                offsety = ind+1, n = m-1)
-
-            # Scale so that sqrt(lambda_k' * J * lambda_k) = sqrt(aa*bb).
-            blas.scal(math.sqrt(aa*bb), lmbda, offset = ind, n = m)
-            
-            # v := (2*v*v' - J) * q 
-            #    = 2 * (v'*q) * v' - (J* st/a + zt/b) / (2*c)
-            blas.scal(2.0 * vq, v)
-            v[0] -= ds[ind] / 2.0 / cc
-            blas.axpy(ds, v,  0.5/cc, offsetx = ind+1, offsety = 1,
-                n = m-1)
-            blas.axpy(dz, v, -0.5/cc, offsetx = ind, n = m)
-
-            # v := v^{1/2} = 1/sqrt(2 * (v0 + 1)) * (v + e)
-            v[0] += 1.0
-            blas.scal(1.0 / math.sqrt(2.0 * v[0]), v)
-
-            # beta[k] *= ( aa / bb )**1/2
-            W['beta'][k] *= math.sqrt( aa / bb )
-            
-            ind += m
-
-
-        # 's' blocks
-        # 
-        # Let st, zt be the updated variables in the old scaling:
         # 
-        #     st = ds * diag(sigs ./ lambda) * ds'
-        #     zt = dz * diag(sigs ./ lambda) * dz'.
-        #
-        # 1.  Compute 
-        #
-        #         L1 = dsk * diag(sigs_k ./ lambda_k)^{1/2}
-        #         L2 = dzk * diag(sigz_k ./ lambda_k)^{1/2}.
+        # This replaces the nonlinear, 'l' and 'q' components of ds and dz
+        # with the updated variables in the new scaling.
+        # The 's' components of ds and dz are replaced with
         #
-        #     We have 
-        #
-        #         L1 * L1' = st,  L2 * L2' = zt.
-        #
-        # 2.  SVD L2'*L1 = Uk * lambda_k^+ * Vk'.
-        #
-        # 3.  New scaling is 
-        #
-        #         r[k] := r[k] * L1 * Vk * diag(lambda_k^+)^{-1/2}
-        #         rti[k] := r[k] * L2 * Uk * diag(lambda_k^+)^{-1/2}.
-
-        ind = mnl + dims['l'] + sum(dims['q'])
-        ind2, ind3 = ind, 0
-
-        # sigs := sigs./lambda.  sigz := sigz./lambda.
+        #     diag(lmbda_k)^{1/2} * Qs * diag(lmbda_k)^{1/2}
+        #     diag(lmbda_k)^{1/2} * Qz * diag(lmbda_k)^{1/2}
+         
+        misc.scale2(lmbda, ds, dims, mnl, inverse = 'I')
+        misc.scale2(lmbda, dz, dims, mnl, inverse = 'I')
+
+        # sigs := ( e + step*sigs ) ./ lambda for 's' blocks.
+        # sigz := ( e + step*sigz ) ./ lambda for 's' blocks.
+        blas.scal(step, sigs)
+        blas.scal(step, sigz)
+        sigs += 1.0
+        sigz += 1.0
         blas.tbsv(lmbda, sigs, n = sum(dims['s']), k = 0, ldA = 1, 
-            offsetA = ind)
+            offsetA = mnl + dims['l'] + sum(dims['q']) )
         blas.tbsv(lmbda, sigz, n = sum(dims['s']), k = 0, ldA = 1, 
-            offsetA = ind)
+            offsetA = mnl + dims['l'] + sum(dims['q']) )
 
+        # dsk := Ls = dsk * sqrt(sigs).
+        # dzk := Lz = dzk * sqrt(sigz).
+        ind2, ind3 = mnl + dims['l'] + sum(dims['q']), 0
         for k in xrange(len(dims['s'])):
             m = dims['s'][k]
-            r, rti = W['r'][k], W['rti'][k]
-
-            # dsk := L1 = dsk * sqrt(sigs).  dzk := L2 = dzk * sqrt(sigz).
             for i in xrange(m):
                 blas.scal(math.sqrt(sigs[ind3+i]), ds, offset = ind2 + m*i,
                     n = m)
                 blas.scal(math.sqrt(sigz[ind3+i]), dz, offset = ind2 + m*i,
                     n = m)
-
-            # r := r*dsk = r*L1
-            blas.gemm(r, ds, work, m = m, n = m, k = m, ldB = m, ldC = m,
-                offsetB = ind2)
-            blas.copy(work, r, n = m**2)
-
-            # rti := rti*dzk = rti*L2
-            blas.gemm(rti, dz, work, m = m, n = m, k = m, ldB = m, ldC = m,
-                offsetB = ind2)
-            blas.copy(work, rti, n = m**2)
-
-            # SVD L2'*L1 = U * lmbds^+ * V'; store U in dsk and V' in dzk.
-            blas.gemm(dz, ds, work, transA = 'T', m = m, n = m, k = m,
-                ldA = m, ldB = m, ldC = m, offsetA = ind2, offsetB = ind2)
-            lapack.gesvd(work, lmbda, jobu = 'A', jobvt = 'A', m = m,
-                n = m, ldA = m, U = ds, Vt = dz, ldU = m, ldVt = m,
-                offsetS = ind, offsetU = ind2, offsetVt = ind2)
-
-            # r := r*V
-            blas.gemm(r, dz, work, transB = 'T', m = m, n = m, k = m, 
-                ldB = m, ldC = m, offsetB = ind2)
-            blas.copy(work, r, n = m**2)
-
-            # rti := rti*U
-            blas.gemm(rti, ds, work, n = m, m = m, k = m, ldB = m, ldC = m,
-                offsetB = ind2)
-            blas.copy(work, rti, n = m**2)
-
-            # r := r*lambda^{-1/2}; rti := rti*lambda^{-1/2}
-            for i in xrange(m):    
-                a = 1.0 / math.sqrt(lmbda[ind+i])
-                blas.scal(a, r, offset = m*i, n = m)
-                blas.scal(a, rti, offset = m*i, n = m)
-
-            ind += m
             ind2 += m*m
             ind3 += m
 
-        xaxpy(dx, x, alpha = step)
-        yaxpy(dy, y, alpha = step)
+
+        # Update lambda and scaling.
+
+        misc.update_scaling(W, lmbda, ds, dz)
 
 
         # Unscale s, z (unscaled variables are used only to compute 
@@ -1930,16 +1218,16 @@ def cpl(c, F, G = None, h = None, dims = None, A = None, b = None,
             ind2 += m*m
         misc.scale(z, W, inverse = 'I')
 
-        gap = blas.dot(lmbda, lmbda, n = cdeg) 
+        gap = blas.dot(lmbda, lmbda) 
 
     return {'status': 'unknown', 'x': None,  'y': None, 'znl': None, 
         'zl': None, 'snl': None, 'sl': None}
 
 
 def cp(F, G = None, h = None, dims = None, A = None, b = None,
-    kktsolver = None, xnewcopy = matrix, xdot = blas.dot, xaxpy = 
-    blas.axpy, xscal = blas.scal, ynewcopy = matrix, ydot = blas.dot, 
-    yaxpy = blas.axpy, yscal = blas.scal):
+    kktsolver = None, xnewcopy = None, xdot = None, xaxpy = None,
+    xscal = None, ynewcopy = None, ydot = None, yaxpy = None, 
+    yscal = None):
 
     """
     Solves a convex optimization problem
@@ -1967,9 +1255,6 @@ def cp(F, G = None, h = None, dims = None, A = None, b = None,
 
     Input arguments (basic usage).
 
-        c is a dense 'd' matrix of size (n,1), where n is the dimension 
-        of the primal variable x.
-
         F is a function that handles the following arguments.
 
             F() returns a tuple (mnl, x0).  mnl is the number of nonlinear 
@@ -2048,7 +1333,7 @@ def cp(F, G = None, h = None, dims = None, A = None, b = None,
 
     Output arguments.
 
-        cpl() returns a dictionary with keys 'status', 'x', 'snl', 'sl',
+        cp() returns a dictionary with keys 'status', 'x', 'snl', 'sl',
         'znl', 'zl', 'y'.
 
         If status is 'optimal', x, snl, sl are approximate solutions of
@@ -2096,9 +1381,9 @@ def cp(F, G = None, h = None, dims = None, A = None, b = None,
 
         - For the 'l' block (W_0):
 
-              W_0 = diag(dl),
+              W_0 = diag(d),
 
-          with dl a positive vector of length ml.
+          with d a positive vector of length ml.
 
         - For the 'q' blocks (W_{k+1}, k = 0, ..., N-1):
 
@@ -2121,9 +1406,9 @@ def cp(F, G = None, h = None, dims = None, A = None, b = None,
         - W['dnl'] is a positive 'd' matrix of size (mnl, 1).
         - W['dnli'] is a positive 'd' matrix with the elementwise inverse 
           of W['dnl'].
-        - W['dl'] is a positive 'd' matrix of size (ml, 1).
-        - W['dli'] is a positive 'd' matrix with the elementwise inverse of
-          W['dl'].
+        - W['d'] is a positive 'd' matrix of size (ml, 1).
+        - W['di'] is a positive 'd' matrix with the elementwise inverse of
+          W['d'].
         - W['beta'] is a list [ beta_0, ..., beta_{N-1} ]
         - W['v'] is a list [ v_0, ..., v_{N-1} ]
         - W['r'] is a list [ r_0, ..., r_{M-1} ]
@@ -2211,6 +1496,10 @@ def cp(F, G = None, h = None, dims = None, A = None, b = None,
 
     """
 
+    import math 
+    from cvxopt import base, blas, misc
+    from cvxopt.base import matrix, spmatrix 
+
     mnl, x0 = F()
 
     # Argument error checking depends on level of customization.
@@ -2220,13 +1509,13 @@ def cp(F, G = None, h = None, dims = None, A = None, b = None,
     if (operatorG or operatorA) and not customkkt:
         raise ValueError, "use of function valued G, A requires a "\
             "user-provided kktsolver"
-    customx = xnewcopy != matrix or xdot != blas.dot or xaxpy != blas.axpy\
-        or xscal != blas.scal 
+    customx = (xnewcopy != None or xdot != None or xaxpy != None or
+        xscal != None)
     if customx and (not operatorG or not operatorA or not customkkt):
         raise ValueError, "use of non-vector type for x requires "\
             "function valued G, A and user-provided kktsolver"
-    customy = ynewcopy != matrix or ydot != blas.dot or yaxpy != blas.axpy\
-        or yscal != blas.scal  
+    customy = (ynewcopy != None or ydot != None or yaxpy != None or 
+        yscal != None)
     if customy and (not operatorA or not customkkt):
         raise ValueError, "use of non vector type for y requires "\
             "function valued A and user-provided kktsolver"
@@ -2288,13 +1577,21 @@ def cp(F, G = None, h = None, dims = None, A = None, b = None,
         raise ValueEror, "use of non vector type for y requires b"
 
 
-    def xcopy(x, y):  
+    if xnewcopy is None: xnewcopy = matrix 
+    if xdot is None: xdot = blas.dot
+    if xaxpy is None: xaxpy = blas.axpy 
+    if xscal is None: xscal = blas.scal 
+    def xcopy(x, y): 
         xscal(0.0, y) 
         xaxpy(x, y)
-    def ycopy(x, y):  
-        yscal(0.0, y)  
+    if ynewcopy is None: ynewcopy = matrix 
+    if ydot is None: ydot = blas.dot 
+    if yaxpy is None: yaxpy = blas.axpy 
+    if yscal is None: yscal = blas.scal
+    def ycopy(x, y): 
+        yscal(0.0, y) 
         yaxpy(x, y)
-
+             
 
     # The problem is solved by applying cpl() to the epigraph form 
     #
@@ -2421,19 +1718,21 @@ def cp(F, G = None, h = None, dims = None, A = None, b = None,
  
     if kktsolver is None: 
         if dims and (dims['q'] or dims['s']):  
-            kktsolver = 'qr'            
-        else:
             kktsolver = 'chol'            
-    if kktsolver in ('ldl', 'chol', 'qr'):
+        else:
+            kktsolver = 'chol2'            
+    if kktsolver in ('ldl', 'chol', 'chol2', 'qr'):
         if kktsolver == 'ldl':
-            factor = kkt_ldl(mnl, G, dims, A)
+            factor = misc.kkt_ldl(G, dims, A, mnl)
         elif kktsolver == 'qr':
-            factor = kkt_qr(mnl, G, dims, A)
+            factor = misc.kkt_qr(G, dims, A, mnl)
+        elif kktsolver == 'chol':
+            factor = misc.kkt_chol(G, dims, A, mnl)
         else: 
-            factor = kkt_chol(mnl, G, dims, A)
+            factor = misc.kkt_chol2(G, dims, A, mnl)
         def kktsolver(x, z, W):
             f, Df, H = F(x, z)
-            return factor(H, Df[1:,:], W)             
+            return factor(W, H, Df[1:,:])             
 
     ux, uz = xnewcopy(x0), matrix(0.0, (mnl + cdim, 1))
     def kktsolver_e(x, znl, W):
@@ -2516,124 +1815,6 @@ def cp(F, G = None, h = None, dims = None, A = None, b = None,
     return sol
 
 
-
-def qp(P, q, G=None, h=None, A=None, b=None, solver=None):
-
-    """
-    Solves a quadratic program
-
-        minimize    (1/2)*x'*P*x + q'*x 
-        subject to  G*x <= h      
-                    A*x = b.
-
-
-    Input arguments 
-
-        P is a nxn dense or sparse 'd' matrix with the lower triangular 
-        part of P stored in the lower triangle.  Must be positive 
-        semidefinite.
-
-        q is an nx1 dense 'd' matrix.
-
-        G is an mxn dense or sparse 'd' matrix.
-
-        h is an mx1 dense 'd' matrix.
-
-        A is a pxn dense or sparse 'd' matrix.
-
-        b is a px1 dense 'd' matrix or None.
-
-        solver is None or 'mosek'.
-
-        The default values for G, h, A and b are empty matrices with 
-        zero rows.
-
-
-    Returns a dictionary with keys 'status', 'x', 's', 'y', 'z'.
-
-        The default solver returns with status 'optimal' or 'unknown'.
-        The MOSEK solver can also return with status 'primal infeasible'
-        or 'dual infeasible'.
-
-        If status is 'optimal', x, s, y, z are the primal and dual 
-        optimal solutions.
-
-        If status is 'primal infeasible', x = s = None and z, y are 
-        a proof of primal infeasibility:
-
-            G'*z + A'*y = 0,  h'*z + b'*y = -1,  z >= 0.
-
-        If status is 'dual infeasible', z = y = None, and x, s are 
-        a proof of dual infeasibility:
-
-            P*x = 0,  q'*x = -1,  G*x + s = 0,  A*x = 0,  s >=0
-
-        If status is 'unknown', x, y, s, z are None.  
-    """
-
-    if solver == 'mosek':
-        try: 
-            from cvxopt import mosek
-            import pymosek 
-        except ImportError: raise ValueError, "invalid option "\
-            "(solver='mosek'): cvxopt.mosek is not installed" 
-
-        if 'MOSEK' in options:
-            mosek.options = options['MOSEK']
-        else:
-            mosek.options = {}
-        solsta, x, z, y = mosek.solveqp(P, q, G, h, A, b)
-        m = G.size[0]
-
-        if solsta == pymosek.solsta.optimal:
-            s = matrix(0.0, (m,1))
-            blas.copy(h, s)    
-            base.gemv(G, x, s, alpha = -1.0, beta = 1.0)
-            status = 'optimal'
-        elif solsta == pymosek.solsta.prim_infeas_cer:
-            status = 'primal infeasible'
-            ducost = -blas.dot(h,z) - blas.dot(b,y)
-            blas.scal(1.0/ducost, y);
-            blas.scal(1.0/ducost, z);
-            x, s = None, None
-        elif solsta == pymosek.solsta_dual_infeas_cer:
-            status = 'dual infeasible'
-            qx = blas.dot(q,x)
-            if qx:  x /= (-qx)
-            s = matrix(0.0, (m,1))
-            base.gemv(G, x, s, alpha=-1.0)
-            z, y = None, None
-        else: 
-            status = 'unknown'
-            x, s, y, z = None, None, None, None
-        return {'status': status, 'x': x, 's': s, 'y': y, 'z': z}
-
-    if type(P) not in (matrix, spmatrix) or P.typecode != 'd' or \
-        P.size[0] != P.size[1]:
-        raise TypeError, "'P' must be a square dense or sparse 'd' "\
-            "matrix"
-    n = P.size[0]
-    if type(q) is not matrix or q.typecode != 'd' or q.size != (n,1): 
-        raise TypeError, "'q' must be a dense 'd' matrix of "\
-            "size (%d,1)" %n
-
-    def F(x = None, z = None):
-        if x is None: 
-            return 0, matrix(0.0, (n,1))
-        grad = matrix(0.0, (1,n))
-        base.symv(P, x, grad) 
-        f = .5 * blas.dot(grad, x) + blas.dot(q, x) 
-        blas.axpy(q, grad)
-        if z is None: return f, grad
-        else: return f, grad, z[0]*P
-
-    dims = {'l': G.size[0], 'q': [], 's': []}
-    sol =  cp(F, G, h, dims, A, b)
-    return {'status': sol['status'], 'x': sol['x'], 's': sol['sl'],
-        'y': sol['y'], 'z': sol['zl']}
-
-
-
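
For reference, a minimal usage sketch of the qp() interface documented above (in 0.9.3 it is exposed through cvxopt.solvers and backed by coneqp); the numeric data below are made up purely for illustration:

    from cvxopt.base import matrix
    from cvxopt import solvers

    # minimize (1/2)*x'*P*x + q'*x  subject to  x >= 0
    P = matrix([[2.0, 0.0], [0.0, 2.0]])
    q = matrix([1.0, -1.0])
    G = matrix([[-1.0, 0.0], [0.0, -1.0]])    # -x <= 0
    h = matrix([0.0, 0.0])

    sol = solvers.qp(P, q, G, h)
    print(sol['status'])    # 'optimal' if the default solver converged
    print(sol['x'])         # primal solution; (0.0, 0.5) for these data
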
 def gp(K, F, g, G=None, h=None, A=None, b=None):
 
     """
@@ -2679,6 +1860,10 @@ def gp(K, F, g, G=None, h=None, A=None, b=None):
         If status is 'unknown', x, snl, sl, y, znl and zl are None.
     """
 
+    import math 
+    from cvxopt import base, blas, misc
+    from cvxopt.base import matrix, spmatrix 
+
     if type(K) is not list or [ k for k in K if type(k) is not int 
         or k <= 0 ]:
         raise TypeError, "'K' must be a list of positive integers" 
diff --git a/src/python/info.py b/src/python/info.py
index 15844c6..a5f54ba 100644
--- a/src/python/info.py
+++ b/src/python/info.py
@@ -1,8 +1,8 @@
-version = '0.9.2'
+version = '0.9.3'
 
 def license(): print(
 """
-CVXOPT version 0.9.2.  Copyright (c) 2004-2007 J. Dahl and L. Vandenberghe.
+CVXOPT version 0.9.3.  Copyright (c) 2004-2008 J. Dahl and L. Vandenberghe.
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -17,7 +17,7 @@ GNU General Public License for more details.
 You should have received a copy of the GNU General Public License
 along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
----
+----
 The CVXOPT distribution includes source code for part of the SuiteSparse
 suite of sparse matrix algorithms, including:
 
diff --git a/src/python/misc.py b/src/python/misc.py
index bc129a4..8672260 100644
--- a/src/python/misc.py
+++ b/src/python/misc.py
@@ -1,6 +1,6 @@
-# Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+# Copyright 2004-2008 J. Dahl and L. Vandenberghe.
 # 
-# This file is part of CVXOPT version 0.9.2.
+# This file is part of CVXOPT version 0.9.3.
 #
 # CVXOPT is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -16,10 +16,11 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import math
-from cvxopt import base, blas, lapack
-from cvxopt.base import matrix
+from cvxopt import base, blas, lapack, cholmod
+from cvxopt.base import matrix, spmatrix
 __all__ = []
 
+
 def scale(x, W, trans = 'N', inverse = 'N'):  
 
     # Computes 
@@ -33,39 +34,44 @@ def scale(x, W, trans = 'N', inverse = 'N'):
     #
     # W is a dictionary with entries:
     #
-    # - W['dnl']: positive vector 
+    # - W['dnl']: positive vector
     # - W['dnli']: componentwise inverse of W['dnl']
-    # - W['dl']: positive vector 
-    # - W['dli']: componentwise inverse of W['dl']
-    # - W['v']: lists of second order cone vectors with unit hyperbolic 
-    #   norms
+    # - W['d']: positive vector
+    # - W['di']: componentwise inverse of W['d']
+    # - W['v']: lists of 2nd order cone vectors with unit hyperbolic norms
     # - W['beta']: list of positive numbers
     # - W['r']: list of square matrices 
-    # - W['rti']: list of square matrices.  rti[k] is the inverse  
-    #   transpose of r[k].
+    # - W['rti']: list of square matrices.  rti[k] is the inverse transpose
+    #   of r[k].
+    #
+    # The 'dnl' and 'dnli' entries are optional, and only present when the 
+    # function is called from the nonlinear solver.
 
+    ind = 0
 
     # Scaling for nonlinear component xk is xk := dnl .* xk; inverse 
     # scaling is xk ./ dnl = dnli .* xk, where dnl = W['dnl'], 
     # dnli = W['dnli'].
 
-    if inverse == 'N': w = W['dnl']
-    else: w = W['dnli']
-    mnl = w.size[0]
-    for k in xrange(x.size[1]):
-        blas.tbmv(w, x, n = mnl, k = 0, ldA = 1, offsetx = k*x.size[0])
+    if 'dnl' in W:
+        if inverse == 'N': w = W['dnl']
+        else: w = W['dnli']
+        for k in xrange(x.size[1]):
+            blas.tbmv(w, x, n = w.size[0], k = 0, ldA = 1, offsetx = 
+                k*x.size[0])
+        ind += w.size[0]
 
 
-    # Scaling for linear 'l' component xk is xk := dl .* xk; inverse 
-    # scaling is xk ./ dl = dli .* xk, where dl = W['dl'], dli = W['dli'].
+    # Scaling for linear 'l' component xk is xk := d .* xk; inverse 
+    # scaling is xk ./ d = di .* xk, where d = W['d'], di = W['di'].
 
-    if inverse == 'N': w = W['dl']
-    else: w = W['dli']
-    ml = w.size[0]
+    if inverse == 'N': w = W['d']
+    else: w = W['di']
     for k in xrange(x.size[1]):
-        blas.tbmv(w, x, n = ml, k = 0, ldA = 1, offsetx = k*x.size[0] + 
-            mnl)
-
+        blas.tbmv(w, x, n = w.size[0], k = 0, ldA = 1, offsetx = 
+            k*x.size[0] + ind)
+    ind += w.size[0]
+  
 
     # Scaling for 'q' component is 
     #
@@ -80,7 +86,6 @@ def scale(x, W, trans = 'N', inverse = 'N'):
     #         = 1/beta * (-J) * (2*v*((-J*xk)'*v)' + xk). 
 
     w = matrix(0.0, (x.size[1], 1))
-    ind = mnl + ml
     for k in xrange(len(W['v'])):
         v = W['v'][k]
         m = v.size[0]
@@ -150,8 +155,7 @@ def scale(x, W, trans = 'N', inverse = 'N'):
         ind += n**2
 
 
-
-def scale2(lmbda, x, mnl, dims, inverse = 'N'):
+def scale2(lmbda, x, dims, mnl = 0, inverse = 'N'):
 
     # x := H(lambda^{1/2}) * x   (inverse is 'N')
     # x := H(lambda^{-1/2}) * x  (inverse is 'I')
@@ -159,7 +163,7 @@ def scale2(lmbda, x, mnl, dims, inverse = 'N'):
     # H is the Hessian of the logarithmic barrier.
       
 
-    # For the nonlinear and 'l' block, 
+    # For the nonlinear and 'l' blocks, 
     #
     #     xk := xk ./ l   (inverse is 'N')
     #     xk := xk .* l   (inverse is 'I')
@@ -228,116 +232,396 @@ def scale2(lmbda, x, mnl, dims, inverse = 'N'):
         ind2 += m
 
 
-def max_step(x, mnl, dims, sigma = None):
+def compute_scaling(s, z, lmbda, dims, mnl = None):
 
-    # Returns min {t | x + t*e >= 0}.
-    # When called with the argument sigma, also returns the eigenvalues 
-    # (in sigma) and the eigenvectors (in x) of the 's' components of x.
+    # Returns the Nesterov-Todd scaling W at points s and z, and stores
+    # the scaled variable in lmbda. 
+    #
+    #    W * z = W^{-T} * s = lmbda. 
+     
+    W = {}
 
-    t = []
-    ind = mnl + dims['l']
-    if ind: t += [ -min(x[:ind]) ] 
-    for m in dims['q']:
-        if m: t += [ blas.nrm2(x, offset = ind + 1, n = m-1) - x[ind] ]
-        ind += m
-    if sigma is None and dims['s']:  
-        Q = matrix(0.0, (max(dims['s']), max(dims['s'])))
-        w = matrix(0.0, (max(dims['s']),1))
-    ind2 = 0
-    for m in dims['s']:
-        if sigma is None:
-            blas.copy(x, Q, offsetx = ind, n = m**2)
-            lapack.syevr(Q, w, range = 'I', il = 1, iu = 1, n = m, ldA = m)
-            if m:  t += [ -w[0] ]
-        else:            
-            lapack.syevd(x, sigma, jobz = 'V', n = m, ldA = m, offsetA = 
-                ind, offsetW = ind2)
-            if m:  t += [ -sigma[ind2] ] 
-        ind += m*m
-        ind2 += m
-    if t: return max(t)
-    else: return 0.0
+    # For the nonlinear block:
+    #
+    #     W['dnl'] = sqrt( s[:mnl] ./ z[:mnl] )
+    #     W['dnli'] = sqrt( z[:mnl] ./ s[:mnl] )
+    #     lambda[:mnl] = sqrt( s[:mnl] .* z[:mnl] )
 
+    if mnl is None:
+        mnl = 0
+    else:
+        W['dnl'] = base.sqrt( base.div( s[:mnl], z[:mnl] ))
+        W['dnli'] = W['dnl']**-1
+        lmbda[:mnl] = base.sqrt( base.mul( s[:mnl], z[:mnl] ) ) 
+        
 
+    # For the 'l' block: 
+    #
+    #     W['d'] = sqrt( sk ./ zk )
+    #     W['di'] = sqrt( zk ./ sk )
+    #     lambdak = sqrt( sk .* zk )
+    #
+    # where sk and zk are the first dims['l'] entries of s and z.
+    # lambda_k is stored in the first dims['l'] positions of lmbda.
+             
+    m = dims['l']
+    W['d'] = base.sqrt( base.div( s[mnl:mnl+m], z[mnl:mnl+m] ))
+    W['di'] = W['d']**-1
+    lmbda[mnl:mnl+m] = base.sqrt( base.mul( s[mnl:mnl+m], z[mnl:mnl+m] ) ) 
 
-def sgemv(A, x, y, dims, trans = 'N', alpha = 1.0, beta = 0.0, m = None, 
-    n = None, offsetA = 0, offsety = 0): 
 
-    # A is a matrix or spmatrix of size (N, n) where 
+    # For the 'q' blocks, compute lists 'v', 'beta'.
     #
-    #     N = dims['l'] + sum(dims['q']) + sum( k**2 for k in dims['s'] ). 
+    # The vector v[k] has unit hyperbolic norm: 
+    # 
+    #     (sqrt( v[k]' * J * v[k] ) = 1 with J = [1, 0; 0, -I]).
+    # 
+    # beta[k] is a positive scalar.
     #
-    # If trans is 'N': 
+    # The hyperbolic Householder matrix H = 2*v[k]*v[k]' - J
+    # defined by v[k] satisfies 
+    # 
+    #     (beta[k] * H) * zk  = (beta[k] * H) \ sk = lambda_k
     #
-    #     y := alpha*A*x + beta * y   (trans = 'N').
+    # where sk = s[indq[k]:indq[k+1]], zk = z[indq[k]:indq[k+1]].
     #
-    # x is a vector of length n.  y is a vector of length N.
+    # lambda_k is stored in lmbda[indq[k]:indq[k+1]].
+           
+    ind = mnl + dims['l']
+    W['v'] = [ matrix(0.0, (k,1)) for k in dims['q'] ]
+    W['beta'] = len(dims['q']) * [ 0.0 ] 
+
+    for k in xrange(len(dims['q'])):
+        m = dims['q'][k]
+        v = W['v'][k]
+
+        # a = sqrt( sk' * J * sk )  where J = [1, 0; 0, -I]
+        aa = jnrm2(s, offset = ind, n = m)
+
+        # b = sqrt( zk' * J * zk )
+        bb = jnrm2(z, offset = ind, n = m) 
+
+        # beta[k] = sqrt( a / b )
+        W['beta'][k] = math.sqrt( aa / bb )
+
+        # c = sqrt( (sk/a)' * (zk/b) + 1 ) / sqrt(2)    
+        cc = math.sqrt( ( blas.dot(s, z, n = m, offsetx = ind, offsety = 
+            ind) / aa / bb + 1.0 ) / 2.0 )
+
+        # vk = 1/(2*c) * ( (sk/a) + J * (zk/b) )
+        blas.copy(z, v, offsetx = ind, n = m)
+        blas.scal(-1.0/bb, v)
+        v[0] *= -1.0 
+        blas.axpy(s, v, 1.0/aa, offsetx = ind, n = m)
+        blas.scal(1.0/2.0/cc, v)
+
+        # v[k] = 1/sqrt(2*(vk0 + 1)) * ( vk + e ),  e = [1; 0]
+        v[0] += 1.0
+        blas.scal(1.0/math.sqrt(2.0 * v[0]), v)
+            
+        # To get the scaled variable lambda_k
+        # 
+        #     d =  sk0/a + zk0/b + 2*c
+        #     lambda_k = [ c; 
+        #                  (c + zk0/b)/d * sk1/a + (c + sk0/a)/d * zk1/b ]
+        #     lambda_k *= sqrt(a * b)
+
+        lmbda[ind] = cc
+        dd = 2*cc + s[ind]/aa + z[ind]/bb
+        blas.copy(s, lmbda, offsetx = ind+1, offsety = ind+1, n = m-1) 
+        blas.scal((cc + z[ind]/bb)/dd/aa, lmbda, n = m-1, offset = ind+1)
+        blas.axpy(z, lmbda, (cc + s[ind]/aa)/dd/bb, n = m-1, offsetx = 
+            ind+1, offsety = ind+1)
+        blas.scal(math.sqrt(aa*bb), lmbda, offset = ind, n = m)
+
+        ind += m
+
+
+    # For the 's' blocks: compute two lists 'r' and 'rti'.
     #
-    # If trans is 'T':
+    #     r[k]' * sk^{-1} * r[k] = diag(lambda_k)^{-1}
+    #     r[k]' * zk * r[k] = diag(lambda_k)
     #
-    #     y := alpha*A'*x + beta * y  (trans = 'T').
+    # where sk and zk are the entries inds[k] : inds[k+1] of
+    # s and z, reshaped into symmetric matrices.
     #
-    # x is a vector of length N.  y is a vector of length n.
+    # rti[k] is the inverse of r[k]', so 
     #
-    # The 's' components in S are stored in unpacked 'L' storage.
+    #     rti[k]' * sk * rti[k] = diag(lambda_k)^{-1}
+    #     rti[k]' * zk^{-1} * rti[k] = diag(lambda_k).
+    #
+    # The vectors lambda_k are stored in 
+    # 
+    #     lmbda[ mnl + dims['l'] + sum(dims['q']) : ]
+            
+    W['r'] = [ matrix(0.0, (m,m)) for m in dims['s'] ]
+    W['rti'] = [ matrix(0.0, (m,m)) for m in dims['s'] ]
+    work = matrix(0.0, (max( [0] + dims['s'] )**2, 1))
+    Ls = matrix(0.0, (max( [0] + dims['s'] )**2, 1))
+    Lz = matrix(0.0, (max( [0] + dims['s'] )**2, 1))
 
-    if m is None: m = A.size[0]
-    if n is None: n = A.size[1]
+    ind2 = ind
+    for k in xrange(len(dims['s'])):
+        m = dims['s'][k]
+        r, rti = W['r'][k], W['rti'][k]
+
+        # Factor sk = Ls*Ls'; store Ls in ds[inds[k]:inds[k+1]].
+        blas.copy(s, Ls, offsetx = ind2, n = m**2) 
+        lapack.potrf(Ls, n = m, ldA = m)
+
+        # Factor zs[k] = Lz*Lz'; store Lz in dz[inds[k]:inds[k+1]].
+        blas.copy(z, Lz, offsetx = ind2, n = m**2) 
+        lapack.potrf(Lz, n = m, ldA = m)
+	 
+        # SVD Lz'*Ls = U*diag(lambda_k)*V'.  Keep U in work. 
+        for i in xrange(m): 
+            blas.scal(0.0, Ls, offset = i*m, n = i)
+        blas.copy(Ls, work, n = m**2)
+        blas.trmm(Lz, work, transA = 'T', ldA = m, ldB = m, n = m, m = m) 
+        lapack.gesvd(work, lmbda, jobu = 'O', ldA = m, m = m, n = m, 
+            offsetS = ind)
+	       
+        # r = Lz^{-T} * U 
+        blas.copy(work, r, n = m*m)
+        blas.trsm(Lz, r, transA = 'T', m = m, n = m, ldA = m)
+
+        # rti = Lz * U 
+        blas.copy(work, rti, n = m*m)
+        blas.trmm(Lz, rti, m = m, n = m, ldA = m)
+
+        # r := r * diag(sqrt(lambda_k))
+        # rti := rti * diag(1 ./ sqrt(lambda_k))
+        for i in xrange(m):
+            a = math.sqrt( lmbda[ind+i] )
+            blas.scal(a, r, offset = m*i, n = m)
+            blas.scal(1.0/a, rti, offset = m*i, n = m)
 
-    if trans == 'T' and alpha:
-        ind = dims['l'] + sum(dims['q'])
-        for mk in dims['s']:
-            # Set upper triangular part of x to zero and scale strict 
-            # lower triangular part by 2.
-            for j in xrange(1, mk):  
-                blas.scal(0.0, x, n = mk-j, inc = mk, offset = 
-                    ind + j*(mk + 1) - 1) 
-                blas.scal(2.0, x, offset = ind + mk*(j-1) + j, n = mk-j) 
-            ind += mk**2
+        ind += m
+        ind2 += m*m
 
-    base.gemv(A, x, y, trans = trans, alpha = alpha, beta = beta, m = m,
-        n = n, offsetA = offsetA, offsety = offsety)
+    return W
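
For the 'l' block the Nesterov-Todd scaling built above is diagonal, so the defining property W*z = W^{-T}*s = lambda can be checked by hand.  A small sketch with made-up data, using the elementwise helpers from cvxopt.base that this file already relies on:

    from cvxopt.base import matrix, mul, div, sqrt

    s = matrix([4.0, 1.0, 9.0])
    z = matrix([1.0, 4.0, 1.0])

    d = sqrt(div(s, z))          # W['d']
    di = d**-1                   # W['di']
    lmbda = sqrt(mul(s, z))      # scaled variable

    print(mul(d, z) - lmbda)     # W*z - lambda: zero vector
    print(mul(di, s) - lmbda)    # W^{-T}*s - lambda: zero vector
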
 
-    if trans == 'T' and alpha:
-        ind = dims['l'] + sum(dims['q'])
-        for mk in dims['s']:
-            # Scale strict lower triangular part of x by 0.5.
-            for j in xrange(1, mk):  
-                blas.scal(0.5, x, offset = ind + mk*(j-1) + j, n = mk-j) 
-            ind += mk**2
 
+def update_scaling(W, lmbda, s, z):
 
+    # Updates the Nesterov-Todd scaling matrix W and the scaled variable 
+    # lmbda so that on exit
+    #
+    #      W * zt = W^{-T} * st = lmbda.
+    #
+    # On entry, the nonlinear, 'l' and 'q' components of the arguments s 
+    # and z contain W^{-T}*st and W*zt, i.e., the new iterates in the 
+    # current scaling.
+    #
+    # The 's' components contain the factors Ls, Lz in a factorization of 
+    # the new iterates in the current scaling, W^{-T}*st = Ls*Ls',   
+    # W*zt = Lz*Lz'.
+  
 
-def jdot(x, y, n = None, offsetx = 0, offsety = 0):
-
-    # Returns x' * J * y, where J = [1, 0; 0, -I].
+    # Nonlinear and 'l' blocks
+    #
+    #    d :=  d .* sqrt( s ./ z )
+    #    lmbda := lmbda .* sqrt(s) .* sqrt(z)
 
-    if n is None: 
-         if len(x) != len(y): raise ValueError, "x and y must have the "\
-             "same length"
-         n = len(x)
-    return x[offsetx] * y[offsety] - blas.dot(x, y, n = n-1, 
-        offsetx = offsetx + 1, offsety = offsety + 1) 
+    if 'dnl' in W:
+        mnl = len(W['dnl'])
+    else:
+        mnl = 0
+    ml = len(W['d'])
+    m = mnl + ml
+    s[:m] = base.sqrt( s[:m] )
+    z[:m] = base.sqrt( z[:m] )
+ 
+    # d := d .* s .* z 
+    if 'dnl' in W:
+        blas.tbmv(s, W['dnl'], n = mnl, k = 0, ldA = 1)
+        blas.tbsv(z, W['dnl'], n = mnl, k = 0, ldA = 1)
+        W['dnli'][:] = W['dnl'][:] ** -1
+    blas.tbmv(s, W['d'], n = ml, k = 0, ldA = 1, offsetA = mnl)
+    blas.tbsv(z, W['d'], n = ml, k = 0, ldA = 1, offsetA = mnl)
+    W['di'][:] = W['d'][:] ** -1
+         
+    # lmbda := s .* z
+    blas.copy(s, lmbda, n = m)
+    blas.tbmv(z, lmbda, n = m, k = 0, ldA = 1)
+
+
+    # 'q' blocks.
+    # 
+    # Let st and zt be the new variables in the old scaling:
+    #
+    #     st = s_k,   zt = z_k
+    #
+    # and a = sqrt(st' * J * st),  b = sqrt(zt' * J * zt).
+    #
+    # 1. Compute the hyperbolic Householder transformation 2*q*q' - J 
+    #    that maps st/a to zt/b.
+    # 
+    #        c = sqrt( (1 + st'*zt/(a*b)) / 2 ) 
+    #        q = (st/a + J*zt/b) / (2*c). 
+    #
+    # 2. The new scaling point is 
+    #
+    #        wk := betak * sqrt(a/b) * (2*v[k]*v[k]' - J) * q 
+    #
+    #    with betak = W['beta'][k].
+    # 
+    # 3. The scaled variable:
+    #
+    #        lambda_k0 = sqrt(a*b) * c
+    #        lambda_k1 = sqrt(a*b) * ( (2*vk*vk' - J) * (-d*q + u/2) )_1
+    #
+    #    where 
+    #
+    #        u = st/a - J*zt/b 
+    #        d = ( vk0 * (vk'*u) - u0/2 ) / (2*vk0*(vk'*q) - q0 + 1).
+    #
+    # 4. Update scaling
+    #   
+    #        v[k] := wk^{1/2} 
+    #              = 1 / sqrt(2*(wk0 + 1)) * (wk + e).
+    #        beta[k] *=  sqrt(a/b)
 
+    ind = m
+    for k in xrange(len(W['v'])):
 
+        v = W['v'][k]
+        m = len(v)
+
+        # ln = sqrt( lambda_k' * J * lambda_k )
+        ln = jnrm2(lmbda, n = m, offset = ind) 
+
+        # a = sqrt( sk' * J * sk ) = sqrt( st' * J * st ) 
+        # s := s / a = st / a
+        aa = jnrm2(s, offset = ind, n = m)
+        blas.scal(1.0/aa, s, offset = ind, n = m)
+
+        # b = sqrt( zk' * J * zk ) = sqrt( zt' * J * zt )
+        # z := z / a = zt / b
+        bb = jnrm2(z, offset = ind, n = m) 
+        blas.scal(1.0/bb, z, offset = ind, n = m)
+
+        # c = sqrt( ( 1 + (st'*zt) / (a*b) ) / 2 )
+        cc = math.sqrt( ( 1.0 + blas.dot(s, z, offsetx = ind, offsety = 
+            ind, n = m) ) / 2.0 )
+
+        # vs = v' * st / a 
+        vs = blas.dot(v, s, offsety = ind, n = m) 
+
+        # vz = v' * J *zt / b
+        vz = jdot(v, z, offsety = ind, n = m) 
+
+        # vq = v' * q where q = (st/a + J * zt/b) / (2 * c)
+        vq = (vs + vz ) / 2.0 / cc
+
+        # vu = v' * u  where u =  st/a - J * zt/b 
+        vu = vs - vz  
+
+        # lambda_k0 = c
+        lmbda[ind] = cc
+
+        # wk0 = 2 * vk0 * (vk' * q) - q0 
+        wk0 = 2 * v[0] * vq - ( s[ind] + z[ind] ) / 2.0 / cc 
+
+        # d = (v[0] * (vk' * u) - u0/2) / (wk0 + 1)
+        dd = (v[0] * vu - s[ind]/2.0 + z[ind]/2.0) / (wk0 + 1.0)
+
+        # lambda_k1 = 2 * v_k1 * vk' * (-d*q + u/2) - d*q1 + u1/2
+        blas.copy(v, lmbda, offsetx = 1, offsety = ind+1, n = m-1)
+        blas.scal(2.0 * (-dd * vq + 0.5 * vu), lmbda, offset = ind+1, 
+           n = m-1)
+        blas.axpy(s, lmbda, 0.5 * (1.0 - dd/cc), offsetx = ind+1, offsety 
+           = ind+1, n = m-1)
+        blas.axpy(z, lmbda, 0.5 * (1.0 + dd/cc), offsetx = ind+1, offsety
+           = ind+1, n = m-1)
+
+        # Scale so that sqrt(lambda_k' * J * lambda_k) = sqrt(aa*bb).
+        blas.scal(math.sqrt(aa*bb), lmbda, offset = ind, n = m)
+            
+        # v := (2*v*v' - J) * q 
+    #    = 2 * (v'*q) * v - (J*st/a + zt/b) / (2*c)
+        blas.scal(2.0 * vq, v)
+        v[0] -= s[ind] / 2.0 / cc
+        blas.axpy(s, v,  0.5/cc, offsetx = ind+1, offsety = 1, n = m-1)
+        blas.axpy(z, v, -0.5/cc, offsetx = ind, n = m)
+
+        # v := v^{1/2} = 1/sqrt(2 * (v0 + 1)) * (v + e)
+        v[0] += 1.0
+        blas.scal(1.0 / math.sqrt(2.0 * v[0]), v)
+
+        # beta[k] *= sqrt( aa / bb )
+        W['beta'][k] *= math.sqrt( aa / bb )
+            
+        ind += m
 
-def jnrm2(x, n = None, offset = 0):
 
-    # Returns sqrt(x' * J * x) where J = [1, 0; 0, -I], for a vector
-    # x in a second order cone. 
+    # 's' blocks
+    # 
+    # Let st, zt be the updated variables in the old scaling:
+    # 
+    #     st = Ls * Ls', zt = Lz * Lz'.
+    #
+    # where Ls and Lz are the 's' components of s, z.
+    #
+    # 1.  SVD Lz'*Ls = Uk * lambda_k^+ * Vk'.
+    #
+    # 2.  New scaling is 
+    #
+    #         r[k] := r[k] * Ls * Vk * diag(lambda_k^+)^{-1/2}
+    #         rti[k] := rti[k] * Lz * Uk * diag(lambda_k^+)^{-1/2}.
+    #
 
-    if n is None:  n = len(x)
-    a = blas.nrm2(x, n = n-1, offset = offset+1)
-    return math.sqrt(x[offset] - a) * math.sqrt(x[offset] + a)
+    work = matrix(0.0, (max( [0] + [r.size[0] for r in W['r']])**2, 1))
+    ind = mnl + ml + sum([ len(v) for v in W['v'] ])
+    ind2, ind3 = ind, 0
+    for k in xrange(len(W['r'])):
+        r, rti = W['r'][k], W['rti'][k]
+        m = r.size[0]
+
+        # r := r*sk = r*Ls
+        blas.gemm(r, s, work, m = m, n = m, k = m, ldB = m, ldC = m,
+            offsetB = ind2)
+        blas.copy(work, r, n = m**2)
+
+        # rti := rti*zk = rti*Lz
+        blas.gemm(rti, z, work, m = m, n = m, k = m, ldB = m, ldC = m,
+            offsetB = ind2)
+        blas.copy(work, rti, n = m**2)
+
+        # SVD Lz'*Ls = U * lmbds^+ * V'; store U in sk and V' in zk.
+        blas.gemm(z, s, work, transA = 'T', m = m, n = m, k = m, ldA = m,
+            ldB = m, ldC = m, offsetA = ind2, offsetB = ind2)
+        lapack.gesvd(work, lmbda, jobu = 'A', jobvt = 'A', m = m, n = m, 
+            ldA = m, U = s, Vt = z, ldU = m, ldVt = m, offsetS = ind, 
+            offsetU = ind2, offsetVt = ind2)
+
+        # r := r*V
+        blas.gemm(r, z, work, transB = 'T', m = m, n = m, k = m, ldB = m,
+            ldC = m, offsetB = ind2)
+        blas.copy(work, r, n = m**2)
+
+        # rti := rti*U
+        blas.gemm(rti, s, work, n = m, m = m, k = m, ldB = m, ldC = m,
+            offsetB = ind2)
+        blas.copy(work, rti, n = m**2)
+
+        # r := r*lambda^{-1/2}; rti := rti*lambda^{-1/2}
+        for i in xrange(m):    
+            a = 1.0 / math.sqrt(lmbda[ind+i])
+            blas.scal(a, r, offset = m*i, n = m)
+            blas.scal(a, rti, offset = m*i, n = m)
 
+        ind += m
+        ind2 += m*m
+        ind3 += m
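
A scalar sanity check of the 'l'-block update above (illustrative numbers): expressing the new iterates in the old scaling (shat = s_new / d_old, zhat = d_old * z_new) and then applying d := d * sqrt(shat / zhat) reproduces the scaling sqrt(s_new / z_new) that compute_scaling would build from the new iterates directly:

    import math

    d_old, s_new, z_new = 2.0, 3.0, 0.75
    shat, zhat = s_new / d_old, d_old * z_new   # new iterates in the old scaling

    print(d_old * math.sqrt(shat / zhat))       # updated d: 2.0
    print(math.sqrt(s_new / z_new))             # recomputed d: 2.0
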
 
 
-def pack(x, y, mnl, dims, offsetx = 0, offsety = 0):
+def pack(x, y, dims, mnl = 0, offsetx = 0, offsety = 0):
 
      # The vector x is an element of S, with the 's' components stored 
      # in unpacked storage.  On return, x is copied to y with the 's' 
-     # components matrices stored in packed storage and the off-diagonal 
-     # entries scaled by sqrt(2).
+     # components stored in packed storage and the off-diagonal entries 
+     # scaled by sqrt(2).
 
      nlq = mnl + dims['l'] + sum(dims['q'])
      np = sum([ n*(n+1)/2 for n in dims['s'] ])
@@ -352,8 +636,7 @@ def pack(x, y, mnl, dims, offsetx = 0, offsety = 0):
      blas.scal(math.sqrt(2.0), y, n = np, offset = offsety+nlq)
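
The sqrt(2) scaling of the off-diagonal entries makes the ordinary dot product of two packed vectors equal to the trace inner product of the corresponding symmetric matrices.  A small check with made-up numbers for the 2 x 2 matrix X = [[3, 1], [1, 2]]:

    import math

    unpacked = [3.0, 1.0, 1.0, 2.0]             # m**2 entries
    packed = [3.0, math.sqrt(2.0) * 1.0, 2.0]   # m*(m+1)/2 entries

    print(sum(u * u for u in unpacked))         # tr(X*X) = 15.0
    print(sum(p * p for p in packed))           # also 15.0
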
      
 
-
-def unpack(x, y, mnl, dims, offsetx = 0, offsety = 0):
+def unpack(x, y, dims, mnl = 0, offsetx = 0, offsety = 0):
 
      # The vector x is an element of S, with the 's' components stored
      # in unpacked storage and off-diagonal entries scaled by sqrt(2).
@@ -373,10 +656,9 @@ def unpack(x, y, mnl, dims, offsetx = 0, offsety = 0):
      blas.scal(1.0/math.sqrt(2.0), y, n = nu, offset = offsety+nlq)
 
 
+def sdot(x, y, dims, mnl = 0):
 
-def sdot(x, y, mnl, dims):
-
-    # Returns the inner product of two vectors in S
+    # Returns the inner product of two vectors in S.
     
     ind = mnl + dims['l'] + sum(dims['q'])
     a = blas.dot(x, y, n = ind)
@@ -390,12 +672,106 @@ def sdot(x, y, mnl, dims):
     return a
 
 
+def sdot2(x, y):
+    """
+    Inner product of two block-diagonal symmetric dense 'd' matrices.
+
+    x and y are square dense 'd' matrices, or lists of N square dense 'd' 
+    matrices.
+    """
+
+    a = 0.0
+    if type(x) is matrix:
+        n = x.size[0]
+        a += blas.dot(x, y, incx=n+1, incy=n+1, n=n)
+        for j in xrange(1,n):
+            a += 2.0 * blas.dot(x, y, incx=n+1, incy=n+1, offsetx=j,
+                offsety=j, n=n-j)
+
+    else:
+        for k in xrange(len(x)):
+            n = x[k].size[0]
+            a += blas.dot(x[k], y[k], incx=n+1, incy=n+1, n=n)
+            for j in xrange(1,n):
+                a += 2.0 * blas.dot(x[k], y[k], incx=n+1, incy=n+1, 
+                    offsetx=j, offsety=j, n=n-j)
+    return a
+
 
-def snrm2(x, mnl, dims): 
+def snrm2(x, dims, mnl = 0): 
 
     # Returns the norm of a vector in S
 
-    return math.sqrt(sdot(x, x, mnl, dims))
+    return math.sqrt(sdot(x, x, dims, mnl))
+
+
+def sgemv(A, x, y, dims, trans = 'N', alpha = 1.0, beta = 0.0, m = None, 
+    n = None, offsetA = 0, offsetx = 0, offsety = 0): 
+
+    # A is a matrix or spmatrix of size (N, n) where 
+    #
+    #     N = dims['l'] + sum(dims['q']) + sum( k**2 for k in dims['s'] ). 
+    #
+    # If trans is 'N': 
+    #
+    #     y := alpha*A*x + beta * y   (trans = 'N').
+    #
+    # x is a vector of length n.  y is a vector of length N.
+    #
+    # If trans is 'T':
+    #
+    #     y := alpha*A'*x + beta * y  (trans = 'T').
+    #
+    # x is a vector of length N.  y is a vector of length n.
+    #
+    # The 's' components in S are stored in unpacked 'L' storage.
+
+    if m is None: m = A.size[0]
+    if n is None: n = A.size[1]
+
+    if trans == 'T' and alpha:
+        ind = offsetx + dims['l'] + sum(dims['q'])
+        for mk in dims['s']:
+            # Set upper triangular part of x to zero and scale strict 
+            # lower triangular part by 2.
+            for j in xrange(1, mk):  
+                blas.scal(0.0, x, n = mk-j, inc = mk, offset = 
+                    ind + j*(mk + 1) - 1) 
+                blas.scal(2.0, x, offset = ind + mk*(j-1) + j, n = mk-j) 
+            ind += mk**2
+
+    base.gemv(A, x, y, trans = trans, alpha = alpha, beta = beta, m = m,
+        n = n, offsetA = offsetA, offsetx = offsetx, offsety = offsety)
+
+    if trans == 'T' and alpha:
+        ind = offsetx + dims['l'] + sum(dims['q'])
+        for mk in dims['s']:
+            # Scale strict lower triangular part of x by 0.5.
+            for j in xrange(1, mk):  
+                blas.scal(0.5, x, offset = ind + mk*(j-1) + j, n = mk-j) 
+            ind += mk**2
+
+
+def jdot(x, y, n = None, offsetx = 0, offsety = 0):
+
+    # Returns x' * J * y, where J = [1, 0; 0, -I].
+
+    if n is None: 
+         if len(x) != len(y): raise ValueError, "x and y must have the "\
+             "same length"
+         n = len(x)
+    return x[offsetx] * y[offsety] - blas.dot(x, y, n = n-1, 
+        offsetx = offsetx + 1, offsety = offsety + 1) 
+
+
+def jnrm2(x, n = None, offset = 0):
+
+    # Returns sqrt(x' * J * x) where J = [1, 0; 0, -I], for a vector
+    # x in a second order cone. 
+
+    if n is None:  n = len(x)
+    a = blas.nrm2(x, n = n-1, offset = offset+1)
+    return math.sqrt(x[offset] - a) * math.sqrt(x[offset] + a)
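
The factored form sqrt(x0 - a) * sqrt(x0 + a) used in jnrm2 avoids forming x0**2 and a**2, which could overflow or lose precision for large arguments.  A quick check with illustrative values:

    import math

    x = [5.0, 3.0, 0.0]                    # a point in the second-order cone
    a = math.sqrt(x[1]**2 + x[2]**2)       # norm of the tail, as in jnrm2

    print(math.sqrt(x[0] - a) * math.sqrt(x[0] + a))    # 4.0
    print(math.sqrt(x[0]**2 - a**2))                    # same value
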
 
 
 def symm(x, n, offset = 0):
@@ -403,13 +779,13 @@ def symm(x, n, offset = 0):
     # Fills in the upper triangular part of the symmetric matrix stored in
     # x[offset : offset+n*n] using 'L' storage.
 
-    if n <= 1:  pass
+    if n <= 1:  return
     for i in xrange(n-1):
         blas.copy(x, x, offsetx = offset + i*(n+1) + 1, offsety = 
             offset + (i+1)*(n+1) - 1, incy = n, n = n-i-1)
 
 
-def sprod(x, y, mnl, dims, diag = 'N'):   
+def sprod(x, y, dims, mnl = 0, diag = 'N'):   
 
     # The product x := (y o x).  If diag is 'D', the 's' part of y is 
     # diagonal and only the diagonal is stored.
@@ -475,8 +851,23 @@ def sprod(x, y, mnl, dims, diag = 'N'):
             ind2 += m
 
 
+def ssqr(x, y, dims, mnl = 0):
+
+    # The product x := y o y.   The 's' components of y are diagonal and
+    # only the diagonals of x and y are stored.     
 
-def sinv(x, y, mnl, dims):   
+    blas.copy(y, x)
+    blas.tbmv(y, x, n = mnl + dims['l'], k = 0, ldA = 1) 
+    ind = mnl + dims['l']
+    for m in dims['q']:
+        x[ind] = blas.nrm2(y, offset = ind, n = m)**2
+        blas.scal(2.0*y[ind], x, n = m-1, offset = ind+1)
+        ind += m 
+    blas.tbmv(y, x, n = sum(dims['s']), k = 0, ldA = 1, offsetA = ind, 
+        offsetx = ind) 
+
+
+def sinv(x, y, dims, mnl = 0):   
 
     # The inverse product x := (y o\ x), when the 's' components of y are 
     # diagonal.
@@ -523,3 +914,684 @@ def sinv(x, y, mnl, dims):
                 j*(m+1))  
         ind += m*m
         ind2 += m
+
+
+def max_step(x, dims, mnl = 0, sigma = None):
+
+    # Returns min {t | x + t*e >= 0}, where e is defined as follows
+    #
+    # - For the nonlinear and 'l' blocks: e is the vector of ones.
+    # - For the 'q' blocks: e is the first unit vector.
+    # - For the 's' blocks: e is the identity matrix.
+    #
+    # When called with the argument sigma, also returns the eigenvalues 
+    # (in sigma) and the eigenvectors (in x) of the 's' components of x.
+
+    t = []
+    ind = mnl + dims['l']
+    if ind: t += [ -min(x[:ind]) ] 
+    for m in dims['q']:
+        if m: t += [ blas.nrm2(x, offset = ind + 1, n = m-1) - x[ind] ]
+        ind += m
+    if sigma is None and dims['s']:  
+        Q = matrix(0.0, (max(dims['s']), max(dims['s'])))
+        w = matrix(0.0, (max(dims['s']),1))
+    ind2 = 0
+    for m in dims['s']:
+        if sigma is None:
+            blas.copy(x, Q, offsetx = ind, n = m**2)
+            lapack.syevr(Q, w, range = 'I', il = 1, iu = 1, n = m, ldA = m)
+            if m:  t += [ -w[0] ]
+        else:            
+            lapack.syevd(x, sigma, jobz = 'V', n = m, ldA = m, offsetA = 
+                ind, offsetW = ind2)
+            if m:  t += [ -sigma[ind2] ] 
+        ind += m*m
+        ind2 += m
+    if t: return max(t)
+    else: return 0.0
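
The 'l' and 'q' rules used above can be checked directly with a couple of made-up vectors:

    import math

    # 'l' block: e is the vector of ones, so the step is -min(x).
    xl = [0.5, -2.0, 1.0]
    print(-min(xl))                              # 2.0

    # 'q' block: e is the first unit vector, so the step is ||x[1:]|| - x[0].
    xq = [1.0, 3.0, 4.0]
    print(math.hypot(xq[1], xq[2]) - xq[0])      # 4.0
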
+
+
+def kkt_ldl(G, dims, A, mnl = 0):
+
+    # Solution of KKT equations by a dense LDL factorization of the 
+    # 3 x 3 system.
+    #
+    # Returns a function that (1) computes the LDL factorization of
+    #
+    #     [ H           A'   GG'*W^{-1} ] 
+    #     [ A           0    0          ]
+    #     [ W^{-T}*GG   0   -I          ],
+    #
+    # given H, Df, W, where GG = [Df; G], and (2) returns a function for 
+    # solving 
+    #
+    #     [ H     A'   GG'   ]   [ ux ]   [ bx ]
+    #     [ A     0    0     ] * [ uy ] = [ by ].
+    #     [ GG    0   -W'*W  ]   [ uz ]   [ bz ]
+    #
+    # H is n x n,  A is p x n, Df is mnl x n, G is N x n where
+    # N = dims['l'] + sum(dims['q']) + sum( k**2 for k in dims['s'] ).
+    
+    p, n = A.size
+    ldK = n + p + mnl + dims['l'] + sum(dims['q']) + sum([ k*(k+1)/2 for k 
+        in dims['s'] ])
+    K = matrix(0.0, (ldK, ldK))
+    ipiv = matrix(0, (ldK, 1))
+    u = matrix(0.0, (ldK, 1))
+    g = matrix(0.0, (mnl + G.size[0], 1))
+
+    def factor(W, H = None, Df = None):
+
+        blas.scal(0.0, K)
+        if H is not None: K[:n, :n] = H
+        K[n:n+p, :n] = A
+        for k in xrange(n):
+            if mnl: g[:mnl] = Df[:,k]
+            g[mnl:] = G[:,k]
+            scale(g, W, trans = 'T', inverse = 'I')
+            pack(g, K, dims, mnl, offsety = k*ldK + n + p)
+        K[(ldK+1)*(p+n) :: ldK+1]  = -1.0
+        lapack.sytrf(K, ipiv)
+
+        def solve(x, y, z):
+
+            # Solve
+            #
+            #     [ H          A'   GG'*W^{-1} ]   [ ux   ]   [ bx        ]
+            #     [ A          0    0          ] * [ uy   ] = [ by        ]
+            #     [ W^{-T}*GG  0   -I          ]   [ W*uz ]   [ W^{-T}*bz ]
+            #
+            # and return ux, uy, W*uz.
+            #
+            # On entry, x, y, z contain bx, by, bz.  On exit, they contain
+            # the solution ux, uy, W*uz.
+
+            blas.copy(x, u)
+            blas.copy(y, u, offsety = n)
+            scale(z, W, trans = 'T', inverse = 'I') 
+            pack(z, u, dims, mnl, offsety = n + p)
+            lapack.sytrs(K, ipiv, u)
+            blas.copy(u, x, n = n)
+            blas.copy(u, y, offsetx = n, n = p)
+            unpack(u, z, dims, mnl, offsetx = n + p)
+    
+        return solve
+
+    return factor
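
kkt_ldl is an internal helper, but its factor/solve protocol is what the cone solvers build on: factor(W, H, Df) assembles and factors the KKT matrix for the current scaling W and returns a solve(x, y, z) routine that overwrites its arguments with ux, uy, W*uz.  A minimal sketch with made-up data and an identity scaling (no 'q' or 's' blocks, no nonlinear part; the W keys are the ones scale() in this file expects):

    from cvxopt import misc
    from cvxopt.base import matrix

    dims = {'l': 2, 'q': [], 's': []}
    G = matrix([[-1.0, 0.0], [0.0, -1.0]])            # 2 x 2
    A = matrix([[1.0], [1.0]])                        # 1 x 2
    W = {'d': matrix([1.0, 1.0]), 'di': matrix([1.0, 1.0]),
         'v': [], 'beta': [], 'r': [], 'rti': []}     # identity scaling

    factor = misc.kkt_ldl(G, dims, A)
    solve = factor(W)                                 # H = None: zero 1,1 block

    x, y, z = matrix([1.0, 0.0]), matrix([0.0]), matrix([0.0, 0.0])
    solve(x, y, z)                                    # x, y, z now hold ux, uy, W*uz
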
+
+
+def kkt_ldl2(G, dims, A, mnl = 0):
+
+    # Solution of KKT equations by a dense LDL factorization of the 2 x 2 
+    # system.
+    #
+    # Returns a function that (1) computes the LDL factorization of
+    #
+    #     [ H + GG' * W^{-1} * W^{-T} * GG   A' ]
+    #     [                                     ]
+    #     [ A                                0  ]
+    #
+    # given H, Df, W, where GG = [Df; G], and (2) returns a function for 
+    # solving 
+    #
+    #     [ H    A'   GG'   ]   [ ux ]   [ bx ]
+    #     [ A    0    0     ] * [ uy ] = [ by ].
+    #     [ GG   0   -W'*W  ]   [ uz ]   [ bz ]
+    #
+    # H is n x n,  A is p x n, Df is mnl x n, G is N x n where
+    # N = dims['l'] + sum(dims['q']) + sum( k**2 for k in dims['s'] ).
+
+    p, n = A.size
+    ldK = n + p 
+    K = matrix(0.0, (ldK, ldK))
+    if p: ipiv = matrix(0, (ldK, 1))
+    g = matrix(0.0, (mnl + G.size[0], 1))
+    u = matrix(0.0, (ldK, 1))
+
+    def factor(W, H = None, Df = None):
+
+        blas.scal(0.0, K)
+        if H is not None: K[:n, :n] = H
+        K[n:,:n] = A
+        for k in xrange(n):
+            if mnl: g[:mnl] = Df[:,k]
+            g[mnl:] = G[:,k]
+            scale(g, W, trans = 'T', inverse = 'I')
+            scale(g, W, inverse = 'I')
+            if mnl: base.gemv(Df, g, K, trans = 'T', beta = 1.0, n = n-k, 
+                offsetA = mnl*k, offsety = (ldK + 1)*k)
+            sgemv(G, g, K, dims, trans = 'T', beta = 1.0, n = n-k,
+                offsetA = G.size[0]*k, offsetx = mnl, offsety = 
+                (ldK + 1)*k)
+        if p: lapack.sytrf(K, ipiv)
+        else: lapack.potrf(K)
+
+        def solve(x, y, z):
+
+            # Solve
+            #
+            #     [ H + GG' * W^{-1} * W^{-T} * GG    A' ]   [ ux ]   
+            #     [                                      ] * [    ] 
+            #     [ A                                 0  ]   [ uy ]   
+            #
+            #         [ bx + GG' * W^{-1} * W^{-T} * bz ]
+            #     =   [                                 ]
+            #         [ by                              ]
+            #
+            # and return x, y, W*z = W^{-T} * (GG*x - bz).
+
+            blas.copy(z, g)
+            scale(g, W, trans = 'T', inverse = 'I')
+            scale(g, W, inverse = 'I')
+            if mnl: 
+                base.gemv(Df, g, u, trans = 'T')
+                beta = 1.0
+            else: 
+                beta = 0.0
+            sgemv(G, g, u, dims, trans = 'T', offsetx = mnl, beta = beta)
+            blas.axpy(x, u)
+            blas.copy(y, u, offsety = n)
+            if p: lapack.sytrs(K, ipiv, u)
+            else: lapack.potrs(K, u)
+            blas.copy(u, x, n = n)
+            blas.copy(u, y, offsetx = n, n = p)
+            if mnl: base.gemv(Df, x, z, alpha = 1.0, beta = -1.0)
+            sgemv(G, x, z, dims, alpha = 1.0, beta = -1.0, offsety = mnl)
+            scale(z, W, trans = 'T', inverse = 'I')
+	    
+        return solve
+
+    return factor
+
+
+def kkt_chol(G, dims, A, mnl = 0):
+
+    # Solution of KKT equations by reduction to a 2 x 2 system, a QR 
+    # factorization to eliminate the equality constraints, and a dense 
+    # Cholesky factorization of order n-p. 
+    #
+    # Computes the QR factorization
+    #
+    #     A' = [Q1, Q2] * [R; 0]
+    #
+    # and returns a function that (1) computes the Cholesky factorization 
+    #
+    #     Q_2^T * (H + GG^T * W^{-1} * W^{-T} * GG) * Q2 = L * L^T, 
+    #
+    # given H, Df, W, where GG = [Df; G], and (2) returns a function for 
+    # solving 
+    #
+    #     [ H    A'   GG'    ]   [ ux ]   [ bx ]
+    #     [ A    0    0      ] * [ uy ] = [ by ].
+    #     [ GG   0    -W'*W  ]   [ uz ]   [ bz ]
+    #
+    # H is n x n,  A is p x n, Df is mnl x n, G is N x n where
+    # N = dims['l'] + sum(dims['q']) + sum( k**2 for k in dims['s'] ).
+
+    p, n = A.size
+    cdim = mnl + dims['l'] + sum(dims['q']) + sum([ k**2 for k in 
+        dims['s'] ])
+    cdim_pckd = mnl + dims['l'] + sum(dims['q']) + sum([ k*(k+1)/2 for k 
+        in dims['s'] ])
+
+    # A' = [Q1, Q2] * [R; 0]  (Q1 is n x p, Q2 is n x n-p).
+    if type(A) is matrix: 
+        QA = A.T
+    else: 
+        QA = matrix(A.T)
+    tauA = matrix(0.0, (p,1))
+    lapack.geqrf(QA, tauA)
+
+    Gs = matrix(0.0, (cdim, n))
+    g = matrix(0.0, (cdim, 1))
+    K = matrix(0.0, (n,n)) 
+    bzp = matrix(0.0, (cdim_pckd, 1))
+    yy = matrix(0.0, (p,1))
+
+    def factor(W, H = None, Df = None):
+
+        # Compute 
+        #
+        #     K = [Q1, Q2]' * (H + GG' * W^{-1} * W^{-T} * GG) * [Q1, Q2]
+        #
+        # and take the Cholesky factorization of the 2,2 block
+        #
+        #     Q_2' * (H + GG^T * W^{-1} * W^{-T} * GG) * Q2.
+
+        # Gs = W^{-T} * GG in packed storage.
+        if mnl: 
+            Gs[:mnl, :] = Df
+        Gs[mnl:, :] = G
+        scale(Gs, W, trans = 'T', inverse = 'I')
+        for k in xrange(n):
+            g[:] = Gs[:, k]
+            pack(g, Gs, dims, mnl, offsety = k*Gs.size[0])
+
+        # K = [Q1, Q2]' * (H + Gs' * Gs) * [Q1, Q2].
+        if H is not None: K[:,:] = H
+        blas.syrk(Gs, K, beta = 1.0, k = cdim_pckd, trans = 'T')
+        symm(K, n)
+        lapack.ormqr(QA, tauA, K, side = 'L', trans = 'T')
+        lapack.ormqr(QA, tauA, K, side = 'R')
+
+        # Cholesky factorization of 2,2 block of K.
+        lapack.potrf(K, n = n-p, offsetA = p*(n+1))
+
+        def solve(x, y, z):
+
+            # Solve
+            #
+            #     [ 0          A'  GG'*W^{-1} ]   [ ux   ]   [ bx        ]
+            #     [ A          0   0          ] * [ uy   ] = [ by        ]
+            #     [ W^{-T}*GG  0   -I         ]   [ W*uz ]   [ W^{-T}*bz ]
+            #
+            # and return ux, uy, W*uz.
+            #
+            # On entry, x, y, z contain bx, by, bz.  On exit, they contain
+            # the solution ux, uy, W*uz.
+            #
+            # If we change variables ux = Q1*v + Q2*w, the system becomes 
+            # 
+            #     [ K11 K12 R ]   [ v  ]   [Q1'*(bx+GG'*W^{-1}*W^{-T}*bz)]
+            #     [ K21 K22 0 ] * [ w  ] = [Q2'*(bx+GG'*W^{-1}*W^{-T}*bz)]
+            #     [ R^T 0   0 ]   [ uy ]   [by                           ]
+            # 
+            #     W*uz = W^{-T} * ( GG*ux - bz ).
+
+            # bzp := W^{-T} * bz in packed storage 
+            scale(z, W, trans = 'T', inverse = 'I')
+            pack(z, bzp, dims, mnl)
+
+            # x := [Q1, Q2]' * (x + Gs' * bzp)
+            #    = [Q1, Q2]' * (bx + Gs' * W^{-T} * bz)
+            blas.gemv(Gs, bzp, x, beta = 1.0, trans = 'T', m = cdim_pckd)
+            lapack.ormqr(QA, tauA, x, side = 'L', trans = 'T')
+
+            # y := x[:p] 
+            #    = Q1' * (bx + Gs' * W^{-T} * bz)
+            blas.copy(y, yy)
+            blas.copy(x, y, n = p)
+
+            # x[:p] := v = R^{-T} * by 
+            blas.copy(yy, x)
+            lapack.trtrs(QA, x, uplo = 'U', trans = 'T', n = p)
+
+            # x[p:] := K22^{-1} * (x[p:] - K21*x[:p])
+            #        = K22^{-1} * (Q2' * (bx + Gs' * W^{-T} * bz) - K21*v)
+            blas.gemv(K, x, x, alpha = -1.0, beta = 1.0, m = n-p, n = p,
+                offsetA = p, offsety = p)
+            lapack.potrs(K, x, n = n-p, offsetA = p*(n+1), offsetB = p)
+
+            # y := y - [K11, K12] * x
+            #    = Q1' * (bx + Gs' * W^{-T} * bz) - K11*v - K12*w
+            blas.gemv(K, x, y, alpha = -1.0, beta = 1.0, m = p, n = n)
+
+            # y := R^{-1}*y
+            #    = R^{-1} * (Q1' * (bx + Gs' * W^{-T} * bz) - K11*v 
+            #      - K12*w)
+            lapack.trtrs(QA, y, uplo = 'U', n = p)
+           
+            # x := [Q1, Q2] * x
+            lapack.ormqr(QA, tauA, x, side = 'L')
+
+            # bzp := Gs * x - bzp.
+            #      = W^{-T} * ( GG*ux - bz ) in packed storage.
+            # Unpack and copy to z.
+            blas.gemv(Gs, x, bzp, alpha = 1.0, beta = -1.0, m = cdim_pckd)
+            unpack(bzp, z, dims, mnl)
+
+        return solve
+
+    return factor
+
+
+def kkt_chol2(G, dims, A, mnl = 0):
+
+    # Solution of KKT equations by reduction to a 2 x 2 system, a sparse 
+    # or dense Cholesky factorization of order n to eliminate the 1,1 
+    # block, and a sparse or dense Cholesky factorization of order p.
+    # Implemented only for problems with no second-order or semidefinite
+    # cone constraints.
+    #
+    # Returns a function that (1) computes Cholesky factorizations of
+    # the matrices 
+    #
+    #     S = H + GG' * W^{-1} * W^{-T} * GG,  
+    #     K = A * S^{-1} *A'
+    #
+    # or (if K is singular in the first call to the function), the matrices
+    #
+    #     S = H + GG' * W^{-1} * W^{-T} * GG + A' * A,  
+    #     K = A * S^{-1} * A',
+    #
+    # given H, Df, W, where GG = [Df; G], and (2) returns a function for 
+    # solving 
+    #
+    #     [ H     A'   GG'   ]   [ ux ]   [ bx ]
+    #     [ A     0    0     ] * [ uy ] = [ by ].
+    #     [ GG    0   -W'*W  ]   [ uz ]   [ bz ]
+    #
+    # H is n x n,  A is p x n, Df is mnl x n, G is dims['l'] x n.
+
+    if dims['q'] or dims['s']:
+        raise ValueError, "kktsolver option 'kkt_chol2' is implemented "\
+            "only for problems with no second-order or semidefinite cone "\
+            "constraints"
+    p, n = A.size
+    ml = dims['l']
+    F = {'firstcall': True, 'singular': False}
+
+    def factor(W, H = None, Df = None):
+
+        if F['firstcall']:
+            if type(G) is matrix: 
+                F['Gs'] = matrix(0.0, G.size) 
+            else:
+                F['Gs'] = spmatrix(0.0, G.I, G.J, G.size) 
+            if mnl:
+                if type(Df) is matrix:
+                    F['Dfs'] = matrix(0.0, Df.size) 
+                else: 
+                    F['Dfs'] = spmatrix(0.0, Df.I, Df.J, Df.size) 
+            if (mnl and type(Df) is matrix) or type(G) is matrix or \
+                type(H) is matrix:
+                F['S'] = matrix(0.0, (n,n))
+                F['K'] = matrix(0.0, (p,p))
+            else:
+                F['S'] = spmatrix([], [], [], (n,n), 'd')
+                F['Sf'] = None
+                if type(A) is matrix:
+                    F['K'] = matrix(0.0, (p,p))
+                else:
+                    F['K'] = spmatrix([], [], [], (p,p), 'd')
+
+        # Dfs = Wnl^{-1} * Df 
+        if mnl: base.gemm(spmatrix(W['dnli'], range(mnl), range(mnl)), Df, 
+            F['Dfs'], partial = True)
+
+        # Gs = Wl^{-1} * G.
+        base.gemm(spmatrix(W['di'], range(ml), range(ml)), G, F['Gs'], 
+            partial = True)
+
+        if F['firstcall']:
+            base.syrk(F['Gs'], F['S'], trans = 'T') 
+            if mnl: 
+                base.syrk(F['Dfs'], F['S'], trans = 'T', beta = 1.0)
+            if H is not None: 
+                F['S'] += H
+            try:
+                if type(F['S']) is matrix: 
+                    lapack.potrf(F['S']) 
+                else:
+                    F['Sf'] = cholmod.symbolic(F['S'])
+                    cholmod.numeric(F['S'], F['Sf'])
+            except ArithmeticError:
+                F['singular'] = True 
+                if type(A) is matrix and type(F['S']) is spmatrix:
+                    F['S'] = matrix(0.0, (n,n))
+                base.syrk(F['Gs'], F['S'], trans = 'T') 
+                if mnl:
+                    base.syrk(F['Dfs'], F['S'], trans = 'T', beta = 1.0)
+                base.syrk(A, F['S'], trans = 'T', beta = 1.0) 
+                if H is not None:
+                    F['S'] += H
+                if type(F['S']) is matrix: 
+                    lapack.potrf(F['S']) 
+                else:
+                    F['Sf'] = cholmod.symbolic(F['S'])
+                    cholmod.numeric(F['S'], F['Sf'])
+            F['firstcall'] = False
+
+        else:
+            base.syrk(F['Gs'], F['S'], trans = 'T', partial = True)
+            if mnl: base.syrk(F['Dfs'], F['S'], trans = 'T', beta = 1.0, 
+                partial = True)
+            if H is not None:
+                F['S'] += H
+            if F['singular']:
+                base.syrk(A, F['S'], trans = 'T', beta = 1.0, partial = 
+                    True) 
+            if type(F['S']) is matrix: 
+                lapack.potrf(F['S']) 
+            else:
+                cholmod.numeric(F['S'], F['Sf'])
+
+        if type(F['S']) is matrix: 
+            # Asct := L^{-1}*A'.  Factor K = Asct'*Asct.
+            if type(A) is matrix: 
+                Asct = A.T
+            else: 
+                Asct = matrix(A.T)
+            blas.trsm(F['S'], Asct)
+            blas.syrk(Asct, F['K'], trans = 'T')
+            lapack.potrf(F['K'])
+
+        else:
+            # Asct := L^{-1}*P*A'.  Factor K = Asct'*Asct.
+            if type(A) is matrix:
+                Asct = A.T
+                cholmod.solve(F['Sf'], Asct, sys = 7)
+                cholmod.solve(F['Sf'], Asct, sys = 4)
+                blas.syrk(Asct, F['K'], trans = 'T')
+                lapack.potrf(F['K']) 
+            else:
+                Asct = cholmod.spsolve(F['Sf'], A.T, sys = 7)
+                Asct = cholmod.spsolve(F['Sf'], Asct, sys = 4)
+                base.syrk(Asct, F['K'], trans = 'T')
+                Kf = cholmod.symbolic(F['K'])
+                cholmod.numeric(F['K'], Kf)
+
+        def solve(x, y, z):
+
+            # Solve
+            #
+            #     [ H          A'  GG'*W^{-1} ]   [ ux   ]   [ bx        ]
+            #     [ A          0   0          ] * [ uy   ] = [ by        ]
+            #     [ W^{-T}*GG  0   -I         ]   [ W*uz ]   [ W^{-T}*bz ]
+            #
+            # and return ux, uy, W*uz.
+            #
+            # If not F['singular']:
+            #
+            #     K*uy = A * S^{-1} * ( bx + GG'*W^{-1}*W^{-T}*bz ) - by
+            #     S*ux = bx + GG'*W^{-1}*W^{-T}*bz - A'*uy
+            #     W*uz = W^{-T} * ( GG*ux - bz ).
+            #    
+            # If F['singular']:
+            #
+            #     K*uy = A * S^{-1} * ( bx + GG'*W^{-1}*W^{-T}*bz + A'*by )
+            #            - by
+            #     S*ux = bx + GG'*W^{-1}*W^{-T}*bz + A'*by - A'*y.
+            #     W*uz = W^{-T} * ( GG*ux - bz ).
+
+            # z := W^{-1} * z = W^{-1} * bz
+            scale(z, W, trans = 'T', inverse = 'I') 
+
+            # If not F['singular']:
+            #     x := L^{-1} * P * (x + GGs'*z)
+            #        = L^{-1} * P * (x + GG'*W^{-1}*W^{-T}*bz)
+            #
+            # If F['singular']:
+            #     x := L^{-1} * P * (x + GGs'*z + A'*y))
+            #        = L^{-1} * P * (x + GG'*W^{-1}*W^{-T}*bz + A'*y)
+
+            if mnl: base.gemv(F['Dfs'], z, x, trans = 'T', beta = 1.0)
+            base.gemv(F['Gs'], z, x, offsetx = mnl, trans = 'T', 
+                beta = 1.0)
+            if F['singular']:
+                base.gemv(A, y, x, trans = 'T', beta = 1.0)
+            if type(F['S']) is matrix:
+                blas.trsv(F['S'], x)
+            else:
+                cholmod.solve(F['Sf'], x, sys = 7)
+                cholmod.solve(F['Sf'], x, sys = 4)
+
+            # y := K^{-1} * (Asc*x - y)
+            #    = K^{-1} * (A * S^{-1} * (bx + GG'*W^{-1}*W^{-T}*bz) - by)
+            #      (if not F['singular'])
+            #    = K^{-1} * (A * S^{-1} * (bx + GG'*W^{-1}*W^{-T}*bz + 
+            #      A'*by) - by)  
+            #      (if F['singular']).
+
+            base.gemv(Asct, x, y, trans = 'T', beta = -1.0)
+            if type(F['K']) is matrix:
+                lapack.potrs(F['K'], y)
+            else:
+                cholmod.solve(Kf, y)
+
+            # x := P' * L^{-T} * (x - Asc'*y)
+            #    = S^{-1} * (bx + GG'*W^{-1}*W^{-T}*bz - A'*y) 
+            #      (if not F['singular'])  
+            #    = S^{-1} * (bx + GG'*W^{-1}*W^{-T}*bz + A'*by - A'*y) 
+            #      (if F['singular'])
+
+            base.gemv(Asct, y, x, alpha = -1.0, beta = 1.0)
+            if type(F['S']) is matrix:
+                blas.trsv(F['S'], x, trans='T')
+            else:
+                cholmod.solve(F['Sf'], x, sys = 5)
+                cholmod.solve(F['Sf'], x, sys = 8)
+
+            # W*z := GGs*x - z = W^{-T} * (GG*x - bz)
+            if mnl:
+                base.gemv(F['Dfs'], x, z, beta = -1.0)
+            base.gemv(F['Gs'], x, z, beta = -1.0, offsety = mnl)
+
+        return solve
+
+    return factor
+
+
+def kkt_qr(G, dims, A):
+
+    # Solution of KKT equations with zero 1,1 block, by eliminating the
+    # equality constraints via a QR factorization, and solving the
+    # reduced KKT system by another QR factorization.
+    #
+    # Computes the QR factorization
+    #
+    #     A' = [Q1, Q2] * [R1; 0]
+    #
+    # and returns a function that (1) computes the QR factorization 
+    #
+    #     W^{-T} * G * Q2 = Q3 * R3
+    #
+    # (with columns of W^{-T}*G in packed storage), and (2) returns a 
+    # function for solving 
+    #
+    #     [ 0    A'   G'    ]   [ ux ]   [ bx ]
+    #     [ A    0    0     ] * [ uy ] = [ by ].
+    #     [ G    0   -W'*W  ]   [ uz ]   [ bz ]
+    #
+    # A is p x n and G is N x n where N = dims['l'] + sum(dims['q']) + 
+    # sum( k**2 for k in dims['s'] ).
+ 
+    p, n = A.size
+    cdim = dims['l'] + sum(dims['q']) + sum([ k**2 for k in dims['s'] ])
+    cdim_pckd = dims['l'] + sum(dims['q']) + sum([ k*(k+1)/2 for k in 
+        dims['s'] ])
+
+    # A' = [Q1, Q2] * [R1; 0]
+    if type(A) is matrix:
+        QA = +A.T
+    else:
+        QA = matrix(A.T)
+    tauA = matrix(0.0, (p,1))
+    lapack.geqrf(QA, tauA)
+
+    Gs = matrix(0.0, (cdim, n))
+    tauG = matrix(0.0, (n-p,1))
+    g = matrix(0.0, (cdim, 1))
+    u = matrix(0.0, (cdim_pckd, 1))
+    vv = matrix(0.0, (n,1))
+    w = matrix(0.0, (cdim_pckd, 1))
+
+    def factor(W):
+
+        # Gs = W^{-T}*G, in packed storage.
+        Gs[:,:] = G
+        scale(Gs, W, trans = 'T', inverse = 'I')
+        for k in xrange(n):
+            g[:] = Gs[:, k]
+            pack(g, Gs, dims, offsety = k*Gs.size[0])
+ 
+        # Gs := [ Gs1, Gs2 ] 
+        #     = Gs * [ Q1, Q2 ]
+        lapack.ormqr(QA, tauA, Gs, side = 'R', m = cdim_pckd)
+
+        # QR factorization Gs2 := [ Q3, Q4 ] * [ R3; 0 ] 
+        lapack.geqrf(Gs, tauG, n = n-p, m = cdim_pckd, offsetA = 
+                Gs.size[0]*p)
+
+        def solve(x, y, z):
+
+            # On entry, x, y, z contain bx, by, bz.  On exit, they 
+            # contain the solution x, y, W*z of
+            #
+            #     [ 0         A'  G'*W^{-1} ]   [ x   ]   [bx       ]
+            #     [ A         0   0         ] * [ y   ] = [by       ].
+            #     [ W^{-T}*G  0   -I        ]   [ W*z ]   [W^{-T}*bz]
+            #
+            # The system is solved in five steps:
+            #
+            #       w := W^{-T}*bz - Gs1*R1^{-T}*by 
+            #       u := R3^{-T}*Q2'*bx + Q3'*w
+            #     W*z := Q3*u - w
+            #       y := R1^{-1} * (Q1'*bx - Gs1'*(W*z))
+            #       x := [ Q1, Q2 ] * [ R1^{-T}*by;  R3^{-1}*u ]
+
+            # w := W^{-T} * bz in packed storage 
+            scale(z, W, trans = 'T', inverse = 'I')
+            pack(z, w, dims)
+
+            # vv := [ Q1'*bx;  R3^{-T}*Q2'*bx ]
+            blas.copy(x, vv)
+            lapack.ormqr(QA, tauA, vv, trans='T') 
+            lapack.trtrs(Gs, vv, uplo = 'U', trans = 'T', n = n-p, offsetA
+                = Gs.size[0]*p, offsetB = p)
+
+            # x[:p] := R1^{-T} * by 
+            blas.copy(y, x)
+            lapack.trtrs(QA, x, uplo = 'U', trans = 'T', n = p)
+
+            # w := w - Gs1 * x[:p] 
+            #    = W^{-T}*bz - Gs1*by 
+            blas.gemv(Gs, x, w, alpha = -1.0, beta = 1.0, n = p, m = 
+                cdim_pckd)
+
+            # u := [ Q3'*w + v[p:];  0 ]
+            #    = [ Q3'*w + R3^{-T}*Q2'*bx; 0 ]
+            blas.copy(w, u)
+            lapack.ormqr(Gs, tauG, u, trans = 'T', k = n-p, offsetA = 
+                Gs.size[0]*p, m = cdim_pckd)
+            blas.axpy(vv, u, offsetx = p, n = n-p)
+            blas.scal(0.0, u, offset = n-p)
+
+            # x[p:] := R3^{-1} * u[:n-p]  
+            blas.copy(u, x, offsety = p, n = n-p)
+            lapack.trtrs(Gs, x, uplo='U', n = n-p, offsetA = Gs.size[0]*p,
+                offsetB = p)
+
+            # x is now [ R1^{-T}*by;  R3^{-1}*u[:n-p] ]
+            # x := [Q1 Q2]*x
+            lapack.ormqr(QA, tauA, x) 
+ 
+            # u := [Q3, Q4] * u - w 
+            #    = Q3 * u[:n-p] - w
+            lapack.ormqr(Gs, tauG, u, k = n-p, m = cdim_pckd, offsetA = 
+                Gs.size[0]*p)
+            blas.axpy(w, u, alpha = -1.0)  
+
+            # y := R1^{-1} * ( v[:p] - Gs1'*u )
+            #    = R1^{-1} * ( Q1'*bx - Gs1'*u )
+            blas.copy(vv, y, n = p)
+            blas.gemv(Gs, u, y, m = cdim_pckd, n = p, trans = 'T', alpha = 
+                -1.0, beta = 1.0)
+            lapack.trtrs(QA, y, uplo = 'U', n=p) 
+
+            unpack(u, z, dims)
+
+        return solve
+
+    return factor
diff --git a/src/python/modeling.py b/src/python/modeling.py
index 9ebb80e..788b36f 100644
--- a/src/python/modeling.py
+++ b/src/python/modeling.py
@@ -5,9 +5,9 @@ Routines for specifying and solving convex optimization problems with
 piecewise-linear objective and constraint functions.
 """
 
-# Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+# Copyright 2004-2008 J. Dahl and L. Vandenberghe.
 # 
-# This file is part of CVXOPT version 0.9.2.
+# This file is part of CVXOPT version 0.9.3.
 #
 # CVXOPT is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
diff --git a/src/python/mosek.py b/src/python/mosek.py
index 90b88d4..7fdc4ca 100644
--- a/src/python/mosek.py
+++ b/src/python/mosek.py
@@ -2,9 +2,9 @@
 CVXOPT interface for MOSEK 5.0
 """
 
-# Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+# Copyright 2004-2008 J. Dahl and L. Vandenberghe.
 # 
-# This file is part of CVXOPT version 0.9.2.
+# This file is part of CVXOPT version 0.9.3.
 #
 # CVXOPT is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
diff --git a/src/python/printing.py b/src/python/printing.py
index 56c39dc..3e7ab68 100644
--- a/src/python/printing.py
+++ b/src/python/printing.py
@@ -1,7 +1,6 @@
-
-# Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+# Copyright 2004-2008 J. Dahl and L. Vandenberghe.
 # 
-# This file is part of CVXOPT version 0.9.2.
+# This file is part of CVXOPT version 0.9.3.
 #
 # CVXOPT is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
diff --git a/src/python/solvers.py b/src/python/solvers.py
index add37c4..b647376 100644
--- a/src/python/solvers.py
+++ b/src/python/solvers.py
@@ -1,7 +1,8 @@
 """
 Convex optimization solvers.
 
-conelp:   solves cone programs.
+conelp:   solves linear cone programs.
+coneqp:   solves quadratic cone programs.
 cp:       solves nonlinear convex problems.
 cpl:      solves nonlinear convex problems with linear objectives.
 gp:       solves geometric programs.
@@ -12,9 +13,9 @@ socp:     solves second-order cone programs.
 options:  dictionary with customizable algorithm parameters.
 """
 
-# Copyright 2004-2007 J. Dahl and L. Vandenberghe.
+# Copyright 2004-2008 J. Dahl and L. Vandenberghe.
 # 
-# This file is part of CVXOPT version 0.9.2.
+# This file is part of CVXOPT version 0.9.3.
 #
 # CVXOPT is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -30,10 +31,9 @@ options:  dictionary with customizable algorithm parameters.
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import cvxopt
-from cvxopt.cvxprog import cp, cpl, qp, gp
-from cvxopt.coneprog import conelp, lp, sdp, socp
-solvecp, solveqp, solvegp, solvelp, solvesdp = cp, qp, gp, lp, sdp
+from cvxopt.cvxprog import cp, cpl, gp 
+from cvxopt.coneprog import conelp, lp, sdp, socp, coneqp, qp
 options = {}
 cvxopt.cvxprog.options = options
 cvxopt.coneprog.options = options
-__all__ = ['conelp', 'cp', 'cpl', 'gp', 'lp', 'qp', 'sdp', 'socp', 'conelp']
+__all__ = ['conelp', 'coneqp', 'lp', 'socp', 'sdp', 'qp', 'cp', 'cpl', 'gp']
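
A minimal usage sketch of the conelp interface named above (made-up data; with no dims argument every row of G is treated as a componentwise inequality):

    from cvxopt.base import matrix
    from cvxopt import solvers

    # minimize -x1 - x2  subject to  x1 + x2 <= 2,  x >= 0
    c = matrix([-1.0, -1.0])
    G = matrix([[1.0, -1.0, 0.0], [1.0, 0.0, -1.0]])
    h = matrix([2.0, 0.0, 0.0])

    sol = solvers.conelp(c, G, h)
    print(sol['status'])
    print(sol['x'])
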
diff --git a/src/setup.py b/src/setup.py
index 92fdb4a..67c3483 100644
--- a/src/setup.py
+++ b/src/setup.py
@@ -152,7 +152,7 @@ extmods += [base, blas, lapack, umfpack, cholmod, amd]
 
 setup (name = 'cvxopt', 
     description = 'Convex optimization package',
-    version = '0.9.2', 
+    version = '0.9.3', 
     long_description = '''
 CVXOPT is a free software package for convex optimization based on the 
 Python programming language. It can be used with the interactive Python 

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/cvxopt.git