[r-cran-erm] 24/33: New upstream version 0.15-7

Andreas Tille tille at debian.org
Mon Dec 12 11:19:35 UTC 2016


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-erm.

commit 832e5a4161bc51a9416b1df269bd5c4708cc3136
Author: Andreas Tille <tille at debian.org>
Date:   Mon Dec 12 11:48:22 2016 +0100

    New upstream version 0.15-7
---
 .Rinstignore                                |    3 +
 COPYING                                     |  340 ------
 COPYRIGHTS                                  |   10 -
 DESCRIPTION                                 |   44 +-
 MD5                                         |  241 ++--
 NAMESPACE                                   |   50 +-
 NEWS                                        |  314 +++--
 R/IC.default.R                              |    0
 R/IC.ppar.r                                 |    0
 R/IC.r                                      |    0
 R/LLRA.R                                    |    0
 R/LLTM.R                                    |    0
 R/LPCM.R                                    |    0
 R/LRSM.R                                    |    0
 R/LRtest.R                                  |    4 +-
 R/LRtest.Rm.R                               |  416 +++----
 R/MLoef.R                                   |   24 +-
 R/NPtest.R                                  |  521 ++++++---
 R/PCM.R                                     |    0
 R/RM.R                                      |    0
 R/ROCR_aux.R                                |    0
 R/RSM.R                                     |    0
 R/SepRel.R                                  |   65 ++
 R/Waldtest.R                                |    0
 R/Waldtest.Rm.R                             |  317 +++---
 R/anova.eRm.R                               |   54 +
 R/anova.llra.R                              |    2 +-
 R/build_W.R                                 |    0
 R/checkdata.R                               |    0
 R/cmlprep.R                                 |    0
 R/coef.eRm.R                                |    0
 R/coef.ppar.R                               |    3 +-
 R/collapse_W.R                              |    0
 R/confint.eRm.r                             |    0
 R/confint.ppar.r                            |    0
 R/confint.threshold.r                       |    0
 R/cwdeviance.r                              |    0
 R/datcheck.LRtest.r                         |    0
 R/datcheck.R                                |  220 ++--
 R/datprep_LLTM.R                            |    0
 R/datprep_LPCM.R                            |    0
 R/datprep_LRSM.R                            |    0
 R/datprep_PCM.R                             |    0
 R/datprep_RM.R                              |    0
 R/datprep_RSM.R                             |    0
 R/fitcml.R                                  |  153 ++-
 R/invalid.R                                 |    0
 R/item_info.R                               |   81 ++
 R/itemfit.R                                 |    0
 R/itemfit.ppar.R                            |    4 +-
 R/labeling.internal.r                       |    0
 R/likLR.R                                   |   63 +-
 R/llra.datprep.R                            |    0
 R/llra.internals.R                          |  201 ++--
 R/logLik.eRm.r                              |    0
 R/logLik.ppar.r                             |    0
 R/model.matrix.eRm.R                        |    0
 R/performance.R                             |    0
 R/performance_measures.R                    |    0
 R/performance_plots.R                       |    0
 R/person.parameter.R                        |    0
 R/person.parameter.eRm.R                    |  491 ++++----
 R/personfit.R                               |    0
 R/personfit.ppar.R                          |   49 +-
 R/phi.range.R                               |    9 +
 R/{pifit.internal.r => pifit.internal.R}    |   22 +-
 R/plist.internal.R                          |   35 +-
 R/plot.ppar.r                               |   42 +-
 R/plotCI.R                                  |    0
 R/plotDIF.R                                 |    6 +-
 R/plotGOF.LR.R                              |  352 +++---
 R/plotGOF.R                                 |    3 +-
 R/plotGR.R                                  |  101 +-
 R/plotICC.R                                 |    3 +-
 R/plotICC.Rm.R                              |  274 +++--
 R/plotINFO.R                                |   29 +
 R/plotPImap.R                               |    0
 R/plotPWmap.R                               |  122 +-
 R/plotTR.R                                  |   60 +-
 R/plotjointICC.R                            |    4 +-
 R/plotjointICC.dRm.R                        |   69 +-
 R/pmat.R                                    |    0
 R/pmat.default.R                            |    0
 R/pmat.ppar.R                               |    0
 R/prediction.R                              |    0
 R/print.ICr.r                               |    0
 R/print.LR.R                                |    0
 R/print.MLoef.r                             |    0
 R/print.eRm.R                               |    4 +
 R/print.ifit.R                              |    0
 R/print.llra.R                              |    0
 R/print.logLik.eRm.R                        |    0
 R/print.logLik.ppar.r                       |    0
 R/print.pfit.R                              |    0
 R/print.ppar.R                              |    0
 R/print.resid.R                             |    0
 R/print.step.r                              |    0
 R/print.summary.llra.R                      |    0
 R/print.threshold.r                         |    0
 R/print.wald.R                              |    0
 R/residuals.ppar.R                          |    0
 R/rsampler.R                                |   48 +
 R/rsctrl.R                                  |   15 +
 R/rserror.R                                 |   32 +
 R/rsextrmat.R                               |   15 +
 R/rsextrobj.R                               |   32 +
 R/rstats.R                                  |   25 +
 R/rsunpack.R                                |   37 +
 R/sim.2pl.R                                 |    0
 R/sim.locdep.R                              |    0
 R/sim.rasch.R                               |    0
 R/sim.xdim.R                                |   28 -
 R/stepwiseIt.R                              |    0
 R/stepwiseIt.eRm.R                          |    0
 R/summary.LR.r                              |    2 +-
 R/summary.MLoef.r                           |    0
 R/summary.RSctr.R                           |   12 +
 R/summary.RSmpl.R                           |   15 +
 R/summary.RSmplext.R                        |   14 +
 R/summary.eRm.R                             |   86 +-
 R/summary.llra.R                            |    4 +-
 R/summary.ppar.R                            |    0
 R/summary.threshold.r                       |    0
 R/test_info.R                               |   18 +
 R/thresholds.eRm.r                          |   82 +-
 R/thresholds.r                              |    0
 R/vcov.eRm.R                                |    0
 R/zzz.R                                     |    4 +
 build/partial.rdb                           |  Bin 0 -> 6461 bytes
 build/vignette.rds                          |  Bin 0 -> 192 bytes
 data/llraDat1.rda                           |  Bin 2569 -> 1897 bytes
 data/llraDat2.rda                           |  Bin 1187 -> 933 bytes
 data/llradat3.rda                           |  Bin 324 -> 325 bytes
 data/lltmdat1.rda                           |  Bin 642 -> 645 bytes
 data/lltmdat2.rda                           |  Bin 121 -> 124 bytes
 data/lpcmdat.rda                            |  Bin 211 -> 211 bytes
 data/lrsmdat.rda                            |  Bin 315 -> 315 bytes
 data/pcmdat.rda                             |  Bin 208 -> 209 bytes
 data/pcmdat2.rda                            |  Bin 478 -> 481 bytes
 data/raschdat1.rda                          |  Bin 641 -> 642 bytes
 data/raschdat1_RM_fitted.RData              |  Bin 0 -> 6758 bytes
 data/raschdat1_RM_lrres2.RData              |  Bin 0 -> 13618 bytes
 data/raschdat1_RM_plotDIF.RData             |  Bin 0 -> 26834 bytes
 data/raschdat2.rda                          |  Bin 151 -> 153 bytes
 data/raschdat3.rda                          |  Bin 0 -> 994 bytes
 data/raschdat4.rda                          |  Bin 0 -> 1251 bytes
 data/rsmdat.rda                             |  Bin 196 -> 197 bytes
 data/xmpl.RData                             |  Bin 0 -> 2839 bytes
 data/xmplbig.RData                          |  Bin 0 -> 106675 bytes
 inst/CITATION                               |  129 +++
 inst/NEWS.Rd                                |  170 +++
 inst/NEWS.pdf                               |  Bin 0 -> 38822 bytes
 inst/doc/UCML.jpg                           |  Bin 42742 -> 0 bytes
 inst/doc/Z.cls                              |  239 ----
 inst/doc/eRm.R                              |  107 ++
 inst/doc/eRm.Rnw                            | 1449 +++++++++++------------
 inst/doc/eRm.pdf                            |  Bin 469512 -> 501276 bytes
 inst/doc/eRmvig.bib                         |  695 -----------
 inst/doc/index.html.old                     |   10 -
 inst/doc/jss.bst                            | 1647 ---------------------------
 man/IC.Rd                                   |    5 +-
 man/LLRA.Rd                                 |   53 +-
 man/LLTM.Rd                                 |   10 +-
 man/LPCM.Rd                                 |   12 +-
 man/LRSM.Rd                                 |   12 +-
 man/LRtest.Rd                               |  206 ++--
 man/MLoef.Rd                                |   52 +-
 man/NPtest.Rd                               |  287 +++--
 man/PCM.Rd                                  |    6 +-
 man/RM.Rd                                   |   16 +-
 man/RSM.Rd                                  |   10 +-
 man/RSctr.Rd                                |   46 +
 man/RSmpl.Rd                                |   47 +
 man/RaschSampler.Rd                         |   75 ++
 man/SepRel.Rd                               |   78 ++
 man/Waldtest.Rd                             |  120 +-
 man/anova.eRm.Rd                            |   53 +
 man/anova.llra.Rd                           |   47 +-
 man/build_W.Rd                              |   29 +-
 man/collapse_W.Rd                           |   40 +-
 man/eRm-package.Rd                          |   34 +-
 man/eRm.data.Rd                             |   43 +
 man/gofIRT.Rd                               |    4 +-
 man/item_info.Rd                            |   51 +
 man/itemfit.ppar.Rd                         |    2 +-
 man/llra.datprep.Rd                         |    5 +-
 man/llraDat1.Rd                             |   68 +-
 man/llraDat2.Rd                             |   60 +-
 man/llradat3.Rd                             |   24 +-
 man/person.parameter.Rd                     |   21 +-
 man/phi.range.Rd                            |   16 +
 man/plotDIF.Rd                              |   45 +-
 man/plotGR.Rd                               |   32 +-
 man/plotICC.Rd                              |    6 +-
 man/plotINFO.Rd                             |   26 +
 man/plotPImap.Rd                            |    6 +-
 man/plotPWmap.Rd                            |   28 +-
 man/plotTR.Rd                               |   30 +-
 man/predict.ppar.Rd                         |    4 +-
 man/print.eRm.Rd                            |    7 +-
 man/raschdat.Rd                             |   30 -
 man/rsampler.Rd                             |   57 +
 man/rsctrl.Rd                               |   84 ++
 man/rsextrmat.Rd                            |   29 +
 man/rsextrobj.Rd                            |   70 ++
 man/rstats.Rd                               |   67 ++
 man/sim.2pl.Rd                              |   61 +-
 man/sim.locdep.Rd                           |    1 +
 man/sim.rasch.Rd                            |    1 +
 man/sim.xdim.Rd                             |    1 +
 man/stepwiseIt.Rd                           |    4 +-
 man/summary.RSctr.Rd                        |   20 +
 man/summary.RSmpl.Rd                        |   32 +
 man/summary.llra.Rd                         |   40 +-
 man/test_info.Rd                            |   42 +
 man/thresholds.Rd                           |    7 +-
 man/xmpl.Rd                                 |   26 +
 src/RaschSampler.f90                        |  550 +++++++++
 src/components.c                            |    0
 src/components.h                            |    0
 src/geodist.c                               |    0
 src/geodist.h                               |    0
 vignettes/UCML.pdf                          |  Bin 0 -> 40694 bytes
 vignettes/eRm.Rnw                           |  866 ++++++++++++++
 {inst/doc => vignettes}/eRm_object_tree.pdf |  Bin
 vignettes/eRmvig.bib                        |  853 ++++++++++++++
 {inst/doc => vignettes}/modelhierarchy.pdf  |  Bin
 227 files changed, 7885 insertions(+), 6392 deletions(-)

diff --git a/.Rinstignore b/.Rinstignore
new file mode 100644
index 0000000..369ed7c
--- /dev/null
+++ b/.Rinstignore
@@ -0,0 +1,3 @@
+eRm_object_tree\.pdf$
+eRmvig\.bib$
+modelhierarchy\.pdf$
diff --git a/COPYING b/COPYING
deleted file mode 100755
index d60c31a..0000000
--- a/COPYING
+++ /dev/null
@@ -1,340 +0,0 @@
-		    GNU GENERAL PUBLIC LICENSE
-		       Version 2, June 1991
-
- Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-			    Preamble
-
-  The licenses for most software are designed to take away your
-freedom to share and change it.  By contrast, the GNU General Public
-License is intended to guarantee your freedom to share and change free
-software--to make sure the software is free for all its users.  This
-General Public License applies to most of the Free Software
-Foundation's software and to any other program whose authors commit to
-using it.  (Some other Free Software Foundation software is covered by
-the GNU Library General Public License instead.)  You can apply it to
-your programs, too.
-
-  When we speak of free software, we are referring to freedom, not
-price.  Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-this service if you wish), that you receive source code or can get it
-if you want it, that you can change the software or use pieces of it
-in new free programs; and that you know you can do these things.
-
-  To protect your rights, we need to make restrictions that forbid
-anyone to deny you these rights or to ask you to surrender the rights.
-These restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
-
-  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must give the recipients all the rights that
-you have.  You must make sure that they, too, receive or can get the
-source code.  And you must show them these terms so they know their
-rights.
-
-  We protect your rights with two steps: (1) copyright the software, and
-(2) offer you this license which gives you legal permission to copy,
-distribute and/or modify the software.
-
-  Also, for each author's protection and ours, we want to make certain
-that everyone understands that there is no warranty for this free
-software.  If the software is modified by someone else and passed on, we
-want its recipients to know that what they have is not the original, so
-that any problems introduced by others will not reflect on the original
-authors' reputations.
-
-  Finally, any free program is threatened constantly by software
-patents.  We wish to avoid the danger that redistributors of a free
-program will individually obtain patent licenses, in effect making the
-program proprietary.  To prevent this, we have made it clear that any
-patent must be licensed for everyone's free use or not licensed at all.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.
-

-		    GNU GENERAL PUBLIC LICENSE
-   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-  0. This License applies to any program or other work which contains
-a notice placed by the copyright holder saying it may be distributed
-under the terms of this General Public License.  The "Program", below,
-refers to any such program or work, and a "work based on the Program"
-means either the Program or any derivative work under copyright law:
-that is to say, a work containing the Program or a portion of it,
-either verbatim or with modifications and/or translated into another
-language.  (Hereinafter, translation is included without limitation in
-the term "modification".)  Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope.  The act of
-running the Program is not restricted, and the output from the Program
-is covered only if its contents constitute a work based on the
-Program (independent of having been made by running the Program).
-Whether that is true depends on what the Program does.
-
-  1. You may copy and distribute verbatim copies of the Program's
-source code as you receive it, in any medium, provided that you
-conspicuously and appropriately publish on each copy an appropriate
-copyright notice and disclaimer of warranty; keep intact all the
-notices that refer to this License and to the absence of any warranty;
-and give any other recipients of the Program a copy of this License
-along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and
-you may at your option offer warranty protection in exchange for a fee.
-
-  2. You may modify your copy or copies of the Program or any portion
-of it, thus forming a work based on the Program, and copy and
-distribute such modifications or work under the terms of Section 1
-above, provided that you also meet all of these conditions:
-
-    a) You must cause the modified files to carry prominent notices
-    stating that you changed the files and the date of any change.
-
-    b) You must cause any work that you distribute or publish, that in
-    whole or in part contains or is derived from the Program or any
-    part thereof, to be licensed as a whole at no charge to all third
-    parties under the terms of this License.
-
-    c) If the modified program normally reads commands interactively
-    when run, you must cause it, when started running for such
-    interactive use in the most ordinary way, to print or display an
-    announcement including an appropriate copyright notice and a
-    notice that there is no warranty (or else, saying that you provide
-    a warranty) and that users may redistribute the program under
-    these conditions, and telling the user how to view a copy of this
-    License.  (Exception: if the Program itself is interactive but
-    does not normally print such an announcement, your work based on
-    the Program is not required to print an announcement.)
-

-These requirements apply to the modified work as a whole.  If
-identifiable sections of that work are not derived from the Program,
-and can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works.  But when you
-distribute the same sections as part of a whole which is a work based
-on the Program, the distribution of the whole must be on the terms of
-this License, whose permissions for other licensees extend to the
-entire whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program
-with the Program (or with a work based on the Program) on a volume of
-a storage or distribution medium does not bring the other work under
-the scope of this License.
-
-  3. You may copy and distribute the Program (or a work based on it,
-under Section 2) in object code or executable form under the terms of
-Sections 1 and 2 above provided that you also do one of the following:
-
-    a) Accompany it with the complete corresponding machine-readable
-    source code, which must be distributed under the terms of Sections
-    1 and 2 above on a medium customarily used for software interchange; or,
-
-    b) Accompany it with a written offer, valid for at least three
-    years, to give any third party, for a charge no more than your
-    cost of physically performing source distribution, a complete
-    machine-readable copy of the corresponding source code, to be
-    distributed under the terms of Sections 1 and 2 above on a medium
-    customarily used for software interchange; or,
-
-    c) Accompany it with the information you received as to the offer
-    to distribute corresponding source code.  (This alternative is
-    allowed only for noncommercial distribution and only if you
-    received the program in object code or executable form with such
-    an offer, in accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for
-making modifications to it.  For an executable work, complete source
-code means all the source code for all modules it contains, plus any
-associated interface definition files, plus the scripts used to
-control compilation and installation of the executable.  However, as a
-special exception, the source code distributed need not include
-anything that is normally distributed (in either source or binary
-form) with the major components (compiler, kernel, and so on) of the
-operating system on which the executable runs, unless that component
-itself accompanies the executable.
-
-If distribution of executable or object code is made by offering
-access to copy from a designated place, then offering equivalent
-access to copy the source code from the same place counts as
-distribution of the source code, even though third parties are not
-compelled to copy the source along with the object code.
-

-  4. You may not copy, modify, sublicense, or distribute the Program
-except as expressly provided under this License.  Any attempt
-otherwise to copy, modify, sublicense or distribute the Program is
-void, and will automatically terminate your rights under this License.
-However, parties who have received copies, or rights, from you under
-this License will not have their licenses terminated so long as such
-parties remain in full compliance.
-
-  5. You are not required to accept this License, since you have not
-signed it.  However, nothing else grants you permission to modify or
-distribute the Program or its derivative works.  These actions are
-prohibited by law if you do not accept this License.  Therefore, by
-modifying or distributing the Program (or any work based on the
-Program), you indicate your acceptance of this License to do so, and
-all its terms and conditions for copying, distributing or modifying
-the Program or works based on it.
-
-  6. Each time you redistribute the Program (or any work based on the
-Program), the recipient automatically receives a license from the
-original licensor to copy, distribute or modify the Program subject to
-these terms and conditions.  You may not impose any further
-restrictions on the recipients' exercise of the rights granted herein.
-You are not responsible for enforcing compliance by third parties to
-this License.
-
-  7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot
-distribute so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you
-may not distribute the Program at all.  For example, if a patent
-license would not permit royalty-free redistribution of the Program by
-all those who receive copies directly or indirectly through you, then
-the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under
-any particular circumstance, the balance of the section is intended to
-apply and the section as a whole is intended to apply in other
-circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system, which is
-implemented by public license practices.  Many people have made
-generous contributions to the wide range of software distributed
-through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing
-to distribute software through any other system and a licensee cannot
-impose that choice.
-
-This section is intended to make thoroughly clear what is believed to
-be a consequence of the rest of this License.
-

-  8. If the distribution and/or use of the Program is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Program under this License
-may add an explicit geographical distribution limitation excluding
-those countries, so that distribution is permitted only in or among
-countries not thus excluded.  In such case, this License incorporates
-the limitation as if written in the body of this License.
-
-  9. The Free Software Foundation may publish revised and/or new versions
-of the General Public License from time to time.  Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-Each version is given a distinguishing version number.  If the Program
-specifies a version number of this License which applies to it and "any
-later version", you have the option of following the terms and conditions
-either of that version or of any later version published by the Free
-Software Foundation.  If the Program does not specify a version number of
-this License, you may choose any version ever published by the Free Software
-Foundation.
-
-  10. If you wish to incorporate parts of the Program into other free
-programs whose distribution conditions are different, write to the author
-to ask for permission.  For software which is copyrighted by the Free
-Software Foundation, write to the Free Software Foundation; we sometimes
-make exceptions for this.  Our decision will be guided by the two goals
-of preserving the free status of all derivatives of our free software and
-of promoting the sharing and reuse of software generally.
-
-			    NO WARRANTY
-
-  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
-FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
-OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
-PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
-OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
-TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
-PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
-REPAIR OR CORRECTION.
-
-  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
-REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
-INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
-OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
-TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
-YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
-PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGES.
-
-		     END OF TERMS AND CONDITIONS
-

-	    How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-convey the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
-
-    Gnomovision version 69, Copyright (C) year  name of author
-    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, the commands you use may
-be called something other than `show w' and `show c'; they could even be
-mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary.  Here is a sample; alter the names:
-
-  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
-  `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
-  <signature of Ty Coon>, 1 April 1989
-  Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into
-proprietary programs.  If your program is a subroutine library, you may
-consider it more useful to permit linking proprietary applications with the
-library.  If this is what you want to do, use the GNU Library General
-Public License instead of this License.
diff --git a/COPYRIGHTS b/COPYRIGHTS
deleted file mode 100755
index bfcbe3f..0000000
--- a/COPYRIGHTS
+++ /dev/null
@@ -1,10 +0,0 @@
-COPYRIGHT STATUS
-----------------
-
-This code is
-
-  Copyright (C) 2009 Patrick Mair and Reinhold Hatzinger
-
-All code is subject to the GNU General Public License, Version 2. See
-the file COPYING for the exact conditions under which you may
-redistribute it.
diff --git a/DESCRIPTION b/DESCRIPTION
old mode 100755
new mode 100644
index 1b69213..0cca1c0
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,28 +1,28 @@
 Package: eRm
 Type: Package
-Title: Extended Rasch Modeling.
-Version: 0.14-0
-Date: 2011-06-05
-Author: Patrick Mair, Reinhold Hatzinger, Marco Maier
-Maintainer: Patrick Mair <patrick.mair at wu.ac.at>
-Description: eRm fits Rasch models (RM), linear logistic test models
-        (LLTM), rating scale model (RSM), linear rating scale models
-        (LRSM), partial credit models (PCM), and linear partial credit
-        models (LPCM). Missing values are allowed in the data matrix.
-        Additional features are the ML estimation of the person
-        parameters, Andersen's LR-test, item-specific Wald test,
-        Martin-Loef-Test, nonparametric Monte-Carlo Tests, itemfit and
-        personfit statistics including infit and outfit measures,
-        various ICC and related plots, automated stepwise item
-        elimination, simulation module for various binary data
-        matrices. An eRm platform is provided at R-forge (see URL).
-License: GPL
+Title: Extended Rasch Modeling
+Version: 0.15-7
+Date: 2016-11-20
+Authors@R: c(person(given="Patrick", family="Mair", email="mair at fas.harvard.edu", role=c("cre","aut")),
+             person(given="Reinhold", family="Hatzinger", role="aut"),
+             person(given=c("Marco", "J."), family="Maier", email="marco.maier at wu.ac.at", role="aut"),
+             person(given="Thomas", family="Rusch", email="thomas.rusch at wu.ac.at", role="ctb"))
+Description: Fits Rasch models (RM), linear logistic test models (LLTM), rating scale model (RSM), linear rating scale models (LRSM), partial credit models (PCM), and linear partial credit models (LPCM).  Missing values are allowed in the data matrix.  Additional features are the ML estimation of the person parameters, Andersen's LR-test, item-specific Wald test, Martin-Löf-Test, nonparametric Monte-Carlo Tests, itemfit and personfit statistics including infit and outfit measures, variou [...]
+License: GPL-2
 URL: http://r-forge.r-project.org/projects/erm/
-Imports: graphics, stats, MASS, methods, Matrix
-Depends: R (>= 2.12.0), splines, methods, RaschSampler
-Suggests: lattice
+Imports: graphics, grDevices, stats, methods, MASS, splines, Matrix,
+        lattice
+Depends: R (>= 3.0.0)
+Encoding: UTF-8
 LazyData: yes
 LazyLoad: yes
-Packaged: 2011-06-05 11:22:28 UTC; hatz
+ByteCompile: yes
+NeedsCompilation: yes
+Packaged: 2016-11-20 18:02:22 UTC; patrick
+Author: Patrick Mair [cre, aut],
+  Reinhold Hatzinger [aut],
+  Marco J. Maier [aut],
+  Thomas Rusch [ctb]
+Maintainer: Patrick Mair <mair at fas.harvard.edu>
 Repository: CRAN
-Date/Publication: 2011-06-06 07:03:22
+Date/Publication: 2016-11-20 20:11:43
diff --git a/MD5 b/MD5
index cd4f507..dca2fdb 100644
--- a/MD5
+++ b/MD5
@@ -1,8 +1,6 @@
-94d55d512a9ba36caa9b7df079bae19f *COPYING
-d86eed66bba841658207fd609fdd3d4a *COPYRIGHTS
-c824379f1e7400c15cea5f79c12608b4 *DESCRIPTION
-2fa574614347ba33bf658a819b4fe98a *NAMESPACE
-c3686599805d8b0a1fcd21e60698b311 *NEWS
+4d241125d433563926acfd3ec1283ef1 *DESCRIPTION
+028c26f49aa69c432960400e15f48958 *NAMESPACE
+1212ac1b95ae050901d96389ce7ca292 *NEWS
 d2d22f8b2176c97ca7df720f7a089794 *R/IC.default.R
 2bedf16f88332f159bfad9cdb36f4689 *R/IC.ppar.r
 2e4b547352dd7af179635bc46f9cdf87 *R/IC.r
@@ -10,48 +8,51 @@ d2d22f8b2176c97ca7df720f7a089794 *R/IC.default.R
 7826640f34c84fbb51764108b0961824 *R/LLTM.R
 bf407bc699aa2a8a6bb1e507cc32f049 *R/LPCM.R
 559e1ab0a83b6e78e7ae67ddb4a41b1e *R/LRSM.R
-e09054c9e41da89f00cf09d2ca0b67b2 *R/LRtest.R
-2d59903e184e948d35803dc9745d1044 *R/LRtest.Rm.R
-c4570ad02cb02e2263ff64507e5aa619 *R/MLoef.R
-85771f7f5668ecde577700138d893add *R/NPtest.R
+1f36a87476e55123248725365c74d9bf *R/LRtest.R
+aed81214a0b03997d3b734ae90737634 *R/LRtest.Rm.R
+84fce84bef7d147fee133562dbaf065c *R/MLoef.R
+8be77686eb282ce16a87580d8d0a984a *R/NPtest.R
 bbd93fc79991f98878c94fff0bcfaf30 *R/PCM.R
 136ebfb819fe8287b48d0d33c194815d *R/RM.R
 8f3a897135420e5fb99b1adebc4c8765 *R/ROCR_aux.R
 83e714c0d5bd37f8299292e3bf574a09 *R/RSM.R
 bc7be7af543fb355b3452bade6befcef *R/Rsquared.R
+67e65b02c688ddf27d12f33b5a855c21 *R/SepRel.R
 f2527f30f48fcf4cfda14c9951186ead *R/Waldtest.R
-80e1701b16f045aaf746f28dc808c98e *R/Waldtest.Rm.R
-4de7e90e5eba8d4d90d7005bc6b9c38d *R/anova.llra.R
+e3cc111546e6458ff6baa890d1e54e7c *R/Waldtest.Rm.R
+0c232a63ae4fb206d91678d2b9c30f1a *R/anova.eRm.R
+ba403c28ae655f2a76c605d43123512c *R/anova.llra.R
 b2e5064d8aa93edded5747f9f64f315c *R/build_W.R
 a8aa6e591ab2b76117211cdef4cdcf52 *R/checkdata.R
 40e9100130fde20aa73a04818d9909b3 *R/cldeviance.R
 013c3532078f0f62c326f5f7fe10e23e *R/cmlprep.R
 9f91686f144add619bbaaec474974fbb *R/coef.eRm.R
-0218542e5f566c443de1054b43d1dc6e *R/coef.ppar.R
+e7bc0b0d98d621a66a84a196d057c00b *R/coef.ppar.R
 2a665e30eb32b2b8b9a7ee2cb846831b *R/collapse_W.R
 323a5c6fe9d49f18031aeeff22de0ee5 *R/confint.eRm.r
 a98e2b8f8c0dc9f184159e94c210611b *R/confint.ppar.r
 fd0612d832c6044df83689d42631a9af *R/confint.threshold.r
 b9da6dc9d83605723db2e81c69fd81b8 *R/cwdeviance.r
 9f6fff516f1257a642e18175e82ec53d *R/datcheck.LRtest.r
-4f7c2902266ffc2b21916d2791287ef9 *R/datcheck.R
+4c96fecd0305d59cb4c7433f54c63125 *R/datcheck.R
 3ed2f9de5c5ec9a4d3dd66b40e0425d1 *R/datprep_LLTM.R
 b9868aa42173751e8d9fd8a1a16c49e1 *R/datprep_LPCM.R
 5bd2486b21ae4a9f196c7445aea4c958 *R/datprep_LRSM.R
 8eff0f65d596844d9810c3cfa58bc0a2 *R/datprep_PCM.R
 ca0af91c768a48fff0782a8b92d5ed9d *R/datprep_RM.R
 977fc96896e111fcf2692e274d9be346 *R/datprep_RSM.R
-39b6410834977409a957f4dc85fb8aaf *R/fitcml.R
+8dc12ddd263a8642fd9e72b7fb905b4b *R/fitcml.R
 4878fc213dcd9d9a0b3b59353f06eebe *R/gofIRT.R
 57242fed1520c63de85bdc52e6a70c31 *R/gofIRT.ppar.R
 f9850156706dcad20dfedfc80c220b04 *R/hoslem.R
 71212f4c14003458fc3e30638f339629 *R/invalid.R
+8dbe4006ccd80c5ef48f4a20cd34f70a *R/item_info.R
 e2acfcf0f69a54046e7500f37b11cce7 *R/itemfit.R
-f242e9d1dc269c42da573caeb5beb426 *R/itemfit.ppar.R
+19b775ca4bcc82372376ae4db53800c1 *R/itemfit.ppar.R
 d12c6019315fbc11dca8ce109e1883ab *R/labeling.internal.r
-c6cc30c6760a3e4fd1213ec55bd086e9 *R/likLR.R
+b442d226c694aed7bc57103a009bc7ef *R/likLR.R
 9c56aa9ca79069fd2fa2a02f14d8e7b4 *R/llra.datprep.R
-b120a9958f01935b8829c7473345e9de *R/llra.internals.R
+927abf7f0a6f93732d2a2365f67df008 *R/llra.internals.R
 5bea9ef9bfb310d9d57960b335cc6e2f *R/logLik.eRm.r
 6f889b8ad497988f236902afa009cba3 *R/logLik.ppar.r
 a4b58be8d00e8a61a442e6e7f5709138 *R/model.matrix.eRm.R
@@ -59,24 +60,26 @@ a4b58be8d00e8a61a442e6e7f5709138 *R/model.matrix.eRm.R
 ad14b6669d085f886f032ff60fd2643b *R/performance_measures.R
 166e801703a416aa2a2cdbde298651db *R/performance_plots.R
 adabedf84b1cecfce267c7be91d7cbbb *R/person.parameter.R
-e91f0d25dcaa66df375fa34312e0f36c *R/person.parameter.eRm.R
+e4d07cd0462d21c9e56042e4661440be *R/person.parameter.eRm.R
 1580451632297ddae0e4ae6c262a9bb0 *R/personfit.R
-68cc2204b1c470c10a57948e6629d542 *R/personfit.ppar.R
-5211c838890a01fe791e26e5f2ba10c8 *R/pifit.internal.r
-6dc484eac9810ce58aa2a06fab305a56 *R/plist.internal.R
-e050e7af6f9bb0a52bf55026d3101d27 *R/plot.ppar.r
+15a3f4fa5f0cd5dc7fc051e1b8cfb0b8 *R/personfit.ppar.R
+37383b86a5a2d72e3c196556e3eaf09c *R/phi.range.R
+2da3e508955ee682575545a4386caf98 *R/pifit.internal.R
+4fd3e6bc84d8590d22bcbd1f7ff8c0d0 *R/plist.internal.R
+f57714dbe81ed2a76321d62598d810d7 *R/plot.ppar.r
 fa5b8a3513fcdf05e0533c7549d7adb7 *R/plotCI.R
-5b20ac29faf764d0e70892b578012162 *R/plotDIF.R
-a8afc0168d588f3380174f8e7b6d37cd *R/plotGOF.LR.R
-c535a897ffcc871b893cbbee237baf70 *R/plotGOF.R
-c0c259828a7490f7cf87888394e31214 *R/plotGR.R
-e5df0fcbc0cf84479dd1af053994135c *R/plotICC.R
-0add981ba89f86649c26ca8c5bd818b6 *R/plotICC.Rm.R
+acf0ac06eaf1147d8cac88a5b9849945 *R/plotDIF.R
+731caac5fa9067f4c2cf4d557ba85f0b *R/plotGOF.LR.R
+4cc4847166cd7e6949152b32a7552bc9 *R/plotGOF.R
+5194f5d3f572f07456c9364c6a23eb29 *R/plotGR.R
+c65fe90a731609939b208064669aa5c6 *R/plotICC.R
+3cb469fc8bcecbaa8c600776f3794196 *R/plotICC.Rm.R
+70fe5fa87bdce761a6f918b8164d767c *R/plotINFO.R
 e9b4f3c4e961531fbd204c4e52e056a4 *R/plotPImap.R
-53328e5a237f91c7f03e1447430a3580 *R/plotPWmap.R
-9f9956d4213bf12105e1985ff7c2fc28 *R/plotTR.R
-817427f50be483fdf3dc5be097121c7f *R/plotjointICC.R
-30e0c7cadcb3d360f9373b6ffdc58912 *R/plotjointICC.dRm.R
+a30b60f631c21127a6c19aa07bb5d2ea *R/plotPWmap.R
+a345184af2d832d0441725524bb35bfc *R/plotTR.R
+f3331bb586c29beaa1c691caf9a4f50e *R/plotjointICC.R
+bd2e30e5cc15ad9b2ad961dac5a75de0 *R/plotjointICC.dRm.R
 503d39882ff1519f9a9c822525df881b *R/pmat.R
 978c189f51d4c525f6a1d53377c72fab *R/pmat.default.R
 3b8b125515c56d967e0184d9bae4fe87 *R/pmat.ppar.R
@@ -85,7 +88,7 @@ c6520d9937a5432bba0921b1c1ddc154 *R/prediction.R
 2ef33bcb8ff18bada70cc9cd28167b6b *R/print.ICr.r
 e419bb90c13b3e34d60f809d2aab7a8c *R/print.LR.R
 baa854c72760319d47628d9cdeab5847 *R/print.MLoef.r
-063e72adfa562645ff3510ff2f93fc8a *R/print.eRm.R
+4fab83773c727ff7382af7fdf6788656 *R/print.eRm.R
 c5312a2efff02316933e78574a2d7072 *R/print.gof.R
 1a8e26e12c84b76aac1cccc7ecafd6c0 *R/print.ifit.R
 eceef6e388090c985b69bb7495fd898b *R/print.llra.R
@@ -100,84 +103,124 @@ cb2c9116b9ede352bb567fa9eb8ac393 *R/print.resid.R
 2ba6872ce486a3b88889f50ab34046f1 *R/print.wald.R
 681692c0b23c20c07fd6e27eb6823d03 *R/residuals.ppar.R
 7a34777d662d5c0f5c9c1e473bf52746 *R/rostdeviance.r
+da2aaf90411032f77c125a1802a17c9f *R/rsampler.R
+58c7acdcb60e5fef2d4bad8ff79f3751 *R/rsctrl.R
+96f255d9169d2c9b8b0121c457df43de *R/rserror.R
+84af75ad5133358a996b4f1334328453 *R/rsextrmat.R
+7b5ffeb67fffbae19d8cea59e7d000e3 *R/rsextrobj.R
+f5999b30f71b9b9e1635a2167d217c9d *R/rstats.R
+e1414590f20a4138cd04abda1c5066f4 *R/rsunpack.R
 b3f9c25354a6b9bddf925bcab2648687 *R/sim.2pl.R
 4b54d34fbb18f74fac3b93da2c11a84f *R/sim.locdep.R
 f882b1817e7bbcef9b27c276f257a626 *R/sim.rasch.R
-60c5d7095026f9b3f7ee2dc6321fbc25 *R/sim.xdim.R
+74ee5835cc74c86395748a51c3e87073 *R/sim.xdim.R
 50fba0ca19951546155a7d644228d24f *R/stepwiseIt.R
 ec78daaa3f45ed12a62e483f3ee1ce63 *R/stepwiseIt.eRm.R
-913ccfc27b8531d6efe242b877b45d30 *R/summary.LR.r
+a389fc4a22eb006dd3226f10f8790f58 *R/summary.LR.r
 65d0e9b84e5f96d02e9de47a6e8a5a90 *R/summary.MLoef.r
-dca2b5cf6a23b084d1fcee50a51050b6 *R/summary.eRm.R
+ad5211ab843e04ee7e0564032f0a5f1e *R/summary.RSctr.R
+bb58a9a1c07452f36f935b101a61e52a *R/summary.RSmpl.R
+c164f97f003fe597393346ebc34ed98f *R/summary.RSmplext.R
+e6db9b0df7e9ea045e8b57d0f7a13f8b *R/summary.eRm.R
 a201d1040cd729f62e09a45ff0402957 *R/summary.gof.R
-91593cc56050024103f88648bd3fd9d6 *R/summary.llra.R
+c03bb3ccc34c43a90ce88a799cda8cc3 *R/summary.llra.R
 bde67b4f83ca342816e87e9c39fde213 *R/summary.ppar.R
 d28668718027ba4f7d4210dcac0c6b76 *R/summary.threshold.r
-b80b7dbe79cca5167e85e53674887e40 *R/thresholds.eRm.r
+a00c44137f3f341aa1a71db916a38a85 *R/test_info.R
+a6419721b8ee926aa7c97c9810fa0de9 *R/thresholds.eRm.r
 3c6f6ec631aeacf22e73fadf8074fc12 *R/thresholds.r
 54259c5861ddc4a41474b0f47dc7ff13 *R/vcov.eRm.R
-e7182e7050856376cc68a090c3fc962d *R/zzz.R
-17e5211154d381fb70abc1eddbccb9d7 *data/llraDat1.rda
-04e416237eb526a153a793cec2988809 *data/llraDat2.rda
-9346da0bb55f95db5ed71f73d050f2bd *data/llradat3.rda
-ed5e5d66de298a34f143f361ee0722fa *data/lltmdat1.rda
-b66d20feef24d2792b7e496bf30dbb7e *data/lltmdat2.rda
-d064d228919bf4bec3f96f3bdc73309a *data/lpcmdat.rda
-1ae66d8c67c396f15799b2b8defd9447 *data/lrsmdat.rda
-edd14b4be4ee7329f78e2ec1555a4ad4 *data/pcmdat.rda
-3694b4d3076bbbb26f6734c06e923f90 *data/pcmdat2.rda
-257737a046f48862ca6f26fc8f5bc94f *data/raschdat1.rda
-18d2da8d7902349b513004d1342fbd3d *data/raschdat2.rda
-c42bff96b6fd7f98d0b86bd6b058b367 *data/rsmdat.rda
-c58787b71a1f1d3c295a4bfa20737306 *inst/doc/UCML.jpg
-859438dab150c3e7762183e79cb4e6ec *inst/doc/Z.cls
-6c088c475da8ad1998ff6abf0550a717 *inst/doc/eRm.Rnw
-53f32799d25b50133b6ff217357184ae *inst/doc/eRm.pdf
-61d680540f93d252561fa615bd95511b *inst/doc/eRm_object_tree.pdf
-4252672da961d1370a22bc901d68cc5d *inst/doc/eRmvig.bib
-405e1ee0b6273a4d992299ddc0f9233c *inst/doc/index.html.old
-92e903f33d4067a7fbc89fa4e7571c92 *inst/doc/jss.bst
-e97dfac8265ca8a3cbae7ff1d64ac832 *inst/doc/modelhierarchy.pdf
-3ccae9025b31a4c6c2a4a0c38fadcc60 *man/IC.Rd
-346aa4dad160d49be05a16a4f6fae983 *man/LLRA.Rd
-3ff5fc2b6804ccec50788a81ab024ad3 *man/LLTM.Rd
-b2674dd262ed08acf9973e028b6c9062 *man/LPCM.Rd
-2fe4a48d3db49e43f8ca765b3724a23b *man/LRSM.Rd
-21a5eee857676dadca51fce6f58438bb *man/LRtest.Rd
-145b3975b694fcb6040bb7ece98b4962 *man/MLoef.Rd
-07664ccf23e9418d1a3e2ae70572aa82 *man/NPtest.Rd
-49e40371caf75ce2b50dde4fce78fa83 *man/PCM.Rd
-ba1bc663be054dca6f66956e83296d9f *man/RM.Rd
-81a06eda5b851e2551d0c069132bd601 *man/RSM.Rd
-1fa68e025a4ece034a6e9719c58f26e7 *man/Waldtest.Rd
-7a672a0bdbee1d6dfcddd76a77e7f384 *man/anova.llra.Rd
-61d45eca1d3602e0910c562444a5a91a *man/build_W.Rd
-5646ca17db14129c73ecc8e2c059f0d6 *man/collapse_W.Rd
-3df6fa3e7648ed587015d46e61ff43f3 *man/eRm-package.Rd
-e61327803e5e6b7d2d3ccc944e37aabb *man/gofIRT.Rd
-76cea65dee359343eacaf632bda3ea52 *man/itemfit.ppar.Rd
-07fa1d3822aa64ac126dd6ea09fbd896 *man/llra.datprep.Rd
-3b7d3041daf6dff31a863240a7d66c43 *man/llraDat1.Rd
-6c2f252c609fbf7bf5e909c7aaae01f3 *man/llraDat2.Rd
-4e6086c12e5f1af87effae54623069b8 *man/llradat3.Rd
-5858e38d4bd68f93c3eb4961423f2aae *man/person.parameter.Rd
-cbae5267dff3868ef0cd2c382a9968da *man/plotDIF.Rd
-cc89a7415fd66ead9a764e66e190ebb8 *man/plotGR.Rd
-ebc0198383f388c7f118bf004e1ab47a *man/plotICC.Rd
-fb7839cc73bd28943352b8af8601cb87 *man/plotPImap.Rd
-db3395d4738132591b935af5dc7809ac *man/plotPWmap.Rd
-bb2e316fb66fbc62d5d9d86db9ced96a *man/plotTR.Rd
-5c7fab3317a8fcc01c2ff0a6fe1824a9 *man/predict.ppar.Rd
-9e66621272c2cf652b1396428edf94f8 *man/print.eRm.Rd
-d96cb1ddd85fd3b7ecdfe095550dc027 *man/raschdat.Rd
-8cd62fbd653f0036bdc4ff0a97c6c77d *man/sim.2pl.Rd
-91b5aef01c0f9142c7970c6b75727034 *man/sim.locdep.Rd
-b937031cca9d299b55d3932f299983af *man/sim.rasch.Rd
-242e82abb41d92f5926ec9ff2b037e1b *man/sim.xdim.Rd
-77f43c8fcfd4ff3af7f4351419fde0ff *man/stepwiseIt.Rd
-a62ad6ab0c17c4af3ea1de449c0cc388 *man/summary.llra.Rd
-eb6aee99b123cc957f76c938c624f264 *man/thresholds.Rd
+68dade378c849fd1b1e7f539aaf0f1e5 *R/zzz.R
+a7d4704a84d534fba6d79092771475cc *build/partial.rdb
+0ce2d3df887cc603044c2e2274e0fd12 *build/vignette.rds
+460165565169b8896f2b34d3bcb8b384 *data/llraDat1.rda
+6eff759b3773b364f2b80dfc1dfd48c2 *data/llraDat2.rda
+75011d511f7f8ef5815d3dd46e3b01b9 *data/llradat3.rda
+537c5f7c2088103bd5f8686d6f94f264 *data/lltmdat1.rda
+7ff7b2cf0434ac9b852d31afa9d3c83a *data/lltmdat2.rda
+894cc0b106afce23f253109c2e117852 *data/lpcmdat.rda
+c185bc8664388a0ebacd22e8c5f41eeb *data/lrsmdat.rda
+f7d19c424f3b5bd58194485309bd42ae *data/pcmdat.rda
+9688f7fa019623aff7c57829f023abd2 *data/pcmdat2.rda
+9b130e2a2410ceeea044fdddf4b35164 *data/raschdat1.rda
+3f7c47e9bb09efc4a7e54467f902206d *data/raschdat1_RM_fitted.RData
+0b2d2f5d5a9fd117d03476b4a156ce5b *data/raschdat1_RM_lrres2.RData
+5c5328452b86204e7c3b1ad9f90cb525 *data/raschdat1_RM_plotDIF.RData
+09689a9a24c489e5e839e78a14fee7e8 *data/raschdat2.rda
+fdeb1d965b5c4c8a868ab134040d2067 *data/raschdat3.rda
+5e2522617282490a22abb363aa4e2b2a *data/raschdat4.rda
+0d6f1eada2d20675c2fe5fd67333825e *data/rsmdat.rda
+d6ef4432484b6f07a9fb2178baa4a879 *data/xmpl.RData
+5ef84c3ef4febfaac942d321d43d40da *data/xmplbig.RData
+9839b61e6271f99586b5993f1ba4c599 *inst/CITATION
+6ceb3f1fb53de62b6cc4e39f1dbbf7dc *inst/NEWS.Rd
+4d6d70f06d012eb2f1f3d8c2de188a25 *inst/NEWS.pdf
+501fb62b99b337a884bbb8b21edd84c6 *inst/doc/eRm.R
+9f0d0175f42da4e7b4021e9cec1b9146 *inst/doc/eRm.Rnw
+8a270c8ed3b6b4ee7b607bf0bed08eba *inst/doc/eRm.pdf
+bba1ee0605c426e109144ecad35d39be *man/IC.Rd
+1524720959c29130b6eff3641ab62cc3 *man/LLRA.Rd
+4640617a3b1d2bab3ee71f94fbb04c21 *man/LLTM.Rd
+cd6d3227703aae9c394e9e4c16e38830 *man/LPCM.Rd
+f3d7a332c4ff086be9688df3eafb9baf *man/LRSM.Rd
+7983a333b76999438ee200e0144a60af *man/LRtest.Rd
+a2434611522236c5074a56314e93843b *man/MLoef.Rd
+3a4d8d8ecdc13c0fd51a016327d2c84e *man/NPtest.Rd
+11e5535f73e03c32f6df3684e42e4845 *man/PCM.Rd
+e11d28158fbbd3f3ca27e1ddf35b0b02 *man/RM.Rd
+e3b03791bdc647ec37fb54b5276e7d03 *man/RSM.Rd
+8ddd37ae0df397f3a0b31e05eb70a188 *man/RSctr.Rd
+442383d3bd01caef71f789f164cdc8cd *man/RSmpl.Rd
+10cacee291a1b7b0105235a56792e8c9 *man/RaschSampler.Rd
+32089f244daae08d88e750b95dd7ebe7 *man/SepRel.Rd
+c92d8df332fcd8c2f4bfad193d9f5bbf *man/Waldtest.Rd
+de7f86cbe19f2c3537b5373f8abcfdf0 *man/anova.eRm.Rd
+37f5d40e0adc158ed2b99135ed04d033 *man/anova.llra.Rd
+70e29891a48ddf8f4877ca19cdb95a7a *man/build_W.Rd
+526f7588a3558f213e3af11644b02d86 *man/collapse_W.Rd
+496fe6d296fe4b53e31261b68bb8e922 *man/eRm-package.Rd
+0fcd3bce302e222d026d1c5ee809c719 *man/eRm.data.Rd
+0ba205dca37c394b8ae21e0c970b626f *man/gofIRT.Rd
+b1288ed170917aec3479bccfecfbfe7e *man/item_info.Rd
+263c3737c328d83bbf2897015a970294 *man/itemfit.ppar.Rd
+63a16723424c844e2bc2679707a460cd *man/llra.datprep.Rd
+0868cb49e2e4628c40b487f880cf01e0 *man/llraDat1.Rd
+28009d26ad8a5582808e8db439ef61d4 *man/llraDat2.Rd
+53ce8a741a1171341736a6c3341c2d5e *man/llradat3.Rd
+ae3376e960e09f6eb4de696b08236c49 *man/person.parameter.Rd
+00d861e02078d473d0ca39410ee19fdf *man/phi.range.Rd
+b84fb54040dbc7685042dc38a61effe5 *man/plotDIF.Rd
+b290fcb7968aa06e1c70fa6037661854 *man/plotGR.Rd
+f8c987b93d9eff88febf200a62de4219 *man/plotICC.Rd
+2caef0ed82ad9dffea9d68776361f476 *man/plotINFO.Rd
+59d8f2d0f54f67b2749ec07b12a12bcb *man/plotPImap.Rd
+8acef48889c637940a6a6517136bd455 *man/plotPWmap.Rd
+7636949b2ed3aa9df02a8e3625fcb5e1 *man/plotTR.Rd
+6b609f99cb2aa4d4304d1baf00c386af *man/predict.ppar.Rd
+816e1da8b2ee90e6d3e07cb7dbb72682 *man/print.eRm.Rd
+68c458d8e4e4ef7c7b7c473185f78a72 *man/rsampler.Rd
+d1094279d3d2481d8b22a28f25c77f9d *man/rsctrl.Rd
+a5c9818ac52912853fba4e73177e4797 *man/rsextrmat.Rd
+d588c926da237096d48b1be266fbc75c *man/rsextrobj.Rd
+2df433fe6f36358cca9acaa1b59903a0 *man/rstats.Rd
+76df31d3513f7a420bd6cfcc3bd84c6a *man/sim.2pl.Rd
+2ff986362cce0119f3b2f04338c5096f *man/sim.locdep.Rd
+64e2be5183ec749727c7fff9eea70b11 *man/sim.rasch.Rd
+32381ef2cff1049fb6f85b7e301fe0bb *man/sim.xdim.Rd
+02abc88ad7d29b90190f5b0e4fb77096 *man/stepwiseIt.Rd
+3946b80ff77445fac4d9d7cedabfe10f *man/summary.RSctr.Rd
+c8a60360773dd87988a5dc0aa041b79e *man/summary.RSmpl.Rd
+6a3998417cef6ddba5b51ec270c450b1 *man/summary.llra.Rd
+f1da4d2b46f216407e888a605104fe28 *man/test_info.Rd
+f7728e3a39f3ca1dfad30e8be4304864 *man/thresholds.Rd
+34895a0b93ea8ec50341c83fd8138cca *man/xmpl.Rd
+fd59ab36845ad7023f529dbd04660c40 *src/RaschSampler.f90
 d0d66f1a61eae0e016a6e9401d0e5917 *src/components.c
 ce4282b827566f0ef0572ae06eb7bf04 *src/components.h
 b318d7bfff0ba1eccadc86d34e1f0c5f *src/geodist.c
 c44d6148a344eb9e3deb558f5d453d8c *src/geodist.h
+2c08e25aef0443820778321885461ba0 *vignettes/UCML.pdf
+9f0d0175f42da4e7b4021e9cec1b9146 *vignettes/eRm.Rnw
+61d680540f93d252561fa615bd95511b *vignettes/eRm_object_tree.pdf
+8fc81af9e3f5bbbee5393869304292f1 *vignettes/eRmvig.bib
+e97dfac8265ca8a3cbae7ff1d64ac832 *vignettes/modelhierarchy.pdf
diff --git a/NAMESPACE b/NAMESPACE
old mode 100755
new mode 100644
index d205143..89cedf6
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -1,6 +1,11 @@
-useDynLib(eRm)
-import("stats", "graphics")
-importFrom(Matrix, bdiag)
+useDynLib("eRm")
+
+import("stats", "graphics", "methods", "lattice", "grDevices")
+
+importFrom("splines", "interpSpline")
+importFrom("MASS", "mvrnorm")
+importFrom("Matrix", "bdiag")
+
 export(RM)
 export(LLTM)
 export(RSM)
@@ -35,6 +40,11 @@ export(collapse_W)
 export(LLRA)
 export(plotGR)
 export(plotTR)
+export(plotINFO)
+export(item_info)
+export(test_info)
+export(i_info)
+export(SepRel)
 
 S3method(print, eRm)
 S3method(summary, eRm)
@@ -81,14 +91,34 @@ S3method(gofIRT, ppar)
 S3method(predict, ppar)
 S3method(summary, gof)
 S3method(print, gof)
-S3method(print, T1obj)
-S3method(print, T2obj)
-S3method(print, T4obj)
-S3method(print, T7obj)
-S3method(print, T7aobj)
-S3method(print, T10obj)
-S3method(print, T11obj)
+
+S3method("print", Tmdobj)
+S3method("print", Tpbisobj)
+S3method("print", T1obj)
+S3method("print", T1mobj)
+S3method("print", T1lobj)
+S3method("print", T2obj)
+S3method("print", T2mobj)
+S3method("print", T4obj)
+S3method("print", T10obj)
+S3method("print", T11obj)
+
+S3method(anova, eRm)
+S3method(print, eRm_anova)
 S3method(anova, llra)
 S3method(print, llra)
 S3method(print,summary.llra)
 S3method(summary, llra)
+S3method(print, eRm_SepRel)
+S3method(summary, eRm_SepRel)
+
+# from RaschSampler
+export("rsampler")
+export("rstats")
+export("rsctrl")
+export("rsextrobj")
+export("rsextrmat")
+export("phi.range")
+S3method("summary", "RSctr")
+S3method("summary", "RSmpl")
+S3method("summary", "RSmplext")
diff --git a/NEWS b/NEWS
old mode 100755
new mode 100644
index 8e2a01b..c3854bb
--- a/NEWS
+++ b/NEWS
@@ -1,131 +1,273 @@
-Changes in Version 0.14-0
+News for Package 'eRm'
 
-  o new (wrapper) function LLRA for fitting linear logistic
-    models with relaxed assumptions including utilities for
-    preparing data (llra.datprep), setting up (build_W) and
-    modifying (collapse_W) design matrices, comparing llra
-    models (anova) and plotting results (plotTR and plotGR)
+Changes in Version 0.15-6:
 
-  o 'exact' version of the Martin-Loef test for binary items and
-    arbitrary splits added as method to NPtest
+  • 'plotGOF()': added arguments 'x_axis', 'y_axis', 'set_par', and
+    'reset_par' to ease customization of the plot.
 
-  o in plotGOF confidence ellipses can now be drawn for
-    subsets of items, optionally using different colours
+  • Imports functions from default packages, as required by
+    the new CRAN-check.
 
-  o new function plotDIF (by Kathrin Gruber): plots confidence
-    intervals for item parameters estimated separately in
-    subgroups, uses LR objects as input
+Changes in Version 0.15-5:
 
-  o adapted the MLoef function to work with polytomous data
-    and more than two item groups
+  • implemented an 'anova()' method for all models (except LLRAs, which
+    have their own: 'anova.llra'). see '?anova.eRm'
 
-  o error checks in NPtest: (i) 0/full resposes for items
-    meaningless for NPtest, (ii) group in method="T4" must
-    be of type logical, (iii) specifying all items for
-    T4 gives meaningless results.
+  • added a function to compute separation reliability, as proposed in
+    Wright & Stone (1999; see '?SepRel').
 
-  o warning regarding group assignment when using median
-    split removed from LRtest and Waldtest
+  • 'plotINFO()' accepts more arguments via '...' (see '?plotINFO').
 
-  o some modifications in plotPWmap: horizontal plotting,
-    different default plotting symbols, option to change
-    size of plotting symbols
+  • fixed a bug in 'plotPWmap()'.
 
-  o bug in MLoef fixed (now using logs in calculating the
-    person contributions)
+  • fixed a bug in the internal function 'get_item_cats()' related to
+    'NA's (affecting LLRA and item-information functions).
 
-  o eRm now depends on R >= 2.12.0
+  • switched encoding to UTF-8 to avoid problems with diacritics, etc.
+    (e.g., Martin-Löf).
 
-  o Latin1 encoding removed
+  • updated citations.
 
-  o bug in plotICC (always same title) fixed
+  • general improvements.
 
-Changes in Version 0.13-0
+Changes in Version 0.15-4:
 
-  o LLTM, LRSM, and LPCM work now for repeated measurement
-    designs with treatment groups and missing values.
+  • streamlining some functions, updating the vignette, etc.
+
+  • warnings are now treated more consistently and can be suppressed
+    with 'suppressWarnings()'
+
+  • the 'plotGOF()' function was revamped and tidied up in a couple of
+    places, e.g.:
+
+    • the plotting order of elements in a non-interactive call has been
+      changed to put confidence ellipses and lines in the background
+      and text in the foreground.
+
+    • the x- and y-limits are now dynamically computed by default, so
+      that confidence regions and ellipses are inside the plotting
+      region.
+
+    • the leading "'I'" before item numbers has been removed for better
+      legibility.
+
+  • moved 'NEWS' to the new fancy 'NEWS.Rd' file/format
+
+Changes in eRm version 0.15-3:
+
+  • an error in 'test_info()' was fixed.
+
+  • 'eRm' now depends on 'R' >= 3.0.0
+
+Changes in eRm version 0.15-2:
+
+  • an error in 'Tpbis.stat' was fixed.
+
+Changes in eRm version 0.15-1:
+
+  • a bug in 'person.parameter()' has been fixed that caused the
+    estimation to crash in some cases.
+
+  • a bug in 'thresholds()' has been fixed that caused the routine to
+    crash.
+
+Changes in eRm version 0.15-0:
+
+  • the 'RaschSampler' package has been merged into 'eRm' for
+    convenience (still available as a separate package).
+
+Changes in eRm version 0.14-5:
+
+  � the package is now byte-compiled by default.
+
+  � some statistics added to 'NPtest()', 'T7' and 'T7a' removed.
+
+  � fixed a bug in 'plotPWmap()'.
+
+  � fixed the 'mplot' argument in plotting routines.
+
+  � fixed the split-criterion '"all.r"' in 'LRtest()'.
+
+  � deleted all usages of 'data()' in examples, since eRm uses "lazy
+    data."
+
+Changes in eRm version 0.14-4:
+
+  � when calling 'NPtest()', the 'RaschSampler' can now be controlled
+    more specifically ('burn_in', 'step', 'seed').
+
+  � various improvements and bugfixes for LLRA-related functions.
+
+  � person parameter values can be extracted now for all persons using
+    'coef()'.  Additionally, in-/exclusion of extrapolated values (for
+    0 and perfect scores) can be controlled via the argument
+    'extrapolated'.
+
+  � LRtest now computes standard errors ('se = TRUE') by default.
+
+  � plotDIF now plots "difficulties" for all models (formerly,
+    "easiness" parameters were plotted for Rasch models).
 
-  o Rename vignette to 'eRm'.
+Changes in eRm version 0.14-3:
+
+  • minor bug fixed in 'plotGOF()', where on rare occasions confidence
+    ellipses were plotted together with control lines (spotted by Peter
+    Parker)
+
+  • improved labelling in 'plotjointICC()'
+
+Changes in eRm version 0.14-2:
+
+  • warning regarding group assignment when using median or mean split
+    removed from 'MLoef()'
+
+  • modification in 'NPtest()' to split long output lines
+
+  • changed the delimiters of 'plotDIF()' confidence intervals to 'pch
+    = 20' (small bullet).
+
+Changes in eRm version 0.14-1:
+
+  • new experimental functions to calculate and plot item and test
+    information (by Thomas Rusch)
+
+  • bug fixed in the calculation of item and person Infit t and Outfit
+    t (hint from Rainer Alexandrowicz).
+
+  • 'eRm' no longer depends on the 'RaschSampler' package.  However, it
+    must be installed to use 'NPtest()'.
+
+  • changed the delimiters of 'plotDIF()' confidence intervals to 'pch
+    = 20'.
+
+Changes in eRm version 0.14-0:
+
+  • new (wrapper) function 'LLRA()' for fitting linear logistic models
+    with relaxed assumptions, including utilities for preparing data
+    ('llra.datprep()'), setting up ('build_W()') and modifying
+    ('collapse_W()') design matrices, comparing LLRA models ('anova()'),
+    and plotting results ('plotTR()' and 'plotGR()') (by Thomas Rusch).
+
+  • "exact" version of the Martin-Löf test for binary items and
+    arbitrary splits added as method to 'NPtest()'.
+
+  • in 'plotGOF()' confidence ellipses can now be drawn for subsets of
+    items, optionally using different colours
+
+  • new function 'plotDIF()' (by Kathrin Gruber): plots confidence
+    intervals for item parameters estimated separately in subgroups,
+    uses LR objects as input
+
+  • adapted the 'MLoef()' function to work with polytomous data and
+    more than two item groups
+
+  • error checks in 'NPtest()':
+
+   1. items with 0/full responses are meaningless for 'NPtest()',
+
+   2. group in 'method = "T4"' must be of type logical,
+
+   3. specifying all items for T4 gives meaningless results.
+
+  • warning regarding group assignment when using median split removed
+    from 'LRtest()' and 'Waldtest()'.
+
+  • some modifications in 'plotPWmap()': horizontal plotting, different
+    default plotting symbols, option to change size of plotting symbols
+
+  • bug in 'MLoef()' fixed (now using logs in calculating the person
+    contributions)
+
+  • 'eRm' now depends on 'R' >= 2.12.0
+
+  • Latin1 encoding removed
+
+  • bug in 'plotICC()' (always same title) fixed
+
+Changes in eRm version 0.13-0:
+
+  • 'LLTM()', 'LRSM()', and 'LPCM()' now work for repeated measurement
+    designs with treatment groups and missing values.
 
+  • Renamed the vignette to "eRm".
 
-Changes in Version 0.12-2
+Changes in eRm version 0.12-2:
 
-  o new function plotPWmap to plot Bond-and-Fox style
-    pathway maps for the data by Julian Gilbey. Since
-    calculation of the t-statistics requires calculation
-    of the kurtosis of the standardized residuals,
-    according changes to itemfit.ppar, personfit.ppar,
-    pifit.internal, print.ifit, and print.pfit.
+  • new function 'plotPWmap()' (by Julian Gilbey) to plot Bond-and-Fox
+    style pathway maps for the data.  Since calculating the
+    t-statistics requires the kurtosis of the standardized residuals,
+    corresponding changes were made to 'itemfit.ppar()',
+    'personfit.ppar()', 'pifit.internal()', 'print.ifit()', and
+    'print.pfit()'.
 
-  o plotPImap patched by Julian Gilbey: length of item.subset
-    did not match the documentation, warning stars did not all
-    appear, pre-calculated person.parameter data can be passed
-    to the function via pp, mis-ordered items can be coloured.
-    some minor bugs fixed.
+  • 'plotPImap()' patched by Julian Gilbey: the length of 'item.subset'
+    did not match the documentation, warning stars did not all appear,
+    pre-calculated 'person.parameter()' data can be passed to the
+    function via 'pp', and mis-ordered items can be coloured.  Some
+    minor bugs fixed.
 
-  o the optimizer can be changed to optim using fitctrl<-"optim"
-    and reset to nlm (the default) with fitctrl<-"nlm"
+  • the optimizer can be changed to 'optim()' using 'fitctrl <-
+    "optim"' and reset to 'nlm()' (the default) with 'fitctrl <- "nlm"'
 
-  o value of LRtest now countains the list fitobj which contains
-    the model objects according to the subgroups specified by
-    splitcr
+  • the value of 'LRtest()' now contains the list 'fitobj', which holds
+    the model objects for the subgroups specified by 'splitcr'
 
-  o MLoef no longer supports missings values
+  • 'MLoef()' no longer supports missing values
 
-Changes in Version 0.12-1
+Changes in eRm version 0.12-1:
 
-  o function invalid from package gtools integrated into eRm
-    eRm no longer depends on gtools
+  • function 'invalid()' from package 'gtools' integrated into 'eRm';
+    'eRm' no longer depends on 'gtools'.
 
-Changes in Version 0.12-0
+Changes in eRm version 0.12-0:
 
-  o for RM, RSM, and PCM: eta parameters are now
-    diplayed as difficulty parameters
-    print and summary methods changed accordingly
+  • for 'RM()', 'RSM()', and 'PCM()': eta parameters are now displayed
+    as difficulty parameters; 'print()' and 'summary()' methods changed
+    accordingly.
 
-  o new labeling of eta parameters in RM, RSM, and PCM.
-    they now are labeled according to the estimated
-    parameters for items (RM), items + categories (RSM),
-    items x categories (PCM)
+  • new labeling of eta parameters in 'RM()', 'RSM()', and 'PCM()':
+    they are now labeled according to the estimated parameters for
+    items ('RM()'), items + categories ('RSM()'), and items x
+    categories ('PCM()')
 
-  o function MLoef for Martin-Loef-Test added
+  • function 'MLoef()' for the Martin-Löf test added
 
-  o df in personfit and itemfit corrected
+  • 'df' in 'personfit()' and 'itemfit()' corrected
 
-  o the logLik functions now extract the log-likelhood
-    and df into objects of class logLik.eRm and loglik.ppar
-    with elements loglik and df. the corresponding
-    print methods have been modified accordingly.
+  • the 'logLik()' functions now extract the log-likelihood and df into
+    objects of class '"logLik.eRm"' and '"loglik.ppar"' with elements
+    'loglik' and 'df'.  The corresponding print methods have been
+    modified accordingly.
 
-  o function coef.ppar to extract person parameter estimates added
+  • method 'coef.ppar()' to extract person parameter estimates added
 
-  o option for beta parameters added to coef.eRm
+  • option for beta parameters added to 'coef.eRm()'
 
-  o in confint.eRm: default parm = "beta"
+  • in 'confint.eRm()': default 'parm = "beta"'
 
-  o minor modifications in the help file for IC()
+  • minor modifications in the help file for 'IC()'
 
-  o plotPImap: revised rug added, bug concerning item.subset fixed,
-    minor modifications to enhance readability
+  • 'plotPImap()': revised rug added, bug concerning 'item.subset'
+    fixed, minor modifications to enhance readability
 
-  o minor modifications in plotjointICC: allows for main title and colors,
-    option legpos = FALSE suppresses legends, dev.new removed,
-    legend = FALSE produced incorrect labeling
+  • minor modifications in 'plotjointICC()': allows for main title and
+    colors, option 'legpos = FALSE' suppresses legends, 'dev.new()'
+    removed, 'legend = FALSE' produced incorrect labeling
 
-  o minor modifications in plotICC: allows for main title and colors,
-    default coloring with col = NULL instead of NA for compatibility,
-    option legpos = FALSE suppresses legends, mplot is now FALSE if
-    only one item is specified
+  • minor modifications in 'plotICC()': allows for main title and
+    colors, default coloring with 'col = NULL' instead of 'NA' for
+    compatibility, option 'legpos = FALSE' suppresses legends, 'mplot'
+    is now 'FALSE' if only one item is specified
 
-  o plot.ppar: dev.new removed
+  • 'plot.ppar()': 'dev.new()' removed
 
-  o option 'visible' in print.ifit und print.pfit to allow for avoiding
+  � option "visible" in print.ifit und print.pfit to allow for avoiding
     overly long output and for extraction of infit and outfit values
     (maybe changed to a coef method later)
 
-  o strwrap() for NPtest print methods to break long lines
+  • 'strwrap()' for 'NPtest()' print methods to break long lines
 
-  o new methods IC.default and pmat.default for enhanced error messages
+  • new methods 'IC.default()' and 'pmat.default()' for enhanced error
+    messages
 
-  o lazy loading package and datafiles
+  • lazy loading of package and data files
 
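As a quick illustration of the 'SepRel()' addition noted above, a
minimal R sketch (assuming 'eRm' is installed; the 'raschdat1' data
set ships with the package):

    library(eRm)
    res <- RM(raschdat1)          # fit a dichotomous Rasch model
    pp  <- person.parameter(res)  # 'SepRel()' expects a "ppar" object
    SepRel(pp)                    # separation reliability (Wright & Stone, 1999)
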
diff --git a/R/IC.default.R b/R/IC.default.R
old mode 100755
new mode 100644
diff --git a/R/IC.ppar.r b/R/IC.ppar.r
old mode 100755
new mode 100644
diff --git a/R/IC.r b/R/IC.r
old mode 100755
new mode 100644
diff --git a/R/LLRA.R b/R/LLRA.R
old mode 100755
new mode 100644
diff --git a/R/LLTM.R b/R/LLTM.R
old mode 100755
new mode 100644
diff --git a/R/LPCM.R b/R/LPCM.R
old mode 100755
new mode 100644
diff --git a/R/LRSM.R b/R/LRSM.R
old mode 100755
new mode 100644
diff --git a/R/LRtest.R b/R/LRtest.R
old mode 100755
new mode 100644
index cc07687..ef9c0e0
--- a/R/LRtest.R
+++ b/R/LRtest.R
@@ -1,3 +1 @@
-`LRtest` <-
-function(object,splitcr="median",se=FALSE)UseMethod("LRtest")
-
+`LRtest` <- function(object, splitcr = "median", se = TRUE) UseMethod("LRtest")
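
The default for 'se' in the generic has changed from FALSE to TRUE; a
hedged sketch of the effect (continuing the 'raschdat1' example):

    lr  <- LRtest(RM(raschdat1))              # standard errors computed by default
    lr2 <- LRtest(RM(raschdat1), se = FALSE)  # previous behaviour, without SEs
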
diff --git a/R/LRtest.Rm.R b/R/LRtest.Rm.R
old mode 100755
new mode 100644
index 7d62e31..0fcf92c
--- a/R/LRtest.Rm.R
+++ b/R/LRtest.Rm.R
@@ -1,198 +1,218 @@
-`LRtest.Rm` <-
-function(object, splitcr="median", se=FALSE)
-{
-# performs Andersen LR-test
-# object... object of class RM
-# splitcr... splitting criterion for LR-groups. "all.r" corresponds to a complete
-#            raw score split (r=1,...,k-1), "median" to a median raw score split,
-#            "mean" corresponds to the mean raw score split.
-#            optionally also a vector of length n for group split can be submitted.
-# se...whether standard errors should be computed
-
-
-call<-match.call()
-
-spl.gr<-NULL
-
-X.original<-object$X
-if (length(splitcr)>1 && is.character(splitcr)){    # if splitcr is character vector, treated as factor
-   splitcr<-as.factor(splitcr)
-}
-if (is.factor(splitcr)){
-   spl.nam<-deparse(substitute(splitcr))
-   spl.lev<-levels(splitcr)
-   spl.gr<-paste(spl.nam,spl.lev,sep=" ")
-   splitcr<-unclass(splitcr)
-}
-
-numsplit<-is.numeric(splitcr)
-if (any(is.na(object$X))) {
-  if (!numsplit && splitcr=="mean") {                                   #mean split
-    spl.gr<-c("Raw Scores < Mean", "Raw Scores >= Mean")
-    X<-object$X
-    # calculates index for NA groups
-    # from person.parameter.eRm
-      dichX <- ifelse(is.na(X),1,0)
-      strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
-      gmemb <- as.vector(data.matrix(data.frame(strdata)))
-    gindx<-unique(gmemb)
-    rsum.all<-rowSums(X,na.rm=T)
-    grmeans<-tapply(rsum.all,gmemb,mean)      #sorted
-    ngr<-table(gmemb)                         #sorted
-    m.all<-rep(grmeans,ngr)                   #sorted,expanded
-    rsum.all<-rsum.all[order(gmemb)]
-    spl<-ifelse(rsum.all<m.all,1,2)
-    splitcr<-spl
-    object$X<-X[order(gmemb),]
-  }
-  if (!numsplit && splitcr=="median") {                                   #median split
-    spl.gr<-c("Raw Scores <= Median", "Raw Scores > Median")
-    # cat("Warning message: Persons with median raw scores are assigned to the lower raw score group!\n")
-    X<-object$X
-    # calculates index for NA groups
-    # from person.parameter.eRm
-      dichX <- ifelse(is.na(X),1,0)
-      strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
-      gmemb <- as.vector(data.matrix(data.frame(strdata)))
-    gindx<-unique(gmemb)
-    rsum.all<-rowSums(X,na.rm=T)
-    grmed<-tapply(rsum.all,gmemb,median)      #sorted
-    ngr<-table(gmemb)                         #sorted
-    m.all<-rep(grmed,ngr)                     #sorted,expanded
-    rsum.all<-rsum.all[order(gmemb)]
-    spl<-ifelse(rsum.all<=m.all,1,2)
-    splitcr<-spl
-    object$X<-X[order(gmemb),]
-  }
-}
-
-if (!is.numeric(splitcr)) {
-  if (splitcr=="all.r") {                                    #full raw score split
-    rvind <- apply(object$X,1,sum,na.rm=TRUE)                      #person raw scoobject
-    Xlist <- by(object$X,rvind,function(x) x)
-    names(Xlist) <- as.list(sort(unique(rv)))
-    }
-
-  if (splitcr=="median") {                                   #median split
-    spl.gr<-c("Raw Scores <= Median", "Raw Scores > Median")
-    #removed rh 2010-12-17
-    #cat("Warning message: Persons with median raw scores are assigned to the lower raw score group!\n")
-    rv <- apply(object$X,1,sum,na.rm=TRUE)
-    rvsplit <- median(rv)
-    rvind <- rep(0,length(rv))
-    rvind[rv > rvsplit] <- 1                                 #group with highraw scoobject
-    Xlist <- by(object$X,rvind,function(x) x)
-    names(Xlist) <- list("low","high")
-    }
-
-  if (splitcr=="mean") {                                     #mean split
-    spl.gr<-c("Raw Scores < Mean", "Raw Scores >= Mean")
-    rv <- apply(object$X,1,sum,na.rm=TRUE)
-    rvsplit <- mean(rv)
-    rvind <- rep(0,length(rv))
-    rvind[rv > rvsplit] <- 1                                 #group with highraw scoobject
-    Xlist <- by(object$X,rvind,function(x) x)
-    names(Xlist) <- list("low","high")
-    }
-}
-
-if (is.numeric(splitcr)) {                                 #manual raw score split
-  spl.nam<-deparse(substitute(splitcr))
-  if (length(splitcr)!=dim(object$X)[1]){
-    stop("Mismatch between length of split vector and number of persons!")
-  } else {
-    rvind <- splitcr
-    Xlist <- by(object$X,rvind, function(x) x)
-    names(Xlist) <- as.list(sort(unique(splitcr)))
-    if(is.null(spl.gr)){
-      spl.lev<-names(Xlist)
-      spl.gr<-paste(spl.nam,spl.lev,sep=" ")
-    }
-  }
-}
-
-#----------item to be deleted---------------
-del.pos.l <- lapply(Xlist, function(x) {
-                    it.sub <- datcheck.LRtest(x,object$X,object$model)  #items to be removed within subgroup
-                    })
-
-del.pos <- unique(unlist(del.pos.l))
-if ((length(del.pos)) >= (dim(object$X)[2]-1)) {
-  stop("\nNo items with appropriate response patterns left to perform LR-test!\n")
-}
-
-if (length(del.pos) > 0) {
-  warning("\nThe following items were excluded due to inappropriate response patterns within subgroups: ",immediate.=TRUE)
-    cat(colnames(object$X)[del.pos], sep=" ","\n")
-    cat("Full and subgroup models are estimated without these items!\n")
-}
-
-
-if (length(del.pos) > 0) {
-  X.el <- object$X[,-(del.pos)]
-} else {
-  X.el <- object$X
-}
-Xlist.n <- by(X.el,rvind,function(y) y)
-names(Xlist.n) <- names(Xlist)
-if (length(del.pos) > 0) Xlist.n <- c(Xlist.n,list(X.el)) # X.el added since we must refit whole group without del.pos items
-
-if (object$model=="RM") {
-       likpar <- sapply(Xlist.n,function(x) {                       #matrix with loglik and npar for each subgroup
-                               objectg <- RM(x,se=se)
-                               likg <- objectg$loglik
-                               nparg <- length(objectg$etapar)
-                              # betalab <- colnames(objectg$X)
-                               list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta,outobj=objectg)   # rh outobj added
-                               ###list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta)   # rh outobj added
-                               })
-       }
-if (object$model=="PCM") {
-       likpar <- sapply(Xlist.n,function(x) {                       #matrix with loglik and npar for each subgroup
-                               objectg <- PCM(x,se=se)
-                               likg <- objectg$loglik
-                               nparg <- length(objectg$etapar)
-                               list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta,outobj=objectg)   # rh outobj added
-                               ###list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta)   # rh outobj added
-                               })
-       }
-if (object$model=="RSM") {
-       likpar <- sapply(Xlist.n,function(x) {                       #matrix with loglik and npar for each subgroup
-                               objectg <- RSM(x,se=se)
-                               likg <- objectg$loglik
-                               nparg <- length(objectg$etapar)
-                               list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta,outobj=objectg)   # rh outobj added
-                               ###list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta)   # rh outobj added
-                               })
-       }
-
-## extract fitted splitgroup models  # rh 02-05-2010
-fitobj<-likpar[6,1:length(unique(rvind))]
-likpar<-likpar[-6,]
-
-if (length(del.pos) > 0) {                  #re-estimate full model
-  pos <- length(Xlist.n)                    #position of the full model
-  loglik.all <- likpar[1,pos][[1]]          #loglik full model
-  etapar.all <- rep(0,likpar[2,pos])         #etapar full model (filled with 0 for df computation)
-  likpar <- likpar[,-pos]
-  Xlist.n <- Xlist.n[-pos]
-} else {
-  loglik.all <- object$loglik
-  etapar.all <- object$etapar
-}
-
-loglikg <- sum(unlist(likpar[1,]))                    #sum of likelihood value for subgroups
-LR <- 2*(abs(loglikg-loglik.all))                  #LR value
-df = sum(unlist(likpar[2,]))-(length(etapar.all))  #final degrees of freedom
-pvalue <- 1-pchisq(LR,df)                             #pvalue
-
-betalist <- likpar[3,]                                #organizing betalist
-
-
-result <- list(X=X.original, X.list=Xlist.n, model=object$model,LR=LR,
-               df=df, pvalue=pvalue, likgroup=unlist(likpar[1,],use.names=FALSE),
-               betalist=betalist, etalist=likpar[4,],selist=likpar[5,], spl.gr=spl.gr, call=call, fitobj=fitobj)  ## rh fitobj added
-class(result) <- "LR"
-result
-}
-
+`LRtest.Rm` <-
+function(object, splitcr = "median", se = TRUE)
+{
+# performs Andersen LR-test
+# object... object of class RM
+# splitcr... splitting criterion for LR-groups. "all.r" corresponds to a complete
+#            raw score split (r=1,...,k-1), "median" to a median raw score split,
+#            "mean" corresponds to the mean raw score split.
+#            optionally also a vector of length n for group split can be submitted.
+# se...whether standard errors should be computed
+
+
+call<-match.call()
+
+spl.gr<-NULL
+
+X.original<-object$X
+if((length(splitcr) > 1) & is.character(splitcr)){    # if splitcr is character vector, treated as factor
+  splitcr<-as.factor(splitcr)
+}
+if(is.factor(splitcr)){
+   spl.nam<-deparse(substitute(splitcr))
+   spl.lev<-levels(splitcr)
+   spl.gr<-paste(spl.nam,spl.lev,sep=" ")
+   splitcr<-unclass(splitcr)
+}
+
+numsplit<-is.numeric(splitcr)
+if (any(is.na(object$X))) {
+  if (!numsplit && splitcr=="mean") {                                   #mean split
+    spl.gr<-c("Raw Scores < Mean", "Raw Scores >= Mean")
+    X<-object$X
+    # calculates index for NA groups
+    # from person.parameter.eRm
+      dichX <- ifelse(is.na(X),1,0)
+      strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
+      gmemb <- as.vector(data.matrix(data.frame(strdata)))
+    gindx<-unique(gmemb)
+    rsum.all<-rowSums(X,na.rm=T)
+    grmeans<-tapply(rsum.all,gmemb,mean)      #sorted
+    ngr<-table(gmemb)                         #sorted
+    m.all<-rep(grmeans,ngr)                   #sorted,expanded
+    rsum.all<-rsum.all[order(gmemb)]
+    spl<-ifelse(rsum.all<m.all,1,2)
+    splitcr<-spl
+    object$X<-X[order(gmemb),]
+  }
+  if (!numsplit && splitcr=="median") {                                   #median split
+    spl.gr<-c("Raw Scores <= Median", "Raw Scores > Median")
+    # cat("Warning message: Persons with median raw scores are assigned to the lower raw score group!\n")
+    X<-object$X
+    # calculates index for NA groups
+    # from person.parameter.eRm
+      dichX <- ifelse(is.na(X),1,0)
+      strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
+      gmemb <- as.vector(data.matrix(data.frame(strdata)))
+    gindx<-unique(gmemb)
+    rsum.all<-rowSums(X,na.rm=T)
+    grmed<-tapply(rsum.all,gmemb,median)      #sorted
+    ngr<-table(gmemb)                         #sorted
+    m.all<-rep(grmed,ngr)                     #sorted,expanded
+    rsum.all<-rsum.all[order(gmemb)]
+    spl<-ifelse(rsum.all<=m.all,1,2)
+    splitcr<-spl
+    object$X<-X[order(gmemb),]
+  }
+}
+
+if (!is.numeric(splitcr)) {
+  if (splitcr=="all.r") {                               #full raw score split   ### begin MjM 2012-03-18
+    rvind <- rowSums(object$X, na.rm=TRUE)              # person raw scores
+    excl_0_k <- (rvind > 0) & (rvind < sum(apply(object$X, 2, max, na.rm=T)))
+    Xlist <- by(object$X[excl_0_k,], rvind[excl_0_k], function(x) x)
+    names(Xlist) <- as.list(paste("Raw Score =", sort(unique(rvind[excl_0_k]))))
+    spl.gr <- unlist(names(Xlist))
+  }                                                                             ### end MjM 2012-03-18
+
+  if (splitcr=="median") {                                   #median split
+    spl.gr<-c("Raw Scores <= Median", "Raw Scores > Median")
+    #removed rh 2010-12-17
+    #cat("Warning message: Persons with median raw scores are assigned to the lower raw score group!\n")
+    rv <- apply(object$X,1,sum,na.rm=TRUE)
+    rvsplit <- median(rv)
+    rvind <- rep(0,length(rv))
+    rvind[rv > rvsplit] <- 1                                 # group with high raw scores
+    Xlist <- by(object$X,rvind,function(x) x)
+    names(Xlist) <- list("low","high")
+  }
+
+  if (splitcr=="mean") {                                     #mean split
+    spl.gr<-c("Raw Scores < Mean", "Raw Scores >= Mean")
+    rv <- apply(object$X,1,sum,na.rm=TRUE)
+    rvsplit <- mean(rv)
+    rvind <- rep(0,length(rv))
+    rvind[rv > rvsplit] <- 1                                 # group with high raw scores
+    Xlist <- by(object$X,rvind,function(x) x)
+    names(Xlist) <- list("low","high")
+    }
+}
+
+if (is.numeric(splitcr)) {                                 #manual raw score split
+  spl.nam<-deparse(substitute(splitcr))
+  if (length(splitcr)!=dim(object$X)[1]){
+    stop("Mismatch between length of split vector and number of persons!")
+  } else {
+    rvind <- splitcr
+    Xlist <- by(object$X,rvind, function(x) x)
+    names(Xlist) <- as.list(sort(unique(splitcr)))
+    if(is.null(spl.gr)){
+      spl.lev<-names(Xlist)
+      spl.gr<-paste(spl.nam,spl.lev,sep=" ")
+    }
+  }
+}
+
+#----------item to be deleted---------------
+del.pos.l <- lapply(Xlist, function(x) {
+                    it.sub <- datcheck.LRtest(x,object$X,object$model)  #items to be removed within subgroup
+                    })
+
+del.pos <- unique(unlist(del.pos.l))
+if (length(del.pos) >= (ncol(object$X)-1)) {
+  stop("\nNo items with appropriate response patterns left to perform LR-test!\n")
+}
+
+if(length(del.pos) > 0){                                                        ### begin MjM 2013-01-27
+  warning(paste0(
+    "\n", 
+    prettyPaste("The following items were excluded due to inappropriate response patterns within subgroups:"),
+    "\n",
+    paste(colnames(object$X)[del.pos], collapse=" "),
+    "\n\n",
+    prettyPaste("Full and subgroup models are estimated without these items!")
+  ), immediate.=TRUE)
+}                                                                               ### end MjM 2013-01-27
+
+
+if (length(del.pos) > 0) {
+  X.el <- object$X[,-(del.pos)]
+} else {
+  X.el <- object$X
+}
+
+if(ifelse(length(splitcr) == 1, splitcr != "all.r", TRUE)){   ### begin MjM 2012-03-18   # for all cases except "all.r"
+  Xlist.n <- by(X.el, rvind, function(y) y)
+  names(Xlist.n) <- names(Xlist)
+  if (length(del.pos) > 0) Xlist.n <- c(Xlist.n,list(X.el)) # X.el added since we must refit whole group without del.pos items
+} else {
+  Xlist.n <- by(X.el[excl_0_k,], rvind[excl_0_k], function(y) y)
+  names(Xlist.n) <- names(Xlist)
+  Xlist.n <- c(Xlist.n,list(X.el[excl_0_k,])) # X.el added since we must refit whole group without del.pos items
+}                         ### end MjM 2012-03-18
+
+if (object$model=="RM") {
+       likpar <- sapply(Xlist.n,function(x) {                       #matrix with loglik and npar for each subgroup
+                               objectg <- RM(x,se=se)
+                               likg <- objectg$loglik
+                               nparg <- length(objectg$etapar)
+                              # betalab <- colnames(objectg$X)
+                               list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta,outobj=objectg)   # rh outobj added
+                               ###list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta)   # rh outobj added
+                               })
+       }
+if (object$model=="PCM") {
+       likpar <- sapply(Xlist.n,function(x) {                       #matrix with loglik and npar for each subgroup
+                               objectg <- PCM(x,se=se)
+                               likg <- objectg$loglik
+                               nparg <- length(objectg$etapar)
+                               list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta,outobj=objectg)   # rh outobj added
+                               ###list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta)   # rh outobj added
+                               })
+       }
+if (object$model=="RSM") {
+       likpar <- sapply(Xlist.n,function(x) {                       #matrix with loglik and npar for each subgroup
+                               objectg <- RSM(x,se=se)
+                               likg <- objectg$loglik
+                               nparg <- length(objectg$etapar)
+                               list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta,outobj=objectg)   # rh outobj added
+                               ###list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta)   # rh outobj added
+                               })
+       }
+
+## extract fitted splitgroup models  # rh 02-05-2010
+if(ifelse(length(splitcr) == 1, splitcr != "all.r", TRUE)){   ### begin MjM 2012-03-18
+  fitobj <- likpar[6, 1:length(unique(rvind))]
+} else {
+  fitobj <- likpar[6, 1:length(unique(rvind[excl_0_k]))]
+}                         ### end MjM 2012-03-18
+likpar <- likpar[-6,]
+
+if((length(del.pos) > 0) | ifelse(length(splitcr) == 1, splitcr == "all.r", FALSE)) {                  #re-estimate full model   ### MjM 2012-03-18
+  pos <- length(Xlist.n)                    #position of the full model
+  loglik.all <- likpar[1,pos][[1]]          #loglik full model
+  # etapar.all <- rep(0,likpar[2,pos])         #etapar full model (filled with 0 for df computation)
+  etapar.all <- rep(0, unlist(likpar[2,pos]))         #etapar full model (filled with 0 for df computation)
+  likpar <- likpar[,-pos]
+  Xlist.n <- Xlist.n[-pos]
+} else {
+  loglik.all <- object$loglik
+  etapar.all <- object$etapar
+}
+
+loglikg <- sum(unlist(likpar[1,]))                    #sum of likelihood value for subgroups
+LR <- 2*(abs(loglikg-loglik.all))                  #LR value
+df = sum(unlist(likpar[2,]))-(length(etapar.all))  #final degrees of freedom
+pvalue <- 1 - pchisq(LR, df)                             #pvalue
+
+betalist <- likpar[3,]                                #organizing betalist
+
+
+result <- list(X=X.original, X.list=Xlist.n, model=object$model,LR=LR,
+               df=df, pvalue=pvalue, likgroup=unlist(likpar[1,],use.names=FALSE),
+               betalist=betalist, etalist=likpar[4,],selist=likpar[5,], spl.gr=spl.gr, call=call, fitobj=fitobj)  ## rh fitobj added
+class(result) <- "LR"
+
+return(result)
+
+}
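
The reworked '"all.r"' branch above now drops persons with zero or
perfect raw scores before splitting; a hedged usage sketch:

    library(eRm)
    res    <- RM(raschdat1)
    lr_all <- LRtest(res, splitcr = "all.r")  # one subgroup per raw score
    lr_all$spl.gr                             # subgroup labels "Raw Score = r"
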
diff --git a/R/MLoef.R b/R/MLoef.R
old mode 100755
new mode 100644
index 98a1a3b..4469c49
--- a/R/MLoef.R
+++ b/R/MLoef.R
@@ -17,18 +17,20 @@ MLoef <- function(robj, splitcr="median")
     if(splitcr == "median"){
       raw.scores <- colSums(robj$X,na.rm=T)
       numsplit <- as.numeric(raw.scores > median(raw.scores,na.rm=T))
-      if( any(raw.scores == median(raw.scores,na.rm=T)) ){   # Only if one item's raw score == the median, a warning is issued
-        wrning <- which(raw.scores == median(raw.scores,na.rm=T))   # append a warning-slot to the object for print and summary methods
-        cat("Item(s)",paste(names(wrning),collapse=", "),"with raw score equal to the median assigned to the lower raw score group!\n")
-      }
+      ## removed for the time being 2011-09-08 rh
+      #if( any(raw.scores == median(raw.scores,na.rm=T)) ){   # Only if one item's raw score == the median, a warning is issued
+      #  wrning <- which(raw.scores == median(raw.scores,na.rm=T))   # append a warning-slot to the object for print and summary methods
+      #  cat("Item(s)",paste(names(wrning),collapse=", "),"with raw score equal to the median assigned to the lower raw score group!\n")
+      #}
     }
     if(splitcr=="mean"){
       raw.scores <- colSums(robj$X,na.rm=T)
       numsplit <- as.numeric(raw.scores > mean(raw.scores,na.rm=T))
-      if( any(raw.scores == mean(raw.scores,na.rm=T)) ){   # Only if one item's raw score == the mean, a warning is issued
-        wrning <- which(raw.scores == mean(raw.scores,na.rm=T))   # append a warning-slot to the object for print and summary methods
-        cat("Item(s)",paste(names(wrning),collapse=", "),"with raw score equal to the mean assigned to the lower raw score group!\n")
-      }
+      ## removed for the time being 2011-09-08 rh
+      #if( any(raw.scores == mean(raw.scores,na.rm=T)) ){   # Only if one item's raw score == the mean, a warning is issued
+      #  wrning <- which(raw.scores == mean(raw.scores,na.rm=T))   # append a warning-slot to the object for print and summary methods
+      #  cat("Item(s)",paste(names(wrning),collapse=", "),"with raw score equal to the mean assigned to the lower raw score group!\n")
+      #}
     }
   } else {   # check if the submitted split-vector is appropriate
     if(length(splitcr) != ncol(robj$X)) stop("Split vector too long/short.")
@@ -72,12 +74,12 @@ MLoef <- function(robj, splitcr="median")
                 rowSums(M$X, na.rm=T)
               }))
   sub.tabs <- table(sub.tabs)
-  
+
   sub.term <- sub.tabs * (log(sub.tabs) - log(nrow(robj$X)))
   sub.term <- sum(na.omit(as.numeric(sub.term)))
-  
+
   sub.max <- lapply(i.groups, function(g){ sum(apply(robj$X[,g], 2, max)) })
-  
+
   full.tab  <- table(rowSums(robj$X, na.rm=T))
   full.term <- sum(na.omit(as.numeric( full.tab * (log(full.tab) - log(nrow(robj$X))) )))
 
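With the median/mean-split warnings commented out above, items whose
raw score equals the split value are now assigned to the lower group
silently; a minimal sketch:

    library(eRm)
    res <- RM(raschdat1)
    MLoef(res, splitcr = "median")  # Martin-Löf test, no median-tie warning
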
diff --git a/R/NPtest.R b/R/NPtest.R
old mode 100755
new mode 100644
index 94a88d6..0e18649
--- a/R/NPtest.R
+++ b/R/NPtest.R
@@ -1,6 +1,34 @@
 NPtest<-function(obj, n=NULL, method="T1", ...){
+#npt<-function(obj, n=NULL, method="T1", ...){
+#-------------------------------------------------
+# changes: 2011-12-06
+#-------------------------------------------------
+# besides the specifications for the individual
+# statistics (e.g. idx=, stat=, etc.), step=,
+# burn_in=, and seed= can now additionally be
+# passed via ...
+# moreover, with RSinfo=TRUE a summary of the
+# RaschSampler output object is printed
+#-------------------------------------------------
+# changes in the code:
+# - all methods in the switch now have ... in
+#   their argument list
+# - check of the input object
+#-------------------------------------------------
 
-   if (is.matrix(obj) || is.data.frame(obj)){ # input is datamatrix -  RaschSampler object is generated
+
+#   require(RaschSampler)   # removed as RaschSampler is a part of eRm since 0.15-0
+
+   dots<-as.list(substitute(list(...)))[-1]
+   nn<-names(dots)
+   for (i in seq(along=dots)) assign(nn[i],dots[[i]])
+
+   if(!exists("burn_in", inherits = FALSE)) burn_in <- 256
+   if(!("step" %in% nn)) step<-32
+   if(!exists("seed", inherits = FALSE)) seed<-0
+   if(is.null(n)) n <- 500
+
+   if(is.matrix(obj) || is.data.frame(obj)){ # input is datamatrix -  RaschSampler object is generated
       if (!all(obj %in% 0:1)) stop("Data matrix must be binary, NAs not allowed")
       itscor<-colSums(obj) # rh 2011-03-03
       itcol<-(itscor==0|itscor==nrow(obj))
@@ -10,29 +38,40 @@ NPtest<-function(obj, n=NULL, method="T1", ...){
         cat("\n")
         stop("NPtest using these items is meaningless. Delete them first!")
       }
-      if (is.null(n)) n <- 500
-      obj<-rsampler(obj,rsctrl(burn_in=256, n_eff=n, step=32))
+      obj<-rsampler(obj,rsctrl(burn_in=burn_in, n_eff=n, step=step, seed=seed))
+#browser()
+
+   } else if(class(obj)!="RSmpl"){
+        stop("Input object must be data matrix/data frame or output from RaschSampler")
    }
-   switch(method,
-         "T1"=T1(obj),
-         "T2"=T2(obj, ...),
-         "T4"=T4(obj, ...),
-         "T7"=T7(obj, ...),
-         "T7a"=T7a(obj, ...),
-         "T10"=T10(obj, ...),
-         "T11"=T11(obj),
-         "MLoef"=MLoef.x(obj, ...)  ###############################################
-   )
+
+   if(exists("RSinfo", inherits = FALSE)) if(get("RSinfo")) summary(obj)
+
+  switch(method,
+    "T1"    = T1(obj, ...),
+    "T1l"   = T1l(obj, ...),
+    "T1m"   = T1m(obj, ...),
+    "Tmd"   = Tmd(obj, ...),
+    "T2"    = T2(obj, ...),
+    "T2m"   = T2m(obj, ...),
+    "T4"    = T4(obj, ...),
+#    "T7"    = T7(obj, ...),
+#    "T7a"   = T7a(obj, ...),
+    "T10"   = T10(obj, ...),
+    "T11"   = T11(obj, ...),
+    "Tpbis" = Tpbis(obj, ...),
+    "MLoef" = MLoef.x(obj, ...)
+  )
 }
 
-MLoef.x<-function(rsobj, splitcr=NULL){
+MLoef.x<-function(rsobj, splitcr=NULL, ...){
      # user function
      MLexact<-function(X,splitcr){
        rmod<-RM(X)
        LR<-MLoef(rmod,splitcr)$LR
        LR
      }
-     #if(!exists("splitcr")) splitcr="median"
+     #if(!exists("splitcr", inherits = FALSE)) splitcr="median"
      if(is.null(splitcr)) splitcr="median"
      res <- rstats(rsextrobj(rsobj, 2), MLexact, splitcr)
 
@@ -49,7 +88,96 @@ MLoef.x<-function(rsobj, splitcr=NULL){
      class(result)<-"MLobj"
      result
 }
-T1<-function(rsobj){
+
+Tpbis <- function(rsobj, idxt=NULL, idxs=NULL, ...){ # fixed 2013-08-09
+  Tpbis.stat <- function(x){
+    rb <- rowSums(x[, idxs, drop = FALSE])     # all raw scores
+    t  <- x[, idxt]                            # dichotomous item
+    r  <- tapply(rb, t, sum, simplify = FALSE) # subscale raw-score sums by response on the test item; simplify = FALSE to be on the safe side
+    n1 <- sum(t)                               # n_1 = sum of raw scores with t == 1
+    n0 <- sum(1 - t)                           # n_0 = sum of raw scores with t == 0
+    return(n0 * r[[2L]][1L] - n1*r[[1L]][1L])  # n_0 * sum(r_1) - n_1 * sum(r_0)
+  }
+
+  if(is.null(idxs)) stop("No item(s) for subscale specified (use idxs!)")
+  if(is.null(idxt)) stop("No test item for testing against subscale specified (use idxt!)")
+  li1 <- length(idxt)
+  li2 <- length(idxs)
+  k   <- rsobj$k
+  if(li1 > 1L ||li2 >= k || (li1 + li2) > k || any(idxt %in% idxs) || any(c(idxt,idxs) > k)){
+    stop("Subscale and/or test item incorrectly specified.")
+  }
+
+  n_eff <- rsobj$n_eff                   # number of simulated matrices
+  n_tot <- rsobj$n_tot                   # total number of matrices (incl. the observed one)
+
+  res     <- rstats(rsobj, Tpbis.stat)              # calculates statistic for each matrix
+  corrvec <- do.call(cbind, lapply(res, as.vector)) # converts result list to matrix
+
+  prop <- sum(corrvec[2L:(n_tot)] <= corrvec[1L]) / n_eff   # proportion of sampled T(A_s) <= observed T(A_0)
+
+  # Tpbisobj
+  result <- list("n_eff"    = n_eff,
+                 "prop"     = prop,
+                 "idxt"     = idxt,
+                 "idxs"     = idxs,
+                 "Tpbisvec" = corrvec)
+  class(result)<-"Tpbisobj"
+  return(result)
+}
+
+Tmd<-function(rsobj, idx1=NULL, idx2=NULL, ...){
+     Tmd.stat<-function(x){
+        r1<-rowSums(x[,idx1, drop=FALSE])
+        r2<-rowSums(x[,idx2, drop=FALSE])
+        corr<-cor(r1,r2)
+        corr
+     }
+
+     if(is.null(idx1))
+         stop("No item(s) for subscale 1 specified (use idx1!)")
+     if(is.null(idx2))
+         stop("No item(s) for subscale 2 specified (use idx2!)")
+     li1<-length(idx1)
+     li2<-length(idx2)
+     k<-rsobj$k
+     if(li1>=k ||li2>=k || li1+li2>k || any(idx1 %in% idx2))
+         stop("Subscale(s) incorrectly specified.")
+
+     n_eff<-rsobj$n_eff                         # number of simulated matrices
+     n_tot<-rsobj$n_tot                         # total number of matrices (incl. the observed one)
+
+     res<-rstats(rsobj,Tmd.stat)               # calculates statistic for each matrix
+     corrvec<-do.call(cbind, lapply(res,as.vector)) # converts result list to matrix
+
+     prop<-sum(corrvec[2:(n_tot)]<=corrvec[1])/n_eff
+
+     result<-list(n_eff=n_eff, prop=prop, idx1=idx1, idx2=idx2, Tmdvec=corrvec)   # Tmdobj
+     class(result)<-"Tmdobj"
+     result
+}
+
+
+T1m<-function(rsobj, ...){
+     T1mstat<-function(x){      # calculates statistic T1m
+        unlist(lapply(1:(k-1),function(i) lapply((i+1):k, function(j) sum(x[,i]==x[,j]))))
+     }
+     n_eff<-rsobj$n_eff                         # number of simulated matrices
+     n_tot<-rsobj$n_tot                         # total number of matrices (incl. the observed one)
+     k<-rsobj$k                                 # number of columns of matrices
+
+     res<-rstats(rsobj,T1mstat)                  # calculates statistic for each matrix
+
+     res<-do.call(cbind, lapply(res,as.vector)) # converts result list to matrix
+     T1mvec<-apply(res, 1, function(x) sum(x[2:(n_tot)]<=x[1])/n_eff)
+     T1mmat<-matrix(,k,k)
+     T1mmat[lower.tri(T1mmat)] <- T1mvec           # lower triangular matrix of p-values
+     result<-list(n_eff=n_eff, prop=T1mvec, T1mmat=T1mmat) # T1mobj
+     class(result)<-"T1mobj"
+     result
+}
+
+T1<-function(rsobj, ...){
      T1stat<-function(x){      # calculates statistic T1
         unlist(lapply(1:(k-1),function(i) lapply((i+1):k, function(j) sum(x[,i]==x[,j]))))
      }
@@ -68,7 +196,25 @@ T1<-function(rsobj){
      result
 }
 
-T2<-function(rsobj,idx=NULL,stat="var"){
+T1l<-function(rsobj, ...){
+     T1lstat<-function(x){      # calculates statistic T1l
+        unlist(lapply(1:(k-1),function(i) lapply((i+1):k, function(j) sum(x[,i] & x[,j]))))
+     }
+     n_eff<-rsobj$n_eff                         # number of simulated matrices
+     n_tot<-rsobj$n_tot                         # total number of matrices (incl. the observed one)
+     k<-rsobj$k                                 # number of columns of matrices
+
+     res<-rstats(rsobj,T1lstat)                  # calculates statistic for each matrix
+
+     res<-do.call(cbind, lapply(res,as.vector)) # converts result list to matrix
+     T1lvec<-apply(res, 1, function(x) sum(x[2:(n_tot)]>=x[1])/n_eff)
+     T1lmat<-matrix(,k,k)
+     T1lmat[lower.tri(T1lmat)] <- T1lvec           # lower triangular matrix of p-values
+     result<-list(n_eff=n_eff, prop=T1lvec, T1lmat=T1lmat) # T1lobj
+     class(result)<-"T1lobj"
+     result
+}
+T2<-function(rsobj,idx=NULL,stat="var", ...){
 
      T2.Var.stat<-function(x){       # calculates statistic T2
         var(rowSums(x[,idx, drop=FALSE]))
@@ -102,8 +248,42 @@ T2<-function(rsobj,idx=NULL,stat="var"){
      result
 }
 
+T2m<-function(rsobj,idx=NULL,stat="var", ...){
+
+     T2m.Var.stat<-function(x){       # calculates statistic T2m
+        var(rowSums(x[,idx, drop=FALSE]))
+     }
+     T2m.MAD1.stat<-function(x){       # calculates statistic T2m
+        y<-rowSums(x[,idx, drop=FALSE])           # mean absolute deviation
+        mean(abs(y-mean(y)))
+     }
+     T2m.MAD2.stat<-function(x){       # calculates statistic T2m
+        mad(rowSums(x[,idx, drop=FALSE]),constant=1) # unscaled median absolute deviation
+     }
+     T2m.Range.stat<-function(x){     # calculates statistic T2m
+        diff(range(rowSums(x[,idx, drop=FALSE])))
+     }
+     n<-rsobj$n
+     n_eff<-rsobj$n_eff
+     k<-rsobj$k                      # number of columns of matrices
+     if(is.null(idx))
+         stop("No item(s) for subscale specified (use idx!)")
+     res<-switch(stat,
+          "var"=rstats(rsobj,T2m.Var.stat),
+          "mad1"=rstats(rsobj,T2m.MAD1.stat),
+          "mad2"=rstats(rsobj,T2m.MAD2.stat),
+          "range"=rstats(rsobj,T2m.Range.stat),
+          stop("stat must be one of \"var\", \"mad1\", \"mad2\", \"range\"")
+     )
+     res<-unlist(res)
+     prop<-sum(res[2:(n_eff+1)]<=res[1])/n_eff
+     result<-list(n_eff=n_eff, prop=prop, idx=idx, stat=stat, T2mvec=res) # T2mobj
+     class(result)<-"T2mobj"
+     result
+}
+
 
-T4<-function(rsobj,idx=NULL,group=NULL,alternative="high"){
+T4<-function(rsobj,idx=NULL,group=NULL,alternative="high", ...){
 
      T4.stat<-function(x){      # calculates statistic T4
         sign*sum(rowSums(x[gr,idx,drop=FALSE]))
@@ -136,90 +316,90 @@ T4<-function(rsobj,idx=NULL,group=NULL,alternative="high"){
      class(result)<-"T4obj"
      result
 }
+# removed in version 0.14-5
+#T7<-function(rsobj,idx=NULL, ...){
+#     T7.stat<-function(x){      # calculates statistic T7
+#        calcT7<-function(i,j){  # calculates sum for all items in subscale
+#          if(sitscor[i]>sitscor[j]){
+#              sum(submat[,j]>submat[,i])   #
+#              # t<-table(submat[,i],submat[,j])    # odds ratio gives the same result
+#              # OR<-t[1]*t[4]/(t[2]*t[3])
+#              # 1/OR
+#          } else
+#              NA
+#        }
+#        submat<-x[,idx]
+#        submat<-submat[,order(itscor,decreasing=TRUE)]
+#        RET<-unlist(lapply(1:(m-1), function(i) lapply((i+1):m, function(j) calcT7(i,j))))
+#        RET
+#     }
+#
+#     n_eff<-rsobj$n_eff                         # number of simulated matrices
+#     n_tot<-rsobj$n_tot                         # number of all matrices
+#     k<-rsobj$k                                 # number of items
+#     if(is.null(idx))
+#         stop("No items for subscale specified (use idx!)")
+#     else if (length(idx)<2)
+#         stop("At least 2 items have to be specified with idx!")
+#     submat<-rsextrmat(rsobj,1)[,idx]
+#     itscor<-colSums(submat)
+#     names(itscor)<-colnames(submat)<-idx
+#
+#     submat<-submat[,order(itscor,decreasing=TRUE)]
+#     sitscor<-sort(itscor,decreasing=TRUE)      # sorted itemscore
+#     m<-length(itscor)
+#
+#     resList<-rstats(rsobj,T7.stat)
+#     res<-sapply(resList,sum,na.rm=TRUE)
+#     prop<-sum(res[2:(n_eff+1)]>=res[1])/n_eff
+#     result<-list(n_eff=n_eff, prop=prop, itscor=itscor, T7vec=res)   # T7obj
+#     class(result)<-"T7obj"
+#     result
+#}
+#T7a<-function(rsobj,idx=NULL, ...){
+#     T7a.stat<-function(x){      # calculates statistic T7a
+#        calcT7a<-function(i,j){  # calculates sum for single Itempair
+#          if(sitscor[i]>sitscor[j]){
+#              sum(submat[,j]>submat[,i])   #
+#              # t<-table(submat[,i],submat[,j])    # odds ratio gives the same result
+#              # OR<-t[1]*t[4]/(t[2]*t[3])
+#              # 1/OR
+#          } else
+#              NA
+#        }
+#        submat<-x[,idx]
+#        submat<-submat[,order(itscor,decreasing=TRUE)]
+#        RET<-unlist(lapply(1:(m-1), function(i) lapply((i+1):m, function(j) calcT7a(i,j))))
+#        RET
+#     }
+#
+#     n_eff<-rsobj$n_eff                         # number of simulated matrices
+#     n_tot<-rsobj$n_tot                         # number of all matrices
+#     k<-rsobj$k                                 # number of items
+#     if(is.null(idx))
+#         stop("No items for subscale specified (use idx!)")
+#     else if (length(idx)<2)
+#         stop("At least 2 items have to be specified with idx!")
+#     submat<-rsextrmat(rsobj,1)[,idx]
+#     itscor<-colSums(submat)
+#     names(itscor)<-colnames(submat)<-idx
+#     submat<-submat[,order(itscor,decreasing=TRUE)]
+#     sitscor<-sort(itscor,decreasing=TRUE)      # sorted itemscore
+#     m<-length(itscor)
+#
+#     res<-rstats(rsobj,T7a.stat)
+#     res<-do.call(cbind, lapply(res,as.vector)) # converts result list to matrix
+#     T7avec<-apply(res, 1, function(x) sum(x[2:(n_tot)]>=x[1])/n_eff)
+#     T7anam<-NULL
+#     for (i in 1:(m-1)) for(j in (i+1):m )
+#          T7anam<-c(T7anam, paste("(",names(sitscor[i]),">",names(sitscor[j]),")",sep="",collapse=""))
+#     names(T7avec)<-T7anam
+#     result<-list(n_eff=n_eff, prop=T7avec,itscor=itscor)    # T7aobj
+#     class(result)<-"T7aobj"
+#     result
+#}
 
-T7<-function(rsobj,idx=NULL){
-     T7.stat<-function(x){      # calculates statistic T7
-        calcT7<-function(i,j){  # calculates sum for all items in subscale
-          if(sitscor[i]>sitscor[j]){
-              sum(submat[,j]>submat[,i])   #
-              # t<-table(submat[,i],submat[,j])    # odds ratio gives the same result
-              # OR<-t[1]*t[4]/(t[2]*t[3])
-              # 1/OR
-          } else
-              NA
-        }
-        submat<-x[,idx]
-        submat<-submat[,order(itscor,decreasing=TRUE)]
-        RET<-unlist(lapply(1:(m-1), function(i) lapply((i+1):m, function(j) calcT7(i,j))))
-        RET
-     }
-
-     n_eff<-rsobj$n_eff                         # number of simulated matrices
-     n_tot<-rsobj$n_tot                         # number of all matrices
-     k<-rsobj$k                                 # number of items
-     if(is.null(idx))
-         stop("No items for subscale specified (use idx!)")
-     else if (length(idx)<2)
-         stop("At least 2 items have to be specified with idx!")
-     submat<-rsextrmat(rsobj,1)[,idx]
-     itscor<-colSums(submat)
-     names(itscor)<-colnames(submat)<-idx
-
-     submat<-submat[,order(itscor,decreasing=TRUE)]
-     sitscor<-sort(itscor,decreasing=TRUE)      # sorted itemscore
-     m<-length(itscor)
-
-     resList<-rstats(rsobj,T7.stat)
-     res<-sapply(resList,sum,na.rm=TRUE)
-     prop<-sum(res[2:(n_eff+1)]>=res[1])/n_eff
-     result<-list(n_eff=n_eff, prop=prop, itscor=itscor, T7vec=res)   # T7obj
-     class(result)<-"T7obj"
-     result
-}
-T7a<-function(rsobj,idx=NULL){
-     T7a.stat<-function(x){      # calculates statistic T7a
-        calcT7a<-function(i,j){  # calculates sum for single Itempair
-          if(sitscor[i]>sitscor[j]){
-              sum(submat[,j]>submat[,i])   #
-              # t<-table(submat[,i],submat[,j])    # odds ratio gives the same result
-              # OR<-t[1]*t[4]/(t[2]*t[3])
-              # 1/OR
-          } else
-              NA
-        }
-        submat<-x[,idx]
-        submat<-submat[,order(itscor,decreasing=TRUE)]
-        RET<-unlist(lapply(1:(m-1), function(i) lapply((i+1):m, function(j) calcT7a(i,j))))
-        RET
-     }
-
-     n_eff<-rsobj$n_eff                         # number of simulated matrices
-     n_tot<-rsobj$n_tot                         # number of all matrices
-     k<-rsobj$k                                 # number of items
-     if(is.null(idx))
-         stop("No items for subscale specified (use idx!)")
-     else if (length(idx)<2)
-         stop("At least 2 items have to be specified with idx!")
-     submat<-rsextrmat(rsobj,1)[,idx]
-     itscor<-colSums(submat)
-     names(itscor)<-colnames(submat)<-idx
-     submat<-submat[,order(itscor,decreasing=TRUE)]
-     sitscor<-sort(itscor,decreasing=TRUE)      # sorted itemscore
-     m<-length(itscor)
-
-     res<-rstats(rsobj,T7a.stat)
-     res<-do.call(cbind, lapply(res,as.vector)) # converts result list to matrix
-     T7avec<-apply(res, 1, function(x) sum(x[2:(n_tot)]>=x[1])/n_eff)
-     T7anam<-NULL
-     for (i in 1:(m-1)) for(j in (i+1):m )
-          T7anam<-c(T7anam, paste("(",names(sitscor[i]),">",names(sitscor[j]),")",sep="",collapse=""))
-     names(T7avec)<-T7anam
-     result<-list(n_eff=n_eff, prop=T7avec,itscor=itscor)    # T7aobj
-     class(result)<-"T7aobj"
-     result
-}
-
-T10<-function(rsobj, splitcr="median"){
+T10<-function(rsobj, splitcr="median", ...){
       calc.groups<-function(x,splitcr){
         if (length(splitcr) > 1)  {        # numeric vectors converted to factors
             if (length(splitcr) != nrow(x)) {
@@ -278,7 +458,7 @@ T10<-function(rsobj, splitcr="median"){
 }
 
 
-T11<-function(rsobj){
+T11<-function(rsobj, ...){
       T11.stat<-function(x){
          as.vector(cor(x))
       }
@@ -304,9 +484,30 @@ print.MLobj<-function(x,...){
   cat("'exact' p-value =", x$prop, " (based on", x$n_eff, "sampled matrices)\n\n")
 }
 
+print.Tmdobj<-function(x,...){
+  txt1<-"\nNonparametric RM model test: Tmd (Multidimensionality)"
+  writeLines(strwrap(txt1, exdent=4))
+  cat("    (correlation of subscale person scores)\n")
+  cat("Number of sampled matrices:", x$n_eff,"\n")
+  cat("Subscale 1 - Items:", x$idx1,"\n")
+  cat("Subscale 2 - Items:", x$idx2,"\n")
+  cat("Observed correlation:", x$Tmdvec[1],"\n")
+  cat("one-sided p-value:",x$prop,"\n\n")
+}
+
+print.Tpbisobj<-function(x,...){
+  txt1<-"\nNonparametric RM model test: Tpbis (discrimination)"
+  writeLines(strwrap(txt1, exdent=4))
+  cat("    (pointbiserial correlation of test item vs. subscale)\n")
+  cat("Number of sampled matrices:", x$n_eff,"\n")
+  cat("Test Item:", x$idxt,"\n")
+  cat("Subscale  - Items:", x$idxs,"\n")
+  cat("one-sided p-value (rpbis too low):",x$prop,"\n\n")
+}
+
 print.T1obj<-function(x,alpha=0.05,...){
   txt1<-"\nNonparametric RM model test: T1 (local dependence - increased inter-item correlations)\n"
-  writeLines(strwrap(txt1, exdent=5))
+  writeLines(strwrap(txt1, exdent=4))
   cat("    (counting cases with equal responses on both items)\n")
   cat("Number of sampled matrices:", x$n_eff,"\n")
   cat("Number of Item-Pairs tested:", length(x$prop),"\n")
@@ -320,6 +521,42 @@ print.T1obj<-function(x,alpha=0.05,...){
   else
      cat("none\n\n")
 }
+
+print.T1mobj<-function(x,alpha=0.05,...){
+  txT1m<-"\nNonparametric RM model test: T1m (multidimensionality - reduced inter-item correlations)\n"
+  writeLines(strwrap(txT1m, exdent=4))
+  cat("    (counting cases with equal responses on both items)\n")
+  cat("Number of sampled matrices:", x$n_eff,"\n")
+  cat("Number of Item-Pairs tested:", length(x$prop),"\n")
+  cat("Item-Pairs with one-sided p <", alpha,"\n")
+  T1mmat<-x$T1mmat
+  idx<-which(T1mmat<alpha,arr.ind=TRUE)
+  val<-T1mmat[which(T1mmat<alpha)]
+  names(val)<-apply(idx,1,function(x) paste("(",x[2],",",x[1],")",sep="",collapse=""))
+  if (length(val)>0)
+     print(round(val,digits=3))
+  else
+     cat("none\n\n")
+}
+
+print.T1lobj<-function(x,alpha=0.05,...){
+  txt1<-"\nNonparametric RM model test: T1 (learning - based on item pairs)\n"
+  writeLines(strwrap(txt1, exdent=4))
+  cat("    (counting cases with reponsepattern (1,1) for item pair)\n")
+  cat("Number of sampled matrices:", x$n_eff,"\n")
+  cat("Number of sampled matrices:", x$n_eff,"\n")
+  cat("Number of Item-Pairs tested:", length(x$prop),"\n")
+  cat("Item-Pairs with one-sided p <", alpha,"\n")
+  T1lmat<-x$T1lmat
+  idx<-which(T1lmat<alpha,arr.ind=TRUE)
+  val<-T1lmat[which(T1lmat<alpha)]
+  names(val)<-apply(idx,1,function(x) paste("(",x[2],",",x[1],")",sep="",collapse=""))
+  if (length(val)>0)
+     print(round(val,digits=3))
+  else
+     cat("none\n\n")
+}
+
 print.T2obj<-function(x,...){
   prop<-x$prop
   idx<-x$idx
@@ -331,8 +568,28 @@ print.T2obj<-function(x,...){
      "range"="range"
   )
   txt<-"\nNonparametric RM model test: T2 (local dependence - model deviating subscales)\n"
-  writeLines(strwrap(txt, exdent=5))
-  cat("    (dispersion of subscale person rawscores)\n")
+  writeLines(strwrap(txt, exdent=4))
+  cat("    (increased dispersion of subscale person rawscores)\n")
+  cat("Number of sampled matrices:", x$n_eff,"\n")
+  cat("Items in subscale:", idx,"\n")
+  cat("Statistic:", statnam,"\n")
+  cat("one-sided p-value:",prop,"\n\n")
+#  cat("    (proportion of sampled",statnam," GE observed)\n\n")
+}
+
+print.T2mobj<-function(x,...){
+  prop<-x$prop
+  idx<-x$idx
+  stat<-x$stat
+  statnam<-switch(stat,
+     "var"="variance",
+     "mad1"="mean absolute deviation",
+     "mad2"="median absolute deviation",
+     "range"="range"
+  )
+  txt<-"\nNonparametric RM model test: T2m (multidimensionality - model deviating subscales)\n"
+  writeLines(strwrap(txt, exdent=4))
+  cat("    (decreased dispersion of subscale person rawscores)\n")
   cat("Number of sampled matrices:", x$n_eff,"\n")
   cat("Items in subscale:", idx,"\n")
   cat("Statistic:", statnam,"\n")
@@ -346,7 +603,8 @@ print.T4obj<-function(x,...){
   gr.n<-x$gr.n
   alternative<-x$alternative
   cat("\nNonparametric RM model test: T4 (Group anomalies - DIF)\n")
-  cat("    (counting", alternative, "raw scores on item(s) for specified group)\n")
+  txt<-paste("    (counting", alternative, "raw scores on item(s) for specified group)\n", collapse="")
+  writeLines(strwrap(txt, exdent=4))
   cat("Number of sampled matrices:", x$n_eff,"\n")
   cat("Items in Subscale:", idx,"\n")
   cat("Group:",gr.nam,"  n =",gr.n,"\n")
@@ -354,31 +612,35 @@ print.T4obj<-function(x,...){
 #  cat("    (proportion of sampled raw scores GE observed)\n\n")
 }
 
-print.T7obj<-function(x,...){
-  prop<-x$prop
-  cat("\nNonparametric RM model test: T7 (different discrimination - 2PL)\n")
-  cat("    (counting cases with response 1 on more difficult and 0 on easier item)\n")
-  cat("Number of sampled matrices:", x$n_eff,"\n")
-  cat("Item Scores:\n")
-  print(x$itscor)
-  cat("one-sided p-value:",prop,"\n\n")
-}
-print.T7aobj<-function(x,...){
-  prop<-x$prop
-  cat("\nNonparametric RM model test: T7a (different discrimination - 2PL)\n")
-  cat("    (counting cases with response 1 on more difficult and 0 on easier item)\n")
-  cat("Number of sampled matrices:", x$n_eff,"\n")
-  cat("Item Scores:\n")
-  print(x$itscor)
-  cat("\nItem-Pairs: (i>j ... i easier than j)\n\n")
-  print(round(prop,digits=3))
-}
+# removed in version 0.14-5
+#print.T7obj<-function(x,...){
+#  prop<-x$prop
+#  cat("\nNonparametric RM model test: T7 (different discrimination - 2PL)\n")
+#  txt<-"    (counting cases with response 1 on more difficult and 0 on easier item)\n"
+#  writeLines(strwrap(txt, exdent=4))
+#  cat("Number of sampled matrices:", x$n_eff,"\n")
+#  cat("Item Scores:\n")
+#  print(x$itscor)
+#  cat("one-sided p-value:",prop,"\n\n")
+#}
+#print.T7aobj<-function(x,...){
+#  prop<-x$prop
+#  cat("\nNonparametric RM model test: T7a (different discrimination - 2PL)\n")
+#  txt<-"    (counting cases with response 1 on more difficult and 0 on easier item)\n"
+#  writeLines(strwrap(txt, exdent=4))
+#  cat("Number of sampled matrices:", x$n_eff,"\n")
+#  cat("Item Scores:\n")
+#  print(x$itscor)
+#  cat("\nItem-Pairs: (i>j ... i easier than j)\n\n")
+#  print(round(prop,digits=3))
+#}
 print.T10obj<-function(x,...){
   spl.nam<-x$spl.nam
   prop<-x$prop
   hi.n<-x$hi.n
   low.n<-x$low.n
-  cat("\nNonparametric RM model test: T10 (global test - subgroup-invariance)\n")
+  txt<-"\nNonparametric RM model test: T10 (global test - subgroup-invariance)\n"
+  writeLines(strwrap(txt, exdent=4))
   cat("Number of sampled matrices:", x$n_eff,"\n")
   cat("Split:",spl.nam,"\n")
   cat("Group 1: n = ",hi.n,"  Group 2: n =",low.n,"\n")
@@ -387,9 +649,12 @@ print.T10obj<-function(x,...){
 }
 print.T11obj<-function(x,...){
   prop<-x$prop
-  cat("\nNonparametric RM model test: T11 (global test - local dependence)\n")
-  cat("    (sum of deviations between observed and expected inter-item correlations)\n")
+  txt<-"\nNonparametric RM model test: T11 (global test - local dependence)\n"
+  writeLines(strwrap(txt, exdent=4))
+  txt<-"    (sum of deviations between observed and expected inter-item correlations)\n"
+  writeLines(strwrap(txt, exdent=4))
   cat("Number of sampled matrices:", x$n_eff,"\n")
   cat("one-sided p-value:",prop,"\n\n")
 #  cat("    (proportion of sampled sums GE observed)\n\n")
 }
+
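The print methods above are dispatched on objects returned by NPtest(). A minimal usage sketch, assuming the raschdat1 example data shipped with eRm:

    t11 <- NPtest(as.matrix(raschdat1), n = 500, method = "T11")  # local dependence statistic
    t11                                                           # dispatches to print.T11obj() above
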
diff --git a/R/PCM.R b/R/PCM.R
old mode 100755
new mode 100644
diff --git a/R/RM.R b/R/RM.R
old mode 100755
new mode 100644
diff --git a/R/ROCR_aux.R b/R/ROCR_aux.R
old mode 100755
new mode 100644
diff --git a/R/RSM.R b/R/RSM.R
old mode 100755
new mode 100644
diff --git a/R/SepRel.R b/R/SepRel.R
new file mode 100644
index 0000000..b262e7d
--- /dev/null
+++ b/R/SepRel.R
@@ -0,0 +1,65 @@
+SepRel <- function (pobject){   # requires an object of class "ppar"
+  if(!("ppar" %in% class(pobject))) stop('"pobject" must be of class "ppar"')
+
+  # get person scores from theta table (which is generated by the person.parameter function)
+  PersonScoresFull <- pobject[["theta.table"]][["Person Parameter"]]
+  # remove missing values
+  PersonScores <- PersonScoresFull[complete.cases(PersonScoresFull)]
+  # get standard errors 
+  StandardErrors <- unlist(pobject[["se.theta"]])
+
+  # ==============================
+  # compute separation reliability
+  # ==============================
+
+  # compute the Observed Variance (also known as Total Person Variability or Squared Standard Deviation)
+  SSD.PersonScores <- var(PersonScores)
+
+  # compute the Mean Square Measurement error (also known as Model Error variance)
+  MSE <- sum((StandardErrors)^2) / length(StandardErrors)
+
+  separation.reliability <- (SSD.PersonScores-MSE) / SSD.PersonScores
+
+  # define the outcome of the function "SepRel" as an object of class "eRm_SepRel"
+  result <- structure(
+    list(
+      "sep.rel" = separation.reliability,
+      "SSD.PS" = SSD.PersonScores,
+      "MSE" = MSE
+    ),
+    class="eRm_SepRel"
+  )
+
+  return(result)
+
+}
+
+
+
+print.eRm_SepRel <- function(x, ...){
+  if(interactive()) writeLines("")
+  writeLines(paste0("Separation Reliability: ", round(x$sep.rel, 4L)))
+  if(interactive()) writeLines("")
+}
+
+
+
+summary.eRm_SepRel <- function(object, ...){
+  txt1 <- format(c(
+    "Separation Reliability: ",
+    "Observed Variance: ",
+    "Mean Square Measurement Error: "
+  ), justify = "right")
+  
+  txt2 <- c(
+    " (Squared Standard Deviation)",
+    " (Model Error Variance)"
+  )
+  
+  if(interactive()) writeLines("")
+  writeLines(paste0(txt1[1L], round(object$sep.rel, 4L)))
+  writeLines("")
+  writeLines(paste0(txt1[2L], round(object$SSD.PS, 4L), txt2[1L]))
+  writeLines(paste0(txt1[3L], round(object$MSE, 4L), txt2[2L]))
+  if(interactive()) writeLines("")
+}
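SepRel() computes the person separation reliability as (observed variance - mean square measurement error) / observed variance. A minimal usage sketch, assuming the raschdat1 example data shipped with eRm:

    res <- RM(raschdat1)           # dichotomous Rasch model
    pp  <- person.parameter(res)   # object of class "ppar"
    SepRel(pp)                     # prints the reliability via print.eRm_SepRel()
    summary(SepRel(pp))            # additionally reports SSD and MSE
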
diff --git a/R/Waldtest.R b/R/Waldtest.R
old mode 100755
new mode 100644
diff --git a/R/Waldtest.Rm.R b/R/Waldtest.Rm.R
old mode 100755
new mode 100644
index feb9cdd..74099a6
--- a/R/Waldtest.Rm.R
+++ b/R/Waldtest.Rm.R
@@ -1,170 +1,173 @@
-`Waldtest.Rm` <-
-function(object, splitcr="median")
-{
-# performs item-based Wald test (Fischer & Molenaar, p.90)
-# object... object of class RM
-# splitcr... splitting criterion for LR-groups. "median" to a median raw score split,
-#            "mean" corobjectponds to the mean raw score split.
-#            optionally also a vector of length n for group split can be submitted.
-
-call<-match.call()
-
-spl.gr<-NULL
-
-X.original<-object$X
-if (length(splitcr)>1 && is.character(splitcr)){    # if splitcr is character vector, treated as factor
-   splitcr<-as.factor(splitcr)
-}
-if (is.factor(splitcr)){
-   spl.nam<-deparse(substitute(splitcr))
-   spl.lev<-levels(splitcr)
-   spl.gr<-paste(spl.nam,spl.lev,sep=" ")
-   splitcr<-unclass(splitcr)
-}
+Waldtest.Rm <- function(object, splitcr = "median"){
+  # performs item-based Wald test (Fischer & Molenaar, p.90)
+  # object... object of class RM
+  # splitcr... splitting criterion for LR-groups. "median" corresponds to a median raw score split,
+  #            "mean" corresponds to the mean raw score split.
+  #            optionally, a vector of length n defining the group split can also be submitted.
+
+  call<-match.call()
 
-numsplit<-is.numeric(splitcr)
-if (any(is.na(object$X))) {
-  if (!numsplit && splitcr=="mean") {                                   #mean split
-    spl.gr<-c("Raw Scores < Mean", "Raw Scores >= Mean")
-    X<-object$X
-    # calculates index for NA groups
-    # from person.parameter.eRm
-      dichX <- ifelse(is.na(X),1,0)
-      strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
-      gmemb <- as.vector(data.matrix(data.frame(strdata)))
-    gindx<-unique(gmemb)
-    rsum.all<-rowSums(X,na.rm=TRUE)
-    grmeans<-tapply(rsum.all,gmemb,mean)      #sorted
-    ngr<-table(gmemb)                         #sorted
-    m.all<-rep(grmeans,ngr)                   #sorted,expanded
-    rsum.all<-rsum.all[order(gmemb)]
-    spl<-ifelse(rsum.all<m.all,1,2)
-    splitcr<-spl
-    object$X<-X[order(gmemb),]
+  spl.gr<-NULL
+
+  X.original<-object$X
+  if (length(splitcr)>1 && is.character(splitcr)){    # if splitcr is character vector, treated as factor
+     splitcr<-as.factor(splitcr)
   }
-  if (!numsplit && splitcr=="median") {                                   #median split
-    spl.gr<-c("Raw Scores <= Median", "Raw Scores > Median")
-    #removed rh 2010-12-17
-    #cat("Warning message: Persons with median raw scores are assigned to the lower raw score group!\n")
-    X<-object$X
-    # calculates index for NA groups
-    # from person.parameter.eRm
-      dichX <- ifelse(is.na(X),1,0)
-      strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
-      gmemb <- as.vector(data.matrix(data.frame(strdata)))
-    gindx<-unique(gmemb)
-    rsum.all<-rowSums(X,na.rm=TRUE)
-    grmed<-tapply(rsum.all,gmemb,median)      #sorted
-    ngr<-table(gmemb)                         #sorted
-    m.all<-rep(grmed,ngr)                     #sorted,expanded
-    rsum.all<-rsum.all[order(gmemb)]
-    spl<-ifelse(rsum.all<=m.all,1,2)
-    splitcr<-spl
-    object$X<-X[order(gmemb),]
+  if (is.factor(splitcr)){
+     spl.nam<-deparse(substitute(splitcr))
+     spl.lev<-levels(splitcr)
+     spl.gr<-paste(spl.nam,spl.lev,sep=" ")
+     splitcr<-unclass(splitcr)
   }
-}
-
 
-if (is.numeric(splitcr)){
-  spl.nam<-deparse(substitute(splitcr))
-  if (length(table(splitcr)) > 2) stop("Dichotomous person split required!")
-  if (length(splitcr) != dim(object$X)[1]) {
-    stop("Mismatch between length of split vector and number of persons!")
-  } else {
-    rvind <- splitcr
-    Xlist <- by(object$X,rvind, function(x) x)
-    names(Xlist) <- as.list(sort(unique(splitcr)))
-    if(is.null(spl.gr)){
-      spl.lev<-names(Xlist)
-      spl.gr<-paste(spl.nam,spl.lev,sep=" ")
+  numsplit<-is.numeric(splitcr)
+  if (any(is.na(object$X))) {
+    if (!numsplit && splitcr=="mean") {                                   #mean split
+      spl.gr<-c("Raw Scores < Mean", "Raw Scores >= Mean")
+      X<-object$X
+      # calculates index for NA groups
+      # from person.parameter.eRm
+        dichX <- ifelse(is.na(X),1,0)
+        strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
+        gmemb <- as.vector(data.matrix(data.frame(strdata)))
+      gindx<-unique(gmemb)
+      rsum.all<-rowSums(X,na.rm=TRUE)
+      grmeans<-tapply(rsum.all,gmemb,mean)      #sorted
+      ngr<-table(gmemb)                         #sorted
+      m.all<-rep(grmeans,ngr)                   #sorted,expanded
+      rsum.all<-rsum.all[order(gmemb)]
+      spl<-ifelse(rsum.all<m.all,1,2)
+      splitcr<-spl
+      object$X<-X[order(gmemb),]
     }
-  }}
-
-if (!is.numeric(splitcr)) {
-  if (splitcr=="median") {                                   #median split
-    rv <- apply(object$X,1,sum,na.rm=TRUE)
-    rvsplit <- median(rv)
-    rvind <- rep(0,length(rv))
-    rvind[rv > rvsplit] <- 1                                 #group with high raw score object
-    Xlist <- by(object$X,rvind,function(x) x)
-    names(Xlist) <- list("low","high")
+    if (!numsplit && splitcr=="median") {                                   #median split
+      spl.gr<-c("Raw Scores <= Median", "Raw Scores > Median")
+      #removed rh 2010-12-17
+      #cat("Warning message: Persons with median raw scores are assigned to the lower raw score group!\n")
+      X<-object$X
+      # calculates index for NA groups
+      # from person.parameter.eRm
+        dichX <- ifelse(is.na(X),1,0)
+        strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
+        gmemb <- as.vector(data.matrix(data.frame(strdata)))
+      gindx<-unique(gmemb)
+      rsum.all<-rowSums(X,na.rm=TRUE)
+      grmed<-tapply(rsum.all,gmemb,median)      #sorted
+      ngr<-table(gmemb)                         #sorted
+      m.all<-rep(grmed,ngr)                     #sorted,expanded
+      rsum.all<-rsum.all[order(gmemb)]
+      spl<-ifelse(rsum.all<=m.all,1,2)
+      splitcr<-spl
+      object$X<-X[order(gmemb),]
     }
+  }
 
-  if (splitcr=="mean") {                                     #mean split
-    rv <- apply(object$X,1,sum,na.rm=TRUE)
-    rvsplit <- mean(rv)
-    rvind <- rep(0,length(rv))
-    rvind[rv > rvsplit] <- 1                                 #group with highraw scoobject
-    Xlist <- by(object$X,rvind,function(x) x)
-    names(Xlist) <- list("low","high")
-    }
 
-}
+  if (is.numeric(splitcr)){
+    spl.nam<-deparse(substitute(splitcr))
+    if (length(table(splitcr)) > 2) stop("Dichotomous person split required!")
+    if (length(splitcr) != dim(object$X)[1]) {
+      stop("Mismatch between length of split vector and number of persons!")
+    } else {
+      rvind <- splitcr
+      Xlist <- by(object$X,rvind, function(x) x)
+      names(Xlist) <- as.list(sort(unique(splitcr)))
+      if(is.null(spl.gr)){
+        spl.lev<-names(Xlist)
+        spl.gr<-paste(spl.nam,spl.lev,sep=" ")
+      }
+    }}
+
+  if (!is.numeric(splitcr)) {
+    if (splitcr=="median") {                                   #median split
+      rv <- apply(object$X,1,sum,na.rm=TRUE)
+      rvsplit <- median(rv)
+      rvind <- rep(0,length(rv))
+      rvind[rv > rvsplit] <- 1                                 #group with high raw scores
+      Xlist <- by(object$X,rvind,function(x) x)
+      names(Xlist) <- list("low","high")
+      }
+
+    if (splitcr=="mean") {                                     #mean split
+      rv <- apply(object$X,1,sum,na.rm=TRUE)
+      rvsplit <- mean(rv)
+      rvind <- rep(0,length(rv))
+      rvind[rv > rvsplit] <- 1                                 #group with high raw scores
+      Xlist <- by(object$X,rvind,function(x) x)
+      names(Xlist) <- list("low","high")
+      }
 
-del.pos.l <- lapply(Xlist, function(x) {
-                    it.sub <- datcheck.LRtest(x,object$X,object$model)  #items to be removed within subgroup
-                    })
+  }
 
-del.pos <- unique(unlist(del.pos.l))
-if ((length(del.pos)) >= (dim(object$X)[2]-1)) {
-  stop("\nNo items with appropriate response patterns left to perform Wald-test!\n")
-}
+  del.pos.l <- lapply(Xlist, function(x) {
+                      it.sub <- datcheck.LRtest(x,object$X,object$model)  #items to be removed within subgroup
+                      })
 
-if (length(del.pos) > 0) {
-    warning("\nThe following items were excluded due to inappropriate response patterns within subgroups: ",immediate.=TRUE)
-    cat(colnames(object$X)[del.pos], sep=" ","\n")
-    cat("Subgroup models are estimated without these items!\n")
-}
+  del.pos <- unique(unlist(del.pos.l))
+  if ((length(del.pos)) >= (dim(object$X)[2]-1)) {
+    stop("\nNo items with appropriate response patterns left to perform Wald-test!\n")
+  }
 
-if (length(del.pos) > 0) {
-  X.el <- object$X[,-(del.pos)]
-} else {
-  X.el <- object$X
-}
-Xlist.n <- by(X.el,rvind,function(y) y)
-names(Xlist.n) <- names(Xlist)
-
-if (object$model=="RM") {
-       likpar <- sapply(Xlist.n,function(x) {                       #matrix with loglik and npar for each subgroup
-                               objectg <- RM(x)
-                               parg <- objectg$etapar
-                               seg <- objectg$se.eta
-                               list(parg,seg,objectg$betapar,objectg$se.beta)
-                               })
-       }
-if (object$model=="PCM") {
-       likpar <- sapply(Xlist.n,function(x) {                       #matrix with loglik and npar for each subgroup
-                               objectg <- PCM(x)
-                               parg <- objectg$etapar
-                               seg <- objectg$se.eta
-                               list(parg,seg,objectg$betapar,objectg$se.beta)
-                               })
-       }
-if (object$model=="RSM") {
-       likpar <- sapply(Xlist.n,function(x) {                       #matrix with loglik and npar for each subgroup
-                               objectg <- RSM(x)
-                               parg <- objectg$etapar
-                               seg <- objectg$se.eta
-                               list(parg,seg,objectg$betapar,objectg$se.beta)
-                               })
-       }
-
-
-betapar1 <- likpar[3,][[1]]
-beta1.se <- likpar[4,][[1]]
-betapar2 <- likpar[3,][[2]]
-beta2.se <- likpar[4,][[2]]
-num <- (betapar1-betapar2)
-denom <- sqrt(beta1.se^2 + beta2.se^2)
-W.i <- num/denom
-pvalues <- (1-pnorm(abs(W.i)))*2
-
-coef.table <- cbind(W.i,pvalues)
-dimnames(coef.table) <- list(names(betapar1),c("z-statistic","p-value"))
-
-result <- list(coef.table=coef.table,betapar1=betapar1,se.beta1=beta1.se,betapar2=betapar2,
-se.beta2=beta2.se, spl.gr=spl.gr, call=call, it.ex = del.pos)
-class(result) <- "wald"
-result
-}
+  if(length(del.pos) > 0){
+    warning(paste0(
+      "\n", 
+      prettyPaste("The following items were excluded due to inappropriate response patterns within subgroups:"),
+      "\n",
+      paste(colnames(object$X)[del.pos], collapse=" "),
+      "\n\n",
+      prettyPaste("Subgroup models are estimated without these items!")
+    ), immediate.=TRUE)
+  }
 
+  if(length(del.pos) > 0){
+    X.el <- object$X[,-(del.pos)]
+  } else {
+    X.el <- object$X
+  }
+  Xlist.n <- by(X.el,rvind,function(y) y)
+  names(Xlist.n) <- names(Xlist)
+
+  if (object$model=="RM") {
+         likpar <- sapply(Xlist.n,function(x) {                       #matrix with loglik and npar for each subgroup
+                                 objectg <- RM(x)
+                                 parg <- objectg$etapar
+                                 seg <- objectg$se.eta
+                                 list(parg,seg,objectg$betapar,objectg$se.beta)
+                                 })
+         }
+  if (object$model=="PCM") {
+         likpar <- sapply(Xlist.n,function(x) {                       #matrix with loglik and npar for each subgroup
+                                 objectg <- PCM(x)
+                                 parg <- objectg$etapar
+                                 seg <- objectg$se.eta
+                                 list(parg,seg,objectg$betapar,objectg$se.beta)
+                                 })
+         }
+  if (object$model=="RSM") {
+         likpar <- sapply(Xlist.n,function(x) {                       #matrix with loglik and npar for each subgroup
+                                 objectg <- RSM(x)
+                                 parg <- objectg$etapar
+                                 seg <- objectg$se.eta
+                                 list(parg,seg,objectg$betapar,objectg$se.beta)
+                                 })
+         }
+
+
+  betapar1 <- likpar[3,][[1]]
+  beta1.se <- likpar[4,][[1]]
+  betapar2 <- likpar[3,][[2]]
+  beta2.se <- likpar[4,][[2]]
+  num <- (betapar1-betapar2)
+  denom <- sqrt(beta1.se^2 + beta2.se^2)
+  W.i <- num/denom
+  pvalues <- (1-pnorm(abs(W.i)))*2
+
+  coef.table <- cbind(W.i,pvalues)
+  dimnames(coef.table) <- list(names(betapar1),c("z-statistic","p-value"))
+
+  result <- list(coef.table=coef.table,betapar1=betapar1,se.beta1=beta1.se,betapar2=betapar2,
+  se.beta2=beta2.se, spl.gr=spl.gr, call=call, it.ex = del.pos)
+  class(result) <- "wald"
+  result
+
+}
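For each item the statistic is z_i = (beta_i1 - beta_i2) / sqrt(se_i1^2 + se_i2^2) from the two subgroup fits, with a two-sided normal p-value. A hedged usage sketch, again assuming raschdat1:

    res <- RM(raschdat1)
    Waldtest(res, splitcr = "median")   # z-statistic and p-value per item
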
diff --git a/R/anova.eRm.R b/R/anova.eRm.R
new file mode 100644
index 0000000..6b8aa8d
--- /dev/null
+++ b/R/anova.eRm.R
@@ -0,0 +1,54 @@
+anova.eRm <- function(object, ...){
+  models <- c(list(object), list(...))
+#browser()
+  # exclude LLRAs
+  if(any(unlist(lapply(models, function(m){ "llra" %in% class(m) })))) stop("At least one model is an LLRA; comparison to other models not possible.")
+
+  # check if models' data matrices are identical
+  for(i in seq_along(models)[-1L]){
+    if(!identical(unname(models[[1L]][["X"]]), unname(models[[i]][["X"]]))) stop("Models are not nested.")
+  }
+
+  # sort by number of parameters
+  models <- models[order(unlist(lapply(models, function(m){ m[["npar"]] })), decreasing = TRUE)]
+
+  # extract information
+  calls <- unlist(lapply(models, function(m){ deparse(m[["call"]]) }))
+  LLs   <- unlist(lapply(models, function(m){ m[["loglik"]] }))
+  npar  <- unlist(lapply(models, function(m){ m[["npar"]] }))
+  dev   <- -2*LLs
+  LR    <- abs(c(NA, 2 * (LLs[1L] - LLs[-1L])))
+  df    <- c(NA, npar[1L] - npar[-1L])
+  p     <- pchisq(LR, df, lower.tail = FALSE)
+
+  return(
+    structure(
+      list(
+        calls      = calls,
+        statistics = data.frame(LLs=LLs, dev=dev, npar=npar, LR=LR, df=df, p=p)
+      ),
+    class="eRm_anova")
+  )
+
+}
+
+print.eRm_anova <- function(x, ...){
+  if(interactive()) writeLines("")
+  writeLines("Analysis of Deviances Table\n")
+
+  for(i in seq_along(x[[1L]])){
+    writeLines(strwrap(paste0("Model ", i, ": ", x[[1L]][[i]]), width = getOption("width"), exdent = 4L))
+  }
+  writeLines("")
+
+  x_print <- as.matrix(x[[2L]])
+  rownames(x_print) <- paste0("Model ", seq_along(x[[1L]]))
+  colnames(x_print) <- c("cond. LL", "Deviance", "npar", "LR", "df", "p-value")
+  printCoefmat(as.matrix(x_print), cs.ind=c(1,2), tst.ind=4, has.Pvalue=TRUE, na.print = "")
+
+  writeLines("")
+  message("Note: The models appear to be nested, please check this assumption.")
+  if(interactive()) writeLines("")
+
+  invisible(x)
+}
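anova.eRm() performs likelihood-ratio tests between (presumably) nested eRm models fitted to the same data matrix. A hedged sketch, assuming the lltmdat2 example data shipped with eRm:

    res.rm   <- RM(lltmdat2)                 # unrestricted Rasch model
    res.lltm <- LLTM(lltmdat2, mpoints = 2)  # restricted linear logistic test model
    anova(res.rm, res.lltm)                  # models are reordered by npar internally
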
diff --git a/R/anova.llra.R b/R/anova.llra.R
old mode 100755
new mode 100644
index b815842..b7421eb
--- a/R/anova.llra.R
+++ b/R/anova.llra.R
@@ -1,6 +1,6 @@
 anova.llra <- function(object, ...) UseMethod("anova.llra")
 
-anova.llra.default <- function(object,...)
+anova.llra <- function(object,...)
   {
     objets <- list(object, ...)
     isllra <- unlist(lapply(objets, function(x) "llra" %in% class(x)))
diff --git a/R/build_W.R b/R/build_W.R
old mode 100755
new mode 100644
diff --git a/R/checkdata.R b/R/checkdata.R
old mode 100755
new mode 100644
diff --git a/R/cmlprep.R b/R/cmlprep.R
old mode 100755
new mode 100644
diff --git a/R/coef.eRm.R b/R/coef.eRm.R
old mode 100755
new mode 100644
diff --git a/R/coef.ppar.R b/R/coef.ppar.R
old mode 100755
new mode 100644
index 150977f..e6e9028
--- a/R/coef.ppar.R
+++ b/R/coef.ppar.R
@@ -1,6 +1,7 @@
 `coef.ppar` <-
-function(object, ...) {
+function(object, extrapolated = TRUE, ...) {
    x <- object$theta.table[,1]
+   if(!extrapolated) x[object$theta.table[,3]] <- NA
    names(x) <- rownames(object$theta.table)
    x
 }
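The new extrapolated argument controls whether person parameters that were only obtained by spline inter-/extrapolation (flagged in the third column of theta.table) are returned or masked. A hedged sketch:

    pp <- person.parameter(RM(raschdat1))
    coef(pp)                          # includes inter-/extrapolated values
    coef(pp, extrapolated = FALSE)    # those entries are set to NA
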
diff --git a/R/collapse_W.R b/R/collapse_W.R
old mode 100755
new mode 100644
diff --git a/R/confint.eRm.r b/R/confint.eRm.r
old mode 100755
new mode 100644
diff --git a/R/confint.ppar.r b/R/confint.ppar.r
old mode 100755
new mode 100644
diff --git a/R/confint.threshold.r b/R/confint.threshold.r
old mode 100755
new mode 100644
diff --git a/R/cwdeviance.r b/R/cwdeviance.r
old mode 100755
new mode 100644
diff --git a/R/datcheck.LRtest.r b/R/datcheck.LRtest.r
old mode 100755
new mode 100644
diff --git a/R/datcheck.R b/R/datcheck.R
old mode 100755
new mode 100644
index 925b8bc..6dd92f0
--- a/R/datcheck.R
+++ b/R/datcheck.R
@@ -1,119 +1,143 @@
-`datcheck` <-
-function(X,W,mpoints,groupvec,model)
-{
-  if (is.data.frame(X))  {X <- as.matrix(X)}                  #X as data frame allowed
-
-  if (is.null(colnames(X))) {                                 #determine item names
-    if (mpoints > 1) {
-      mpind <- paste("t",rep(1:mpoints,each=(dim(X)[2]/mpoints),1),sep="") #time points
-      itemind <- paste("I",1:(dim(X)[2]/mpoints),sep="")
+datcheck <- function(X, W, mpoints, groupvec, model){
+  if(is.data.frame(X)){
+    X <- as.matrix(X)   # X as data frame allowed
+  }                  
+
+  if(is.null(colnames(X))){                                 #determine item names
+    if(mpoints > 1){
+      mpind <- paste("t",rep(1:mpoints,each=(ncol(X)/mpoints),1),sep="") #time points
+      itemind <- paste("I",1:(ncol(X)/mpoints),sep="")
       colnames(X) <- paste(itemind,mpind)
     } else {
-      colnames(X) <- paste("I",1:dim(X)[2],sep="")                         #item labels
-  }}
-  if (is.null(rownames(X))) rownames(X) <- paste("P",1:dim(X)[1],sep="")   #person labels
+      colnames(X) <- paste("I",1:ncol(X),sep="")                         #item labels
+    }
+  }
+  if(is.null(rownames(X))) rownames(X) <- paste0("P", seq_len(nrow(X)))   #person labels
 
 #----------------------- check groupvec --------------------------
 
-  if ((length(groupvec) > 1) && (length(groupvec) != dim(X)[1])) {
-    stop("Wrong specification of groupvec!")}
+  if((length(groupvec) > 1L) && (length(groupvec) != nrow(X))){
+    stop("Wrong specification of groupvec!")
+  }
 
-  if (min(groupvec)!=1) {
-    stop("Group specification must start with 1!")}
+  if(min(groupvec) != 1L){
+    stop("Group specification must start with 1!")
+  }
 
-  if (length(unique(groupvec))!=(max(groupvec))) {
-    stop("Group vector is incorrectly specified (perhaps a category is missing)!")} # rh 2011-03-03
+  if(length(unique(groupvec)) != (max(groupvec))){
+    stop("Group vector is incorrectly specified (perhaps a category is missing)!")   # rh 2011-03-03
+  }
 
-  if ((max(groupvec) > 1) && (mpoints==1)) {
-    stop("Model not identifiable! Group contrasts can only be imposed for repeated measurement designs.") }
+  if((max(groupvec) > 1L) && (mpoints == 1)){
+    stop(paste0("\n", prettyPaste("Model not identifiable! Group contrasts can only be imposed for repeated measurement designs.")))
+  }
 
 #  if ((length(groupvec) > 1) && any(is.na(X))) {
 #    stop("Model with repeated measures, group specification and NAs cannot be computed!") }
 
 #----------------------- check X --------------------------------
-allna.vec <- apply(X,2,function(y) {all(is.na(y))})                 #eliminate items with all NA's
-if (any(allna.vec)) {stop("There are items with full NA responses which must be deleted!")}
-
-allna.vec <- apply(X,1,function(y) {all(is.na(y))})                 #eliminate items with all NA's
-if (any(allna.vec)) {stop("There are persons with full NA responses which must be deleted!")}
-
-allna.vec <- apply(X,1,function(y) {sum(is.na(y))})
-if (any(allna.vec == (dim(X)[2]-1))) {stop("Subjects with only 1 valid response must be removed!")}
-
-ri.min <- apply(X,2,min,na.rm=TRUE)                                 #if no 0 responses
-if (any(ri.min > 0)) {
-  cat("Warning message: The following items have no 0-responses: \n")
-  cat(colnames(X)[ri.min>0],sep=", ")
-  cat("\n")
-  cat("Responses are shifted such that lowest category is 0. \n")
-  cat("\n")
-}
-X <- t(apply(X,1,function(y) {y-ri.min}))                           #shift down to 0
-
-ri <- apply(X,2,sum,na.rm=TRUE)                                     #item raw scores
-n.NA <- colSums(apply(X,2,is.na))                                   #number of NA's per column
-maxri <- (dim(X)[1]*(apply(X,2,max,na.rm=TRUE)))-n.NA               #maximum item raw scores with NA
-TFcol <- ((ri==maxri) | (ri==0))
-X.n <- X[,!TFcol]                                                   #new matrix with excluded items
-item.ex <- (1:dim(X)[2])[TFcol]                                     #excluded items
-if (length(item.ex) > 0) {
-  if (mpoints == 1) {
-    cat("Warning message: The following items were excluded due to complete 0/full responses: \n")
-    cat(colnames(X)[item.ex],sep=", ")
-    cat("\n")
-  } else {
-    cat("The following items show complete 0/full responses: \n")
-    cat(colnames(X)[item.ex],sep=", ")
-    cat("\n")
-    stop("Estimation cannot be performed! Delete the correponding items for the other measurement points as well! \n")
-}}
-
-if ((model=="PCM") || (model=="LPCM")) {                         #check if there are missing categories for PCM (for RSM doesn't matter)
-  tablist <- apply(X,2,function(x) list(as.vector(table(x))))
-  tablen <- sapply(tablist,function(x) length(x[[1]]))
-  xmax <- apply(X,2,max)+1
-  indwrong <- which(tablen != xmax)
-  if (length(indwrong) > 0) {
-    cat("The following items do not have responses on each category: \n")
-    cat(colnames(X)[indwrong],sep=", ")
-    cat("\n")
-    cat("Warning message: Estimation may not be feasible. Please check data matrix! \n")
-    cat("\n")
+  allna.vec <- apply(X,2,function(y) {all(is.na(y))})                 #eliminate items with all NA's
+  if (any(allna.vec)) {stop("There are items with full NA responses which must be deleted!")}
+
+  allna.vec <- apply(X,1,function(y) {all(is.na(y))})                 #eliminate items with all NA's
+  if (any(allna.vec)) {stop("There are persons with full NA responses which must be deleted!")}
+
+  allna.vec <- apply(X,1,function(y) {sum(is.na(y))})
+  if (any(allna.vec == (ncol(X)-1L))) {stop("Subjects with only 1 valid response must be removed!")}
+
+  ri.min <- apply(X,2,min,na.rm=TRUE)                                 #if no 0 responses
+  if(any(ri.min > 0)){
+    warning(paste0(
+      "\n",
+      prettyPaste("The following items have no 0-responses:"),
+      "\n",
+      paste(colnames(X)[ri.min > 0], collapse=" "),
+      "\n",
+      prettyPaste("Responses are shifted such that lowest category is 0.")
+    ), call. = FALSE, immediate.=TRUE)
+  }
+  X <- t(apply(X,1,function(y) {y-ri.min}))                           #shift down to 0
+
+  ri <- apply(X,2,sum,na.rm=TRUE)                                     #item raw scores
+  n.NA <- colSums(apply(X,2,is.na))                                   #number of NA's per column
+  maxri <- (dim(X)[1]*(apply(X,2,max,na.rm=TRUE)))-n.NA               #maximum item raw scores with NA
+  TFcol <- ((ri==maxri) | (ri==0))
+  X.n <- X[,!TFcol]                                                   #new matrix with excluded items
+  item.ex <- (seq_len(ncol(X)))[TFcol]                                     #excluded items
+  if(length(item.ex) > 0) {
+    if(mpoints == 1){
+      warning(paste0(
+        "\n",
+        prettyPaste("The following items were excluded due to complete 0/full responses:"),
+        "\n",
+        paste(colnames(X)[item.ex], collapse=" ")
+      ), call. = FALSE, immediate.=TRUE)
+    } else {
+      stop(paste0(
+        "\n",
+        "The following items show complete 0/full responses:",
+        "\n",
+        paste(colnames(X)[item.ex], collapse=" "),
+        "\n",
+        prettyPaste("Estimation cannot be performed! Delete the corresponding items for the other measurement points as well!")
+      ), call. = FALSE)
+    }
+  }
+
+  if ((model=="PCM") || (model=="LPCM")) {                         #check if there are missing categories for PCM (for RSM doesn't matter)
+    tablist <- apply(X,2,function(x) list(as.vector(table(x))))
+    tablen <- sapply(tablist,function(x) length(x[[1]]))
+    xmax <- apply(X,2,max)+1
+    indwrong <- which(tablen != xmax)
+    if(length(indwrong) > 0){
+      warning(paste0(
+        "\n",
+        prettyPaste("The following items do not have responses on each category:"),
+        "\n",
+        paste(colnames(X)[indwrong], collapse=" "),
+        "\n",
+        prettyPaste("Estimation may not be feasible. Please check data matrix!")
+      ), call. = FALSE, immediate.=TRUE)
+    }
   }
-}
 
 
 #-------------------------- ill conditioned for RM and LLTM --------------
-if ((model=="RM") || (model=="LLTM")) {
-  if (length(table(X.n)) != 2) stop("Dichotomous data matrix required!")
-  k.t <- dim(X.n)[2]/mpoints                                    #check for each mpoint separately
-  t.ind <- rep(1:mpoints,1,each=k.t)
-  X.nlv <- split(t(X.n),t.ind)                                  #split X due to mpoints
-  cn.lv <- split(colnames(X.n),t.ind)
-  X.nl <- lapply(X.nlv,matrix,ncol=k.t,byrow=TRUE)
-  for (i in 1:length(X.nl)) colnames(X.nl[[i]]) <- cn.lv[[i]]
-
-  for (l in 1:mpoints) {                                       #check within mpoint
-    X.nll <- X.nl[[l]]
-    k <- ncol(X.nll)
-    adj <- matrix(0,nc=k,nr=k)
-    for (i in 1:k) for(j in 1:k) {
-        adj[i,j]<- 1*any(X.nll[,i]> X.nll[,j],na.rm=TRUE)
+  if ((model=="RM") || (model=="LLTM")) {
+    if (length(table(X.n)) != 2L) stop("Dichotomous data matrix required!")
+    k.t   <- dim(X.n)[2L]/mpoints                                    #check for each mpoint separately
+    t.ind <- rep(seq_len(mpoints), 1L, each=k.t)
+    X.nlv <- split(t(X.n),t.ind)                                  #split X due to mpoints
+    cn.lv <- split(colnames(X.n),t.ind)
+    X.nl  <- lapply(X.nlv,matrix,ncol=k.t,byrow=TRUE)
+    for(i in seq_len(length(X.nl))) colnames(X.nl[[i]]) <- cn.lv[[i]]
+
+    for(l in seq_len(mpoints)){                                       #check within mpoints
+      X.nll <- X.nl[[l]]
+      k <- ncol(X.nll)
+      adj <- matrix(0, ncol=k, nrow=k)
+      for(i in seq_len(k)) for(j in seq_len(k)) {
+        adj[i,j]<- 1*any(X.nll[,i] > X.nll[,j], na.rm = TRUE)
+      }
+      cd  <- component.dist(adj, connected = "strong")
+      cm  <- cd$membership
+      cmp <- max(cm)
+      if(cmp > 1L) {
+        cmtab <- table(cm)
+        maxcm.n <- as.numeric(names(cmtab)[cmtab!=max(cmtab)])
+        suspcol <- (seq_len(length(cm)))[tapply(cm, seq_len(length(cm)), function(x){ any(maxcm.n == x) })]
+        n.suspcol <- colnames(X.nll)[suspcol]
+        stop(paste0(
+          "\n",
+          prettyPaste("Estimation stopped due to ill-conditioned data matrix X! Suspicious items:"),
+          "\n",
+          paste(n.suspcol, collapse=" ")
+        ), call. = FALSE)
+      }
     }
-    cd <- component.dist(adj, connected = "strong")
-    cm <- cd$membership
-    cmp <- max(cm)
-    if(cmp>1) {
-         cmtab <- table(cm)
-         maxcm.n <- as.numeric(names(cmtab)[cmtab!=max(cmtab)])
-         suspcol <- (1:length(cm))[tapply(cm,1:length(cm),function(x) any(maxcm.n==x))]
-         n.suspcol <- colnames(X.nll)[suspcol]
-         cat("Suspicious items:",n.suspcol,"\n")
-         stop("Estimation stopped due to ill-conditioned data matrix X!")
-    }
-}}
+  }
 #----------------------- end ill-conditioned check -------------------------------
 
-list(X=X.n,groupvec=groupvec)
-}
+  return(list(X = X.n, groupvec = groupvec))
 
+}
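These checks surface as warnings or stops when a model is fitted. A hedged illustration of the 0/full-response exclusion, using a made-up matrix:

    X <- rbind(c(1,0,1,0), c(1,1,0,0), c(1,0,0,1))   # hypothetical data; item 1 is solved by everyone
    # RM(X) would warn that item I1 is excluded due to complete 0/full responses
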
diff --git a/R/datprep_LLTM.R b/R/datprep_LLTM.R
old mode 100755
new mode 100644
diff --git a/R/datprep_LPCM.R b/R/datprep_LPCM.R
old mode 100755
new mode 100644
diff --git a/R/datprep_LRSM.R b/R/datprep_LRSM.R
old mode 100755
new mode 100644
diff --git a/R/datprep_PCM.R b/R/datprep_PCM.R
old mode 100755
new mode 100644
diff --git a/R/datprep_RM.R b/R/datprep_RM.R
old mode 100755
new mode 100644
diff --git a/R/datprep_RSM.R b/R/datprep_RSM.R
old mode 100755
new mode 100644
diff --git a/R/fitcml.R b/R/fitcml.R
old mode 100755
new mode 100644
index 3cf9ce0..19c18e3
--- a/R/fitcml.R
+++ b/R/fitcml.R
@@ -1,85 +1,84 @@
-`fitcml` <-
-function (mt_ind,nrlist,x_mt,rtot,W,ngroups,gind,x_mtlist,NAstruc,g_NA,st.err,etaStart,gby)
-{
+fitcml <- function(mt_ind, nrlist, x_mt, rtot, W, ngroups, gind, x_mtlist, NAstruc, g_NA, st.err, etaStart, gby){
 
-#cml function for call in nlm
-cml <- function(eta)
-{
+  #cml function for call in nlm
+  cml <- function(eta){
 
- beta <- as.vector(W%*%eta)
- #FIXME!!! gby??
- beta.list <- split(beta,gind)  #gind index for treatment groups
- beta.list1 <- beta.list
+    beta <- as.vector(W %*% eta)
+    #FIXME!!! gby??
+    beta.list <- split(beta,gind)  #gind index for treatment groups
+    beta.list1 <- beta.list
+   
+    #beta and NAstructure (over Groups): 1st line parameter values, 2nd line which item NA 
+    betaNA <- mapply(function(x,y) {rbind(x,y)},beta.list1,NAstruc,SIMPLIFY=FALSE)  
+   
+    #likelihood term based on gamma functions for each Group x NAgroup combination
+    Lg <- lapply(betaNA, function(betaNAmat) {      
+      beta.vec <- betaNAmat[1,]                #get parameter vector beta
+   
+      #gamma functions for each NAgroup within Groups 
+      Lg.NA <- apply(matrix(betaNAmat[-1,],ncol=length(beta.vec)),1, function(NAvec) {
+        
+        #list of virtual item-category parameters per item
+        beta_list <- as.list(split(beta.vec[NAvec==1],mt_ind[1:(length(beta.vec[NAvec==1]))]))       
+        parlist <- lapply(beta_list,exp)                                #initial epsilon as list
+   
+               #------------------gamma functions----------------------
+               g_iter <- NULL                                                  #computation of the gamma functions
+               K <- length(parlist)
+               for (t in 1:(K-1)) {                                            #building up J1,...,Jt,...,Js
+   
+                 if (t==1) {                                                   #first iteration step
+                   gterm <- c(1,parlist[[t]])                                  #0th element included
+                 }else
+                 {
+                  gterm <- g_iter                                   #gamma previous iteration with 0th el
+                  g_iter <- NULL
+                 }
+   
+                 parvek <- c(1,parlist[[t+1]])                      #eps vector in current iteration with 0th el
+                 h <- length(parvek)                                #dimensions for matrix
+                 mt <- length(gterm)
+                 rtot1 <- h+mt-1                                    #number of possible raw scores (0 included)
+   
+                 gtermvek <- rep(c(gterm,rep(0,h)),h)                          #building up matrix for gamma term
+                 gtermvek <- gtermvek[-((length(gtermvek)-h+1):length(gtermvek))]      #eliminating last h 0's
+                 gmat <- matrix(gtermvek,nrow=rtot1,ncol=h)
+                 emat <- matrix(rep(parvek,rep(rtot1,h)),ncol=h,nrow=rtot1)    #building up matrix for eps term
+                 gmat_new <- gmat*emat                                                 #merge matrices
+                 g_iter <- rowSums(gmat_new)                     #gamma functions in current iteration are rowsums
+               }
+              #----------------- end gamma functions ------------------
+   
+              Lg.NA <- as.vector(g_iter[2:(rtot+1)])     #final gamma vector stored in gamma (without gamma0)
+              return(Lg.NA)
+              })
+    })
+    #----------------- compute likelihood components -----------------------
+    L1 <- sum(mapply(function(x,z) {
+                      x[!is.na(z)]%*%na.exclude(z)
+                      },nrlist,lapply(Lg,log)))        #sum up L1-terms (group-wise)
+   
+    L2 <- sum(mapply("%*%",x_mtlist,beta.list1))        #sum up L2-terms (group-wise)
+    L1-L2                                               #final likelihood value
+  }
+  #----------------- end likelihood -----------------------
 
- #beta and NAstructure (over Groups): 1st line parameter values, 2nd line which item NA 
- betaNA <- mapply(function(x,y) {rbind(x,y)},beta.list1,NAstruc,SIMPLIFY=FALSE)  
+  eta <- etaStart                                     #starting values for eta parameters
 
- #likelihood term based on gamma functions for each Group x NAgroup combination
- Lg <- lapply(betaNA, function(betaNAmat) {      
-   beta.vec <- betaNAmat[1,]                #get parameter vector beta
+  if(!exists("fitctrl")) fitctrl <- "nlm"   # if fitctrl undefined, set it to "nlm"   ### MjM 2014-01-27
 
-   #gamma functions for each NAgroup within Groups 
-   Lg.NA <- apply(matrix(betaNAmat[-1,],ncol=length(beta.vec)),1, function(NAvec) {
-     
-     #list of virtual item-category parameters per item
-     beta_list <- as.list(split(beta.vec[NAvec==1],mt_ind[1:(length(beta.vec[NAvec==1]))]))       
-     parlist <- lapply(beta_list,exp)                                #initial epsilon as list
+  if(fitctrl == "nlm"){
+    suppressWarnings(
+      fit <- nlm(cml, eta, hessian = st.err, iterlim = 5000L)   # NLM optimizer
+    )
+  } else if(fitctrl == "optim"){
+    suppressWarnings( 
+      fit <- optim(eta, cml, method = "BFGS", hessian = TRUE, control = list(maxit = 5000L))
+    )
+    fit$counts<-fit$counts[1]
+    names(fit)<-c("estimate","minimum","iterations","code","message","hessian")
+  } else stop("optimizer misspecified in fitctrl\n")
 
-            #------------------gamma functions----------------------
-            g_iter <- NULL                                                  #computation of the gamma functions
-            K <- length(parlist)
-            for (t in 1:(K-1)) {                                            #building up J1,...,Jt,...,Js
+  fit
 
-              if (t==1) {                                                   #first iteration step
-                gterm <- c(1,parlist[[t]])                                  #0th element included
-              }else
-              {
-               gterm <- g_iter                                   #gamma previous iteration with 0th el
-               g_iter <- NULL
-              }
-
-              parvek <- c(1,parlist[[t+1]])                      #eps vector in current iteration with 0th el
-              h <- length(parvek)                                #dimensions for matrix
-              mt <- length(gterm)
-              rtot1 <- h+mt-1                                    #number of possible raw scores (0 included)
-
-              gtermvek <- rep(c(gterm,rep(0,h)),h)                          #building up matrix for gamma term
-              gtermvek <- gtermvek[-((length(gtermvek)-h+1):length(gtermvek))]      #eliminating last h 0's
-              gmat <- matrix(gtermvek,nrow=rtot1,ncol=h)
-              emat <- matrix(rep(parvek,rep(rtot1,h)),ncol=h,nrow=rtot1)    #building up matrix for eps term
-              gmat_new <- gmat*emat                                                 #merge matrices
-              g_iter <- rowSums(gmat_new)                     #gamma functions in current iteration are rowsums
-            }
-           #----------------- end gamma functions ------------------
-
-           Lg.NA <- as.vector(g_iter[2:(rtot+1)])     #final gamma vector stored in gamma (without gamma0)
-           return(Lg.NA)
-           })
- })
- #----------------- compute likelihood components -----------------------
- L1 <- sum(mapply(function(x,z) {
-                   x[!is.na(z)]%*%na.exclude(z)
-                   },nrlist,lapply(Lg,log)))        #sum up L1-terms (group-wise)
-
- L2 <- sum(mapply("%*%",x_mtlist,beta.list1))        #sum up L2-terms (group-wise)
- L1-L2                                               #final likelihood value
 }
-#----------------- end likelihood -----------------------
-
-eta <- etaStart                                     #starting values for eta parameters
-
-err<-try(exists(fitctrl), TRUE)                # check if fitctrl is defined
-if(class(err)=="try-error") fitctrl <- "nlm"    # if undefined set it to "nlm"
-
-if(fitctrl=="nlm"){
-   options(warn=-1)                                    #turn off warnings for NA/Inf
-   fit <- nlm(cml,eta,hessian=st.err,iterlim=5000)     #NLM optimizer
-} else if(fitctrl=="optim"){
-   options(warn=0)
-   fit <- optim(eta,cml,method="BFGS",hessian=TRUE,control=list(maxit=5000))
-   fit$counts<-fit$counts[1]
-   names(fit)<-c("estimate","minimum","iterations","code","message","hessian")
-} else stop("optimizer misspecified in fitctrl\n")
-fit
-}
-
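The optimizer is selected through a variable fitctrl that is looked up with exists(), so it can be predefined in the calling environment; otherwise nlm is used. A hedged sketch:

    fitctrl <- "optim"    # request optim/BFGS instead of the default nlm
    res <- RM(raschdat1)
    rm(fitctrl)           # fall back to the default for later fits
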
diff --git a/R/invalid.R b/R/invalid.R
old mode 100755
new mode 100644
diff --git a/R/item_info.R b/R/item_info.R
new file mode 100644
index 0000000..e55c50b
--- /dev/null
+++ b/R/item_info.R
@@ -0,0 +1,81 @@
+i_info<-function(hvec,itembeta,theta)
+#calculates information (Samejima, 1969) for an item i as a function of theta
+#
+#@input: hvec...number of categories of item
+#        itembeta...cumulative item parameters
+#        theta ... supporting or sampling points on latent trait
+#@output: a list with
+#         $c.info...matrix of category information in columns for theta (rows)
+#         $i.info...vector of item information at values of theta
+#@author: Thomas Rusch
+#@date:6.12.2013 Happy Nikolaus! 
+#
+  {
+   if(missing(theta)) theta <-seq(-5,5,0.01)
+   p.ih<-function(hvec,itembeta,theta)
+   #Calculates p.ih of given item i and the weird expression in its first derivative
+   #needs categories given (hvec) and the cumulative item parameters of the item (itembeta)
+   #@output: a list with
+   #         $p.ih...matrix of probabilities to fall into category h (columns) for given items as a function of theta (rows).
+   #         $weird...the weird expression from the derivative
+  {
+    beta <- c(0,itembeta) #eRm gives itempar with first fixed to zero
+    numerator<-exp(outer(hvec,theta)+beta) #Numerator
+    tmp<-hvec*numerator
+    weird.exp.num <- apply(tmp,2,sum) #numerator of weird expression in the derivative
+    denom <- apply(numerator,2,sum) #denominator
+    p.ih<-t(numerator)/denom    #categories in column,thetas in rows
+    weird.exp<- weird.exp.num/denom #weird expression in derivative
+    return(list("p.ih"=p.ih,"weird"=weird.exp))
+  }
+
+  ic.derivative<-function(hvec,itembeta,theta)
+   {
+   #Calculates first derivative of p.ih of given item i, needs number of categories and cumulative item parameters
+   #
+   #@output: a list with
+   #         $out...first derivative of p.ih with categories h in columns and theta in rows
+    f1<-p.ih(hvec,itembeta,theta) #to get p.ih and weird expression
+    out <- t(hvec*t(f1$p.ih))-f1$p.ih*f1$weird #first derivative
+    return(out)
+  }
+
+   tmp <- ic.derivative(hvec,itembeta,theta)#call ic.derivative
+   c.info <- tmp^2/p.ih(hvec,itembeta,theta)$p.ih #calculates category info (columns) for all theta(rows)
+   i.info <-apply(c.info,1,sum)#calculates item for all theta(rows)
+   return(list("c.info"=c.info,"i.info"=i.info))
+ }
+
+item_info <- function(ermobject,theta=seq(-5,5,0.01))
+##Calculates information (Samejima, 1969) of all items as a function of the latent trait, theta
+#        ermobject ... object of class eRm
+#        theta ... supporting or sampling points on latent trait
+#@output: a list where each element corresponds to an item and contains
+#         $c.info...matrix of category information in columns for theta (rows)
+#         $i.info...vector of item information at values of theta
+#@author: Thomas Rusch
+#@date:13.6.2011
+#
+{
+   vec.tmp <- get_item_cats(X=ermobject$X,nitems=dim(ermobject$X)[2],grp_n=dim(ermobject$X)[1])
+   betapar <- ermobject$betapar
+   veco <- unlist(lapply(vec.tmp,max))
+   alloc.list<-vector("list",length(veco))
+   hvec.list <- vector("list",length(veco))
+   out.list <- vector("list",length(veco))
+   for (i in 1:length(veco))
+     {
+       alloc.list[[i]] <- rep(i,veco[i])
+       hvec.list[[i]] <- 0:veco[i]
+     }
+   uu<-unlist(alloc.list)
+   itembeta.list <- split(betapar,uu)
+   for (i in 1:length(itembeta.list))
+     {
+      out.list[[i]] <- i_info(hvec.list[[i]],itembeta.list[[i]],theta) #patch
+    }
+   return(out.list)
+ }
+
+#THANK YOU FOR READING THE SOURCE
+
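item_info() evaluates category and item information over a grid of person parameter values. A hedged usage sketch, assuming the pcmdat example data shipped with eRm:

    res   <- PCM(pcmdat)               # polytomous Rasch model
    info  <- item_info(res)            # one list element per item
    theta <- seq(-5, 5, 0.01)          # the default grid used above
    plot(theta, info[[1]]$i.info, type = "l")   # information curve of item 1
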
diff --git a/R/itemfit.R b/R/itemfit.R
old mode 100755
new mode 100644
diff --git a/R/itemfit.ppar.R b/R/itemfit.ppar.R
old mode 100755
new mode 100644
index a1f4ace..1f1e346
--- a/R/itemfit.ppar.R
+++ b/R/itemfit.ppar.R
@@ -31,8 +31,8 @@ function(object)
   qsq.infitMSQ <- colSums(Cmat-Vmat^2, na.rm=TRUE)/isumVmat^2
   q.infitMSQ <- sqrt(qsq.infitMSQ)
 
-  i.outfitZ <- (sqrt(i.outfitMSQ)-1)*(3/q.outfitMSQ)+(q.outfitMSQ/3)
-  i.infitZ <- (sqrt(i.infitMSQ)-1)*(3/q.infitMSQ)+(q.infitMSQ/3)
+  i.outfitZ <- (i.outfitMSQ^(1/3) - 1)*(3/q.outfitMSQ)+(q.outfitMSQ/3) # corr. rh 2011-06-15
+  i.infitZ  <- (i.infitMSQ^(1/3)  - 1)*(3/q.infitMSQ) +(q.infitMSQ/3)  # hint from rainer alexandrowicz
 
   result <- list(i.fit=ifit,i.df=idf,st.res=st.res,i.outfitMSQ=i.outfitMSQ,i.infitMSQ=i.infitMSQ,i.outfitZ=i.outfitZ,i.infitZ=i.infitZ)
 
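The corrected lines standardize the mean-square statistics with the cube-root (Wilson-Hilferty-type) transformation rather than the square root. For example, an outfit MSQ of 1.5 with q = 0.3 gives:

    msq <- 1.5; q <- 0.3
    (msq^(1/3) - 1) * (3/q) + q/3   # approximately 1.55
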
diff --git a/R/labeling.internal.r b/R/labeling.internal.r
old mode 100755
new mode 100644
diff --git a/R/likLR.R b/R/likLR.R
old mode 100755
new mode 100644
index cb002cf..76a7beb
--- a/R/likLR.R
+++ b/R/likLR.R
@@ -1,40 +1,37 @@
-`likLR` <-
-function (X,W,mpoints,Groups,model,st.err,sum0,etaStart)
-{
+likLR <- function(X, W, mpoints, Groups, model, st.err, sum0, etaStart){
 
-if (any(is.na(X))) {
-  dichX <- ifelse(is.na(X),1,0)
-  strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
-  gmemb <- as.vector(data.matrix(data.frame(strdata)))
-} else {
-  gmemb <- rep(1,dim(X)[1])
-}
+  if (any(is.na(X))) {
+    dichX <- ifelse(is.na(X),1,0)
+    strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
+    gmemb <- as.vector(data.matrix(data.frame(strdata)))
+  } else {
+    gmemb <- rep(1,dim(X)[1])
+  }
 
-#data preparation, design matrix generation for various models
-if (model=="RM") { Xprep <- datprep_RM(X,W,sum0)
-} else if (model=="LLTM") { Xprep <- datprep_LLTM(X,W,mpoints,Groups,sum0)
-} else if (model=="RSM") { Xprep <- datprep_RSM(X,W,sum0)
-} else if (model=="PCM") { Xprep <- datprep_PCM(X,W,sum0)
-} else if (model=="LRSM") { Xprep <- datprep_LRSM(X,W,mpoints,Groups,sum0)
-} else if (model=="LPCM")  {Xprep <- datprep_LPCM(X,W,mpoints,Groups,sum0)
-}
+  #data preparation, design matrix generation for various models
+  if (model=="RM") { Xprep <- datprep_RM(X,W,sum0)
+  } else if (model=="LLTM") { Xprep <- datprep_LLTM(X,W,mpoints,Groups,sum0)
+  } else if (model=="RSM") { Xprep <- datprep_RSM(X,W,sum0)
+  } else if (model=="PCM") { Xprep <- datprep_PCM(X,W,sum0)
+  } else if (model=="LRSM") { Xprep <- datprep_LRSM(X,W,mpoints,Groups,sum0)
+  } else if (model=="LPCM")  {Xprep <- datprep_LPCM(X,W,mpoints,Groups,sum0)
+  }
 
-if (any(is.na(etaStart))) etaStart <- rep(0,dim(Xprep$W)[2])       #check starting vector
-if (length(etaStart) != dim(Xprep$W)[2]) stop("Vector with starting values does not match number of parameters!") 
-ng <- max(Groups)
-if ((dim(Xprep$W)[1]) != ((dim(Xprep$X01)[2])*ng)) stop("Mismatch between number of rows (beta's) in W and number of items (categories) in X!")
+  if (any(is.na(etaStart))) etaStart <- rep(0,dim(Xprep$W)[2])       #check starting vector
+  if (length(etaStart) != dim(Xprep$W)[2]) stop("Vector with starting values does not match number of parameters!") 
+  ng <- max(Groups)
+  if ((dim(Xprep$W)[1]) != ((dim(Xprep$X01)[2])*ng)) stop("Mismatch between number of rows (beta's) in W and number of items (categories) in X!")
 
 
-Lprep <- cmlprep(Xprep$X01,Xprep$mt_vek,mpoints,Groups,Xprep$W,gmemb)                   
-parest <- fitcml(Lprep$mt_ind,Lprep$nrlist,Lprep$x_mt,Lprep$rtot,Xprep$W,
-                 max(Groups),gind=Lprep$gind,x_mtlist=Lprep$x_mtlist,
-                 Lprep$NAstruc,g_NA=Lprep$g_NA,st.err,etaStart,gby=Lprep$gby)      
+  Lprep <- cmlprep(Xprep$X01,Xprep$mt_vek,mpoints,Groups,Xprep$W,gmemb)                   
+  parest <- fitcml(Lprep$mt_ind,Lprep$nrlist,Lprep$x_mt,Lprep$rtot,Xprep$W,
+                   max(Groups),gind=Lprep$gind,x_mtlist=Lprep$x_mtlist,
+                   Lprep$NAstruc,g_NA=Lprep$g_NA,st.err,etaStart,gby=Lprep$gby)      
 
-W1 <- Xprep$W
-#rownames(W1) <- NULL
-#colnames(W1) <- paste("eta",1:dim(W1)[2],sep="")
-options(warn=0)
-                         
-list(W=W1,parest=parest,X01=Xprep$X01)                          #returns design matrix and results
-}
+  W1 <- Xprep$W
+  #rownames(W1) <- NULL
+  #colnames(W1) <- paste("eta",1:dim(W1)[2],sep="")
+                           
+  return(list("W" = W1, "parest" = parest, "X01" = Xprep$X01))                          #returns design matrix and results
 
+}
diff --git a/R/llra.datprep.R b/R/llra.datprep.R
old mode 100755
new mode 100644
diff --git a/R/llra.internals.R b/R/llra.internals.R
old mode 100755
new mode 100644
index a9195ca..6ad34d1
--- a/R/llra.internals.R
+++ b/R/llra.internals.R
@@ -1,107 +1,104 @@
 #internal functions
-get_item_cats <- function(X,nitems,grp_n)
-  {
-   # returns list of vectors with length max(categories) for each item;
-   # 1:number categories are the first few entries and the rest is filed with zeros
-   # This later corresponds to the necessary setup in LPCM where the superfluous categories must be set to 0 
-   its <- rep(1:nitems,each=sum(grp_n)) 
-   cats <- lapply(split(X,its),max) #splits the data matrix according to items and finds the maximum category  
-   max.cat <- max(X) #overall maximum category
-   vec.cat <- lapply(cats,function(x) c(1:x,rep(0,max.cat-x)))
-   vec.cat #the ominous list of form c(1:categories,0,0,0) 
-  }
+get_item_cats <- function(X, nitems, grp_n){
+  # returns list of vectors with length max(categories) for each item;
+  # 1:number categories are the first few entries and the rest is filled with zeros
+  # This later corresponds to the necessary setup in LPCM where the superfluous categories must be set to 0
+  its     <- rep(seq_len(nitems), each = sum(grp_n))
+    ###mjm fix 2014-09-24: split() works differently with matrices (column-wise) and data.frames (row-wise), so: as.matrix() to be sure.
+    ###mjm fix 2014-09-24: if NAs in raw data X, results would be NA, so: added na.rm = TRUE to the routine
+  cats    <- lapply(split(as.matrix(X), its), max, na.rm = TRUE) #splits the data matrix according to items and finds the maximum category
+    ###mjm fix 2014-09-24: fix for NAs, see above
+  max.cat <- max(X, na.rm = TRUE) #overall maximum category
+  vec.cat <- lapply(cats, function(x){ c(seq_len(x), rep(0, max.cat-x)) })
+  vec.cat #the ominous list of form c(1:categories, 0, 0, 0)
+}
 
-build_effdes <- function(nitems,mpoints,pplgrps,categos,groupvec)
-  {
-    #builds treatment design structure for W
-    #
-    #mpoints>nitems>treat>catego
-    #build group design
-    tmp1 <- diag(pplgrps)
-    tmp1[pplgrps,pplgrps] <- 0
-    eff.tmp1 <- lapply(categos,function(x)(tmp1%x%x)) #list with categories per item, replicated per group
-    eff.tmp2 <- as.matrix(bdiag(eff.tmp1))  #blockdiagonal with blocks equal to the categories
-    eff.tmp3 <- diag(mpoints-1)%x%eff.tmp2  #blow up to mpoints
-    nuller <- matrix(0,nrow=dim(eff.tmp2)[1],ncol=dim(eff.tmp3)[2]) #baseline (tp=1)
-    gr.bu <- rbind(nuller,eff.tmp3) #combine baseline and effects
-    #labelling of effects
-    names1 <- unique(names(groupvec))
-    #names1 <- paste("G",pplgrps:1,sep="") 
-    names2 <- paste(names1,"I",sep=".")
-    names3 <- paste(names2,rep(1:nitems,each=pplgrps),sep="")
-    names4 <- paste(names3,"t",sep=".")
-    names5 <- paste(names4,rep(2:mpoints,each=pplgrps*nitems),sep="")
-    colnames(gr.bu) <- names5
-    #columns with zeros (baseline group) are removed now
-    rem.0 <- NA
-    for(i in 1:dim(gr.bu)[2]) {rem.0[i] <- all(gr.bu[,i]==0)}
-    gr.bu.red <- gr.bu[,which(rem.0==0)]
-    return(gr.bu.red)  
-  }
+build_effdes <- function(nitems, mpoints, pplgrps, categos, groupvec){
+  #builds treatment design structure for W
+  #
+  #mpoints>nitems>treat>catego
+  #build group design
+  tmp1 <- diag(pplgrps)
+  tmp1[pplgrps, pplgrps] <- 0
+  eff.tmp1 <- lapply(categos, function(x)(tmp1%x%x)) #list with categories per item, replicated per group
+  eff.tmp2 <- as.matrix(bdiag(eff.tmp1))  #blockdiagonal with blocks equal to the categories
+  eff.tmp3 <- diag(mpoints-1)%x%eff.tmp2  #blow up to mpoints
+  nuller <- matrix(0, nrow=dim(eff.tmp2)[1], ncol=dim(eff.tmp3)[2]) #baseline (tp=1)
+  gr.bu <- rbind(nuller, eff.tmp3) #combine baseline and effects
+  #labelling of effects
+  names1 <- unique(names(groupvec))
+  #names1 <- paste0("G", pplgrps:1)
+  names2 <- paste(names1, "I", sep=".")
+  names3 <- paste0(names2, rep(1:nitems, each=pplgrps))
+  names4 <- paste(names3, "t", sep=".")
+  names5 <- paste0(names4, rep(2:mpoints, each=pplgrps*nitems))
+  colnames(gr.bu) <- names5
+  #columns with zeros (baseline group) are removed now
+  rem.0 <- NA
+  for(i in 1:dim(gr.bu)[2]) {rem.0[i] <- all(gr.bu[, i]==0)}
+  gr.bu.red <- gr.bu[, which(rem.0==0)]
+  return(gr.bu.red)
+}
 
 
-build_trdes <- function(nitems,mpoints,pplgrps,categos)
-  {
-    #builds trend design structure for W
-    #
-    #mpoints>nitems>treat>catego
-    tr.tmp1 <- lapply(categos,function(x) rep(x,pplgrps)) #replicate number of categories per item times the groups
-    tr.tmp2 <- as.matrix(bdiag(tr.tmp1)) #build the blockdiaginal for all items
-    tr.tmp3 <- diag(mpoints-1)%x%tr.tmp2 #blow it up to the time points necessary
-    nuller <- matrix(0,nrow=dim(tr.tmp2)[1],ncol=dim(tr.tmp3)[2]) #baseline
-    tr.bu <- rbind(nuller,tr.tmp3) #combine mpoints and baseline
-    #structure: for each category multiply it with a vector of group indicators 
-    #hence the grouping is:
-    #tau1 t2-t1, tau2 t2-t1, ..., tauk t2-t1, tau1 t3-t1, tau2 t3-t1, .. tauk t3-t1
-   #cat("Design matrix columns are:","\n","tau_1^(t2-t1), tau_2^(t2-t1), ..., tau_k^(t2-t1), tau_1^(t3-t1), tau_2(t3-t1), ..., tau_k^(t3-t1), etc.","\n")
-    #labeling
-    names1 <- paste("trend.I",1:nitems,sep="")
-    names2 <- paste(names1,"t",sep=".")
-    names3 <- paste(names2,rep(2:mpoints,each=nitems),sep="")
-    colnames(tr.bu) <- names3
-    return(tr.bu)
-  }
+build_trdes <- function(nitems, mpoints, pplgrps, categos){
+  #builds trend design structure for W
+  #
+  #mpoints>nitems>treat>catego
+  tr.tmp1 <- lapply(categos, function(x) rep(x, pplgrps)) #replicate number of categories per item times the groups
+  tr.tmp2 <- as.matrix(bdiag(tr.tmp1)) #build the blockdiaginal for all items
+  tr.tmp3 <- diag(mpoints-1)%x%tr.tmp2 #blow it up to the time points necessary
+  nuller <- matrix(0, nrow=dim(tr.tmp2)[1], ncol=dim(tr.tmp3)[2]) #baseline
+  tr.bu <- rbind(nuller, tr.tmp3) #combine mpoints and baseline
+  #structure: for each category multiply it with a vector of group indicators
+  #hence the grouping is:
+  #tau1 t2-t1, tau2 t2-t1, ..., tauk t2-t1, tau1 t3-t1, tau2 t3-t1, .. tauk t3-t1
+ #cat("Design matrix columns are:","\n","tau_1^(t2-t1), tau_2^(t2-t1), ..., tau_k^(t2-t1), tau_1^(t3-t1), tau_2(t3-t1), ..., tau_k^(t3-t1), etc.","\n")
+  #labeling
+  names1 <- paste0("trend.I", 1:nitems)
+  names2 <- paste(names1, "t", sep=".")
+  names3 <- paste0(names2, rep(2:mpoints, each=nitems))
+  colnames(tr.bu) <- names3
+  return(tr.bu)
+}
 
-build_catdes <- function(nitems,mpoints,pplgrps,categos)
-  {
-    #builds category design matrix
-    #FIX ME: is a bit ugly, we might get the loops out somehow
-    #
-    #check if there are just binary items
-    if(max(unlist(categos))<2) stop("items are (at most) binary and need no design")
-    #currently equates cat.0 and cat.1
-    warning("Currently c0 and c1 are equated for each item","\n")
-    max.all <- max(unlist(categos)) #maximum category number
-    ls.ct.des <- list() #list of designs for each item
-    #here we walk through each item and build up the category design
-    for(i in 1:nitems)
-      {
-       max.it <- sum(categos[[i]]!=0) #maximum category number of item i
-       ct.des <- rbind(rep(0,dim(diag(max.all-1))[2]),diag(max.all-1)) #the design for the maximum number of categories in X
-       rems <- max.all-max.it #the number of superfluous columns
-       #here the superfluous columns are removed as the step from W to W*
-       #the necessary rows with zeros however are maintained:
-       #for a dichotomous item the structure is slightly different than for any other, since it returns an empty matrix of appropriate dimensions
-       #for a polytomous item the superfluous columns are removed from the back 
-       ifelse(rems==max.all-1, ct.des<- as.matrix(ct.des[,-(1:max.all-1)]), ct.des<- as.matrix(ct.des[,1:((max.all-1)-rems)]))
-       ct.des.gr <- rep(1,pplgrps)%x%ct.des #blow it up to the number of groups
-       ls.ct.des[[i]] <- ct.des.gr #list with all category designs for each item
-      }       
-    ct.tmp2 <- as.matrix(bdiag(ls.ct.des)) #blockdiagonal matrix for a single mpoints
-    ct.bu <- rep(1,mpoints)%x%ct.tmp2 #blow up to number of times points
-    #try to first build first item, then second and so on, then blow up
-    #labeling: pretty unelegant too
-    names <- NA
-    for(i in 1:nitems)
-      {
-       cat <- max(categos[[i]])
-       ifelse(cat==1,names1 <- "remove",names1 <- paste("c",2:cat,sep=""))
-       names2 <- paste("I",i,sep="")
-       names3 <- paste(names1,names2,sep=".")
-       names<- c(names,names3)
-     }
-    names <- names[-1]
-    if(length(grep("remove",names)>0)) names <- names[-grep("remove",names)]
-    colnames(ct.bu) <- names
-    return(ct.bu)
-  }         
+build_catdes <- function(nitems, mpoints, pplgrps, categos){
+  #builds category design matrix
+  #FIX ME: is a bit ugly, we might get the loops out somehow
+  #
+  #check if there are just binary items
+  if(max(unlist(categos))<2) stop("items are (at most) binary and need no design")
+  #currently equates cat.0 and cat.1
+  warning("Currently c0 and c1 are equated for each item", "\n")
+  max.all <- max(unlist(categos)) #maximum category number
+  ls.ct.des <- list() #list of designs for each item
+  #here we walk through each item and build up the category design
+  for(i in 1:nitems){
+    max.it <- sum(categos[[i]]!=0) #maximum category number of item i
+    ct.des <- rbind(rep(0, dim(diag(max.all-1))[2]), diag(max.all-1)) #the design for the maximum number of categories in X
+    rems <- max.all-max.it #the number of superfluous columns
+    #here the superfluous columns are removed as the step from W to W*
+    #the necessary rows with zeros however are maintained:
+    #for a dichotomous item the structure is slightly different than for any other, since it returns an empty matrix of appropriate dimensions
+    #for a polytomous item the superfluous columns are removed from the back
+    if(rems == max.all-1) ct.des <- as.matrix(ct.des[, -(1:(max.all-1))]) else ct.des <- as.matrix(ct.des[, 1:((max.all-1)-rems)]) #if/else instead of ifelse(): these are assignments, not a vectorized choice
+    ct.des.gr <- rep(1, pplgrps)%x%ct.des #blow it up to the number of groups
+    ls.ct.des[[i]] <- ct.des.gr #list with all category designs for each item
+  }
+  ct.tmp2 <- as.matrix(bdiag(ls.ct.des)) #blockdiagonal matrix for a single mpoints
+  ct.bu <- rep(1, mpoints)%x%ct.tmp2 #blow up to number of time points
+  #try to first build first item, then second and so on, then blow up
+  #labeling: pretty inelegant too
+  names <- NA
+  for(i in 1:nitems){
+    cat <- max(categos[[i]])
+    ifelse(cat==1, names1 <- "remove", names1 <- paste0("c", 2:cat))
+    names2 <- paste0("I", i)
+    names3 <- paste(names1, names2, sep=".")
+    names <- c(names, names3)
+  }
+  names <- names[-1]
+  if(length(grep("remove", names)>0)) names <- names[-grep("remove", names)]
+  colnames(ct.bu) <- names
+  return(ct.bu)
+}
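
Note on the construction above: build_trdes and build_catdes assemble W from
block-diagonal and Kronecker products. A minimal sketch tracing build_trdes with
toy values (nitems = 2, mpoints = 2, pplgrps = 2, one category per item are
illustrative choices; bdiag() comes from the Matrix package, as in the code):

    library(Matrix)                                # provides bdiag()
    categos <- list(1, 1)                          # toy: 2 items, 1 category each
    tr1 <- lapply(categos, function(x) rep(x, 2))  # replicate per group (pplgrps = 2)
    tr2 <- as.matrix(bdiag(tr1))                   # item-wise block diagonal, 4 x 2
    tr3 <- diag(2 - 1) %x% tr2                     # expand to mpoints - 1 time contrasts
    rbind(matrix(0, nrow(tr2), ncol(tr3)), tr3)    # prepend all-zero baseline rows, 8 x 2

For this toy case the labeling loop would name the columns trend.I1.t2 and
trend.I2.t2.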
diff --git a/R/logLik.eRm.r b/R/logLik.eRm.r
old mode 100755
new mode 100644
diff --git a/R/logLik.ppar.r b/R/logLik.ppar.r
old mode 100755
new mode 100644
diff --git a/R/model.matrix.eRm.R b/R/model.matrix.eRm.R
old mode 100755
new mode 100644
diff --git a/R/performance.R b/R/performance.R
old mode 100755
new mode 100644
diff --git a/R/performance_measures.R b/R/performance_measures.R
old mode 100755
new mode 100644
diff --git a/R/performance_plots.R b/R/performance_plots.R
old mode 100755
new mode 100644
diff --git a/R/person.parameter.R b/R/person.parameter.R
old mode 100755
new mode 100644
diff --git a/R/person.parameter.eRm.R b/R/person.parameter.eRm.R
old mode 100755
new mode 100644
index 0a7b423..71952d0
--- a/R/person.parameter.eRm.R
+++ b/R/person.parameter.eRm.R
@@ -1,244 +1,313 @@
-`person.parameter.eRm` <-
-function(object)
-# estimation of the person parameters with jml
-# object of class eRm
-# se... whether standard errors should be computed
-# splineInt... whether spline interpolation should be carried out
-
-{
-
-se <- TRUE
-splineInt <- TRUE
-options(warn=0)
-
-X <- object$X
-#collapse X
-#X.full <- object$X
-
-max.it <- apply(X,2,max,na.rm=TRUE)                               #maximum item raw score without NA
-rp <- rowSums(X,na.rm=TRUE)                                       #person raw scores
-maxrp <- apply(X,1,function(x.i) {sum(max.it[!is.na(x.i)])})      #maximum item raw score for person i
-TFrow <- ((rp==maxrp) | (rp==0))
-
-pers.exe <- (1:dim(X)[1])[TFrow]                                  #persons excluded from estimation due to 0/full
-pers.exe.names<-rownames(X)[pers.exe]
-pers.in<-(1:dim(X)[1])[-pers.exe]                                 #persons in estimation
-
-if (length(pers.exe) > 0) {                                 #data matrix persons (full/0) excluded)
-      X.ex <- object$X[-pers.exe,]                                        
+`person.parameter.eRm` <- function(object){
+  # estimation of the person parameters with jml
+  # object of class eRm
+  # se... whether standard errors should be computed
+
+  se            <- TRUE
+  interpolation <- TRUE
+
+  X <- object$X
+  #collapse X
+  #X.full <- object$X
+
+  max.it <- apply(X, 2L, max, na.rm = TRUE)                           #maximum item raw score without NA
+  rp     <- rowSums(X, na.rm = TRUE)                                  #person raw scores
+  maxrp  <- apply(X, 1L, function(x.i){ sum(max.it[!is.na(x.i)]) })   #maximum item raw score for person i
+  TFrow  <- ((rp==maxrp) | (rp==0))
+
+  pers.exe       <- (1L:nrow(X))[TFrow]       #persons excluded from estimation due to 0/full
+  pers.exe.names <- rownames(X)[pers.exe]
+  pers.in        <- (1L:nrow(X))[-pers.exe]   #persons in estimation
+
+  if(length(pers.exe) > 0L){   #data matrix with persons (full/0) excluded
+    X.ex <- object$X[-pers.exe,]                                        
   } else {
-      X.ex <- object$X
+    X.ex <- object$X
   }
 
+  if(any(is.na(X))){
+    dichX   <- ifelse(is.na(X), 1, 0)
+    strdata <- apply(dichX, 1L, function(x){ paste(x, collapse="") })
+    gmemb.X <- as.vector(data.matrix(data.frame(strdata)))
+  } else {
+    gmemb.X <- rep(1L, nrow(X))
+  }
 
-if (any(is.na(X))) {
-  dichX <- ifelse(is.na(X),1,0)
-  strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
-  gmemb.X <- as.vector(data.matrix(data.frame(strdata)))
-} else {
-  gmemb.X <- rep(1,dim(X)[1])
-}
-
-if (length(pers.exe) > 0) X <- X[-pers.exe,]
-X.dummy <- X
-
-if (any(is.na(X))) {
-  dichX <- ifelse(is.na(X),1,0)
-  strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
-  gmemb <- as.vector(data.matrix(data.frame(strdata)))
-  gmemb1 <- gmemb
-} else {
-  gmemb <- rep(1,dim(X)[1])
-  gmemb1 <- gmemb
-}
+  if(length(pers.exe) > 0L) X <- X[-pers.exe,]
 
-mt_vek <- apply(X,2,max,na.rm=TRUE)             #number of categories - 1 for each item
-mt_ind <- rep(1:length(mt_vek),mt_vek)          #index i for items
+  X.dummy <- X
 
-indvec <- NULL             #establish matrix with unique raw scores
-for (i in unique(gmemb)) {
-    gmemb.ind <- which(gmemb == i)
-    collapse.vec <- which(!duplicated(rowSums(rbind(X[gmemb==i,]),na.rm = TRUE)))
-    indvec <- c(indvec, gmemb.ind[collapse.vec])
+  if(any(is.na(X))){
+    dichX   <- ifelse(is.na(X), 1, 0)
+    strdata <- apply(dichX, 1L, function(x){ paste(x, collapse="") })
+    gmemb   <- as.vector(data.matrix(data.frame(strdata)))
+    gmemb1  <- gmemb
+  } else {
+    gmemb   <- rep(1L, nrow(X))
+    gmemb1  <- gmemb
   }
-#for (i in unique(gmemb)) indvec <- c(indvec,!duplicated(rowSums(rbind(X[gmemb==i,]),na.rm = TRUE)))
-indvec <- sort(indvec)
-X <- X[indvec,]                                 #collapsing X
 
-beta.all <- object$betapar
+  mt_vek <- apply(X, 2L, max, na.rm = TRUE)   #number of categories - 1 for each item
+  mt_ind <- rep(1L:length(mt_vek), mt_vek)    #index i for items
 
-if (!is.null(object$ngroups))
-  if (object$ngroups > 1) stop("Estimation of person parameters for models with group contrasts not possible!")
+  indvec <- NULL                              #establish matrix with unique raw scores
+  for(i in unique(gmemb)){
+    gmemb.ind    <- which(gmemb == i)
+    collapse.vec <- which(!duplicated(rowSums(rbind(X[gmemb==i,]), na.rm = TRUE)))
+    indvec       <- c(indvec, gmemb.ind[collapse.vec])
+  }
+  #for (i in unique(gmemb)) indvec <- c(indvec,!duplicated(rowSums(rbind(X[gmemb==i,]),na.rm = TRUE)))
 
-if (is.null(object$mpoints))  { mpoints <- 1
-} else {mpoints <- object$mpoints}
+  indvec <- sort(indvec)
+  X      <- X[indvec,]     #collapsing X
 
-r.pall <- rowSums(X,na.rm=TRUE)                 #person raw scores
+  beta.all <- object$betapar
 
+  if(!is.null(object$ngroups)){
+    if(object$ngroups > 1L) stop("Estimation of person parameters for models with group contrasts not possible!")
+  }
 
-X01 <- object$X01
-if (length(pers.exe) > 0) X01 <- X01[-pers.exe,]   #if persons excluded due to 0/full response
+  if(is.null(object$mpoints)){
+    mpoints <- 1L
+  } else {
+    mpoints <- object$mpoints
+  }
 
-X01 <- X01[indvec,]                                #collapsed version
-gmemb <- gmemb[indvec]                             #collapsed version
-rownames(X01) <- rownames(X)
+  r.pall <- rowSums(X, na.rm = TRUE)                 #person raw scores
+
+
+  X01 <- object$X01
+  if(length(pers.exe) > 0L) X01 <- X01[-pers.exe,]   #if persons excluded due to 0/full response
+
+  X01           <- X01[indvec,]                      #collapsed version
+  gmemb         <- gmemb[indvec]                     #collapsed version
+  rownames(X01) <- rownames(X)
+
+  rowvec <- 1L:nrow(X01)
+
+
+
+  fitlist <- tapply(rowvec, gmemb, function(rind){      #list with nlm outputs
+
+      if(length(rind) > 1L){
+         ivec <- !is.na(X[rind[1],])                       #non-NA elements
+         r.i  <- colSums(X[rind, ivec], na.rm = TRUE)      #item raw scores
+      } else {                                          #if only one person belongs to raw score group
+         ivec <- !is.na(X[rind[1],])
+         r.i  <- X[rind, ivec]
+        # r.i <- X[rind,]
+        # r.i[is.na(r.i)] <- 0
+      }
+      #r.i     <- colSums(object$X[rind,],na.rm=TRUE)       #item raw scores
+      r.p     <- r.pall[rind]                              #person raw scores for current NA group
+      X01g    <- rbind(X01[rind,])
+      beta    <- beta.all[!is.na(X01g[1L,])]
+      X01beta <- rbind(X01g, beta.all)                     #matrix with unique 0/1 response patterns and beta vector in the last row
+      theta   <- rep(0L, length(r.p))
+
+      #==================== ML routines ===================================
+      jml.rasch <- function(theta){         #fast ML for RM only
+        ksi   <- exp(theta)
+        denom <- 1/exp(-beta)               #-beta instead of beta since beta are easiness parameter
+        lnL   <- sum(r.p*theta) - sum(r.i*(-beta)) - sum(log(1 + outer(ksi,denom)))
+        return(-lnL)
+      }
+
+      jml <- function(theta){               #ML for all other models
+        t1t2.list <- tapply(1L:ncol(X01beta), mt_ind, function(xin){
+          #xb <- (t(X01beta)[xin,])
+          xb     <- rbind(t(X01beta)[xin,])     #0/1 responses and beta parameters for one item
+          beta.i <- c(0.0, xb[,ncol(xb)])       #item parameter with 0
+
+          #person responses (row-wise) on each category for current item
+          if(nrow(xb) > 1L){    #polytomous item; the former "length(xin == 1L)" was always >= 1, i.e. TRUE
+            x01.i <- as.matrix(xb[,1L:(ncol(xb) - 1L)])
+          } else {
+            x01.i <- rbind(xb[,1L:(ncol(xb) - 1L)])  #0/1 matrix for item i without beta
+          }
+
+          cat0 <- rep(0L, ncol(x01.i))
+          cat0[colSums(x01.i) == 0L] <- 1       #those with 0 on the 1-kth category get a 1
+          x01.i0 <- rbind(cat0, x01.i)          #appending response vector for 0th category
+
+          ind.h   <- 0L:(length(beta.i)-1L)
+          theta.h <- ind.h %*% t(theta)         #n. categories times theta
+          #!!!FIXME
+          term1 <- (theta.h + beta.i) * x01.i0  #category-person matrix
+          t1.i  <- sum(colSums(term1))          #sum over categories and persons
+          #print(t1.i)
+
+          term2 <- exp(theta.h+beta.i)
+          t2.i  <- sum(log(colSums(term2)))     #sum over categories and persons
+          #print(t2.i)
+
+          return(c(t1.i,t2.i))
+        })
+        termlist <- matrix(unlist(t1t2.list), ncol = 2L, byrow = TRUE)
+        termlist <- termlist[!is.na(rowSums(termlist)),]
+        st1st2   <- colSums(termlist, na.rm = TRUE) #sum term1, term2
+
+        lnL <- st1st2[1] - st1st2[2]
+        return(-lnL)
+      }
+      #==================== end ML routines ================================
+
+      #==================== call optimizer =================================
+      if (object$model == "RM") {
+        fit <- nlm(jml.rasch, theta, hessian = se, iterlim = 1000L)
+      } else {
+        fit <- nlm(jml, theta, hessian = se, iterlim = 1000L)
+      }
+      #fit2 <- optim(theta, jml.rasch, method="BFGS", hessian=TRUE)
+
+      #=================== end call optimizer ==============================
+      loglik   <- -fit$minimum
+      niter    <- fit$iterations
+      thetapar <- fit$estimate
+      if(se){
+        se <- sqrt(diag(solve(fit$hessian)))
+      } else {
+        se          <- NA
+        fit$hessian <- NA
+      }
+
+      return(list("loglik"   = loglik,
+                  "niter"    = niter,
+                  "thetapar" = thetapar,
+                  "se"       = se,
+                  "hessian"  = fit$hessian))
+  })
+
+
+
+  loglik <- niter <- npar <- numeric(length(fitlist))
+  thetapar <- se.theta <- hessian <- vector(mode = "list", length = length(fitlist))
+
+  for(i in seq_along(fitlist)){
+    loglik[i] <- fitlist[[i]]$loglik
+    niter[i]  <- fitlist[[i]]$niter
+    npar[i]   <- length(fitlist[[i]]$thetapar)
+    thetapar[[i]] <- fitlist[[i]]$thetapar
+    se.theta[[i]] <- fitlist[[i]]$se
+    hessian[[i]]  <- fitlist[[i]]$hessian
+  }
 
-rowvec <- 1:(dim(X01)[1])
 
-fitlist <- tapply(rowvec,gmemb,function(rind) {         #list with nlm outputs
 
-    if (length(rind) > 1) {
-       ivec <- !is.na(X[rind[1],])                      #non-NA elements
-       r.i <- colSums(X[rind,ivec],na.rm=TRUE)          #item raw scores
-    } else {                                        #if only one person belongs to raw score group
-       ivec <- !is.na(X[rind[1],])
-       r.i <- X[rind,ivec]
-      # r.i <- X[rind,]
-      # r.i[is.na(r.i)] <- 0
+  if(interpolation){   #cubic spline interpolation for 0 and full raw scores
+    x     <- rowSums(X, na.rm = TRUE)
+    xlist <- split(x, gmemb)
+
+    splineMessage <- FALSE   # to display the spline-failure message only once
+    
+    max.rs.NAgroups <- lapply(sort(unique(gmemb.X)), function(i){  ## MM 2012-02-01
+                         sum(max.it[!is.na(object$X[which(gmemb.X == i)[1],])])   # get the max. raw score per NA-group
+                       })
+    NAgroups.min <- tapply(rowSums(object$X, na.rm=TRUE), gmemb.X, min, na.rm = TRUE)
+    NAgroups.max <- tapply(rowSums(object$X, na.rm=TRUE), gmemb.X, max, na.rm = TRUE)
+    
+    NAgroup_exclude <- numeric()
+    for(i in seq_along(unique(gmemb.X))){   # delete groups with 0/full patterns from max.rs.NAgroups, NAgroups.min, NAgroups.max
+      if(all(rowSums(object$X, na.rm=TRUE)[gmemb.X == i] %in% c(0, max.rs.NAgroups[[i]]))) NAgroup_exclude <- c(NAgroup_exclude, i)
     }
-    #r.i <- colSums(object$X[rind,],na.rm=TRUE)         #item raw scores
-    r.p <- r.pall[rind]                                 #person raw scores for current NA group
-    X01g <- rbind(X01[rind,])
-    beta <- beta.all[!is.na(X01g[1,])]
-    X01beta <- rbind(X01g,beta.all)                     #matrix with unique 0/1 response patterns and beta vector in the last row
-    theta <- rep(0,length(r.p))
-
-    #==================== ML routines ===================================
-    jml.rasch <- function(theta)         #fast ML for RM only
-    {
-      ksi <- exp(theta)
-      denom <- 1/exp(-beta)              #-beta instead of beta since beta are easiness parameter
-      lnL <- sum(r.p*theta)-sum(r.i*(-beta))-sum(log(1+outer(ksi,denom)))
-      -lnL
+    if(length(NAgroup_exclude) > 0L){
+      splineMessage <- TRUE   # so that the message is only printed once
+      max.rs.NAgroups <- max.rs.NAgroups[-NAgroup_exclude]
+      NAgroups.min <- NAgroups.min[-NAgroup_exclude]
+      NAgroups.max <- NAgroups.max[-NAgroup_exclude]
+      gmemb_reduced <- unique(gmemb.X[-which(gmemb.X %in% NAgroup_exclude)])
     }
-
-    jml <- function(theta)               #ML for all other models
-    {
-        t1t2.list <- tapply(1:(dim(X01beta)[2]),mt_ind, function(xin) {
-                     #xb <- (t(X01beta)[xin,])
-                     xb <- rbind(t(X01beta)[xin,])     #0/1 responses and beta parameters for one item
-                     beta.i <- c(0,xb[,dim(xb)[2]])    #item parameter with 0
-
-                     #person responses (row-wise) on each category for current item
-                     if ((dim(xb)[1] > 1) && (length(xin == 1))) {
-                       x01.i <-  as.matrix(xb[,1:(dim(xb)[2]-1)])
+    
+    pred.list <- Map(function(xx, yy, rs, NAmin, NAmax){   # Map(...)  is  mapply(..., SIMPLIFY = FALSE)
+                   y    <- tapply(yy, xx, function(xy){ xy[1] })
+                   x    <- unique(sort(xx))
+                   from <- ifelse(NAmin == 0, 0, min(x))
+                   to   <- ifelse(NAmax == rs, rs, max(x))
+                   if((length(x) > 3) || (length(y) > 3)){   #otherwise splinereg is not admissible
+                     fm1 <- interpSpline(x,y)
+                     if((from == 0) | (to == rs)){
+                       pred.val <- unclass(predict(fm1, unique(c(from, x, to))))
                      } else {
-                       x01.i <- rbind(xb[,1:(dim(xb)[2]-1)])  #0/1 matrix for item i without beta
+                       list(x=unname(x),y=unname(y))   #MM2012-02-01
                      }
+                   } else {
+                     splineMessage <<- TRUE   # set the flag in the enclosing frame so the message is printed only once
+                     rval <- list(x=unname(x),y=unname(y))   #MM2012-02-01
+                     if(from == 0){  rval$x <- c(0, rval$x)
+                                     rval$y <- c(NA, rval$y) }
+                     if(to == rs){  rval$x <- c(rval$x, rs)
+                                    rval$y <- c(rval$y, NA) }
+                     rval
+                   }  
+                 }, xlist, thetapar, max.rs.NAgroups, NAgroups.min, NAgroups.max)
+    X.n <- object$X
+  #  if (any(sapply(pred.list,is.null)))  pred.list <- NULL    #no spline interpolation applicable   #MM2012-02-01
+
+    if(splineMessage) message("Spline interpolation in some subgroups not performed!\n  Less than 4 different person parameters estimable!\n  Perhaps NAs in subgroup(s).")
+  }
 
-                     cat0 <- rep(0,dim(x01.i)[2])
-                     cat0[colSums(x01.i)==0] <- 1      #those with 0 on the 1-kth category get a 1
-                     x01.i0 <- rbind(cat0,x01.i)       #appending response vector for 0th category
-
-                     ind.h <- 0:(length(beta.i)-1)
-                     theta.h <- ind.h %*% t(theta)     #n. categories times theta
-                     #!!!FIXME
-                     term1 <- (theta.h+beta.i)*x01.i0  #category-person matrix
-                     t1.i <- sum(colSums(term1))       #sum over categories and persons
-                     #print(t1.i)
-
-                     term2 <- exp(theta.h+beta.i)
-                     t2.i <- sum(log(colSums(term2)))   #sum over categories and persons
-                     #print(t2.i)
-
-                     return(c(t1.i,t2.i))
-                   })
-      termlist <- matrix(unlist(t1t2.list),ncol=2,byrow=TRUE)
-      termlist <- termlist[!is.na(rowSums(termlist)),]
-      st1st2 <- colSums(termlist, na.rm = TRUE) #sum term1, term2
-
-      lnL <- st1st2[1]-st1st2[2]
-      -lnL
-    }
-    #==================== end ML routines ================================
 
-    #==================== call optimizer =================================
-    if (object$model == "RM") {
-      fit <- nlm(jml.rasch,theta,hessian=se,iterlim=1000)
-    } else {
-      fit <- nlm(jml,theta,hessian=se,iterlim=1000)
-    }
-    #fit2 <- optim(theta,jml.rasch,method="BFGS",hessian=TRUE)
-
-    #=================== end call optimizer ==============================
-    loglik <- -fit$minimum
-    niter <- fit$iterations
-    thetapar <- fit$estimate
-    if (se) {
-      se <- sqrt(diag(solve(fit$hessian)))
-    } else {
-      se <- NA
-      fit$hessian <- NA }
-
-list(loglik=loglik,niter=niter,thetapar=thetapar,se=se,hessian=fit$hessian)
-})
-
-
-loglik <- NULL
-niter <- NULL
-npar <- NULL
-thetapar <- list(NULL)
-se.theta <- list(NULL)
-hessian <- list(NULL)
-for (i in 1:length(fitlist)) {
-  loglik <- c(loglik,fitlist[[i]]$loglik)
-  niter <- c(niter,fitlist[[i]]$niter)
-  npar <- c(npar,length(fitlist[[i]]$thetapar))
-  thetapar[[i]] <- fitlist[[i]]$thetapar
-  se.theta[[i]] <- fitlist[[i]]$se
-  hessian[[i]] <- fitlist[[i]]$hessian
-}
 
-if (splineInt) {                                           #cubic spline interpolation for missing, 0, full raw scores
-  x <- rowSums(X,na.rm=TRUE)
-  xlist <- split(x,gmemb)
-  pred.list <- mapply(function(xx,yy) {
-                       y <- tapply(yy,xx, function(xy) {xy[1]})
-                       x <- unique(sort(xx))
-                       if ((length(x) > 3) || (length(y) > 3)) {        #otherwise splinereg is not admissible
-                         fm1 <- interpSpline(x,y)
-                         pred.val <- predict(fm1, 0:sum(max.it))
-                       } else {
-                         warning("Spline interpolation is not performed!\n  Less than 4 different person parameters estimable!\n  Perhaps in (NA) subgroup(s).")
-                         NULL
-                       }},xlist,thetapar,SIMPLIFY=FALSE)
-  X.n <- object$X
-  if (any(sapply(pred.list,is.null)))  pred.list <- NULL                           #no spline interpolation applicable
+  names(thetapar) <- names(se.theta) <- paste0("NAgroup", seq_along(thetapar))
 
-}
+  #---------expand theta and se.theta, labeling -------------------
+  for(i in unique(gmemb)){
+    o.r        <- rowSums(rbind(X.dummy[gmemb1==i,]), na.rm = TRUE)             #original raw scores
+    names(o.r) <- rownames(X.dummy)[gmemb1 == i]
+    c.r       <- rowSums(rbind(X[gmemb==i,]), na.rm = TRUE)                     #collapsed raw scores
+    match.ind <- match(o.r, c.r)
+    thetapar[[i]] <- thetapar[[i]][match.ind]           #de-collapse theta's
+    se.theta[[i]] <- se.theta[[i]][match.ind]           #de-collapse se's
+    names(thetapar[[i]]) <- names(se.theta[[i]]) <- names(o.r)
+  }
+  #--------------- end expand, labeling ---------------------------
 
-names(thetapar) <- names(se.theta) <- paste("NAgroup",1:length(thetapar),sep="")
-
-#---------expand theta and se.theta, labeling -------------------
-for (i in unique(gmemb)) {
-  o.r <- rowSums(rbind(X.dummy[gmemb1==i,]), na.rm = TRUE)             #orginal raw scores
-  names(o.r) <- rownames(X.dummy)[gmemb1 == i]
-  c.r <- rowSums(rbind(X[gmemb==i,]), na.rm = TRUE)                     #collapsed raw scores
-  match.ind <- match(o.r, c.r)
-  thetapar[[i]] <- thetapar[[i]][match.ind]           #de-collapse theta's
-  se.theta[[i]] <- se.theta[[i]][match.ind]           #de-collapse se's
-  names(thetapar[[i]]) <- names(se.theta[[i]]) <- names(o.r)
-}
-#--------------- end expand, labeling ---------------------------
 
 
 
+  #---------------------- theta.table new ----------------------   ## MM2012-02-01 START
+  #thetavec <- unlist(thetapar)
+  #ind.orig <- as.vector(unlist(tapply(1:length(gmemb1), gmemb1, function(ind) {ind})))
+  #theta.orig <- tapply(thetavec, ind.orig, function(ii) return(ii))   #original order theta parameter
+  #theta.table <- data.frame(theta.orig, gmemb1)
+  #colnames(theta.table) <- c("Person Parameter","NAgroup")
+  #rownames(theta.table) <- rownames(X.ex)
 
-#---------------------- theta.table new ----------------------
-thetavec <- unlist(thetapar)
-ind.orig <- as.vector(unlist(tapply(1:length(gmemb1), gmemb1, function(ind) {ind})))
-theta.orig <- tapply(thetavec, ind.orig, function(ii) return(ii))   #original order theta parameter
-theta.table <- data.frame(theta.orig, gmemb1)
-colnames(theta.table) <- c("Person Parameter","NAgroup")
-rownames(theta.table) <- rownames(X.ex)
+  if(length(NAgroup_exclude) > 0L){
+    selector <- gmemb.X %in% gmemb_reduced
+  } else {
+    selector <- rep(TRUE, length(gmemb.X))
+  }
+  gmemb.X_final <- gmemb.X[selector]
+  while(any(diff(sort(unique(gmemb.X_final))) > 1L) | (min(gmemb.X_final) > 1L)){
+    if(!any(gmemb.X_final == 1L)){ gmemb.X_final <- gmemb.X_final - 1L; next }
+    for(i in 1:max(gmemb.X_final)){
+      if(sum(gmemb.X_final == i) == 0) gmemb.X_final[gmemb.X_final == (i+1L)] <- gmemb.X_final[gmemb.X_final == (i+1L)] - 1L
+    }
+  }
 
+  theta.table <- data.frame("Person Parameter" = rep(NA, nrow(object$X)),
+                            "NAgroup" = NA,
+                            "Interpolated" = FALSE,
+                            row.names=rownames(object$X),
+                            check.names = FALSE)
+
+  theta.table[selector, "Person Parameter"] <- mapply(function(rs, NAgroup){
+                                                 pred.list[[NAgroup]]$y[which(pred.list[[NAgroup]]$x == rs)]
+                                               }, rowSums(object$X, na.rm=TRUE)[selector], gmemb.X_final)
+  theta.table[selector, "NAgroup"] <- gmemb.X_final
+                            
+  if(length(pers.exe) > 0) theta.table[pers.exe,"Interpolated"] <- TRUE
+                                                                   ## MM2012-02-01 END
+  
+
+  result <- list(X = X.n, X01 = object$X01, X.ex = X.ex, W = object$W, model = object$model,
+                 loglik = loglik, loglik.cml = object$loglik, npar = npar, iter = niter, betapar = object$betapar,
+                 thetapar = thetapar, se.theta = se.theta, theta.table = theta.table,
+                 pred.list = pred.list, hessian = hessian, mpoints = mpoints,
+                 pers.ex = pers.exe, gmemb = gmemb1)
+  class(result) <- "ppar"
+
+  return(result)
 
-result <- list(X = X.n, X01 = object$X01, X.ex = X.ex, W = object$W, model = object$model,
-               loglik = loglik, loglik.cml = object$loglik, npar = npar, iter = niter, betapar = object$betapar,
-               thetapar = thetapar, se.theta = se.theta, theta.table = theta.table,
-               pred.list = pred.list, hessian = hessian, mpoints = mpoints,
-               pers.ex = pers.exe, gmemb = gmemb1)
-class(result) <- "ppar"
-result
 }
-
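
The jml.rasch objective above is the joint (unconditional) Rasch log-likelihood
with easiness parameters beta. A minimal standalone sketch with illustrative
data and fixed item parameters (persons with 0/full raw scores, which the
function excludes beforehand, are absent from this toy matrix):

    X    <- rbind(c(1,0,0,1,0),
                  c(1,1,0,1,0),
                  c(0,1,1,0,1),
                  c(1,1,1,0,1))              # 4 persons x 5 items
    beta <- c(0.5, 0.0, -0.3, 0.2, -0.4)     # fixed easiness values (illustrative)
    r.p  <- rowSums(X)                       # person raw scores
    r.i  <- colSums(X)                       # item raw scores
    negLL <- function(theta)                 # negative log-likelihood, as in jml.rasch
      -(sum(r.p*theta) + sum(r.i*beta) - sum(log(1 + exp(outer(theta, beta, "+")))))
    nlm(negLL, rep(0, nrow(X)))$estimate     # JML person parameter estimates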
diff --git a/R/personfit.R b/R/personfit.R
old mode 100755
new mode 100644
diff --git a/R/personfit.ppar.R b/R/personfit.ppar.R
old mode 100755
new mode 100644
index fbc6d32..302c819
--- a/R/personfit.ppar.R
+++ b/R/personfit.ppar.R
@@ -1,21 +1,17 @@
-`personfit.ppar` <-
-function(object)
+personfit.ppar <- function(object) {
 # computes Chi-square based itemfit statistics (Smith, p.77ff)
 # for object of class "ppar" (from person.parameter)
-{
 
-  if (length(object$pers.ex)==0) {
-    X <- object$X
-  } else {
-    X <- object$X[-object$pers.ex,]
-  }
+  excl_obs_num <- object$pers.ex                     #mjm 2014-09-07
+  excl_obs_chr <- rownames(object$X)[excl_obs_num]   #
+                                                     #
+  if(length(excl_obs_num) > 0L){                 # remove obs. to be excluded, but
+    X <- object$X[-excl_obs_num,]                # store information to use in
+  } else {                                           # subsequent functions
+    X <- object$X                                    #
+  }                                                  #
 
-  #rp <- rowSums(X,na.rm=TRUE)
-  #mt_vek <- apply(X,2,max,na.rm=TRUE)
-  #maxrp <- sum(mt_vek)
-  #TFrow <- ((rp==maxrp) | (rp==0))              #exclude full and 0 responses
-
-  VE <- pifit.internal(object)                  #compute expectation and variance term
+  VE <- pifit.internal(object)   # compute expectation and variance term
   Emat <- VE$Emat
   Vmat <- VE$Vmat
   Cmat <- VE$Cmat
@@ -26,7 +22,7 @@ function(object)
   sq.res <- st.res^2                            #squared standardized residuals
   pfit <- rowSums(sq.res,na.rm=TRUE)
 
-  pdf <- apply(X,1,function(x) {length(na.exclude(x))})
+  pdf <- apply(X, 1L, function(x){ length(na.exclude(x)) })
 
  #pdf <- apply(X[!TFrow,],1,function(x) {length(na.exclude(x))})   #degrees of freedom (# of persons per item)
 
@@ -41,13 +37,20 @@ function(object)
   qsq.infitMSQ <- rowSums(Cmat-Vmat^2, na.rm=TRUE)/psumVmat^2
   q.infitMSQ <- sqrt(qsq.infitMSQ)
 
-  p.outfitZ <- (sqrt(p.outfitMSQ)-1)*(3/q.outfitMSQ)+(q.outfitMSQ/3)
-  p.infitZ <- (sqrt(p.infitMSQ)-1)*(3/q.infitMSQ)+(q.infitMSQ/3)
+  p.outfitZ <- ((p.outfitMSQ)^(1/3)-1)*(3/q.outfitMSQ)+(q.outfitMSQ/3)
+  p.infitZ <- ((p.infitMSQ)^(1/3)-1)*(3/q.infitMSQ)+(q.infitMSQ/3)
+
+  result <- structure(
+    list("p.fit"        = pfit,
+         "p.df"         = pdf,
+         "st.res"       = st.res,
+         "p.outfitMSQ"  = p.outfitMSQ,
+         "p.infitMSQ"   = p.infitMSQ,
+         "p.outfitZ"    = p.outfitZ,
+         "p.infitZ"     = p.infitZ,
+         "excl_obs_num" = excl_obs_num,
+         "excl_obs_chr" = excl_obs_chr),
+    class = "pfit")
+  return(result)
 
-  result <- list(p.fit = pfit, p.df = pdf, st.res = st.res, p.outfitMSQ = p.outfitMSQ,
-                 p.infitMSQ = p.infitMSQ,
-                 p.outfitZ = p.outfitZ, p.infitZ = p.infitZ)
-  class(result) <- "pfit"
-  result
 }
-
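
The changed p.outfitZ/p.infitZ lines above replace the square root of the mean
squares by a cube root, in the style of the Wilson-Hilferty normalization
commonly used for mean-square fit statistics. A small sketch of the
transformation (values illustrative):

    msq2z <- function(msq, q) (msq^(1/3) - 1)*(3/q) + (q/3)   # cube-root Z
    msq2z(msq = 1.25, q = 0.3)                                # ~0.87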
diff --git a/R/phi.range.R b/R/phi.range.R
new file mode 100644
index 0000000..fe0dbbe
--- /dev/null
+++ b/R/phi.range.R
@@ -0,0 +1,9 @@
+"phi.range" <-
+function(mat){
+  cmat<-cor(mat)+diag(NA,ncol(mat))
+  ma<-max(cmat,na.rm=TRUE)
+  mi<-min(cmat,na.rm=TRUE)
+  RET <- ma-mi
+  RET
+}
+
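
A minimal usage sketch for the new helper (toy data; assuming phi.range() as
defined above, which returns max minus min of the off-diagonal inter-item
correlations):

    set.seed(1)
    resp <- matrix(rbinom(60, 1, 0.5), ncol = 4)   # 15 persons x 4 items, 0/1
    phi.range(resp)                                # spread of the correlations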
diff --git a/R/pifit.internal.r b/R/pifit.internal.R
old mode 100755
new mode 100644
similarity index 73%
rename from R/pifit.internal.r
rename to R/pifit.internal.R
index d130f7a..f09a342
--- a/R/pifit.internal.r
+++ b/R/pifit.internal.R
@@ -1,27 +1,25 @@
-pifit.internal <- function(object)
-{
+pifit.internal <- function(object){
 #object of class ppar
 #function is called in itemfit.ppar and personfit.ppar
 
-
-  X <- object$X
-  mt_vek <- apply(X,2,max,na.rm=TRUE)             #number of categories - 1 for each item
-  mt_ind <- rep(1:length(mt_vek),mt_vek)
+  X      <- object[["X"]]
+  mt_vek <- apply(X, 2L, max, na.rm = TRUE)   # (number of categories - 1) for each item
+  mt_ind <- rep(seq_along(mt_vek), mt_vek) # MjM 2014-07-11
   mt_seq <- sequence(mt_vek)
-  gmemb <- object$gmemb
+  gmemb  <- object$gmemb
 
   pmat <- pmat(object)                          #matrix with model probabilites
 
   #-----------------matrix with expected response patterns--------------
-  Emat.cat <- t(apply(pmat,1,function(x) x*mt_seq))
-  if ((object$model == "RM") || (object$model == "LLTM")) { 
+  Emat.cat <- t(apply(pmat, 1L, function(x) x*mt_seq))
+  if(object$model %in% c("RM", "LLTM")){ 
     Emat <- Emat.cat
   } else {
-    E.list <- tapply(1:length(mt_ind),mt_ind, function(ind) {rowSums(cbind(Emat.cat[,ind]),na.rm=TRUE)})
+    E.list <- tapply(seq_along(mt_ind), mt_ind, function(ind){ rowSums(cbind(Emat.cat[, ind]), na.rm = TRUE) })
     Emat <- matrix(unlist(E.list),ncol=dim(X)[2],dimnames=list(rownames(pmat),colnames(X)))
-  } 
+  }
   #------------------------variance term for standardized residuals------
-  pmat.l0 <- tapply(1:length(mt_ind),mt_ind, function(ind) {
+  pmat.l0 <- tapply(seq_along(mt_ind), mt_ind, function(ind){
                             vec0 <- 1-rowSums(as.matrix(pmat[,ind]))     #prob for 0th category
                             cbind(vec0,pmat[,ind])
                             })
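
The expectation block above implements the usual category moments: with the 0th
category contributing nothing, E[X] = sum_h h*P(X=h), and the variance term for
the standardized residuals is, in the usual formulation,
Var[X] = sum_h h^2*P(X=h) - E[X]^2. A one-item sketch with illustrative
probabilities:

    p.cat <- c(0.2, 0.3, 0.1)            # P(X=1), P(X=2), P(X=3); P(X=0) = 0.4
    h     <- seq_along(p.cat)            # category scores, i.e. mt_seq for this item
    E.vi  <- sum(h * p.cat)              # expected item score: 1.1
    V.vi  <- sum(h^2 * p.cat) - E.vi^2   # variance term: 1.09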
diff --git a/R/plist.internal.R b/R/plist.internal.R
old mode 100755
new mode 100644
index e28fd1b..19de521
--- a/R/plist.internal.R
+++ b/R/plist.internal.R
@@ -1,25 +1,20 @@
-`plist.internal` <-
-function(object,theta)
 # computes a list of expected probabilities for objects of class Rm
 # with 0th category included!
-{
+plist.internal <- function(object, theta){
 
-X <- object$X
-mt_vek <- apply(X,2,max,na.rm=TRUE)             #number of categories - 1 for each item
-mt_ind <- rep(1:length(mt_vek),mt_vek)
+  X <- object$X
+  mt_vek <- apply(X, 2L, max, na.rm = TRUE)   # number of categories - 1 for each item
+  mt_ind <- rep(seq_along(mt_vek), mt_vek)
 
-
-#--------compute list matrix of probabilites for fixed theta) 
-p.list <- tapply(object$betapar,mt_ind,function(beta.i) {
-                 beta.i <- c(0,beta.i)
-                 ind.h <- 0:(length(beta.i)-1)
-                 theta.h <- ind.h %*% t(theta)          #multiply category with 
-                 #tb <- exp(theta.h-beta.i)
-                 tb <- exp(theta.h+beta.i)
-                 denom <- colSums(tb)
-                 pi.mat <- apply(tb,1,function(y) {y/denom})
-                 return(pi.mat)
-               })
-return(p.list)
+  #--------compute list matrix of probabilities for fixed theta--------
+  p.list <- tapply(object$betapar, mt_ind, function(beta.i){
+              beta.i <- c(0, beta.i)
+              ind.h <- 0:(length(beta.i)-1)
+              theta.h <- tcrossprod(ind.h, theta) # ind.h %*% t(theta) # multiply category index with theta
+              tb <- exp(theta.h + beta.i)
+              denom <- colSums(tb)
+              pi.mat <- apply(tb, 1L, function(y){ y/denom })
+              return(pi.mat)
+            })
+  return(p.list)
 }
-
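
plist.internal evaluates the standard polytomous Rasch category probabilities
P(X=h) = exp(h*theta + beta_h) / sum_l exp(l*theta + beta_l), with beta_0 = 0
prepended as in the code above. A sketch for one item at a single theta
(parameter values illustrative):

    beta.i <- c(0, -0.5, -1.2)            # item parameters, beta_0 = 0 prepended
    theta  <- 0.4                         # one person parameter
    ind.h  <- 0:(length(beta.i) - 1)      # category indices 0, 1, 2
    tb     <- exp(ind.h*theta + beta.i)   # unnormalized category terms
    tb / sum(tb)                          # category probabilities, sum to 1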
diff --git a/R/plot.ppar.r b/R/plot.ppar.r
old mode 100755
new mode 100644
index 010c2fd..cf03bc1
--- a/R/plot.ppar.r
+++ b/R/plot.ppar.r
@@ -1,4 +1,4 @@
-plot.ppar <- function(x,xlab="Person Raw Scores",ylab="Person Parameters (Theta)",main=NULL,...)
+plot.ppar <- function(x, xlab = "Person Raw Scores", ylab = "Person Parameters (Theta)", main = NULL, ...){
 ### function (x, y = NULL, type = "p", xlim = NULL, ylim = NULL,
 ###     log = "", main = NULL, sub = NULL,
 ###     xlab = "Person Raw Scores",
@@ -7,35 +7,39 @@ plot.ppar <- function(x,xlab="Person Raw Scores",ylab="Person Parameters (Theta)
 ###     panel.last = NULL, asp = NA, ...)
 # plot of the person raw scores against the person parameters
 # x...object of class "ppar" (resulting from person.parameter.eRm)
-{
+
   pl <- x$pred.list                              #list with spline interpolations
 
-  if (is.null(pl)) stop("Spline interpolation required in person.parameter.eRm!")
+  if(is.null(pl)) stop("Spline interpolation required in person.parameter.eRm!")
 
   X <- x$X
-  if (length(x$pers.ex) > 0) {
-    X <- X[-x$pers.ex,]
+  if(length(x$pers.ex) > 0L){
+    X <- X[-x$pers.ex, ]
     #gmemb <- x$gmemb[-x$pers.ex]
   }
   gmemb <- x$gmemb
-  X.list <- split(as.data.frame(X),as.factor(gmemb))
+  X.list <- split(as.data.frame(X), as.factor(gmemb))
 
-  if (length(pl) > 1) {
-    for (i in 1:length(pl)) main.text <- paste("Person Parameter Plot of Group",i)
+  if(length(pl) > 1L){
+    main.text <- paste("Person Parameter Plot of Group", seq_along(pl))   #one title per NA group (the old loop kept only the last one)
   } else {
     main.text <- "Plot of the Person Parameters"
   }
 
-  if (!is.null(main)) main.text <- main
-
-  for (i in 1:length(pl)) {
-
-    #dev.new()
-    plot(rowSums(X.list[[i]],na.rm=TRUE),x$thetapar[[i]],xlim=c(min(pl[[i]]$x),max(pl[[i]]$x)),
-         ylim=c(min(pl[[i]]$y),max(pl[[i]]$y)),xlab=xlab,ylab=ylab,
-         main=main.text,...)
-    lines(pl[[i]]$x,pl[[i]]$y)
+  if(!is.null(main)) main.text <- rep(main, length.out = length(pl))
+
+  for(i in seq_along(pl)){
+    # plotting without duplicates
+    plot(
+      unique(cbind( rowSums(X.list[[i]], na.rm = TRUE),
+                    x$thetapar[[i]] )),
+      xlim = range(pl[[i]]$x),
+      ylim = range(pl[[i]]$y),
+      xlab = xlab,
+      ylab = ylab,
+      main = main.text[i],
+      ...)
+    lines(pl[[i]]$x, pl[[i]]$y)
   }
-}
-
 
+}
diff --git a/R/plotCI.R b/R/plotCI.R
old mode 100755
new mode 100644
diff --git a/R/plotDIF.R b/R/plotDIF.R
old mode 100755
new mode 100644
index 8e74a8e..809251a
--- a/R/plotDIF.R
+++ b/R/plotDIF.R
@@ -34,7 +34,7 @@ plotDIF <- function(object, item.subset=NULL, gamma = 0.95, main=NULL,
   # loops for computing thresholds on LRtest objects
   for(m in 1:M){   # confidences for dichotomous items
     if(object[[m]]$model == "RM"){
-      confidences1[[m]] <- lapply(object[[m]]$fitobj,function(x){confint(x, level=gamma)})
+      confidences1[[m]] <- lapply(object[[m]]$fitobj,function(x){-confint(x, level=gamma)})
     } else {   # confidences for polytomous items
       confidences1[[m]] <- lapply(object[[m]]$fitobj, function(x){confint(thresholds(x),level=gamma)})
     }
@@ -123,7 +123,7 @@ plotDIF <- function(object, item.subset=NULL, gamma = 0.95, main=NULL,
       for(l in 1:length(factorlist)) {
         lines(as.data.frame(confidences[[k]])[factorlist[l],],
               rep(seq(l-.5+distance, l+.5-distance, length.out=length(confidences))[k], 2),
-              type="b", pch=c("[","]"), col=length(cumsum(n2)[cumsum(n2) < k])+1, lty=lty[k])
+              type="b", pch=20, col=length(cumsum(n2)[cumsum(n2) < k])+1, lty=lty[k])
       }
     }
   } else {
@@ -132,7 +132,7 @@ plotDIF <- function(object, item.subset=NULL, gamma = 0.95, main=NULL,
       for(l in 1:length(factorlist)) {
         lines(as.data.frame(confidences[[k]])[factorlist[l],],
               rep(seq(l-.5+distance, l+.5-distance, length.out=length(confidences))[k], 2),
-              type="b", pch=c("[","]"), col=col[k], lty=lty[k])
+              type="b", pch=20, col=col[k], lty=lty[k])
       }
     }
   }
diff --git a/R/plotGOF.LR.R b/R/plotGOF.LR.R
old mode 100755
new mode 100644
index fb3d7a9..586ffa7
--- a/R/plotGOF.LR.R
+++ b/R/plotGOF.LR.R
@@ -1,7 +1,23 @@
-`plotGOF.LR` <-
-function(x,beta.subset="all", main="Graphical Model Check", xlab=NULL,ylab=NULL,tlab="item",
-         ylim=c(-3,3),xlim=c(-3,3),type="p",pos="4", conf=NULL, ctrline=NULL,...)
-{
+plotGOF.LR <- function(
+  x,
+  beta.subset = "all",
+  main = "Graphical Model Check",
+  xlab,
+  ylab,
+  tlab = "item",
+  xlim,
+  ylim,
+  type = "p",
+  pos = 4,
+  conf = NULL,
+  ctrline = NULL,
+  asp = 1,
+  x_axis = TRUE,
+  y_axis = TRUE,
+  set_par = TRUE,
+  reset_par = TRUE,
+  ...
+){
 # graphical model check
 # beta.subset...plot only a subset of beta-parameters; either "all" or an index vector
 # x...object of class LR (from LRtest)
@@ -13,128 +29,89 @@ function(x,beta.subset="all", main="Graphical Model Check", xlab=NULL,ylab=NULL,
 # ctrline ... control lines (confidence bands): NULL or list(gamma=0.95,lty="solid", col="blue")
 # ...     additional graphic parameters
 
-if (length(x$likgroup) > 2) warning("Only the parameters for the first two subgroups are plotted!")
+  # save current options() and par() values and restore them on exit
+  old_options <- options(locatorBell = FALSE)
+  if(set_par){ old_par <- par(mar=c(4,4,3,0)+.5, no.readonly = TRUE) }
+  on.exit({
+    if(set_par && reset_par){ par(old_par) }
+    options(old_options)
+  })
 
+  if(length(x$likgroup) > 2L) warning("Only the parameters for the first two subgroups are plotted!")
 
-if (is.null(xlab)) xlab<-paste("Beta for Group: ",x$spl.gr[1],sep="")
-if (is.null(ylab)) ylab<-paste("Beta for Group: ",x$spl.gr[2],sep="")
+  if(missing(xlab)) xlab <- paste0("Beta for Group: ", x$spl.gr[1L])
+  if(missing(ylab)) ylab <- paste0("Beta for Group: ", x$spl.gr[2L])
 
-nparg1 <- length(x$betalist[[1]])
-nparg2 <- length(x$betalist[[2]])
-if (nparg1 != nparg2) stop("Unequal number of parameters in the subgroups! Plot cannot be produced, choose another split in LRtest!")
+  nparg1 <- length(x$betalist[[1L]])
+  nparg2 <- length(x$betalist[[2L]])
+  if(nparg1 != nparg2) stop("Unequal number of parameters in the subgroups! Plot cannot be produced, choose another split in LRtest!")
 
-beta1 <- x$betalist[[1]] * -1  # -1 to obtain difficulty parameters
-beta2 <- x$betalist[[2]] * -1
 
 
+  beta1 <- -x$betalist[[1L]] # -1 to obtain difficulty parameters
+  beta2 <- -x$betalist[[2L]]
 
-if (is.character(beta.subset)) {
-  if (beta.subset=="all") {
-    beta.subset <- 1:length(beta1)
-    #textlab <- names(beta1)
-    switch(EXPR=tlab,
-      item=textlab <- substr(names(beta1),6,100),  #remove "beta " from names
-      number=textlab <- 1:length(beta1),
-      identify=labs <- substr(names(beta1),6,100)
-    )
+  if(is.character(beta.subset)) {
+    if(beta.subset == "all"){
+      beta.subset <- seq_along(beta1)
+      #textlab <- names(beta1)
+      switch(EXPR = tlab,
+        item     = textlab <- gsub("^beta\\ I", "", names(beta1)), # remove "beta I" from names
+        number   = textlab <- seq_along(beta1),
+        identify = labs    <- gsub("^beta\\ I", "", names(beta1))
+      )
+    } else {
+      textlab <- beta.subset
+    }
   } else {
-    textlab <- beta.subset
-  }
-} else {
-  #textlab <- names(beta1)[beta.subset]
-  ##beta.subset<-sort(beta.subset)
-
-    switch(EXPR=tlab,
-      item=textlab <- substr(names(beta1)[beta.subset],6,100),  #remove "beta " from names
-      number=textlab <- beta.subset,
-      identify=labs <- substr(names(beta1)[beta.subset],6,100)
+    switch(EXPR = tlab,
+      item     = textlab <- gsub("^beta\\ I", "", names(beta1)[beta.subset]), # remove "beta I" from names
+      number   = textlab <- beta.subset,
+      identify = labs    <- gsub("^beta\\ I", "", names(beta1)[beta.subset])
     )
-}
-
-
-#yshift <- (ylim[2]-ylim[1])/30
-yshift<-0
-
-plot(beta1[beta.subset],beta2[beta.subset],main=main,xlab=xlab,
-       ylab=ylab,ylim=ylim,xlim=xlim,type=type,...)
-abline(0,1)
-if(exists("textlab")) {
-      text(beta1[beta.subset],beta2[beta.subset]+yshift,labels=textlab,pos=pos,...)
-}
-if(exists("labs")) {
-      options(locatorBell = FALSE)
-      xycoords <- cbind(beta1[beta.subset], beta2[beta.subset])
-      nothing<-identify(xycoords,labels = labs,atpen=TRUE,offset=1)
-}
+  }
 
 # se's needed for ellipses and control lines
-
-if(is.list(conf) || is.list(ctrline)){
-
-   if(any(is.na(unlist(x$selist)))) {
-      warning("Confidence ellipses or control lines cannot be plotted.\n  LR object without standard errors. Use option 'se=TRUE' in LRtest()")
+  if(is.list(conf) || is.list(ctrline)){
+    if(any(is.na(unlist(x$selist)))) {
+      warning('Confidence ellipses or control lines cannot be plotted.\n  LR object without standard errors.\n  Use option "se = TRUE" in LRtest()')
       conf <- ctrline <- NULL
-   } else {
-      s1 <- x$selist[[1]]
-      s2 <- x$selist[[2]]
+    } else {
+      s1 <- x$selist[[1L]]
+      s2 <- x$selist[[2L]]
       v1 <- s1^2
       v2 <- s2^2
-      suspicious.se<-any(cbind(s1,s2)[beta.subset]>10)
-      if(suspicious.se){
-         warning("Suspicious size of standard error(s).\n  Check model specification, split criterion, data.")
-      }
-   }
-
-   if(any(abs(cbind(beta1,beta2)[beta.subset])>8)){
-      warning("Suspicious size of parameter estimate(s).\n  Check model specification, split criterion, data.")
-      if(is.null(conf)) conf$ia <- FALSE
-   }
+      suspicious.se <- any(cbind(s1, s2)[beta.subset] > 10)
+      if(suspicious.se) warning("Suspicious size of standard error(s).\n  Check model specification, split criterion, data.")
+    }
 
-}
+    #if(any(abs(cbind(beta1,beta2)[beta.subset])>8)){
+    #   warning("Suspicious size of parameter estimate(s).\n  Check model specification, split criterion, data.")
+    #   if(is.null(conf)) conf$ia <- FALSE
+  }
 
-# confidence ellipses
-
-if(is.list(conf)){
-
-
-    # (interactive) plot of confidence ellipses
-
-    ## function ellipse() from package car
-    ellipse <-
-    function (center, shape, radius, center.pch = 19, center.cex = 1.5,
-        segments = 51, add = TRUE, xlab = "", ylab = "", las = par("las"),
-        col = palette()[2], lwd = 2, lty = 1, ...)
-    {
-        if (!(is.vector(center) && 2 == length(center)))
-            stop("center must be a vector of length 2")
-        if (!(is.matrix(shape) && all(2 == dim(shape))))
-            stop("shape must be a 2 by 2 matrix")
-        angles <- (0:segments) * 2 * pi/segments
-        unit.circle <- cbind(cos(angles), sin(angles))
-        ellipse <- t(center + radius * t(unit.circle %*% chol(shape)))
-        if (add)
-            lines(ellipse, col = col, lwd = lwd, lty = lty, ...)
-        else plot(ellipse, xlab = xlab, ylab = ylab, type = "l",
-            col = col, lwd = lwd, lty = lty, las = las, ...)
-        if (center.pch)
-            points(center[1], center[2], pch = center.pch, cex = center.cex,
-                col = col)
+###   confidence ellipses   ####################################################
+###   COMPUTATIONS   ###########################################################
+  if(is.list(conf)){
+    # simple ellipse to replace the function ellipse() from the car package
+    simple_ellipse <- function(center, a, b, n = 200L, border_col){
+      angle_t <- seq(0, 2*pi, length.out = n)[-1L]
+      polygon(center[1L] + a * cos(angle_t), center[2L] + b * sin(angle_t), lwd = 1.0, border = border_col)
     }
 
     # select items for which ellipses are drawn  ## rh 2011-05-31
-    if(is.null(conf$which)) conf$which<-beta.subset#seq_along(beta.subset)
-    ##conf$which <- sort(conf$which)
-    if(!all(conf$which %in% beta.subset))
-        stop("Incorrect item number(s) for which ellipses are to be drawn")
-    if(is.null(conf$col)) {
-        conf$c <- rep("red",length.out=length(beta1))
-    } else if (!is.null(conf$which)){
-##        conf$c <- rep(NA,length.out=length(beta.subset))
-        conf$c <- rep(NA,length.out=length(conf$which))
-        if (length(conf$c)!=length(conf$which))
-           stop("which and col must have the same length in specification of conf")
-        else
-           conf$c[conf$which]<-conf$col
+    if(is.null(conf$which)) conf$which <- beta.subset   #seq_along(beta.subset)
+    if(!all(conf$which %in% beta.subset)) stop("Incorrect item number(s) for which ellipses are to be drawn")
+    if(is.null(conf$col)){
+      conf$c <- rep("red", length.out = length(beta1))
+    } else if(!is.null(conf$which)){
+      # conf$c <- rep(NA,length.out=length(beta.subset))
+      conf$c <- rep(NA, length.out = length(conf$which))
+      if(length(conf$c)!=length(conf$which)){
+        stop('"which" and "col" must have the same length in specification of "conf"')
+      } else {
+        conf$c[conf$which] <- conf$col
+      }
     }
     conf$col <- conf$c
 
@@ -142,79 +119,126 @@ if(is.list(conf)){
     if(is.null(conf$lty)) conf$lty <- "dotted"
     if(is.null(conf$ia)) conf$ia <- FALSE
 
-    z <- qnorm((conf$gamma+1)/2)
+    z <- qnorm((1.0-conf$gamma)/2.0, lower.tail = FALSE)
 
     ci1u <- beta1 + z*s1
     ci1l <- beta1 - z*s1
     ci2u <- beta2 + z*s2
     ci2l <- beta2 - z*s2
+  }
+################################################################################
 
+###   95% control lines (Wright)   #############################################
+###   COMPUTATIONS   ###########################################################
+  if(is.list(ctrline)){
+    if(is.null(ctrline$gamma)) ctrline$gamma <- 0.95
+    if(is.null(ctrline$col))   ctrline$col <- "blue"
+    if(is.null(ctrline$lty))   ctrline$lty <- "solid"
 
+    z <- qnorm((1.0 - ctrline$gamma)/2.0, lower.tail = FALSE)
 
-    if(conf$ia) {
-
-
-         identifyEll <- function(x, y, ci1u, ci1l, ci2u,ci2l, v1, v2, conf, n=length(x), ...)
-         ## source: example from help("identify")
-         ## a function to use identify to select points, and overplot the
-         ## points with a cofidence ellipse as they are selected
-         {
-             xy <- xy.coords(x, y); x <- xy$x; y <- xy$y
-             sel <- rep(FALSE, length(x)); res <- integer(0)
-             while(sum(sel) < n) {
-                 ans <- identify(x[!sel], y[!sel], n=1, plot=FALSE, ...)
-                 if(!length(ans)) break
-                 ans <- which(!sel)[ans]
-                 i <- ans
-            lines(rep(x[i],2),c(ci2u[i],ci2l[i]),col=conf$col[1], lty=conf$lty)
-            lines(c(ci1u[i],ci1l[i]), rep(y[i],2),col=conf$col[1],lty=conf$lty)
-            ellipse(center=c(x[i],y[i]),matrix(c(v1[i],0,0,v2[i]),2),z,segments=200,center.cex=0.5,lwd=1, col=conf$col[1])
-                 #points(x[ans], y[ans], pch = pch)
-                 sel[ans] <- TRUE
-                 res <- c(res, ans)
-             }
-             #res
-         }
-         identifyEll(beta1[beta.subset],beta2[beta.subset],
-                             ci1u[beta.subset], ci1l[beta.subset], ci2u[beta.subset], ci2l[beta.subset],
-                             v1[beta.subset], v2[beta.subset], conf)
-    } else {
+    d      <- (beta1 + beta2)/2
+    se.d   <- sqrt(v1 + v2)
+    o      <- order(d)
+    se.d   <- se.d[o]   #take order() before sorting, otherwise se.d stays unsorted
+    d      <- d[o]
+    upperx <- d - z*se.d/2
+    uppery <- d + z*se.d/2
+  }
+################################################################################
 
-         # non-interactive: plot of all ellipses at once
-
-         x<-beta1
-         y<-beta2
-         for (i in beta.subset) {
-            if(i %in% conf$which){
-              lines(rep(x[i],2),c(ci2u[i],ci2l[i]),col=conf$col[i], lty=conf$lty)
-              lines(c(ci1u[i],ci1l[i]), rep(y[i],2),col=conf$col[i],lty=conf$lty)
-              ellipse(center=c(x[i],y[i]),matrix(c(v1[i],0,0,v2[i]),2),z,segments=200,center.cex=0.5,lwd=1, col=conf$col[i])
-            }
-         }
-    }
-}
+  if(!exists("ci1l", inherits = FALSE)) ci1l <- NA
+  if(!exists("ci1u", inherits = FALSE)) ci1u <- NA
+  if(!exists("ci2l", inherits = FALSE)) ci2l <- NA
+  if(!exists("ci2u", inherits = FALSE)) ci2u <- NA
 
+  if(!exists("upperx", inherits = FALSE)) upperx <- NA
+  if(!exists("uppery", inherits = FALSE)) uppery <- NA
 
-# 95% control lines (Wright)
+  if(missing(xlim)){
+    xlim <- range(beta1[beta.subset], ci1l, ci1u, upperx, uppery, na.rm = TRUE)
+  }
+  if(missing(ylim)){
+    ylim <- range(beta2[beta.subset], ci2l, ci2u, upperx, uppery, na.rm = TRUE)
+  }
 
-if(is.list(ctrline)){
+  plot.new()
+  plot.window(xlim = xlim, ylim = ylim, asp = asp)
+  title(main = main, xlab = xlab, ylab = ylab)
+  if(x_axis) axis(1)
+  if(y_axis) axis(2)
+
+  abline(0, 1)
+
+# confidence ellipses - if not interactive
+  if(is.list(conf) && !conf$ia){
+    # non-interactive: plot of all ellipses at once
+    x <- beta1
+    y <- beta2
+    for(i in beta.subset){
+      if(i %in% conf$which){
+        segments( x0 = c(x[i], ci1l[i]), y0 = c(ci2l[i], y[i]),
+                  x1 = c(x[i], ci1u[i]), y1 = c(ci2u[i], y[i]),
+                  col = conf$col[i], lty = conf$lty )
+        simple_ellipse( center = c(x[i],y[i]),
+                        a = abs(diff(c(ci1u[i],ci1l[i])))/2,
+                        b = abs(diff(c(ci2u[i],ci2l[i])))/2,
+                        n = 200L, border_col = conf$col[i] )
+      }
+    }
+  }
 
-    if(is.null(ctrline$gamma)) ctrline$gamma <- 0.95
-    if(is.null(ctrline$col)) ctrline$col <- "blue"
-    if(is.null(ctrline$lty)) ctrline$lty <- "solid"
+# 95% control lines (Wright) - plotting
+  if(is.list(ctrline)){
+    lines(upperx, uppery, col = ctrline$col, lty = ctrline$lty)
+    lines(uppery, upperx, col = ctrline$col, lty = ctrline$lty)
+  }
 
-    z <- qnorm((ctrline$gamma+1)/2)
+  if(exists("textlab", inherits = FALSE)){
+    text(beta1[beta.subset], beta2[beta.subset], labels = textlab, pos = pos, ...)
+  }
 
-    d<-(beta1+beta2)/2
-    se.d<-sqrt(v1+v2)
-    d<-sort(d)
-    se.d<-se.d[order(d)]
-    upperx<-d-z*se.d/2
-    uppery<-d+z*se.d/2
-    lines(upperx,uppery, col=ctrline$col, lty=ctrline$lty)
-    lines(uppery,upperx, col=ctrline$col, lty=ctrline$lty)
+  points(x = beta1[beta.subset], y = beta2[beta.subset], type = type, ...)
 
+  if(exists("labs", inherits = FALSE)){
+    xycoords <- cbind(beta1[beta.subset], beta2[beta.subset])
+    nothing  <- identify(xycoords, labels = labs, atpen = TRUE, offset = 1)
+  }
 
-}
+  box()
+
+
+
+# interactive confidence ellipses
+  if(is.list(conf) && conf$ia){
+    identifyEll <- function(x, y, ci1u, ci1l, ci2u,ci2l, v1, v2, conf, n=length(x), ...){
+    ## source: example from help("identify")
+    ## a function to use identify to select points, and overplot the
+    ## points with a confidence ellipse as they are selected
+      xy <- xy.coords(x, y)
+      x <- xy$x
+      y <- xy$y
+      sel <- rep(FALSE, length(x))
+      res <- integer(0)
+      while(sum(sel) < n){
+        ans <- identify(x[!sel], y[!sel], n=1, plot=FALSE, ...)
+        if(!length(ans)) break
+        ans <- which(!sel)[ans]
+        i <- ans
+        segments( x0 = c(x[i], ci1l[i]), y0 = c(ci2l[i], y[i]),
+                  x1 = c(x[i], ci1u[i]), y1 = c(ci2u[i], y[i]),
+                  col = conf$col[i], lty = conf$lty )
+        simple_ellipse( center = c(x[i],y[i]),
+                        a = abs(diff(c(ci1u[i],ci1l[i])))/2,
+                        b = abs(diff(c(ci2u[i],ci2l[i])))/2,
+                        n = 200L, border_col = conf$col[i] )
+        sel[ans] <- TRUE
+        res <- c(res, ans)
+      }
+      #res
+    }
+    identifyEll(beta1[beta.subset],beta2[beta.subset],
+                        ci1u[beta.subset], ci1l[beta.subset], ci2u[beta.subset], ci2l[beta.subset],
+                        v1[beta.subset], v2[beta.subset], conf)
+  }
 
 }
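
The rewritten quantile call in plotGOF.LR is algebraically identical to the old
one, and z times the standard error gives the CI half-widths that
simple_ellipse() uses as semi-axes. A quick check with illustrative numbers:

    gamma <- 0.95
    z.old <- qnorm((gamma + 1)/2)                       # former formulation
    z.new <- qnorm((1 - gamma)/2, lower.tail = FALSE)   # rewritten formulation
    all.equal(z.old, z.new)                             # TRUE; both ~1.96
    0.4 * z.new                                         # half-width for se = 0.4, ~0.78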
diff --git a/R/plotGOF.R b/R/plotGOF.R
old mode 100755
new mode 100644
index ffb6d66..414a5cf
--- a/R/plotGOF.R
+++ b/R/plotGOF.R
@@ -1,3 +1,2 @@
-`plotGOF` <-
-function(x,...)UseMethod("plotGOF")
+plotGOF <- function(x, ...) UseMethod("plotGOF")
 
diff --git a/R/plotGR.R b/R/plotGR.R
old mode 100755
new mode 100644
index dae9b93..1768fa1
--- a/R/plotGR.R
+++ b/R/plotGR.R
@@ -1,54 +1,53 @@
-plotGR <- function(object,...)
-  {
-   #TODO: *Add CI around point estimates
-   require(lattice)
-   itms <- object$itms
-   tps <- object$mpoints
-   pplgrps <- object$ngroups/itms
-   if(pplgrps<2) stop("There are no treatment effects in this analysis.")
-   
-   #treatment effects for all treatment groups at tps>1
-   treat <- object$etapar[1:((pplgrps-1)*itms*(tps-1))]
-   time <- factor(rep(paste("t",2:tps,sep=""),each=itms*(pplgrps-1)))
-   item <- factor(rep(rep(paste("Item",1:itms),each=pplgrps-1),tps-1))
-   names1 <- unique(names(object$groupvec))[1:(length(unique(names(object$groupvec))))-1]
-   #labeling
-   group <- factor(rep(names1,itms*(tps-1)))
-   plotdats1 <- data.frame(treat,group,item,time)
-   
-   #effects (i.e. zeros) for all treatment groups at tp=1
-   treat0 <- rep(0,itms*(pplgrps-1))
-   time0 <- factor(rep("t1",each=itms*(pplgrps-1)))
-   item0 <- factor(rep(paste("Item",1:itms),each=(pplgrps-1)))
-   #labeling
-   group0 <- factor(rep(names1,itms))
-   plotdats0 <- data.frame(treat0,group0,item0,time0)
-   names(plotdats0) <- c("treat","group","item","time")
+plotGR <- function(object,...){
+  #TODO: *Add CI around point estimates
+#  require(lattice) # MJM20141101: lattice is imported
+  itms <- object$itms
+  tps <- object$mpoints
+  pplgrps <- object$ngroups/itms
+  if(pplgrps<2) stop("There are no treatment effects in this analysis.")
+  
+  #treatment effects for all treatment groups at tps>1
+  treat <- object$etapar[1:((pplgrps-1)*itms*(tps-1))]
+  time <- factor(rep(paste("t",2:tps,sep=""),each=itms*(pplgrps-1)))
+  item <- factor(rep(rep(paste("Item",1:itms),each=pplgrps-1),tps-1))
+  names1 <- head(unique(names(object$groupvec)), -1L)   #all group labels except the baseline group
+  #labeling
+  group <- factor(rep(names1,itms*(tps-1)))
+  plotdats1 <- data.frame(treat,group,item,time)
+  
+  #effects (i.e. zeros) for all treatment groups at tp=1
+  treat0 <- rep(0,itms*(pplgrps-1))
+  time0 <- factor(rep("t1",each=itms*(pplgrps-1)))
+  item0 <- factor(rep(paste("Item",1:itms),each=(pplgrps-1)))
+  #labeling
+  group0 <- factor(rep(names1,itms))
+  plotdats0 <- data.frame(treat0,group0,item0,time0)
+  names(plotdats0) <- c("treat","group","item","time")
 
-   #effects (i.e. zeros) for control or baseline group for all tps 
-   treat00 <- rep(0,itms*tps)
-   time00 <- factor(rep(paste("t",1:tps,sep=""),each=itms))
-   item00 <- factor(rep(paste("Item",1:itms),tps))
-   group00 <- factor(rep(unique(names(object$groupvec))[length(unique(names(object$groupvec)))],itms*tps))
-   plotdats00 <- data.frame(treat00,group00,item00,time00)
-   names(plotdats00) <- c("treat","group","item","time")
+  #effects (i.e. zeros) for control or baseline group for all tps 
+  treat00 <- rep(0,itms*tps)
+  time00 <- factor(rep(paste("t",1:tps,sep=""),each=itms))
+  item00 <- factor(rep(paste("Item",1:itms),tps))
+  group00 <- factor(rep(unique(names(object$groupvec))[length(unique(names(object$groupvec)))],itms*tps))
+  plotdats00 <- data.frame(treat00,group00,item00,time00)
+  names(plotdats00) <- c("treat","group","item","time")
 
-   #all together
-   plotdats <- rbind(plotdats00,plotdats0,plotdats1)
+  #all together
+  plotdats <- rbind(plotdats00,plotdats0,plotdats1)
 
-   #plot
-   key.group <- list(space = "right", text = list(levels(plotdats$group)),
-                     points = list(pch = 1:length(levels(plotdats$group)),
-                     col = "black")
-                     )
-   plotout <- xyplot(treat ~ time | item, plotdats,
-                     aspect = "xy", type = "o", 
-                     groups = group, key = key.group,
-                     lty = 1, pch = 1:length(levels(plotdats$group)),
-                     col.line = "darkgrey", col.symbol = "black",
-                     xlab = "Time",
-                     ylab = "Effect", 
-                     main = "Treatment effect plot for LLRA"
-                     )
-   print(plotout)
- }
+  #plot
+  key.group <- list(space = "right", text = list(levels(plotdats$group)),
+                    points = list(pch = 1:length(levels(plotdats$group)),
+                    col = "black")
+                    )
+  plotout <- xyplot(treat ~ time | item, plotdats,
+                    aspect = "xy", type = "o", 
+                    groups = group, key = key.group,
+                    lty = 1, pch = 1:length(levels(plotdats$group)),
+                    col.line = "darkgrey", col.symbol = "black",
+                    xlab = "Time",
+                    ylab = "Effect", 
+                    main = "Treatment effect plot for LLRA"
+                    )
+  print(plotout)
+}
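
A minimal usage sketch for the revised plotGR() (the llraDat2 layout, 20 item
columns followed by the grouping variable in column 21, is an assumption for
illustration, not part of this commit):

    library(eRm)
    llra_fit <- LLRA(llraDat2[, 1:20], mpoints = 4, groups = llraDat2[, 21])
    plotGR(llra_fit)   # lattice plot of treatment effects over time
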
diff --git a/R/plotICC.R b/R/plotICC.R
old mode 100755
new mode 100644
index 98f39e0..272a1ff
--- a/R/plotICC.R
+++ b/R/plotICC.R
@@ -1,3 +1,2 @@
-`plotICC` <-
-function(object,...)UseMethod("plotICC")
+`plotICC` <- function(object, ...) UseMethod("plotICC")
 
diff --git a/R/plotICC.Rm.R b/R/plotICC.Rm.R
old mode 100755
new mode 100644
index 501a095..4180888
--- a/R/plotICC.Rm.R
+++ b/R/plotICC.Rm.R
@@ -1,63 +1,71 @@
-`plotICC.Rm` <-
-function(object, item.subset = "all", empICC = NULL, empCI = NULL, mplot = NULL,    # ask,mplot added rh 2007-12-01
-         xlim = c(-4,4), ylim = c(0,1),
-         xlab = "Latent Dimension", ylab = "Probability to Solve", main=NULL,       # main rh 2010-03-06
-         col = NULL, lty = 1, legpos = "left", ask = TRUE, ...)
-
 # produces ICC plots
 # object of class Rm
+plotICC.Rm <- function(
+  object,
+  item.subset = "all",
+  empICC = NULL,
+  empCI = NULL,
+  mplot = NULL,    # ask,mplot added rh 2007-12-01
+  xlim = c(-4,4),
+  ylim = c(0,1),
+  xlab = "Latent Dimension",
+  ylab = "Probability to Solve",
+  main = NULL,       # main rh 2010-03-06
+  col = NULL,
+  lty = 1,
+  legpos = "left",
+  ask = TRUE,
+  ...)
 {
+  # save and reset original graphics parameters
+  old_par <- par(mar = c(4,4,3,1)+.25, no.readonly = TRUE)
+  on.exit(par(old_par))
+  
+  if(item.subset != "all" && length(item.subset) == 1L) ask <- FALSE
 
   X <- object$X
-  if (is.null(col)) col <- 1:(max(apply(X,2,max,na.rm=TRUE))+1)
+  if(is.null(col)) col <- 1:(max(apply(X, 2L, max, na.rm = TRUE))+1)
   main.arg <- main # rh added 2010-11-23 otherwise always same item in title if NULL
 
-# some sanity checks
-
-if (is.null(empICC)) {
-      emp.plot <- FALSE
-
-} else if (!is.element(empICC[[1]], c("raw","loess","tukey","kernel"))) {
-      ##empirical[[1]] <- "none"
-      emp.plot <- FALSE
-      warning('empICC must be one of "raw","loess","tukey","kernel"!\n')
+  # some sanity checks
 
-} else  if (object$model != "RM") {
-      warning("Empirical ICCs can only be plotted for a dichotomous Rasch model!\n")
+  if(is.null(empICC)){
+    emp.plot <- FALSE
+  } else if(!(empICC[[1L]] %in% c("raw", "loess", "tukey", "kernel"))) {
+    emp.plot <- FALSE
+    warning('empICC must be one of "raw", "loess", "tukey", "kernel"!\n')
+  } else if(object$model != "RM"){
+    warning("Empirical ICCs can only be plotted for a dichotomous Rasch model!\n")
+    emp.plot <- FALSE
+  } else {
+    th.est   <- person.parameter(object)
+    thetapar <- th.est$thetapar
+    if(length(thetapar) != 1) {   # Too complicated with NA groups (each NA group would need separate plots)
+      warning("Empirical ICCs are not produced for different NA groups!\n")
       emp.plot <- FALSE
-
-} else {
-
-      th.est <- person.parameter(object)
-      thetapar <- th.est$thetapar
-      if (length(thetapar)!=1) {      #Too complicated with NA'groups (for each NAgroup separate plots...)
-        warning("Empirical ICCs are not produced for different NA groups!\n")
+    } else {
+      thetapar.u <- unique(round(unlist(thetapar), 5))
+      if(length(thetapar.u) < 4){
+        warning("No empirical ICCs for less the 4 different person parameters!\n")
         emp.plot <- FALSE
       } else {
-        thetapar.u <- unique(round(unlist(thetapar),5))
-        if (length(thetapar.u)<4) {
-            warning("No empirical ICCs for less the 4 different person parameters!\n")
-        emp.plot <- FALSE
-        } else
-            emp.plot <- TRUE
-
+        emp.plot <- TRUE
       }
-}
-
-
+    }
+  }
 
-  theta <- seq(xlim[1],xlim[2],by=0.1)                          #x-axis
-  p.list <- plist.internal(object,theta)                        #matrix of probabilities
+  theta  <- seq(xlim[1], xlim[2], length.out = 201L)   # x-axis
+  p.list <- plist.internal(object, theta)              # matrix of probabilities
   th.ord <- order(theta)
 
-  if (any(item.subset=="all")) {
+  if(any(item.subset=="all")){
     textlab <- colnames(object$X)
-    ivec <- 1:length(p.list)
+    ivec <- seq_along(p.list)
   } else {
-      if (is.character(item.subset)) {                         #item names specified
-      ivectemp <- t(as.matrix(1:length(p.list)))
+    if(is.character(item.subset)){                         #item names specified
+      ivectemp <- matrix(seq_along(p.list), nrow = 1L)
       colnames(ivectemp) <- colnames(object$X)
-      ivec <- ivectemp[,item.subset]
+      ivec <- ivectemp[, item.subset]
       textlab <- item.subset
       textlab[ivec] <- textlab
       it.legend <- item.subset
@@ -68,112 +76,98 @@ if (is.null(empICC)) {
     }
   }
 
-  if (object$model=="RM") {                                     #Rasch model
-    p.list <- lapply(p.list,function(x) {x[,-1]})               #Delete 0-probabilites
-    p.mat <- matrix(unlist(p.list),ncol=length(p.list))         #matrix with solving probabilities
+  if(object$model=="RM"){                                       # Rasch model
+    p.list <- lapply(p.list,function(x) {x[,-1]})               # Delete 0-probabilities
+    p.mat <- matrix(unlist(p.list),ncol=length(p.list))         # matrix with solving probabilities
     text.ylab <- p.mat[(1:length(theta))[theta==median(theta)],]
   }
 
   ## plot for non RMs #################
-  if (object$model != "RM"){
-       if (ask) par("ask"=TRUE)                                 # added rh 2007-12-01
-       if (is.null(mplot))  mplot<-FALSE
-       if (mplot) par(mfrow=c(2,2))
-    for (j in 1:length(ivec)) {                                 # loop for items
-         i <- ivec[j]
-
-       yp <- as.matrix(p.list[[i]])
-       yy <- yp[th.ord,]
-
-       if(is.null(main.arg)) main<-paste("ICC plot for item ",textlab[i])    # rh 2010-03-06
-       matplot(sort(theta),yy,type="l",lty=lty,col=col,
-               #main=paste("ICC plot for item ",textlab[i]),xlim=xlim,  # replaced to allow for user titles rh 2010-03-06
-               main=main, xlim=xlim,
-               ylim=ylim,xlab=xlab,ylab=ylab,...)
-       if (is.character(legpos))
-          legend(legpos,legend=paste(c("Category"),0:(dim(yp)[2]-1)), col=col,lty=lty, ...)  # added rh 2007-12-01
+  if(object$model != "RM"){
+    if(ask) par("ask" = TRUE)                               # added rh 2007-12-01
+    if(is.null(mplot)) mplot <- FALSE
+    if(mplot) par(mfrow = c(2L, 2L))
+
+    for(j in seq_along(ivec)){                                 # loop for items
+      i <- ivec[j]
+
+      yp <- as.matrix(p.list[[i]])
+      yy <- yp[th.ord,]
+
+      if(is.null(main.arg)) main <- paste0("ICC plot for item ", textlab[i])    # rh 2010-03-06
+      matplot(sort(theta),yy,type="l",lty=lty,col=col,
+              #main=paste("ICC plot for item ",textlab[i]),xlim=xlim,  # replaced to allow for user titles rh 2010-03-06
+              main=main, xlim=xlim,
+              ylim=ylim,xlab=xlab,ylab=ylab,...)
+      if(is.character(legpos)) legend(legpos, legend = paste0(c("Category "), 0:(dim(yp)[2]-1)), col = col, lty = lty, ...)  # added rh 2007-12-01
     }
 
   ## plot for  RMs #####################
   } else {
-
-       if (is.null(mplot) && length(ivec)>1)  mplot<-TRUE else mplot<-FALSE  # rh 2010-03-06 no mfrow(2,2) if only 1 item
-       if (mplot) par(mfrow=c(2,2))
-
-       if (ask) par("ask"=TRUE)                       # added rh 2007-12-01
-    for (j in 1:length(ivec)) {                                 #runs over items
-         i <- ivec[j]
-
-       yp <- as.matrix(p.list[[i]])
-       yy <- yp[th.ord,]
-       if(is.null(main.arg)) main<-paste("ICC plot for item ",textlab[i])    # rh 2010-03-06
-       matplot(sort(theta),yy,type="l",lty=lty,col=col,
-               #main=paste("ICC plot for item ",textlab[i]),xlim=xlim,  # replaced to allow for user titles rh 2010-03-06
-               main=main, xlim=xlim,
-               ylim=ylim,xlab=xlab,ylab=ylab,...)
-               ##ylim=ylim,xlab=xlab,ylab=ylab,"ask"=TRUE,...)
-
-       ## empirical ICC
-       if (emp.plot) {
-          freq.table <- as.matrix(table(rowSums(X),X[,i]))
-          rel.freq <- freq.table[,2]/rowSums(freq.table)
-          idx <- as.numeric(rownames(freq.table))
-          xy<-cbind(th.est$pred.list[[1]]$y[idx+1],rel.freq)
-
-
-          if(empICC[[1]]=="loess")
-               if(!is.null(empICC$smooth)) smooth<-empICC$smooth else smooth<-0.75
-          if(empICC[[1]]=="kernel")
-               if(!is.null(empICC$smooth)) smooth<-empICC$smooth else smooth<-0.5
-
-          nn <- rowSums(freq.table)
-          switch(empICC[[1]],
-              "raw"={},
-              "loess"={xy[,2]<-loess(xy[,2]~xy[,1],span=smooth)$fitted},#+;cyf<-cbind(xy[,2] * nn, nn)},
-              "tukey"={xy[,2]<-smooth(xy[,2])},#;cyf<-cbind(xy[,2] * nn, nn)}
-              "kernel"={xy[,2]<-ksmooth(xy[,1],xy[,2],bandwidth=smooth,x.points=xy[,1])[[2]]}
-          )
-          xy[,2] <- ifelse(xy[,2]>1,1,ifelse(xy[,2]<0,0,xy[,2])) # bounding p in [0,1]
-
-          if(is.null(empICC$type)) empICC$type <- "p"
-          if(is.null(empICC$pch)) empICC$pch <- 1
-          if(is.null(empICC$col)) empICC$col <- "black"
-          if(is.null(empICC$lty)) empICC$lty <- "solid"
-
-
-          # confidence intervals for empirical ICC
-          if(!is.null(empCI)) {
-            # functions from prop.test()
-            p.L <- function(x, n, alpha) {
-                if (x <= 0) 0 else qbeta(alpha, x, n - x + 1)}
-            p.U <- function(x, n, alpha) {
-                if (x >= n) 1 else qbeta(1 - alpha, x + 1, n - x)}
-            CINT <- function(x, n, conf.level){
-                alpha <- (1 - conf.level)/2
-                c(p.L(x,n, alpha), p.U(x,n, alpha))
-            }
-
-            if(is.null(empCI$clevel)) empCI$clevel <- 0.95
-            if(is.null(empCI$col)) empCI$col <- "red"
-            if(is.null(empCI$lty)) empCI$lty <- "dotted"
-
-
-            cyf<-cbind(xy[,2] * nn, nn)
-            cy<-apply(cyf,1,function(x) CINT(x[1],x[2],empCI$clevel))
-
-
-            apply(cbind(xy[,1],t(cy)),1,function(x)segments(x[1],x[2],x[1],x[3],lty=empCI$lty,col=empCI$col))
-          }
-
-          # plots the point estimates of the empirical ICC
-          lines(xy[,1],xy[,2],type=empICC$type, pch=empICC$pch, col=empICC$col, lty=empICC$lty, ...)
-
-
-       } # end if(emp.plot)
+    if(is.null(mplot)) mplot <- TRUE       ### FIX MM 2012-03-18
+    if(length(ivec) == 1) mplot <- FALSE   ### FIX MM 2012-03-18
+    if(mplot) par(mfrow = c(2L, 2L))
+    if(ask) par("ask" = TRUE)                       # added rh 2007-12-01
+
+    for(j in seq_along(ivec)){                                 #runs over items
+      i <- ivec[j]
+
+      yp <- as.matrix(p.list[[i]])
+      yy <- yp[th.ord,]
+      if(is.null(main.arg)) main<-paste("ICC plot for item ",textlab[i])    # rh 2010-03-06
+      matplot(sort(theta),yy,type="l",lty=lty,col=col,
+              #main=paste("ICC plot for item ",textlab[i]),xlim=xlim,  # replaced to allow for user titles rh 2010-03-06
+              main=main, xlim=xlim,
+              ylim=ylim,xlab=xlab,ylab=ylab,...)
+              ##ylim=ylim,xlab=xlab,ylab=ylab,"ask"=TRUE,...)
+
+      ## empirical ICC
+      if(emp.plot){
+         freq.table <- as.matrix(table(rowSums(X), X[,i]))
+         rel.freq   <- freq.table[,2]/rowSums(freq.table)
+         idx        <- as.numeric(rownames(freq.table))
+         xy         <- cbind(th.est$pred.list[[1]]$y[idx+1], rel.freq)
+
+         if(empICC[[1]]=="loess")  if(!is.null(empICC$smooth)) smooth <- empICC$smooth else smooth <- 0.75
+         if(empICC[[1]]=="kernel") if(!is.null(empICC$smooth)) smooth <- empICC$smooth else smooth <- 0.5
+
+         nn <- rowSums(freq.table)
+         switch(empICC[[1]],
+           "raw"={},
+           "loess"={xy[,2]<-loess(xy[,2]~xy[,1],span=smooth)$fitted},#+;cyf<-cbind(xy[,2] * nn, nn)},
+           "tukey"={xy[,2]<-smooth(xy[,2])},#;cyf<-cbind(xy[,2] * nn, nn)}
+           "kernel"={xy[,2]<-ksmooth(xy[,1],xy[,2],bandwidth=smooth,x.points=xy[,1])[[2]]}
+         )
+         xy[,2] <- ifelse(xy[,2] > 1, 1, ifelse(xy[,2] < 0, 0, xy[,2])) # bounding p in [0,1]
+
+         if(is.null(empICC$type)) empICC$type <- "p"
+         if(is.null(empICC$pch)) empICC$pch <- 1
+         if(is.null(empICC$col)) empICC$col <- "black"
+         if(is.null(empICC$lty)) empICC$lty <- "solid"
+
+         # confidence intervals for empirical ICC
+         if(!is.null(empCI)) {
+           # functions from prop.test()
+           p.L <- function(x, n, alpha){ if (x <= 0) 0 else qbeta(alpha, x, n - x + 1) }
+           p.U <- function(x, n, alpha){ if (x >= n) 1 else qbeta(1 - alpha, x + 1, n - x) }
+           CINT <- function(x, n, conf.level){
+             alpha <- (1 - conf.level)/2
+             c(p.L(x,n, alpha), p.U(x,n, alpha))
+           }
+
+           if(is.null(empCI$clevel)) empCI$clevel <- 0.95
+           if(is.null(empCI$col)) empCI$col <- "red"
+           if(is.null(empCI$lty)) empCI$lty <- "dotted"
+
+           cyf <- cbind(xy[,2L]*nn, nn)
+           cy  <- apply(cyf, 1L, function(x){ CINT(x[1L], x[2L], empCI$clevel) })
+
+           apply(cbind(xy[,1L], t(cy)), 1L, function(x){ segments(x[1L],x[2L],x[1L],x[3L],lty=empCI$lty,col=empCI$col) })
+         }
+
+         # plots the point estimates of the empirical ICC
+         lines(xy[,1], xy[,2], type = empICC$type, pch = empICC$pch, col = empICC$col, lty = empICC$lty, ...)
+      } # end if(emp.plot)
     }
   }
-  ## reset graphics parameters
-  par("ask"=FALSE) # added rh 2007-12-01
-  par(mfrow=c(1,1))
 }
-
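
A usage sketch for the reworked plotICC() with an empirical ICC overlay; the
smoothing span and item subset are illustrative values:

    library(eRm)
    res <- RM(raschdat1)                           # dichotomous Rasch model
    plotICC(res, item.subset = 1:4, mplot = TRUE, ask = FALSE,
            empICC = list("loess", smooth = 0.8),  # or "raw"/"tukey"/"kernel"
            empCI  = list(clevel = 0.95))          # pointwise beta-based CIs
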
diff --git a/R/plotINFO.R b/R/plotINFO.R
new file mode 100644
index 0000000..0206b90
--- /dev/null
+++ b/R/plotINFO.R
@@ -0,0 +1,29 @@
+plotINFO <- function(ermobject, type = "both", theta = seq(-6, 6, length.out = 1001L), ...){
+
+  ddd <- list(...)
+  get_dots <- function(element, default){ ifelse(!is.null(ddd[[element]]), ddd[[element]], default) }
+  extraVars <- c("mainI", "mainT", "ylabI", "ylabT", "xlab", "legpos")
+  if(any(!(names(ddd) %in% extraVars))){ warning("additional argument(s) ignored.") }
+
+  type <- match.arg(type, c("item", "test", "both"))
+
+  if(type == "both"){
+    old_pars <- par(mfrow=c(2L, 1L), no.readonly = TRUE)
+    on.exit(par(old_pars))
+  }
+
+  if(type %in% c("item", "both")){
+    iinfo <- item_info(ermobject, theta)
+    info <- lapply(iinfo, function(x) x$i.info)
+    pltinfo <- matrix(unlist(info), ncol = ncol(ermobject$X))
+    matplot(x = theta, y = pltinfo, type = "l", main = get_dots("mainI", "Item Information"), xlab = get_dots("xlab", "Latent Trait"), ylab = get_dots("ylabI", "Information"))
+    itmnames <- paste("Item", seq_len(ncol(ermobject$X)))
+    legend(x = get_dots("legpos", "topright"), legend = itmnames, pch = NULL, lty = 1:5, col = 1:6)
+  }
+
+  if(type %in% c("item", "both")){
+    tinfo <- test_info(ermobject, theta)
+    plot(x = theta, y = tinfo, type = "l", main = get_dots("mainT", "Test Information"), xlab = get_dots("xlab", "Latent Trait"), ylab = get_dots("ylabT", "Scale Information"))
+  }
+
+}
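
A sketch of the new plotINFO(); mainI/mainT/ylabI/ylabT/xlab/legpos are the
only dot-arguments the function inspects:

    library(eRm)
    res <- RM(raschdat1)
    plotINFO(res)                     # item information plus test information
    plotINFO(res, type = "item", legpos = "topright")
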
diff --git a/R/plotPImap.R b/R/plotPImap.R
old mode 100755
new mode 100644
diff --git a/R/plotPWmap.R b/R/plotPWmap.R
old mode 100755
new mode 100644
index 6b5eb08..e24a7b5
--- a/R/plotPWmap.R
+++ b/R/plotPWmap.R
@@ -5,7 +5,7 @@ function(object, pmap=FALSE, imap=TRUE, item.subset="all", person.subset="all",
                  tlab="Infit t statistic", pp=NULL, cex.gen=0.6, cex.pch=1,
                  person.pch=1, item.pch=16, personCI=NULL, itemCI=NULL, horiz=FALSE)
 {
-  def.par <- par(no.readonly = TRUE) ## save default, for resetting...
+#mjm  def.par <- par(no.readonly = TRUE) ## save default, for resetting...
 
   ## Pathway map currently only for RM, PCM and RSM
 
@@ -50,25 +50,43 @@ function(object, pmap=FALSE, imap=TRUE, item.subset="all", person.subset="all",
     tl<-tlevels
   }
 
-  if (is.null(pp))
+  if(is.null(pp)){
     suppressWarnings(pp<-person.parameter(object))
-  else if (class(pp) != "ppar" || !identical(pp$X,object$X))
+  } else if(class(pp) != "ppar" || !identical(pp$X,object$X)){
     stop("pp is not a person.parameter object which matches the main Rasch data object!")
+  }
+  
 
 
   ## We will be plotting the infit data versus the parameters for
   ## both items and persons
-  iloc<-tt[,1]
-  ise<-tt[,2]
-  ifit <- itemfit(pp)
+  # item fit
+  iloc  <- tt[, 1L]
+  ise   <- tt[, 2L]
+  ifit  <- itemfit(pp)
   ifitZ <- ifit$i.infitZ
 
-  ploc <- as.matrix(pp$theta.table['Person Parameter'])[,1]
-  pse <- unlist(pp$se.theta, recursive=FALSE)
+  # person fit
+  pfit       <- personfit(pp)
+  pfitZ      <- pfit$p.infitZ
+  if(length(pfit$excl_obs_num) > 0L){                  # mjm 2014-09-17
+    temp_namevec <- pfit$excl_obs_chr                  #
+    for(ex_pers in pfit$excl_obs_num){                 # workaround: add deleted persons and their names
+      pfitZ <- append(pfitZ, NA, ex_pers - 1L)         #
+      names(pfitZ)[ex_pers] <- temp_namevec[ex_pers]   #
+    }                                                  #
+    rm(temp_namevec)                                   #
+  }                                                    #
+  
+  ploc       <- as.matrix(pp$theta.table['Person Parameter'])[,1]
+
+  if(length(pfit$excl_obs_num) > 0L){   # mjm 2014-09-17
+    ploc[pfit$excl_obs_num] <- NA       # set all parameters of excluded obs NA
+  }                                     #
+  
+  pse        <- unlist(pp$se.theta, recursive=FALSE)
   names(pse) <- sub("^NAgroup[0-9]*\\.","",names(pse))
-  pse <- pse[names(ploc)]
-  pfit <- personfit(pp)
-  pfitZ <- pfit$p.infitZ
+  pse        <- pse[names(ploc)]
 
   ## We can now do item and person subsetting; the item subsetting is
   ## pretty ugly as there are multiple cases.  (We dare not do it earlier
@@ -86,16 +104,17 @@ function(object, pmap=FALSE, imap=TRUE, item.subset="all", person.subset="all",
         stop("item.subset misspecified. Use 'all' or vector of at least two valid item indices/names.")
     } else {
       if (length(item.subset)>1 && all(item.subset %in% rownames(as.matrix(tl)))) {
-        iloc  <- iloc[item.subset]
-        ise   <- ise[item.subset]
-        ifitZ <- ifitZ[item.subset]
         tl    <- tl[item.subset]
-
+        keep.subset <- c()
         for (i in rownames(as.matrix(tl)))
           if (tl[i]==1)
             keep.subset<-c(keep.subset,i)
           else
             keep.subset<-c(keep.subset,paste(i,1:tl[i],sep=":"))
+
+        iloc  <- iloc[keep.subset]
+        ise   <- ise[keep.subset]
+        ifitZ <- ifitZ[item.subset]
         tt<-tt[keep.subset,]
       }
       else if(length(item.subset)!=1 || !(item.subset=="all"))
@@ -115,16 +134,17 @@ function(object, pmap=FALSE, imap=TRUE, item.subset="all", person.subset="all",
     }
     else {
       if (length(item.subset)>1 && all(item.subset %in% 1:length(tl))) {
-        iloc  <- iloc[item.subset]
-        ise   <- ise[item.subset]
-        ifitZ <- ifitZ[item.subset]
         tl    <- tl[item.subset]
-
+        keep.subset <- c()
         for (i in rownames(as.matrix(tl)))
           if (tl[i]==1)
             keep.subset<-c(keep.subset,i)
           else
             keep.subset<-c(keep.subset,paste(i,1:tl[i],sep=":"))
+
+        iloc  <- iloc[keep.subset]
+        ise   <- ise[keep.subset]
+        ifitZ <- ifitZ[item.subset]
         tt<-tt[keep.subset,]
       }
       else
@@ -145,7 +165,7 @@ function(object, pmap=FALSE, imap=TRUE, item.subset="all", person.subset="all",
       stop("person.subset misspecified. Use 'all' or vector of at least two valid person indices/names.")
   } else if (pmap) {
     ## Case 2: person subsetting by person numbers
-    if (length(person.subset)>1 && all(person.subset %in% 1:length(ploc))) {
+    if (length(person.subset)>1 && all(person.subset %in% seq_along(ploc))) {
       ploc  <- ploc[person.subset]
       pse   <- pse[person.subset]
       pfitZ <- pfitZ[person.subset]
@@ -159,8 +179,8 @@ function(object, pmap=FALSE, imap=TRUE, item.subset="all", person.subset="all",
   ## Need defaults for multiple of standard error for purpose of range
   ## calculation; these are zero as default is not to draw confidence
   ## intervals
-  pci=0
-  ici=0
+  pci <- 0
+  ici <- 0
 
   ## Our calculation is simplistic; we use the normal distribution to
   ## estimate our confidence interval from our standard error.  However,
@@ -182,20 +202,20 @@ function(object, pmap=FALSE, imap=TRUE, item.subset="all", person.subset="all",
   ## Now we can plot the Pathway Map
 
   if (pmap) { ## person map
-    xrange.pmap <- range(pfitZ,na.rm=TRUE)
+    xrange.pmap <- range(pfitZ,finite=TRUE)
     xrange.pmap[1] <- min(-2.5,xrange.pmap[1])
     xrange.pmap[2] <- max(2.5,xrange.pmap[2]+1) ## need space for labels
-    yrange.pmap<-range(ploc,na.rm=TRUE)
-    yrange.pmap[1]<-yrange.pmap[1]-pci*max(pse)
-    yrange.pmap[2]<-yrange.pmap[2]+pci*max(pse)
+    yrange.pmap<-range(ploc,finite=TRUE)
+    yrange.pmap[1]<-yrange.pmap[1]-pci*max(pse, na.rm=TRUE)
+    yrange.pmap[2]<-yrange.pmap[2]+pci*max(pse, na.rm=TRUE)
   }
   if (imap) { ## item map
-    xrange.imap <- range(ifitZ,na.rm=TRUE)
+    xrange.imap <- range(ifitZ,finite=TRUE)
     xrange.imap[1] <- min(-2.5,xrange.imap[1])
     xrange.imap[2] <- max(2.5,xrange.imap[2]+1) ## need space for labels
-    yrange.imap<-range(iloc,na.rm=TRUE)
-    yrange.imap[1]<-yrange.imap[1]-ici*max(ise)
-    yrange.imap[2]<-yrange.imap[2]+ici*max(ise)
+    yrange.imap<-range(iloc,finite=TRUE)
+    yrange.imap[1]<-yrange.imap[1]-ici*max(ise, na.rm=TRUE)
+    yrange.imap[2]<-yrange.imap[2]+ici*max(ise, na.rm=TRUE)
   }
 
   if (pmap && !imap) {
@@ -206,27 +226,27 @@ function(object, pmap=FALSE, imap=TRUE, item.subset="all", person.subset="all",
     xrange <- xrange.imap
     yrange <- yrange.imap
     maintitle <- mainitem
-  } else {
+  } else if(pmap && imap){
     xrange <- numeric(2)
     yrange <- numeric(2)
-    xrange[1] <- min(xrange.pmap[1], xrange.imap[1])
-    xrange[2] <- max(xrange.pmap[2], xrange.imap[2])
-    yrange[1] <- min(yrange.pmap[1], yrange.imap[1])
-    yrange[2] <- max(yrange.pmap[2], yrange.imap[2])
+    xrange[1] <- min(xrange.pmap[1], xrange.imap[1], na.rm = TRUE)
+    xrange[2] <- max(xrange.pmap[2], xrange.imap[2], na.rm = TRUE)
+    yrange[1] <- min(yrange.pmap[1], yrange.imap[1], na.rm = TRUE)
+    yrange[2] <- max(yrange.pmap[2], yrange.imap[2], na.rm = TRUE)
     maintitle <- mainboth
+  } else {
+    stop("error determining plot ranges.")
   }
 
 
-  par(mar=c(5,4,4,2))
+#mjm  par(mar=c(5,4,4,2))
 
   if (!horiz){  # rh 2010-12-09
-    plot(xrange,yrange, xlim=xrange, ylim=yrange, main=maintitle,
-         ylab=latdim, xlab=tlab, type="n")
-    abline(v=c(-2,2),col="lightgreen")
+    plot(xrange, yrange, xlim = xrange, ylim = yrange, main = maintitle, ylab = latdim, xlab = tlab, type = "n")
+    abline(v = c(-2, 2), col = "lightgreen")
   } else {
-    plot(yrange,xrange, xlim=yrange, ylim=xrange, main=maintitle,
-         ylab=tlab, xlab=latdim, type="n")
-    abline(h=c(-2,2),col="lightgreen")
+    plot(yrange, xrange, xlim = yrange, ylim = xrange, main = maintitle, ylab = tlab, xlab = latdim, type = "n")
+    abline(h = c(-2, 2), col = "lightgreen")
   }
 
   if (pmap) { ## person map
@@ -255,21 +275,21 @@ function(object, pmap=FALSE, imap=TRUE, item.subset="all", person.subset="all",
       zt <- rep(ifitZ,times=tl)
 
     if (!horiz){
-       if (ici>0) ## draw confidence intervals
-         arrows(zt,iloc+ici*ise, zt,iloc-ici*ise, angle=90, code=3, length=0.04,
-              col=itemCI$col, lty=itemCI$lty)
-       points(zt,iloc,pch=item.pch,cex=cex.pch)
-       text(zt,iloc,rownames(tt),cex=cex.gen,pos=4)
+      if (ici>0) ## draw confidence intervals
+        arrows(zt,iloc+ici*ise, zt,iloc-ici*ise, angle=90, code=3, length=0.04,
+               col=itemCI$col, lty=itemCI$lty)
+      points(zt,iloc,pch=item.pch,cex=cex.pch)
+      text(zt,iloc,rownames(tt),cex=cex.gen,pos=4)
     } else {
-       if (ici>0) ## draw confidence intervals
-         arrows(iloc+ici*ise, zt,iloc-ici*ise,zt, angle=90, code=3, length=0.04,
-              col=itemCI$col, lty=itemCI$lty)
+      if (ici>0) ## draw confidence intervals
+        arrows(iloc+ici*ise, zt,iloc-ici*ise,zt, angle=90, code=3, length=0.04,
+               col=itemCI$col, lty=itemCI$lty)
       points(iloc, zt,pch=item.pch,cex=cex.pch)
       text(iloc,zt, rownames(tt),cex=cex.gen,pos=4)
     }
   }
 
-  par(def.par)
+#mjm  par(def.par)
 
   invisible(NULL)
 }
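
Sketch: a pathway map with a precomputed person.parameter object, which the
function reuses instead of refitting (pp must match the model's data):

    library(eRm)
    res <- RM(raschdat1)
    pp  <- person.parameter(res)
    plotPWmap(res, pp = pp, pmap = TRUE, imap = TRUE)
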
diff --git a/R/plotTR.R b/R/plotTR.R
old mode 100755
new mode 100644
index 77fe609..293ba68
--- a/R/plotTR.R
+++ b/R/plotTR.R
@@ -1,31 +1,29 @@
-
-plotTR <-function(object,...)
-  {
-   #TODO : *Add CI around point estimates
-   require(lattice)
-   #plot trend over time for all items
-   itms <- object$itms
-   tps <- object$mpoints
-   pplgrps <- object$ngroups/itms
-   trend <- object$etapar[((pplgrps-1)*itms*(tps-1)+1):((pplgrps-1)*itms*(tps-1)+(itms*(tps-1)))]
-   tips <-rep(paste("t",1:tps,sep=""),each=itms)
-   items <- rep(paste("Item",1:itms),tps)
-   tr0 <- rep(0,itms)
-   trend <- c(tr0,trend)
-   plotdats <- data.frame(trend,items,tips)
-   key.items <- list(space = "right", text = list(levels(plotdats$items)),
-                    points = list(pch = 1:length(levels(plotdats$items)),
-                    col = "black")
-                    )
-  plotout <- xyplot(trend~tips,data=plotdats,
-                    aspect="fill", type="o",
-                    groups=items, 
-                    key=key.items,
-                    lty=1,pch = 1:length(levels(plotdats$items)),
-                    col.line = "darkgrey", col.symbol = "black",
-                    xlab = "Time",
-                    ylab = "Effect", 
-                    main = "Trend effect plot for LLRA"
-                    )
-   print(plotout)
- }
+plotTR <-function(object,...){
+  #TODO : *Add CI around point estimates
+#  require(lattice) # MJM20141101: lattice is imported
+  #plot trend over time for all items
+  itms <- object$itms
+  tps <- object$mpoints
+  pplgrps <- object$ngroups/itms
+  trend <- object$etapar[((pplgrps-1)*itms*(tps-1)+1):((pplgrps-1)*itms*(tps-1)+(itms*(tps-1)))]
+  tips <-rep(paste("t",1:tps,sep=""),each=itms)
+  items <- rep(paste("Item",1:itms),tps)
+  tr0 <- rep(0,itms)
+  trend <- c(tr0,trend)
+  plotdats <- data.frame(trend,items,tips)
+  key.items <- list(space = "right", text = list(levels(plotdats$items)),
+                   points = list(pch = 1:length(levels(plotdats$items)),
+                   col = "black")
+                   )
+  plotout <- xyplot(trend~tips,data=plotdats,
+                   aspect="fill", type="o",
+                   groups=items, 
+                   key=key.items,
+                   lty=1,pch = 1:length(levels(plotdats$items)),
+                   col.line = "darkgrey", col.symbol = "black",
+                   xlab = "Time",
+                   ylab = "Effect", 
+                   main = "Trend effect plot for LLRA"
+                   )
+  print(plotout)
+}
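
Companion sketch to plotGR() above, trend effects from the same (assumed)
LLRA fit:

    library(eRm)
    llra_fit <- LLRA(llraDat2[, 1:20], mpoints = 4, groups = llraDat2[, 21])
    plotTR(llra_fit)   # lattice plot of trend effects over time
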
diff --git a/R/plotjointICC.R b/R/plotjointICC.R
old mode 100755
new mode 100644
index ad9d5fb..aba9e5b
--- a/R/plotjointICC.R
+++ b/R/plotjointICC.R
@@ -1,3 +1 @@
-`plotjointICC` <-
-function(object,...)UseMethod("plotjointICC")
-
+plotjointICC <- function(object, ...) UseMethod("plotjointICC")
diff --git a/R/plotjointICC.dRm.R b/R/plotjointICC.dRm.R
old mode 100755
new mode 100644
index 66c8aac..bb33394
--- a/R/plotjointICC.dRm.R
+++ b/R/plotjointICC.dRm.R
@@ -1,23 +1,30 @@
-`plotjointICC.dRm` <-
-function(object, item.subset = "all", legend=TRUE, xlim=c(-4,4),ylim=c(0,1),
-         xlab="Latent Dimension",ylab="Probability to Solve",lty=1,legpos="left",
-         main="ICC plot",col=NULL,...)
-
-
-#produces one common ICC plot for Rasch models only
-#object of class "dRm"
-#item.subset...specify items that have to be plotted; if NA, all items are used
-#legend...if legend should be plotted
-
-{
-  theta <- seq(xlim[1],xlim[2],by=0.1)
-
-  if (any(item.subset=="all")) {
+plotjointICC.dRm <- function(
+  object,
+  item.subset = "all",
+  legend = TRUE,
+  xlim = c(-4, 4),
+  ylim = c(0, 1),
+  xlab = "Latent Dimension",
+  ylab = "Probability to Solve",
+  lty = 1,
+  legpos="topleft",
+  main = "ICC plot",
+  col = NULL,
+  ...
+){
+# produces one common ICC plot for Rasch models only
+# object of class "dRm"
+# item.subset...specify items that have to be plotted; if NA, all items are used
+# legend...if legend should be plotted
+
+  theta <- seq(xlim[1L], xlim[2L], length.out = 201L)
+
+  if(any(item.subset=="all")){
     it.legend <- 1:dim(object$X)[2]
   } else {
-    if (is.character(item.subset)) {
+    if(is.character(item.subset)){
       it.legend <- item.subset
-      betatemp <- t(as.matrix(object$betapar))
+      betatemp  <- t(as.matrix(object$betapar))
       colnames(betatemp) <- colnames(object$X)
       object$betapar <- betatemp[,item.subset]
     } else {
@@ -27,28 +34,28 @@ function(object, item.subset = "all", legend=TRUE, xlim=c(-4,4),ylim=c(0,1),
     object$X <- object$X[,item.subset]                            #pick out items defined in itemvec
   }
 
-
   th.ord <- order(theta)
 
-  p.list <- plist.internal(object,theta)
-  p.list <- lapply(p.list,function(x) {x[,-1]})               #Delete 0-probabilites
-  p.mat <- matrix(unlist(p.list),ncol=length(p.list))
+  p.list <- plist.internal(object, theta)
+  p.list <- lapply(p.list, function(x){ x[,-1L] })               #Delete 0-probabilities
+  p.mat  <- matrix(unlist(p.list), ncol = length(p.list))
   text.ylab <- p.mat[(1:length(theta))[theta==median(theta)],]
 
-  #dev.new()
-
   if(is.null(main)) main=""
   if(is.null(col)) col=1:(dim(p.mat)[2])
   #pmICCs<-cbind(sort(theta),p.mat[th.ord,])
   matplot(sort(theta),p.mat[th.ord,],type="l",lty=lty,col=col,
           main=main,xlim=xlim,ylim=ylim,xlab=xlab,ylab=ylab,...)
-  if (is.character(legpos)){
-     if (!legend) {
-         #text(x=median(theta),y=text.ylab,labels=paste("I",1:(dim(p.mat)[2]),sep=""),col=1:(dim(p.mat)[2]))
-         text(x=median(theta),y=text.ylab,labels=it.legend,col=1:(dim(p.mat)[2]))
-     } else {
-         legend(legpos,legend=paste("Item",it.legend),col=1:(dim(p.mat)[2]),lty=lty,...)
-     }
+  if(length(object$betapar)>20) old_par <- par(cex=0.7) else old_par <- par(cex=1)
+  on.exit(par(old_par))
+  
+  if(is.character(legpos)){
+    if(!legend){
+      sq <- seq(0.65,0.35,length.out=length(object$betapar))
+      x  <- qlogis(sq,sort(-object$betapar))
+      text(x=x,y=sq,labels=it.legend[order(-object$betapar)],col=col[order(-object$betapar)],...)
+    } else {
+      legend(legpos,legend=paste("Item",it.legend[order(-object$betapar)]),lty=lty, col=col[order(-object$betapar)],...)
+    }
   }
 }
-
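
Sketch for the revised joint ICC plot: with legend = FALSE the items are now
labelled directly on the curves, ordered by difficulty:

    library(eRm)
    res <- RM(raschdat1)
    plotjointICC(res, item.subset = 1:6, legpos = "topleft")
    plotjointICC(res, item.subset = 1:6, legend = FALSE)  # labels on curves
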
diff --git a/R/pmat.R b/R/pmat.R
old mode 100755
new mode 100644
diff --git a/R/pmat.default.R b/R/pmat.default.R
old mode 100755
new mode 100644
diff --git a/R/pmat.ppar.R b/R/pmat.ppar.R
old mode 100755
new mode 100644
diff --git a/R/prediction.R b/R/prediction.R
old mode 100755
new mode 100644
diff --git a/R/print.ICr.r b/R/print.ICr.r
old mode 100755
new mode 100644
diff --git a/R/print.LR.R b/R/print.LR.R
old mode 100755
new mode 100644
diff --git a/R/print.MLoef.r b/R/print.MLoef.r
old mode 100755
new mode 100644
diff --git a/R/print.eRm.R b/R/print.eRm.R
old mode 100755
new mode 100644
index 6d8cd88..a67ffa7
--- a/R/print.eRm.R
+++ b/R/print.eRm.R
@@ -10,7 +10,11 @@ function(x,...)  {                                         #print method for all
   cat("Number of parameters:", x$npar, "\n")
   cat("\n")
   if (x$model %in% c("RM","RSM","PCM"))                    #eta parameters
+    if(is.null(x$call$W)){                                 # labelling based on whether W was specified mm 2012-05-02
       cat("Item (Category) Difficulty Parameters (eta):")  # new labelling rh 25-03-2010
+    } else {
+      cat("Item (Category) Parameters (eta):\nBased on design matrix W =", deparse(x$call$W))
+    }
   else                                                     # now difficulty for RM, RSM, PCM
       cat("Basic Parameters eta:")
   cat("\n")
diff --git a/R/print.ifit.R b/R/print.ifit.R
old mode 100755
new mode 100644
diff --git a/R/print.llra.R b/R/print.llra.R
old mode 100755
new mode 100644
diff --git a/R/print.logLik.eRm.R b/R/print.logLik.eRm.R
old mode 100755
new mode 100644
diff --git a/R/print.logLik.ppar.r b/R/print.logLik.ppar.r
old mode 100755
new mode 100644
diff --git a/R/print.pfit.R b/R/print.pfit.R
old mode 100755
new mode 100644
diff --git a/R/print.ppar.R b/R/print.ppar.R
old mode 100755
new mode 100644
diff --git a/R/print.resid.R b/R/print.resid.R
old mode 100755
new mode 100644
diff --git a/R/print.step.r b/R/print.step.r
old mode 100755
new mode 100644
diff --git a/R/print.summary.llra.R b/R/print.summary.llra.R
old mode 100755
new mode 100644
diff --git a/R/print.threshold.r b/R/print.threshold.r
old mode 100755
new mode 100644
diff --git a/R/print.wald.R b/R/print.wald.R
old mode 100755
new mode 100644
diff --git a/R/residuals.ppar.R b/R/residuals.ppar.R
old mode 100755
new mode 100644
diff --git a/R/rsampler.R b/R/rsampler.R
new file mode 100644
index 0000000..05e30a3
--- /dev/null
+++ b/R/rsampler.R
@@ -0,0 +1,48 @@
+"rsampler" <-
+function(inpmat,controls=rsctrl()){
+
+   if (!(class(controls)=="RSctr"))
+         stop("controls is not a control object - see help(\"rsctrl\")")
+
+   n       <- dim(inpmat)[1]
+   k       <- dim(inpmat)[2]
+   burn_in <- controls$burn_in
+   n_eff   <- controls$n_eff
+   step  <- controls$step
+   seed    <- controls$seed
+   tfixed  <- controls$tfixed
+
+   if (seed == 0) {
+      # generates random seed in the range [536870911,772830910]
+      seed <- as.integer(as.double(format(Sys.time(), "%H%M%OS3"))*1000) +
+                   2**29 - 1
+   }
+
+   # allocation of memory for simulated matrices
+   vec<-vector( length = (n_eff+1)*n*trunc((k+31)/32) )
+   ier<-0
+
+   # calls the external Fortran subroutine sampler
+   # simulated matrices are returned in vec
+   RET<-.Fortran("sampler",
+               n=as.integer(n),
+               k=as.integer(k),
+               inpmat=as.integer(inpmat),
+               tfixed=as.logical(tfixed),
+               burn_in=as.integer(burn_in),
+               n_eff=as.integer(n_eff),
+               step=as.integer(step),
+               seed=as.integer(seed),
+               outvec=as.integer(vec),
+               ier=as.integer(ier)
+   )
+   n_tot <- n_eff+1
+   if (RET$ier>0) {
+         rserror(RET$ier)
+   } else {
+         RET<-c(RET[1:8],n_tot=n_eff+1,RET[9:10])
+         class(RET)<-"RSmpl"
+         RET
+   }
+}
+
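
Usage sketch for the sampler (xmpl is the binary example matrix added with
this version; the control values are illustrative):

    library(eRm)
    data(xmpl)
    ctr <- rsctrl(burn_in = 100, n_eff = 50, step = 16, seed = 0)
    sim <- rsampler(as.matrix(xmpl), ctr)
    summary(sim)   # uses the new summary.RSmpl() method below
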
diff --git a/R/rsctrl.R b/R/rsctrl.R
new file mode 100644
index 0000000..57d10ca
--- /dev/null
+++ b/R/rsctrl.R
@@ -0,0 +1,15 @@
+"rsctrl" <-
+function(burn_in=100, n_eff=100, step=16,
+                 seed=0, tfixed=FALSE)
+{   ier <- 0
+    if(n_eff < 0 | n_eff > 8191)    ier <- ier + 4
+    if(burn_in < 0)                 ier <- ier + 8
+    if(step <= 0)                   ier <- ier + 16
+    if(seed < 0 | seed > (2**31-2)) ier <- ier + 128
+    if (ier>0) rserror(ier)
+    RET<-list(burn_in=burn_in, n_eff=n_eff, step=step,
+              seed=seed, tfixed=tfixed)
+    class(RET)<-"RSctr"
+    RET
+}
+
diff --git a/R/rserror.R b/R/rserror.R
new file mode 100644
index 0000000..0bec7e5
--- /dev/null
+++ b/R/rserror.R
@@ -0,0 +1,32 @@
+"rserror" <-
+function(err)
+{
+    bin2int<-function(x){
+      r<-vector(mode="integer",length=0)
+      while(x>1){
+        r<-c(x%%2,r)
+        x<-floor(x/2)
+      }
+      r<-c(1,r)
+      rev(r)
+    }
+    errortxt<-c("\tn < 0 or n > 4096\n",
+                "\tk < 0 or k > 128\n",
+                "\tn_eff < 0 or n_eff > 8191\n",
+                "\tburn_in < 0\n",
+                "\tstep <= 0\n",
+                "\tone or more entries in the input matrix are different from 1 or 0\n",
+                "\tthe input matrix is of Guttman form; the sample space has only one element\n",
+                "\tseed < 0 or seed > 2147483646\n"
+                )
+
+    if (err == 0) {
+      cat("\nno error\n")
+    } else {
+      x <- bin2int(err)
+      errstring <- paste("\n",paste(errortxt[(1:length(x))*x],sep="",collapse=""))
+      stop(errstring, call.=FALSE)
+    }
+    invisible(err)
+}
+
diff --git a/R/rsextrmat.R b/R/rsextrmat.R
new file mode 100644
index 0000000..858559f
--- /dev/null
+++ b/R/rsextrmat.R
@@ -0,0 +1,15 @@
+"rsextrmat" <-
+function(RSobj, mat.no = 1)
+{
+    obj.name <- deparse(substitute(RSobj))
+    if (!(class(RSobj)=="RSmpl" || class(RSobj)=="RSmplext")){
+         err.text<-paste(obj.name," is not a sample object - see help(\"rsextrobj\")",sep ="",collapse="")
+         stop(err.text)
+    }
+    if(mat.no > RSobj$n_tot)
+         stop("\n\tElement ",mat.no," not available (",obj.name," has ", RSobj$n_tot, " elements).")
+    obj<-rsextrobj(RSobj, start = mat.no, end = mat.no)
+    RET<-rstats(obj, function(x) matrix(x, nrow = obj$n))[[1]]
+    RET
+}
+
diff --git a/R/rsextrobj.R b/R/rsextrobj.R
new file mode 100644
index 0000000..0081713
--- /dev/null
+++ b/R/rsextrobj.R
@@ -0,0 +1,32 @@
+"rsextrobj" <-
+function(RSobj,start=1,end=8192)
+{
+    obj.name <- deparse(substitute(RSobj))
+    if (!(class(RSobj)=="RSmpl" || class(RSobj)=="RSmplext")){
+         err.text<-paste(obj.name,"not a sample object - see help(\"rsextrobj\")",sep ="",collapse="")
+         stop(err.text)
+    }
+
+    n_tot  <- RSobj$n_tot
+    if (end>n_tot) end<-n_tot
+    n      <- RSobj$n
+    k      <- RSobj$k
+    nwords <- c(trunc((k+31)/32))
+
+    objnew <- RSobj
+    l_one_mat <- n*nwords
+    b <- (start-1)*l_one_mat+1
+    e <- end*l_one_mat
+    objnew$outvec <- RSobj$outvec[b:e]
+    objnew$n_tot <- end-start+1
+    if (start==1) {
+         objnew$n_eff <- objnew$n_tot - 1
+    } else {
+         objnew$n_eff <- objnew$n_tot
+    }
+    class(objnew)="RSmplext"
+
+    RET<-objnew
+    RET
+}
+
diff --git a/R/rstats.R b/R/rstats.R
new file mode 100644
index 0000000..b771856
--- /dev/null
+++ b/R/rstats.R
@@ -0,0 +1,25 @@
+"rstats" <-
+function(RSobj,userfunc,...)
+{
+    obj.name <- deparse(substitute(RSobj))
+    if (!(class(RSobj)=="RSmpl" || class(RSobj)=="RSmplext")){
+         err.text<-paste(obj.name," is not a sample object - see help(\"rsextrobj\")",sep ="",collapse="")
+         stop(err.text)
+    }
+
+    # extracts simulated matrices into three dimensional array sim
+    n_tot  <- RSobj$n_tot
+    n      <- RSobj$n
+    k      <- RSobj$k
+    nwords <- c(trunc((k+31)/32))
+
+    # store coded simulated matrices in list with n_eff+1 elements
+    sim<-split(RSobj$outvec,gl(n_tot,n*nwords))
+
+
+    # decode simulated matrices and apply user function
+    #RET<-unlist(lapply(sim,rsunpack,n,k,nwords,userfunc))
+    RET<-lapply(sim,rsunpack,n,k,nwords,userfunc,...)
+    RET
+}
+
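
Sketch: applying a user statistic to every sampled matrix. Column sums make a
handy sanity check, since the sampler keeps all margins fixed:

    library(eRm)
    data(xmpl)
    sim <- rsampler(as.matrix(xmpl), rsctrl(burn_in = 100, n_eff = 20))
    margins <- rstats(rsextrobj(sim, start = 2), colSums)  # skip input matrix
    all(vapply(margins, identical, logical(1), margins[[1]]))  # TRUE
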
diff --git a/R/rsunpack.R b/R/rsunpack.R
new file mode 100644
index 0000000..fa05ba0
--- /dev/null
+++ b/R/rsunpack.R
@@ -0,0 +1,37 @@
+"rsunpack" <-
+function(x,n,k,nwords,userfunc,...){
+     # check for NAs (-2^31 not defined in R) in simulated matrices
+     # set value to 0
+
+     nas<-FALSE
+     if (k>=32) {
+       idx<-(1:length(x))[is.na(x)]   # indexvector for NAs
+       nas<-(length(idx)>0)
+       x[idx]<-0
+     }
+
+     t<-vector(length=n*k)
+
+     # calls unpacking routine
+     out<-.Fortran("unpack",
+                 as.integer(x),
+                 as.integer(nwords),
+                 mat=as.integer(t),
+                 as.integer(n),
+                 as.integer(k)
+     )
+     m<-matrix(out$mat,nrow=n)
+     # replace NAs with bitpattern corresponding to -2^31,
+     # i.e., 0 0 0.... 0 1
+     if (nas) {
+        idx1 <- ceiling(idx/nwords)                  # index for rows
+        targetbyte <- idx%%nwords                    # which byte in row is affected
+        last <- k%%32                                # last column in targetbyte
+        idx2 <- (targetbyte - 1*(last!=0))*32 + last # index for column
+        m[idx1,idx2]<-1
+     }
+     # calls user function to calculate statistic(s)
+     RET<-userfunc(m,...)
+     RET
+}
+
diff --git a/R/sim.2pl.R b/R/sim.2pl.R
old mode 100755
new mode 100644
diff --git a/R/sim.locdep.R b/R/sim.locdep.R
old mode 100755
new mode 100644
diff --git a/R/sim.rasch.R b/R/sim.rasch.R
old mode 100755
new mode 100644
diff --git a/R/sim.xdim.R b/R/sim.xdim.R
old mode 100755
new mode 100644
index 62daaac..1c04876
--- a/R/sim.xdim.R
+++ b/R/sim.xdim.R
@@ -4,34 +4,6 @@ sim.xdim <- function(persons, items, Sigma, weightmat, seed = NULL, cutpoint = "
 # Sigma ... VC matrix for multinormal distribution
 # weightmat ... matrix of dimension k times D with weights. If omitted, equal weights are used.
 
-## function from MASS
-mvrnorm<-function (n = 1, mu, Sigma, tol = 1e-06, empirical = FALSE)
-{
-    p <- length(mu)
-    if (!all(dim(Sigma) == c(p, p)))
-        stop("incompatible arguments")
-    eS <- eigen(Sigma, symmetric = TRUE, EISPACK = TRUE)
-    ev <- eS$values
-    if (!all(ev >= -tol * abs(ev[1])))
-        stop("'Sigma' is not positive definite")
-    X <- matrix(rnorm(p * n), n)
-    if (empirical) {
-        X <- scale(X, TRUE, FALSE)
-        X <- X %*% svd(X, nu = 0)$v
-        X <- scale(X, FALSE, TRUE)
-    }
-    X <- drop(mu) + eS$vectors %*% diag(sqrt(pmax(ev, 0)), p) %*%
-        t(X)
-    nm <- names(mu)
-    if (is.null(nm) && !is.null(dn <- dimnames(Sigma)))
-        nm <- dn[[1]]
-    dimnames(X) <- list(nm, NULL)
-    if (n == 1)
-        drop(X)
-    else t(X)
-}
-
-
 if (missing(Sigma)) {
   ndim <- ncol(persons)
 } else {
diff --git a/R/stepwiseIt.R b/R/stepwiseIt.R
old mode 100755
new mode 100644
diff --git a/R/stepwiseIt.eRm.R b/R/stepwiseIt.eRm.R
old mode 100755
new mode 100644
diff --git a/R/summary.LR.r b/R/summary.LR.r
old mode 100755
new mode 100644
index 421e82e..859eb80
--- a/R/summary.LR.r
+++ b/R/summary.LR.r
@@ -12,7 +12,7 @@ summary.LR <- function(object,...)
 
   for (i in 1:length(object$betalist)) {
     cat("\n")
-    cat("Subject subgroup ",object$spl.gr[i],":",sep="")
+    cat("Subject Subgroup: ",object$spl.gr[i],":",sep="")
     cat("\n")
     cat("Log-likelihood: ",object$likgroup[i])
     cat("\n\n")
diff --git a/R/summary.MLoef.r b/R/summary.MLoef.r
old mode 100755
new mode 100644
diff --git a/R/summary.RSctr.R b/R/summary.RSctr.R
new file mode 100644
index 0000000..5db9ad9
--- /dev/null
+++ b/R/summary.RSctr.R
@@ -0,0 +1,12 @@
+"summary.RSctr" <-
+function(object,...)
+{
+  cat("\nCurrent sampler control specifications in ",deparse(substitute(object)),":\n", sep="")
+  cat("\tburn_in =",object$burn_in,"\n")
+  cat("\tn_eff =",object$n_eff,"\n")
+  cat("\tstep =",object$step,"\n")
+  cat("\tseed =",object$seed,"\n")
+  cat("\ttfixed =",object$tfixed,"\n\n")
+  invisible(object)
+}
+
diff --git a/R/summary.RSmpl.R b/R/summary.RSmpl.R
new file mode 100644
index 0000000..2eed915
--- /dev/null
+++ b/R/summary.RSmpl.R
@@ -0,0 +1,15 @@
+"summary.RSmpl" <-
+function(object,...)
+{
+  cat("\nStatus of object",deparse(substitute(object)),"after call to RSampler:\n")
+  cat("\tn =",object$n,"\n")
+  cat("\tk =",object$k,"\n")
+  cat("\tburn_in =",object$burn_in,"\n")
+  cat("\tn_eff =",object$n_eff,"\n")
+  cat("\tstep =",object$step,"\n")
+  cat("\tseed =",object$seed,"\n")
+  cat("\ttfixed =",object$tfixed,"\n")
+  cat("\tn_tot =",object$n_tot,"\n")
+  cat("\toutvec contains",length(object$outvec),"elements\n\n")
+}
+
diff --git a/R/summary.RSmplext.R b/R/summary.RSmplext.R
new file mode 100644
index 0000000..e4ae177
--- /dev/null
+++ b/R/summary.RSmplext.R
@@ -0,0 +1,14 @@
+"summary.RSmplext" <-
+function(object,...)
+{
+  cat("\nStatus of extracted object ",deparse(substitute(object)),":\n", sep="")
+  cat("\tn =",object$n,"\n")
+  cat("\tk =",object$k,"\n")
+  cat("\tburn_in =",object$burn_in,"\n")
+  cat("\tn_eff =",object$n_eff,"\n")
+  cat("\tstep =",object$step,"\n")
+  cat("\tseed =",object$seed,"\n")
+  cat("\ttfixed =",object$tfixed,"\n")
+  cat("\tn_tot =",object$n_tot,"\n")
+  cat("\toutvec contains",length(object$outvec),"elements\n\n")
+}
diff --git a/R/summary.eRm.R b/R/summary.eRm.R
old mode 100755
new mode 100644
index 1b0299c..5a7e7fd
--- a/R/summary.eRm.R
+++ b/R/summary.eRm.R
@@ -2,46 +2,50 @@
 function(object,...)
 {
 
-#labels...whether the item parameters should be labelled
-
-cat("\n")
-cat("Results of",object$model,"estimation: \n")
-cat("\n")
-cat("Call: ", deparse(object$call), "\n")
-cat("\n")
-
-cat("Conditional log-likelihood:",object$loglik,"\n")
-cat("Number of iterations:",object$iter,"\n")
-cat("Number of parameters:",object$npar,"\n")
-cat("\n")
-
-X <- object$X
-X01 <- object$X01
-mt_vek <- apply(X,2,max,na.rm=TRUE)
-
-ci <- confint(object,"eta")                                         # eta parameters:
-if (object$model %in% c("RM","RSM","PCM"))                          # now difficulty for RM, RSM, PCM
-    cat("Item (Category) Difficulty Parameters (eta) ")             # new labelling rh 25-03-2010
-else
-    cat("Basic Parameters eta ")
-cat("with 0.95 CI:\n")
-
-coeftable <- as.data.frame(cbind(round(object$etapar,3),
-                           round(object$se.eta,3),round(ci,3)))
-colnames(coeftable) <- c("Estimate","Std. Error","lower CI","upper CI")
-rownames(coeftable) <- names(object$etapar)
-print(coeftable)
-
-
-ci <- confint(object,"beta")
-cat("\nItem Easiness Parameters (beta) with 0.95 CI:\n")
-#coeftable <- as.data.frame(cbind(round(object$betapar),3),
-#                           round(object$se.beta,3),round(ci,3))
-coeftable <- cbind(round(object$betapar,3), round(object$se.beta,3), round(ci,3))
-
-colnames(coeftable) <- c("Estimate","Std. Error","lower CI","upper CI")
-rownames(coeftable) <- names(object$betapar)
-print(coeftable)
-cat("\n")
+  #labels...whether the item parameters should be labelled
+  
+  cat("\n")
+  cat("Results of",object$model,"estimation: \n")
+  cat("\n")
+  cat("Call: ", deparse(object$call), "\n")
+  cat("\n")
+  
+  cat("Conditional log-likelihood:",object$loglik,"\n")
+  cat("Number of iterations:",object$iter,"\n")
+  cat("Number of parameters:",object$npar,"\n")
+  cat("\n")
+  
+  X <- object$X
+  X01 <- object$X01
+  mt_vek <- apply(X,2,max,na.rm=TRUE)
+  
+  ci <- confint(object,"eta")                                         # eta parameters:
+  if (object$model %in% c("RM","RSM","PCM"))                          # now difficulty for RM, RSM, PCM
+    if(is.null(object$call$W)){                                 # labelling based on whether W was specified mm 2012-05-02
+      cat("Item (Category) Difficulty Parameters (eta):")  # new labelling rh 25-03-2010
+    } else {
+      cat("Item (Category) Parameters (eta):\nBased on design matrix W =", deparse(object$call$W))
+    }
+  else
+      cat("Basic Parameters eta")
+  cat(" with 0.95 CI:\n")
+  
+  coeftable <- as.data.frame(cbind(round(object$etapar,3),
+                             round(object$se.eta,3),round(ci,3)))
+  colnames(coeftable) <- c("Estimate","Std. Error","lower CI","upper CI")
+  rownames(coeftable) <- names(object$etapar)
+  print(coeftable)
+  
+  
+  ci <- confint(object,"beta")
+  cat("\nItem Easiness Parameters (beta) with 0.95 CI:\n")
+  #coeftable <- as.data.frame(cbind(round(object$betapar),3),
+  #                           round(object$se.beta,3),round(ci,3))
+  coeftable <- cbind(round(object$betapar,3), round(object$se.beta,3), round(ci,3))
+  
+  colnames(coeftable) <- c("Estimate","Std. Error","lower CI","upper CI")
+  rownames(coeftable) <- names(object$betapar)
+  print(coeftable)
+  cat("\n")
 }
 
diff --git a/R/summary.llra.R b/R/summary.llra.R
old mode 100755
new mode 100644
index 12c129d..c666c44
--- a/R/summary.llra.R
+++ b/R/summary.llra.R
@@ -1,6 +1,6 @@
 summary.llra <- function(object, ...) UseMethod("summary.llra")
 
-summary.llra <- function(object, gamma=0.95, ...)
+summary.llra <- function(object, level=0.95, ...)
   #summary for class llra
   {
     modi <- object$model
@@ -8,7 +8,7 @@ summary.llra <- function(object, gamma=0.95, ...)
     logli <- object$loglik
     iti <- object$iter
     pari <- object$npar
-    cii <- confint(object, "eta", level=gamma)
+    cii <- confint(object, "eta", level=level)
     se.eta <- object$se.eta
     names(se.eta) <- names(object$etapar)
     res <- list(etapar=object$etapar,se.eta=se.eta,ci=cii,iter=iti,model=modi,call=calli,npar=pari,loglik=logli,refGroup=object$refGroup)
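
The confidence-level argument was renamed from gamma to level; a sketch (the
LLRA call is the same assumed one as in the plotGR example above):

    library(eRm)
    llra_fit <- LLRA(llraDat2[, 1:20], mpoints = 4, groups = llraDat2[, 21])
    summary(llra_fit, level = 0.90)   # formerly gamma = 0.90
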
diff --git a/R/summary.ppar.R b/R/summary.ppar.R
old mode 100755
new mode 100644
diff --git a/R/summary.threshold.r b/R/summary.threshold.r
old mode 100755
new mode 100644
diff --git a/R/test_info.R b/R/test_info.R
new file mode 100644
index 0000000..f7b5d11
--- /dev/null
+++ b/R/test_info.R
@@ -0,0 +1,18 @@
+test_info <- function(ermobject,theta=seq(-5,5,0.01))
+##Calculates info of a scale of items
+#
+#@input: ermobject ... Object of class eRm
+#        theta ... supporting or sampling points on latent trait
+#@output: a vector with the test (scale) information at each value of
+#         theta, i.e. the item informations from item_info() summed
+#         over all items
+#@author: Thomas Rusch
+#@date:12.6.2011
+#  
+  {
+   infos <- item_info(ermobject,theta)
+   tmp <- lapply(infos, function(x) x$i.info)
+   tmp <- matrix(unlist(tmp),ncol=dim(ermobject$X)[2])
+   tinfo <- rowSums(tmp)
+   return(tinfo)
+ }
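
Sketch: plotting the scale information returned by the new test_info() over a
theta grid:

    library(eRm)
    res   <- RM(raschdat1)
    theta <- seq(-4, 4, length.out = 101)
    plot(theta, test_info(res, theta), type = "l",
         xlab = "Latent Trait", ylab = "Scale Information")
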
diff --git a/R/thresholds.eRm.r b/R/thresholds.eRm.r
old mode 100755
new mode 100644
index f554671..b6ca349
--- a/R/thresholds.eRm.r
+++ b/R/thresholds.eRm.r
@@ -3,59 +3,65 @@ thresholds.eRm <- function(object)                # uses matrix approach
 #Computation of threshold parameters for polytomous models
 #object of class "eRm" (but not "dRm")
 
-  if ((object$model == "LLTM") || (object$model == "RM")) stop("Threshold parameters are computed only for polytomous models!")
-  if ((object$model == "LRSM") || (object$model == "LPCM")) {
+  if(object$model %in% c("LLTM", "RM")) stop("Threshold parameters are computed only for polytomous models!")
+  if(object$model %in% c("LRSM", "LPCM")) {
     mpoints <- object$mpoints
     ngroups <- object$ngroups
-    vecrep <- mpoints * ngroups                      
+    vecrep  <- mpoints * ngroups                      
   } else {
-    mpoints <- 1
-    ngroups <- 1
-    vecrep <- 1
+    mpoints <- 1L
+    ngroups <- 1L
+    vecrep  <- 1L
   }
   
-  betapar <- object$betapar
-  indmt <- apply(object$X,2,max,na.rm=TRUE)         #number of categories per item
-  mt_vek1 <- sequence(indmt[1:(length(indmt)/mpoints)]) #1 block of beta-items
-  mt_vek <- rep(mt_vek1, vecrep) 
-  sq<-ifelse(mt_vek > 1,-1,0)
-  d1<-diag(sq[-1])
-  k<-length(betapar)
-  d2<-diag(k)
-  d2[-k,-1]<-d2[-k,-1]+d1
-  threshpar <-as.vector(crossprod(betapar,d2)*-1)                  #vector with threshold parameters
+  betapar    <- object$betapar
+  indmt      <- apply(object$X, 2L, max, na.rm = TRUE)      # number of categories per item
+  mt_vek1    <- sequence(indmt[1L:(length(indmt)/mpoints)]) # 1 block of beta-items
+  mt_vek     <- rep(mt_vek1, vecrep)
+  sq         <- ifelse(mt_vek > 1, -1, 0)
+  d1         <- diag(sq[-1L])
+  k          <- length(betapar)
+  d2         <- diag(k)
+  d2[-k,-1L] <- d2[-k, -1L] + d1
+  T_mat      <- t(d2)                           # MM 2010-02-20
+  threshpar  <- -as.vector(T_mat %*% betapar)   # vector with threshold parameters - fix: MM 2010-02-20
   
-  names(threshpar) <- paste("thresh",names(betapar))
+  names(threshpar) <- paste("thresh", names(betapar))
   
-  vc.beta <- (object$W%*%solve(object$hessian)%*%t(object$W)) #VC matrix beta's
-  se.thresh <- sqrt(diag(d2%*%(vc.beta)%*%t(d2)))             #standard errors of thresholds
+  vc.beta   <- object$W %*% solve(object$hessian) %*% t(object$W) # VC matrix beta's
+  se.thresh <- sqrt(diag( T_mat %*% vc.beta %*% t(T_mat) ))       # standard errors of thresholds - fix: MM 2010-02-20
   names(se.thresh) <- names(threshpar)
 
-  blocks <- rep(1:vecrep, each = length(mt_vek1))
-  thblock <- split(threshpar,blocks)                          #block of threshholds (as in design matrix)
-  indmt1 <- indmt[1:(length(indmt)/mpoints)]
-  indvec <- rep(1:length(indmt1),indmt1)
-  
+  blocks  <- rep(1L:vecrep, each = length(mt_vek1))
+  thblock <- split(threshpar, blocks)                 #blocks of thresholds (as in design matrix)
+  indmt1  <- indmt[1L:(length(indmt)/mpoints)]
+  indvec  <- rep(1L:length(indmt1), indmt1)
+
   threshtab.l <- lapply(thblock, function(x) {                     #list of table-blocks
-                     Location <- tapply(x,indvec,mean)             #location parameters
-                     thresh.l <- split(x, indvec)
-                     threshmat <- t(sapply(thresh.l,"[",1:max(mt_vek)))
+                     location  <- tapply(x,indvec,mean)             #location parameters
+                     thresh.l  <- split(x, indvec)
+                     threshmat <- t(as.data.frame(lapply(thresh.l, function(i_th){
+                                    c(i_th, rep(NA, length.out=max(mt_vek)-length(i_th)))
+                                  })))
                      colnames(threshmat) <- paste("Threshold", 1:dim(threshmat)[2])
-                     parmat <- cbind(Location,threshmat)
-                     }) 
+                     parmat <- cbind("Location" = location, threshmat)
+  }) 
   
   #determine item names for block-table
-  cnames <- colnames(object$X)
-  ind.it <- rep(1:mpoints,each = length(cnames)/mpoints)           #item label index
-  itnames1 <- as.vector(unlist(tapply(cnames, ind.it, function(x) rep(x, ngroups)))) 
-  rep.ind <- sapply(threshtab.l, function(x) dim(x)[1])
-  sp.ind <- rep(1:length(rep.ind), rep.ind)
+  cnames   <- colnames(object$X)
+  ind.it   <- rep(1L:mpoints, each = length(cnames)/mpoints)           #item label index
+  itnames1 <- as.vector(unlist(tapply(cnames, ind.it, function(x){ rep(x, ngroups) }))) 
+  rep.ind  <- unlist(lapply(threshtab.l, nrow))
+  sp.ind   <- rep(1L:length(rep.ind), rep.ind)
 
   names.l <- split(itnames1, sp.ind)                   #names as list
-  for (i in 1:length(threshtab.l)) rownames(threshtab.l[[i]]) <- names.l[[i]]              #name the items
+  for(i in seq_along(threshtab.l)) rownames(threshtab.l[[i]]) <- names.l[[i]]              #name the items
 
-  result <- list(threshpar = threshpar,se.thresh = se.thresh, threshtable = threshtab.l)
+  result <- list("threshpar"   = threshpar,
+                 "se.thresh"   = se.thresh,
+                 "threshtable" = threshtab.l)
   class(result) <- "threshold"
-  result
 
-}
\ No newline at end of file
+  return(result)
+
+}
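
Sketch for the corrected threshold computation on a partial credit model
(pcmdat is one of the package's polytomous example data sets):

    library(eRm)
    res <- PCM(pcmdat)
    tr  <- thresholds(res)   # thresholds() dispatches to thresholds.eRm()
    tr$threshtable           # location and threshold parameters per item
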
diff --git a/R/thresholds.r b/R/thresholds.r
old mode 100755
new mode 100644
diff --git a/R/vcov.eRm.R b/R/vcov.eRm.R
old mode 100755
new mode 100644
diff --git a/R/zzz.R b/R/zzz.R
old mode 100755
new mode 100644
index aec2cd9..4d30c1d
--- a/R/zzz.R
+++ b/R/zzz.R
@@ -23,3 +23,7 @@ setClass("performance",
 #          function(x,y,...) {
 #              .plot.performance(x,...)
 #          })
+
+prettyPaste <- function(...){
+  paste(strwrap(paste0(..., collapse = ""), width = getOption("width")), sep="\n", collapse="\n")
+}
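
A sketch of the new internal helper; it is not exported, so outside the
package namespace it would be reached via ':::':

    cat(eRm:::prettyPaste("a long diagnostic message that is wrapped ",
                          "to the current console width"), "\n")
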
diff --git a/build/partial.rdb b/build/partial.rdb
new file mode 100644
index 0000000..07e76c5
Binary files /dev/null and b/build/partial.rdb differ
diff --git a/build/vignette.rds b/build/vignette.rds
new file mode 100644
index 0000000..31a325c
Binary files /dev/null and b/build/vignette.rds differ
diff --git a/data/llraDat1.rda b/data/llraDat1.rda
old mode 100755
new mode 100644
index f63e20d..6d489eb
Binary files a/data/llraDat1.rda and b/data/llraDat1.rda differ
diff --git a/data/llraDat2.rda b/data/llraDat2.rda
old mode 100755
new mode 100644
index 58c0762..3894a20
Binary files a/data/llraDat2.rda and b/data/llraDat2.rda differ
diff --git a/data/llradat3.rda b/data/llradat3.rda
old mode 100755
new mode 100644
index aa2a06b..05d1a3d
Binary files a/data/llradat3.rda and b/data/llradat3.rda differ
diff --git a/data/lltmdat1.rda b/data/lltmdat1.rda
old mode 100755
new mode 100644
index 77bc94c..5c9da0c
Binary files a/data/lltmdat1.rda and b/data/lltmdat1.rda differ
diff --git a/data/lltmdat2.rda b/data/lltmdat2.rda
old mode 100755
new mode 100644
index fd85cd1..92b8ed8
Binary files a/data/lltmdat2.rda and b/data/lltmdat2.rda differ
diff --git a/data/lpcmdat.rda b/data/lpcmdat.rda
old mode 100755
new mode 100644
index b6876ad..99bcd1a
Binary files a/data/lpcmdat.rda and b/data/lpcmdat.rda differ
diff --git a/data/lrsmdat.rda b/data/lrsmdat.rda
old mode 100755
new mode 100644
index d28b990..8ff6d29
Binary files a/data/lrsmdat.rda and b/data/lrsmdat.rda differ
diff --git a/data/pcmdat.rda b/data/pcmdat.rda
old mode 100755
new mode 100644
index 31c88f5..1482c08
Binary files a/data/pcmdat.rda and b/data/pcmdat.rda differ
diff --git a/data/pcmdat2.rda b/data/pcmdat2.rda
old mode 100755
new mode 100644
index ea9e442..b455b6a
Binary files a/data/pcmdat2.rda and b/data/pcmdat2.rda differ
diff --git a/data/raschdat1.rda b/data/raschdat1.rda
old mode 100755
new mode 100644
index f24e872..3ddf3c6
Binary files a/data/raschdat1.rda and b/data/raschdat1.rda differ
diff --git a/data/raschdat1_RM_fitted.RData b/data/raschdat1_RM_fitted.RData
new file mode 100644
index 0000000..d1083e4
Binary files /dev/null and b/data/raschdat1_RM_fitted.RData differ
diff --git a/data/raschdat1_RM_lrres2.RData b/data/raschdat1_RM_lrres2.RData
new file mode 100644
index 0000000..f690654
Binary files /dev/null and b/data/raschdat1_RM_lrres2.RData differ
diff --git a/data/raschdat1_RM_plotDIF.RData b/data/raschdat1_RM_plotDIF.RData
new file mode 100644
index 0000000..677f60b
Binary files /dev/null and b/data/raschdat1_RM_plotDIF.RData differ
diff --git a/data/raschdat2.rda b/data/raschdat2.rda
old mode 100755
new mode 100644
index 879c8ae..edc1e2a
Binary files a/data/raschdat2.rda and b/data/raschdat2.rda differ
diff --git a/data/raschdat3.rda b/data/raschdat3.rda
new file mode 100644
index 0000000..f58f08e
Binary files /dev/null and b/data/raschdat3.rda differ
diff --git a/data/raschdat4.rda b/data/raschdat4.rda
new file mode 100644
index 0000000..1ee1663
Binary files /dev/null and b/data/raschdat4.rda differ
diff --git a/data/rsmdat.rda b/data/rsmdat.rda
old mode 100755
new mode 100644
index c62ba4d..f60debc
Binary files a/data/rsmdat.rda and b/data/rsmdat.rda differ
diff --git a/data/xmpl.RData b/data/xmpl.RData
new file mode 100644
index 0000000..d7896f7
Binary files /dev/null and b/data/xmpl.RData differ
diff --git a/data/xmplbig.RData b/data/xmplbig.RData
new file mode 100644
index 0000000..ac0e869
Binary files /dev/null and b/data/xmplbig.RData differ
diff --git a/inst/CITATION b/inst/CITATION
new file mode 100644
index 0000000..fe9d72c
--- /dev/null
+++ b/inst/CITATION
@@ -0,0 +1,129 @@
+citHeader("To cite package 'eRm' in publications use:")
+year <- sub(".*(2[[:digit:]]{3})-.*", "\\1", meta$Date)
+vers <- meta$Version
+note <- sprintf("R package version %s", meta$Version)
+
+PM  <- person("Patrick", "Mair")
+RH  <- person("Reinhold", "Hatzinger")
+MJM <- person(c("Marco", "Johannes"), "Maier")
+TR  <- person("Thomas", "Rusch")
+IK  <- person("Ingrid", "Koller")
+
+# Package Citation
+citEntry(
+  header = paste0("\n", paste0(rep("#", getOption("width")-1L), collapse=""), "\nThe current Package Version:\n"),
+  entry  = "MANUAL",
+  title  = "{eRm: Extended Rasch Modeling}",
+  author = c(PM, RH, MJM),
+  year   = year,
+  note   = note,
+  url    = "http://erm.r-forge.r-project.org/",
+  textVersion = paste0(
+    "Mair, P., Hatzinger, R., & Maier M. J. (",
+    year,
+    "). eRm: Extended Rasch Modeling. ",
+    vers,
+    ". http://erm.r-forge.r-project.org/"
+  )
+)
+
+# JSS 2007
+citEntry(
+  header  = paste0("\n", paste0(rep("#", getOption("width")-1L), collapse=""), "\nThe original JSS Article:\n"),
+  entry   = "ARTICLE",
+  title   = "{Extended Rasch modeling: The eRm package for the application of IRT models in R}",
+  author  = c(PM, RH),
+  year    = 2007,
+  pages   = "1--20",
+  journal = "{Journal of Statistical Software}",
+  volume  = 20,
+  number  = 9,
+  url     = "http://www.jstatsoft.org/v20/i09",
+  textVersion = paste0(
+    "Mair, P., & Hatzinger, R. (2007). ",
+    "Extended Rasch modeling: The eRm package for the application of IRT models in R. ",
+    "Journal of Statistical Software, 20(9), 1-20. ",
+    "http://www.jstatsoft.org/v20/i09"
+  )
+)
+
+# PS 2007
+citEntry(
+  header  = paste0("\n", paste0(rep("#", getOption("width")-1L), collapse=""), "\nArticle about CML Estimation in eRm:\n"),
+  entry   = "ARTICLE",
+  title   = "{CML based estimation of extended Rasch models with the eRm package in R}",
+  author  = c(PM, RH),
+  year    = 2007,
+  pages   = "26--43",
+  journal = "{Psychology Science}",
+  volume  = 49,
+  number  = 1,
+  textVersion = paste0(
+    "Mair, P., & Hatzinger, R. (2007). ",
+    "CML based estimation of extended Rasch models with the eRm package in R. ",
+    "Psychology Science, 49(1), 26-43."
+  )
+)
+
+# PSQ 2009
+citEntry(
+  header  = paste0("\n", paste0(rep("#", getOption("width")-1L), collapse=""), "\nArticle about LLRAs in eRm:\n"),
+  entry   = "ARTICLE",
+  title   = "{IRT models with relaxed assumptions in eRm: A manual-like instruction}",
+  author  = c(RH, TR),
+  year    = 2009,
+  pages   = "87--120",
+  journal = "{Psychology Science Quarterly}",
+  volume  = 51,
+  number  = 1,
+  textVersion = paste0(
+    "Hatzinger, R., & Rusch, T. (2009). ",
+    "IRT models with relaxed assumptions in eRm: A manual-like instruction. ",
+    "Psychology Science Quarterly, 51(1), 87-120."
+  )
+)
+
+# LLRA Springer 2013
+citEntry(
+  header  = paste0("\n", paste0(rep("#", getOption("width")-1L), collapse=""), "\nBook Chapter about LLRAs:\n"),
+  entry   = "INPROCEEDINGS",
+  title   = "{Linear logistic models with relaxed assumptions in R}",
+  author  = c(TR, MJM, RH),
+  booktitle = "{Algorithms from and for Nature and Life}",
+  editor  = c(person("Berthold", "Lausen"), person("Dirk", "van den Poel"), person("Alfred", "Ultsch")),
+  series  = "{Studies in Classification, Data Analysis, and Knowledge Organization}",
+  year    = 2013,
+  pages   = "337--347",
+  address = "{New York}",
+  publisher = "{Springer}",
+  doi     = "10.1007/978-3-319-00035-0_34",
+  textVersion = paste0(
+    "Rusch T., Maier M. J., & Hatzinger R. (2013). ",
+    "Linear logistic models with relaxed assumptions in R. ",
+    "In: B. Lausen, D. van den Poel, & A. Ultsch (Eds.) ",
+    "Algorithms from and for Nature and Life.",
+    "New York: Springer. 337-347. ",
+    "http://dx.doi.org/10.1007/978-3-319-00035-0_34"
+  )
+)
+
+
+# NPar Tests 2015
+citEntry(
+  header  = paste0("\n", paste0(rep("#", getOption("width")-1L), collapse=""), "\nArticle about the performance of Quasi-Exact Tests in eRm:\n"),
+  entry   = "ARTICLE",
+  title   = "{An Empirical Power Analysis of Quasi-Exact Tests for the Rasch Model: Measurement Invariance in Small Samples}",
+  author  = c(IK, MJM, RH),
+  year    = 2015,
+  volume  = 11,
+  number  = 2,
+  pages   = "45--54",
+  journal = "{Methodology}",
+  doi     = "10.1027/1614-2241/a000090",
+  textVersion = paste0(
+    "Koller, I., Maier, M. J., & Hatzinger, R. (2015). ",
+    "An Empirical Power Analysis of Quasi-Exact Tests for the Rasch Model: Measurement Invariance in Small Samples. ",
+    "Methodology, 11(2), 45-54. ",
+    "http://dx.doi.org/10.1027/1614-2241/a000090"
+  )
+)
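
Once the package is installed, the entries above drive citation("eRm"); a hedged sketch of inspecting them, using only standard utils functions:

    # Hedged sketch: render the CITATION entries above (requires installed eRm).
    ct <- citation("eRm")
    print(ct)        # the textVersion strings defined above
    toBibtex(ct)     # BibTeX built from the citEntry() fields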
diff --git a/inst/NEWS.Rd b/inst/NEWS.Rd
new file mode 100644
index 0000000..fb8acbe
--- /dev/null
+++ b/inst/NEWS.Rd
@@ -0,0 +1,170 @@
+\name{NEWS}
+\title{News for Package \pkg{eRm}}
+\encoding{UTF-8}
+\section{Changes in Version 0.15-7}{
+  \itemize{
+	  \item \code{LRtest.Rm()}: bugfix due to updates in base R.
+	}
+}
+\section{Changes in Version 0.15-6}{
+  \itemize{
+	  \item \code{plotGOF()}: added arguments \code{x_axis}, \code{y_axis}, \code{set_par}, and \code{reset_par} to ease customization of the plot.
+		\item Imports functions from default packages, as required by the new CRAN check.
+	}
+}
+\section{Changes in Version 0.15-5}{
+  \itemize{
+    \item implemented an \code{anova()} method for all models (except \acronym{LLRA}s, which have their own: \code{anova.llra}); see \code{?anova.eRm}.
+    \item added a function to compute separation reliability, as proposed in Wright & Stone (1999; see \code{?SepRel}).
+    \item \code{plotINFO()} accepts more arguments via \code{\dots} (see \code{?plotINFO}).
+    \item fixed a bug in \code{plotPWmap()}.
+    \item fixed a bug in the internal function \code{get_item_cats()} related to \code{NA}s (affecting \acronym{LLRA} and item-information functions).
+    \item switched encoding to \acronym{UTF}-8 to avoid problems with diacritics, etc. (e.g., Martin-Löf).
+		\item updated citations.
+    \item general improvements.
+  }
+}
+\section{Changes in Version 0.15-4}{
+  \itemize{
+    \item streamlining some functions, updating the vignette, etc.
+    \item warnings are now treated more consistently and can be suppressed with \code{suppressWarnings()}
+    \item the \code{plotGOF()} function was revamped and tidied up in a couple of places, e.g.:
+    \itemize{
+      \item the plotting order of elements in a non-interactive call has been changed to put confidence ellipses and lines in the background and text in the foreground.
+      \item the \eqn{x}{x}- and \eqn{y}{y}-limits are now dynamically computed by default, so that confidence regions and ellipses are inside the plotting region.
+      \item the leading \dQuote{\code{I}} before item numbers has been removed for better legibility.
+    }
+    \item moved \code{NEWS} to the new fancy \code{NEWS.Rd} file/format
+  }
+}
+\section{Changes in eRm version 0.15-3}{
+  \itemize{
+    \item an error in \code{test_info()} was fixed.
+    \item \pkg{eRm} now depends on \code{R} \eqn{\geq3.0.0}{>= 3.0.0}
+  }
+}
+\section{Changes in eRm version 0.15-2}{
+  \itemize{
+    \item an error in \code{Tpbis.stat} was fixed.
+  }
+}
+\section{Changes in eRm version 0.15-1}{
+  \itemize{
+    \item a bug in \code{person.parameter()} has been fixed that caused the estimation to crash in some cases.
+    \item a bug in \code{thresholds()} has been fixed that caused the routine to crash.
+  }
+}
+\section{Changes in eRm version 0.15-0}{
+  \itemize{
+    \item the \pkg{RaschSampler} package has been merged into \pkg{eRm} for convenience (still available as a separate package).
+  }
+}
+\section{Changes in eRm version 0.14-5}{
+  \itemize{
+    \item the package is now byte-compiled by default.
+    \item some statistics added to \code{NPtest()}, \code{T7} and \code{T7a} removed.
+    \item fixed a bug in \code{plotPWmap()}.
+    \item fixed the \code{mplot} argument in plotting routines.
+    \item fixed the split-criterion \code{"all.r"} in \code{LRtest()}.
+    \item deleted all usages of \code{data()} in examples, since eRm uses \dQuote{lazy data.}
+  }
+}
+\section{Changes in eRm version 0.14-4}{
+  \itemize{
+    \item when calling \code{NPtest()}, the \pkg{RaschSampler} can now be controlled more specifically (\code{burn_in}, \code{step}, \code{seed}).
+    \item various improvements and bugfixes for LLRA-related functions.
+    \item person parameter values can be extracted now for all persons using \code{coef()}.
+      Additionally, in-/exclusion of extrapolated values (for 0 and perfect scores) can be controlled via the argument \code{extrapolated}.
+    \item LRtest now computes standard errors (\code{se = TRUE}) by default.
+    \item plotDIF now plots \dQuote{difficulties} for all models (formerly, \dQuote{easiness} parameters were plotted for Rasch models).
+  }
+}
+\section{Changes in eRm version 0.14-3}{
+  \itemize{
+    \item minor bug fixed in \code{plotGOF()}, where on rare occasions confidence ellipses were plotted together with control lines (spotted by Peter Parker)
+    \item improved labelling in \code{plotjointICC()}
+  }
+}
+\section{Changes in eRm version 0.14-2}{
+  \itemize{
+    \item warning regarding group assignment when using median or mean split removed from \code{MLoef()}
+    \item modification in \code{NPtest()} to split long output lines
+    \item changed the delimiters of \code{plotDIF()} confidence intervals to \code{pch = 20} (small bullet).
+  }
+}
+\section{Changes in eRm version 0.14-1}{
+  \itemize{
+    \item new experimental functions to calculate and plot item and test information (by Thomas Rusch)
+    \item bug fixed in the calculation of item and person Infit t and Outfit t (hint from Rainer Alexandrowicz).
+    \item \pkg{eRm} no longer depends on the \pkg{RaschSampler} package.
+      However, it must be installed to use \code{NPtest()}.
+    \item changed the delimiters of \code{plotDIF()} confidence intervals to \code{pch = 20}.
+  }
+}
+\section{Changes in eRm version 0.14-0}{
+  \itemize{
+    \item new (wrapper) function \code{LLRA()} for fitting linear logistic models with relaxed assumptions including utilities for preparing data (\code{llra.datprep()}), setting up (\code{build_W()}) and modifying (\code{collapse_W()}) design matrices, comparing LLRA models (\code{anova()}) and plotting results (\code{plotTR()} and \code{plotGR()}) (by Thomas Rusch).
+    \item \dQuote{exact} version of the Martin-Löf test for binary items and arbitrary splits added as method to \code{NPtest()}.
+    \item in \code{plotGOF()} confidence ellipses can now be drawn for subsets of items, optionally using different colours
+    \item new function \code{plotDIF()} (by Kathrin Gruber): plots confidence intervals for item parameters estimated separately in subgroups, uses LR objects as input
+    \item adapted the \code{MLoef()} function to work with polytomous data and more than two item groups
+    \item error checks in NPtest:
+    \enumerate{
+      \item 0/full responses for items meaningless for NPtest,
+      \item group in \code{method = "T4"} must be of type logical,
+      \item specifying all items for T4 gives meaningless results.
+    }
+    \item warning regarding group assignment when using median split removed from \code{LRtest()} and \code{Waldtest()}.
+    \item some modifications in \code{plotPWmap()}: horizontal plotting, different default plotting symbols, option to change size of plotting symbols
+    \item bug in \code{MLoef()} fixed (now using logs in calculating the person contributions)
+    \item \pkg{eRm} now depends on \code{R} \eqn{\geq2.12.0}{>= 2.12.0}
+    \item Latin1 encoding removed
+    \item bug in \code{plotICC()} (always same title) fixed
+  }
+}
+\section{Changes in eRm version 0.13-0}{
+  \itemize{
+    \item \code{LLTM()}, \code{LRSM()}, and \code{LPCM()} work now for repeated measurement designs with treatment groups and missing values.
+    \item Rename vignette to \dQuote{eRm}.
+  }
+}
+\section{Changes in eRm version 0.12-2}{
+  \itemize{
+    \item new function \code{plotPWmap()} to plot Bond-and-Fox style pathway maps for the data by Julian Gilbey.
+      Since calculation of the \eqn{t}{t}-statistics requires calculation of the kurtosis of the standardized residuals, corresponding changes were made to \code{itemfit.ppar()}, \code{personfit.ppar()}, \code{pifit.internal()}, \code{print.ifit()}, and \code{print.pfit()}.
+    \item \code{plotPImap()} patched by Julian Gilbey: length of \code{item.subset} did not match the documentation, warning stars did not all appear, pre-calculated person.parameter data can be passed to the function via \code{pp}, mis-ordered items can be coloured.
+      Some minor bugs were fixed.
+    \item the optimizer can be changed to \code{optim()} using \code{fitctrl <- "optim"} and reset to \code{nlm()} (the default) with \code{fitctrl <- "nlm"}
+    \item value of \code{LRtest()} now contains the list \code{fitobj} which contains the model objects according to the subgroups specified by \code{splitcr}
+    \item \code{MLoef()} no longer supports missing values
+  }
+}
+\section{Changes in eRm version 0.12-1}{
+  \itemize{
+    \item function invalid from package \pkg{gtools} integrated into \pkg{eRm}.
+      \pkg{eRm} no longer depends on \pkg{gtools}.
+  }
+}
+\section{Changes in eRm version 0.12-0}{
+  \itemize{
+    \item for \code{RM()}, \code{RSM()}, and \code{PCM()}: eta parameters are now displayed as difficulty parameters; \code{print()} and \code{summary()} methods changed accordingly.
+    \item new labeling of eta parameters in \code{RM()}, \code{RSM()}, and \code{PCM()}.
+      They are now labeled according to the estimated parameters for items (\code{RM()}), items + categories (\code{RSM()}), and items x categories (\code{PCM()}).
+    \item function \code{MLoef()} for Martin-Löf-Test added
+    \item \code{df} in \code{personfit()} and \code{itemfit()} corrected
+    \item the \code{logLik()} functions now extract the log-likelihood and df into objects of class \code{"logLik.eRm"} and \code{"loglik.ppar"} with elements \code{loglik} and \code{df}.
+      the corresponding print methods have been modified accordingly.
+    \item method \code{coef.ppar()} to extract person parameter estimates added
+    \item option for beta parameters added to coef.eRm
+    \item in confint.eRm: default \code{parm = "beta"}
+    \item minor modifications in the help file for \code{IC()}
+    \item \code{plotPImap()}: revised rug added, bug concerning \code{item.subset} fixed, minor modifications to enhance readability
+    \item minor modifications in \code{plotjointICC()}: allows for main title and colors, option \code{legpos = FALSE} suppresses legends, dev.new removed, \code{legend = FALSE} produced incorrect labeling
+    \item minor modifications in \code{plotICC()}: allows for main title and colors, default coloring with \code{col = NULL} instead of \code{NA} for compatibility, option \code{legpos = FALSE} suppresses legends, \code{mplot} is now \code{FALSE} if only one item is specified
+    \item plot.ppar: dev.new removed
+    \item option \dQuote{visible} in \code{print.ifit()} and \code{print.pfit()} to avoid overly long output and to allow extraction of infit and outfit values (maybe changed to a \code{coef()} method later)
+    \item \code{strwrap()} for NPtest print methods to break long lines
+    \item new methods \code{IC.default()} and \code{pmat.default()} for enhanced error messages
+    \item lazy loading package and datafiles
+  }
+}
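
The 0.15-5 entries above introduce SepRel(); a hedged sketch, assuming eRm >= 0.15-5 is installed and using the bundled raschdat1 data:

    # Hedged sketch of the SepRel() addition documented above (eRm >= 0.15-5).
    library(eRm)
    fit <- RM(raschdat1)          # dichotomous Rasch model on bundled data
    pp  <- person.parameter(fit)  # person parameter estimation
    SepRel(pp)                    # separation reliability, see ?SepRel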
diff --git a/inst/NEWS.pdf b/inst/NEWS.pdf
new file mode 100644
index 0000000..2d53429
Binary files /dev/null and b/inst/NEWS.pdf differ
diff --git a/inst/doc/UCML.jpg b/inst/doc/UCML.jpg
deleted file mode 100755
index b9aa6a0..0000000
Binary files a/inst/doc/UCML.jpg and /dev/null differ
diff --git a/inst/doc/Z.cls b/inst/doc/Z.cls
deleted file mode 100755
index 4a9c8c3..0000000
--- a/inst/doc/Z.cls
+++ /dev/null
@@ -1,239 +0,0 @@
-\def\fileversion{1.1}
-\def\filename{Z}
-\def\filedate{2006/10/11}
-%%
-%% Package `Z' to use with LaTeX2e for Z reports
-%% Copyright (C) 2004 Achim Zeileis
-%%
-\NeedsTeXFormat{LaTeX2e}
-\ProvidesClass{Z}[\filedate\space\fileversion\space Z class by Achim Zeileis]
-
-%% options
-\LoadClass[10pt,a4paper,twoside]{article}
-\newif\if at notitle
-\@notitlefalse
-\newif\if at noheadings
-\@noheadingsfalse
-\DeclareOption{notitle}{\@notitletrue}
-\DeclareOption{noheadings}{\@noheadingstrue}
-\ProcessOptions
-
-%% required packages
-\RequirePackage{graphicx,a4wide,color,hyperref,ae,fancyvrb,thumbpdf}
-\RequirePackage[T1]{fontenc}
-\usepackage[authoryear,round,longnamesfirst]{natbib}
-\bibpunct{(}{)}{;}{a}{}{,}
-\bibliographystyle{jss}
-
-%% paragraphs
-\setlength{\parskip}{0.7ex plus0.1ex minus0.1ex}
-\setlength{\parindent}{0em}
-
-%% for all publications
-\newcommand{\Plaintitle}[1]{\def\@Plaintitle{#1}}
-\newcommand{\Shorttitle}[1]{\def\@Shorttitle{#1}}
-\newcommand{\Plainauthor}[1]{\def\@Plainauthor{#1}}
-\newcommand{\Keywords}[1]{\def\@Keywords{#1}}
-\newcommand{\Plainkeywords}[1]{\def\@Plainkeywords{#1}}
-\newcommand{\Abstract}[1]{\def\@Abstract{#1}}
-
-%% defaults
-\author{Firstname Lastname\\Affiliation}
-\title{Title}
-\Abstract{---!!!---an abstract is required---!!!---}
-\Plainauthor{\@author}
-\Plaintitle{\@title}
-\Shorttitle{\@title}
-\Keywords{---!!!---at least one keyword is required---!!!---}
-\Plainkeywords{\@Keywords}
-
-%% Sweave(-like)
-%\DefineVerbatimEnvironment{Sinput}{Verbatim}{fontshape=sl}
-%\DefineVerbatimEnvironment{Soutput}{Verbatim}{}
-%\DefineVerbatimEnvironment{Scode}{Verbatim}{fontshape=sl}
-%\newenvironment{Schunk}{}{}
-%\setkeys{Gin}{width=0.8\textwidth}
-
-%% new \maketitle
-\def\maketitle{
- \begingroup
-   \def\thefootnote{\fnsymbol{footnote}}
-   \def\@makefnmark{\hbox to 0pt{$^{\@thefnmark}$\hss}}
-   \long\def\@makefntext##1{\parindent 1em\noindent
-			    \hbox to1.8em{\hss $\m at th ^{\@thefnmark}$}##1}
-   \@maketitle \@thanks
- \endgroup
- \setcounter{footnote}{0}
-
- \if at noheadings
-   %% \thispagestyle{empty}
-   %% \markboth{\centerline{\@Shorttitle}}{\centerline{\@Plainauthor}}
-   %% \pagestyle{myheadings}
- \else
-   \thispagestyle{empty}
-   \markboth{\centerline{\@Shorttitle}}{\centerline{\@Plainauthor}}
-   \pagestyle{myheadings}
- \fi
-
- \let\maketitle\relax \let\@maketitle\relax
- \gdef\@thanks{}\gdef\@author{}\gdef\@title{}\let\thanks\relax
-}
-
-% Author information can be set in various styles:
-% For several authors from the same institution:
-% \author{Author 1 \and ... \and Author n \\
-%     Address line \\ ... \\ Address line}
-% if the names do not fit well on one line use
-%         Author 1 \\ {\bf Author 2} \\ ... \\ {\bf Author n} \\
-% For authors from different institutions:
-% \author{Author 1 \\ Address line \\  ... \\ Address line
-%     \And  ... \And
-%     Author n \\ Address line \\ ... \\ Address line}
-% To start a seperate ``row'' of authors use \AND, as in
-% \author{Author 1 \\ Address line \\  ... \\ Address line
-%     \AND
-%     Author 2 \\ Address line \\ ... \\ Address line \And
-%     Author 3 \\ Address line \\ ... \\ Address line}
-
-\def\@maketitle{\vbox{\hsize\textwidth \linewidth\hsize
- {\centering
- {\LARGE\bf \@title\par}
- \vskip 0.2in plus 1fil minus 0.1in
- {
-     \def\and{\unskip\enspace{\rm and}\enspace}%
-     \def\And{\end{tabular}\hss \egroup \hskip 1in plus 2fil
- 	      \hbox to 0pt\bgroup\hss \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\ignorespaces}%
-     \def\AND{\end{tabular}\hss\egroup \hfil\hfil\egroup
- 	      \vskip 0.1in plus 1fil minus 0.05in
- 	      \hbox to \linewidth\bgroup\rule{\z@}{10pt} \hfil\hfil
- 	      \hbox to 0pt\bgroup\hss \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\ignorespaces}
-     \hbox to \linewidth\bgroup\rule{\z@}{10pt} \hfil\hfil
-     \hbox to 0pt\bgroup\hss \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\@author
-     \end{tabular}\hss\egroup
- \hfil\hfil\egroup}
- \vskip 0.3in minus 0.1in
- \hrule
- \begin{abstract}
- \@Abstract
- \end{abstract}}
- \textit{Keywords}:~\@Keywords.
- \vskip 0.1in minus 0.05in
- \hrule
- \vskip 0.2in minus 0.1in
-}}
-
-%% \def\@maketitle{\vbox{\hsize\textwidth \linewidth\hsize 
-%%  {\centering
-%%  {\LARGE\bf \@title\par}
-%%    \def\And{\end{tabular}\hfil\linebreak[0]\hfil
-%% 	    \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\ignorespaces}%
-%%     \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\@author\end{tabular}%
-%%  \vskip 0.3in minus 0.1in
-%%  \hrule
-%%  \begin{abstract}
-%%  \@Abstract
-%%  \end{abstract}}
-%%  \textit{Keywords}:~\@Keywords.
-%%  \vskip 0.1in minus 0.05in
-%%  \hrule
-%%  \vskip 0.2in minus 0.1in
-%% }}
-
-
-%% sections, subsections, and subsubsections
-\newlength{\preXLskip}
-\newlength{\preLskip}
-\newlength{\preMskip}
-\newlength{\preSskip}
-\newlength{\postMskip}
-\newlength{\postSskip}
-\setlength{\preXLskip}{1.8\baselineskip plus 0.5ex minus 0ex}
-\setlength{\preLskip}{1.5\baselineskip plus 0.3ex minus 0ex}
-\setlength{\preMskip}{1\baselineskip plus 0.2ex minus 0ex}
-\setlength{\preSskip}{.8\baselineskip plus 0.2ex minus 0ex}
-\setlength{\postMskip}{.5\baselineskip plus 0ex minus 0.1ex}
-\setlength{\postSskip}{.3\baselineskip plus 0ex minus 0.1ex}
-
-\newcommand{\jsssec}[2][default]{\vskip \preXLskip%
-  \pdfbookmark[1]{#1}{Section.\thesection.#1}%
-  \refstepcounter{section}%
-  \centerline{\textbf{\Large \thesection. #2}} \nopagebreak
-  \vskip \postMskip \nopagebreak}
-\newcommand{\jsssecnn}[1]{\vskip \preXLskip%
-  \centerline{\textbf{\Large #1}} \nopagebreak
-  \vskip \postMskip \nopagebreak}
-
-\newcommand{\jsssubsec}[2][default]{\vskip \preMskip%
-  \pdfbookmark[2]{#1}{Subsection.\thesubsection.#1}%
-  \refstepcounter{subsection}%
-  \textbf{\large \thesubsection. #2} \nopagebreak
-  \vskip \postSskip \nopagebreak}
-\newcommand{\jsssubsecnn}[1]{\vskip \preMskip%
-  \textbf{\large #1} \nopagebreak
-  \vskip \postSskip \nopagebreak}
-
-\newcommand{\jsssubsubsec}[2][default]{\vskip \preSskip%
-  \pdfbookmark[3]{#1}{Subsubsection.\thesubsubsection.#1}%
-  \refstepcounter{subsubsection}%
-  {\large \textit{#2}} \nopagebreak
-  \vskip \postSskip \nopagebreak}
-\newcommand{\jsssubsubsecnn}[1]{\vskip \preSskip%
-  {\textit{\large #1}} \nopagebreak
-  \vskip \postSskip \nopagebreak}
-
-\newcommand{\jsssimplesec}[2][default]{\vskip \preLskip%
-%%  \pdfbookmark[1]{#1}{Section.\thesection.#1}%
-  \refstepcounter{section}%
-  \textbf{\large #1} \nopagebreak
-  \vskip \postSskip \nopagebreak}
-\newcommand{\jsssimplesecnn}[1]{\vskip \preLskip%
-  \textbf{\large #1} \nopagebreak
-  \vskip \postSskip \nopagebreak}
-
-\renewcommand{\section}{\secdef \jsssec \jsssecnn}
-\renewcommand{\subsection}{\secdef \jsssubsec \jsssubsecnn}
-\renewcommand{\subsubsection}{\secdef \jsssubsubsec \jsssubsubsecnn}
-
-%% colors
-\definecolor{Red}{rgb}{0.7,0,0}
-\definecolor{Blue}{rgb}{0,0,0.8}
-\hypersetup{%
-  hyperindex = {true},
-  colorlinks = {true},
-  linktocpage = {true},
-  plainpages = {false},
-  linkcolor = {Blue},
-  citecolor = {Blue},
-  urlcolor = {Red},
-  pdfstartview = {Fit},
-  pdfpagemode = {UseOutlines},
-  pdfview = {XYZ null null null}
-}
-
-\AtBeginDocument{
-  \hypersetup{%
-    pdfauthor = {\@Plainauthor},
-    pdftitle = {\@Plaintitle},
-    pdfkeywords = {\@Plainkeywords}
-  }
-}
-\if at notitle
-  %% \AtBeginDocument{\maketitle}
-\else
-  \AtBeginDocument{\maketitle}
-\fi
-
-%% commands
-\makeatletter
-\newcommand\code{\bgroup\@makeother\_\@codex}
-\def\@codex#1{{\normalfont\ttfamily\hyphenchar\font=-1 #1}\egroup}
-\makeatother
-%%\let\code=\texttt
-\let\proglang=\textsf
-\newcommand{\pkg}[1]{{\normalfont\fontseries{b}\selectfont #1}}
-\newcommand{\email}[1]{\href{mailto:#1}{\normalfont\texttt{#1}}}
-\newcommand{\doi}[1]{\href{http://dx.doi.org/#1}{\normalfont\texttt{doi:#1}}}
-\newcommand{\E}{\mathsf{E}}
-\newcommand{\VAR}{\mathsf{VAR}}
-\newcommand{\COV}{\mathsf{COV}}
-\newcommand{\Prob}{\mathsf{P}}
diff --git a/inst/doc/eRm.R b/inst/doc/eRm.R
new file mode 100644
index 0000000..b36a18e
--- /dev/null
+++ b/inst/doc/eRm.R
@@ -0,0 +1,107 @@
+### R code from vignette source 'eRm.Rnw'
+### Encoding: UTF-8
+
+###################################################
+### code chunk number 1: eRm.Rnw:634-637
+###################################################
+library("eRm")
+res.rasch <- RM(raschdat1)
+pres.rasch <- person.parameter(res.rasch)
+
+
+###################################################
+### code chunk number 2: eRm.Rnw:640-642
+###################################################
+lrres.rasch <- LRtest(res.rasch, splitcr = "mean")
+lrres.rasch
+
+
+###################################################
+### code chunk number 3: plotGOF-lrres-rasch (eval = FALSE)
+###################################################
+## plotGOF(lrres.rasch, beta.subset=c(14,5,18,7,1), tlab="item", conf=list(ia=FALSE,col="blue",lty="dotted"))
+
+
+###################################################
+### code chunk number 4: plotGOF-lrres-rasch-plot
+###################################################
+plotGOF(lrres.rasch, beta.subset=c(14,5,18,7,1), tlab="item", conf=list(ia=FALSE,col="blue",lty="dotted"))
+
+
+###################################################
+### code chunk number 5: eRm.Rnw:665-668
+###################################################
+W <- matrix(c(1,2,1,3,2,2,2,1,1,1),ncol=2)
+res.lltm <- LLTM(lltmdat2, W)
+summary(res.lltm)
+
+
+###################################################
+### code chunk number 6: eRm.Rnw:681-684
+###################################################
+data(pcmdat2)
+res.rsm <- RSM(pcmdat2)
+thresholds(res.rsm)
+
+
+###################################################
+### code chunk number 7: plotICC-res-rsm (eval = FALSE)
+###################################################
+## plotICC(res.rsm, mplot=TRUE, legpos=FALSE,ask=FALSE)
+
+
+###################################################
+### code chunk number 8: plotICC-res-rsm-plot
+###################################################
+plotICC(res.rsm, mplot=TRUE, legpos=FALSE,ask=FALSE)
+
+
+###################################################
+### code chunk number 9: plotPImap-res-pcm (eval = FALSE)
+###################################################
+## res.pcm <- PCM(pcmdat2)
+## plotPImap(res.pcm, sorted = TRUE)
+
+
+###################################################
+### code chunk number 10: plotPImap-res-pcm-plot
+###################################################
+res.pcm <- PCM(pcmdat2)
+plotPImap(res.pcm, sorted = TRUE)
+
+
+###################################################
+### code chunk number 11: eRm.Rnw:714-716
+###################################################
+pres.pcm <- person.parameter(res.pcm)
+itemfit(pres.pcm)
+
+
+###################################################
+### code chunk number 12: eRm.Rnw:720-724
+###################################################
+lr <- 2 * (res.pcm$loglik - res.rsm$loglik)
+df <- res.pcm$npar - res.rsm$npar
+pvalue <- 1 - pchisq(lr, df)
+cat("LR statistic: ", lr, "  df =", df, "  p =", pvalue, "\n")
+
+
+###################################################
+### code chunk number 13: eRm.Rnw:741-742
+###################################################
+grouplpcm <- rep(1:2, each = 10)
+
+
+###################################################
+### code chunk number 14: eRm.Rnw:746-748
+###################################################
+reslpcm <- LPCM(lpcmdat, mpoints = 2, groupvec = grouplpcm, sum0 = FALSE)
+model.matrix(reslpcm)
+
+
+###################################################
+### code chunk number 15: eRm.Rnw:751-752
+###################################################
+coef(reslpcm, parm="eta")
+
+
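
Chunk 12 above carries out the RSM-vs-PCM likelihood-ratio test by hand; the anova() method documented in NEWS.Rd (0.15-5) should allow the same comparison more compactly. A hedged sketch, not part of the vignette itself:

    # Hedged alternative to chunk 12, using the anova() method from eRm 0.15-5
    # (see ?anova.eRm); refits the models from chunks 6 and 10 above.
    library(eRm)
    res.rsm <- RSM(pcmdat2)
    res.pcm <- PCM(pcmdat2)
    anova(res.rsm, res.pcm)   # LR test of the RSM against the more general PCM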
diff --git a/inst/doc/eRm.Rnw b/inst/doc/eRm.Rnw
old mode 100755
new mode 100644
index 5ee2398..6570817
--- a/inst/doc/eRm.Rnw
+++ b/inst/doc/eRm.Rnw
@@ -1,313 +1,332 @@
 %\VignetteIndexEntry{eRm Basics}
+\SweaveOpts{keep.source=FALSE}
+\documentclass[10pt,nojss,nofooter,fleqn]{jss}
 
-\documentclass[article]{Z}
-\usepackage{amsmath, thumbpdf}
-\usepackage{Sweave}
-\usepackage{graphicx}
+\usepackage[utf8]{inputenx}
 
-\author{Patrick Mair\\Wirtschaftsuniversit\"at Wien \And
-        Reinhold Hatzinger\\Wirtschaftsuniversit\"at Wien}
-\Plainauthor{Patrick Mair, Reinhold Hatzinger}
+\usepackage[noae]{Sweave}
 
-\title{Extended Rasch Modeling: The R Package \pkg{eRm}}
+\usepackage{amsmath,amssymb,amsfonts}
+\usepackage{lmodern}
+\usepackage[nosf,nott,notextcomp,largesmallcaps,easyscsl]{kpfonts}
+\usepackage{booktabs}
+\usepackage{bm}
+\usepackage{microtype}
+
+\makeatletter%
+\let\P\@undefined%
+\makeatother%
+\DeclareMathOperator{\P}{P}
+
+\newcommand{\gnuR}{\proglang{R}}
+\newcommand{\eRm}{\pkg{eRm}}
+\newcommand{\ie}{i.\,e.}
+\newcommand{\eg}{e.\,g.}
+
+\newcommand{\acronym}[1]{\textsc{\lowercase{#1}}} % from Rd.sty
+
+\author{Patrick Mair\\Wirtschaftsuniversität Wien\And%
+Reinhold Hatzinger\\Wirtschaftsuniversität Wien\And%
+Marco J.\ Maier\\Wirtschaftsuniversität Wien}
+\Plainauthor{Patrick Mair, Reinhold Hatzinger, Marco J. Maier}
+
+\title{Extended Rasch Modeling: The \gnuR\ Package \eRm}
 \Plaintitle{Extended Rasch Modeling: The R Package eRm}
-\Shorttitle{The R Package \pkg{eRm}}
-
-\Abstract{
-
-This package vignette is an update of the \pkg{eRm} papers by published
-in a special issue on Psychometrics in the Journal of Statistical
-Software and in Psychology Science \citep{Mair+Hatzinger:2007,
-Mair+Hatzinger:2007b}.  Since the publication of these papers various
-extensions and additional features have been incorporated into the
-package.  We start with a methodological introduction to extended
-Rasch models followed by a general program description and application
-topics.  The package allows for the computation of simple Rasch models,
-rating scale models, partial credit models and linear extensions of
-these.  The incorporation of such linear structures allows for modeling
-the effects of covariates and enables the analysis of repeated
-categorical measurements.  The item parameter estimation is
-performed by means of CML, for the person parameters we use ordinary ML.
-The estimation routines work for incomplete data matrices as well.
-Based on these estimators, item-wise and global goodness-of-fit
-statistics are described and various plots are presented.  }
-
-\Keywords{eRm package, Rasch model, LLTM, RSM, LRSM, PCM, LPCM, CML estimation}
-
-%\Volume{20}
-%\Issue{9}
-%\Month{April}
-%\Year{2007}
-%FIXME%
-%% \Submitdate{2004-06-21}
-%% \Acceptdate{2004-12-04}
-
-%\Address{
-%  Patrick Mair\\
-%  Department f\"ur Statistik und Mathematik\\
-%  Wirtschaftsuniversit\"at Wien\\
-%  A-1090 Wien, Austria\\
-%  E-mail: \email{patrick.mair at wu-wien.ac.at}\\
-%  URL: \url{http://statmath.wu-wien.ac.at/~mair/}
-%}
+\Shorttitle{The \gnuR\ Package \eRm}
 
-\begin{document}
+\Abstract{\noindent%
+This package vignette is an update and extension of the papers published in the Journal of Statistical Software (special issue on Psychometrics, volume 20) and Psychology Science \citep{Mair+Hatzinger:2007, Mair+Hatzinger:2007b}.
+Since the publication of these papers, various extensions and additional features have been incorporated into the package.
 
-\section{Introduction}
+We start with a methodological introduction to extended Rasch models followed by a general program description and application topics.
+The package allows for the computation of simple Rasch models, rating scale models, partial credit models and linear extensions thereof.
+Incorporation of such linear structures allows for modeling the effects of covariates and enables the analysis of repeated categorical measurements.
+Item parameter estimation is performed using \acronym{CML}; for the person parameters we use joint \acronym{ML}.
+These estimation routines work for incomplete data matrices as well.
+Based on these estimators, item-wise and global (parametric and non-parametric) goodness-of-fit statistics are described and various plots are presented.}
+%%% ADD: LLRA
+%%% ADD: NP-Tests
 
-\citet{Ro:99} claimed in his article that ``even though the Rasch model
-has been existing for such a long time, 95\% of the current tests in
-psychology are still constructed by using methods from classical test
-theory" (p. 140).  Basically, he quotes the following reasons why the
-Rasch model (RM) is being rarely used:  The Rasch model in its original form
-\citep{Ra:60}, which was limited to dichotomous items, is arguably too
-restrictive for practical testing purposes.  Thus, researchers should
-focus on extended Rasch models.  In addition, Rost argues that there is
-a lack of user-friendly software for the computation of such models.
-Hence, there is a need for a comprehensive, user-friendly software
-routine.  Corresponding recent discussions can be found in
-\citet{Kub:05} and \citet{Bor:06}.
-
-In addition to the RM, the models that can be computed by means of the \pkg {eRm} package are:
-the linear logistic test model \citep{Scheib:72}, the rating scale model
-\citep{And:78}, the linear rating scale model \citep{FiPa:91}, the
-partial credit model \citep{Mast:82}, and the linear partial credit
-model \citep{GlVe:89,FiPo:94}.  These models and their main
-characteristics are presented in Section \ref{sec:erm}.
-
-Concerning parameter estimation, these models have an important feature
-in common:  Separability of item and person parameters.  This implies
-that the item parameters $\mathbf{\beta}$ can be estimated without
-estimating the person parameters achieved by conditioning the likelihood
-on the sufficient person raw score.  This conditional maximum likelihood
-(CML) approach is described in Section \ref{sec:cml}.
+\Keywords{\eRm\ Package, Rasch Model (\acronym{RM}), \acronym{LLTM}, \acronym{RSM}, \acronym{LRSM}, \acronym{PCM}, \acronym{LPCM}, \acronym{LLRA}, \acronym{CML} estimation}
 
-Several diagnostic tools and tests to evaluate model fit are presented in Section \ref{Gof}.
+\begin{document}
+%
+%
+%
+%
+%
+%\citep{RuschMaierHatzinger:2013:LLRA} %%% LLRA Proceedings
+%\citep{HatzingerRusch:2009:IRTwLLRA} %%% PSQ
+%\citep{Ponocny:2002:ApplicabilitysomeIRT}
+%
+%
+%
+%
+%
+\section{Introduction}
+\citet{Ro:99} claimed in his article that ``even though the Rasch model has been existing for such a long time, 95\% of the current tests in psychology are still constructed by using methods from classical test theory'' (p.\ 140).
+Basically, he quotes the following reasons why the Rasch model \acronym{(RM)} is rarely used: The Rasch model in its original form \citep{Ra:60}, which was limited to dichotomous items, is arguably too restrictive for practical testing purposes.
+Thus, researchers should focus on extended Rasch models.
+In addition, Rost argues that there is a lack of user-friendly software for the computation of such models.
+Hence, there is a need for a comprehensive, user-friendly software package.
+Corresponding recent discussions can be found in \citet{Kub:05} and \citet{Bor:06}.
+
+In addition to the basic \acronym{RM}, the models that can be computed with the \eRm\ package are: the linear logistic test model \citep{Scheib:72}, the rating scale model \citep{And:78}, the linear rating scale model \citep{FiPa:91}, the partial credit model \citep{Mast:82}, and the linear partial credit model \citep{GlVe:89,FiPo:94}.
+These models and their main characteristics are presented in Section \ref{sec:erm}.
+A more recent addition to \eRm\ is the family of linear logistic models with relaxed assumptions \citep{Fisch:95b,FischPonocny:95}, which provides a very flexible framework with a wide range of applications.
+%%% ADD: ref to sec
+
+Concerning estimation of parameters, all models have an important feature in common: Conditional maximum likelihood \acronym{(CML)} estimation, which leads to separability of item and person parameters.
+Item parameters $\beta$ can be estimated without estimating the person parameters $\theta$ by conditioning the likelihood on the sufficient person raw score.
+\acronym{CML} estimation is described in Section \ref{sec:cml}.
 
-In Section \ref{sec:pack}, the corresponding implementation in
-\proglang{R} \citep{R:06} is described by means of several examples.
-The \pkg{eRm} package uses a design matrix approach which allows
-to reparameterize the item parameters to model common characteristics of
-the items or to enable the
-user to impose repeated measurement designs as well as group contrasts.
-By combining these types of contrasts one allows that the item parameter
-may differ over time with respect to certain subgroups.  To illustrate
-the flexibility of the \pkg{eRm} package some examples are given to show
-how suitable design matrices can be constructed.
+Several diagnostic tools and tests to evaluate model fit are presented in Section \ref{Gof}.
 
+In Section \ref{sec:pack}, the corresponding implementation in \gnuR\ \citep{gnuR} is described by means of several examples.
+The \eRm\ package uses a design matrix approach which allows the user to reparameterize the item parameters in order to model common characteristics of the items, or to impose repeated measurement designs as well as group contrasts.
+By combining these types of contrasts, item parameters may differ over time with respect to certain subgroups.
+To illustrate the flexibility of \eRm, some examples are given to show how suitable design matrices can be constructed.
+%
+%
+%
+%
 %----------------- end introduction ----------------
 \section{Extended Rasch models}
 \label{sec:erm}
-
+%
+%
+%
 \subsection{General expressions}
-Briefly after the first publication of the basic Rasch Model \citep{Ra:60}, the author worked on polytomous generalizations which can be found in \citet{Ra:61}. \citet{And:95} derived the representations below which are based on Rasch's general expression for polytomous data. The data matrix is denoted as $\mathbf{X}$ with the persons in the rows and the items in the columns. In total there are $v=1,...,n$ persons and $i=1,...,k$ items. A single element in the data matrix $\mathbf{X}$ is [...]
-
-\begin{equation}
-\label{eq1}
-    P(X_{vi}=h)=\frac{\exp[\phi_h(\theta_v+\beta_i)+\omega_h]}{\sum_{l=0}^{m_i} \exp[\phi_l (\theta_v+\beta_i)+\omega_l]}
+Briefly after the first publication of the basic Rasch Model \citep{Ra:60}, the author worked on polytomous generalizations which can be found in \citet{Ra:61}.
+\citet{And:95} derived the representations below which are based on Rasch's general expression for polytomous data.
+The data matrix is denoted as $\bm{X}$ with the persons $v$ in the rows and items $i$ in the columns.
+In total there are $v=1,\,\ldots,\,n$ persons and $i=1,\,\ldots,\,k$ items.
+A single element in the data matrix $\bm{X}$ is expressed as $x_{vi}$.
+Furthermore, each item $i$ has a certain number of response categories, denoted by $h=0,\,\ldots,\,m_i$.
+The corresponding probability of response $h$ on item $i$ can be derived in terms of the following two expressions \citep{And:95}:
+\begin{equation}\label{eq1}
+  \P(X_{vi}=h)=\frac{\exp[\phi_h(\theta_v+\beta_i)+\omega_h]}{\sum_{l=0}^{m_i} \exp[\phi_l (\theta_v+\beta_i)+\omega_l]} %%%Q: X_vi or x_vi?
 \end{equation}
-
 or
-
-\begin{equation}
-\label{eq2}
-    P(X_{vi}=h)=\frac{\exp[\phi_h \theta_v+\beta_{ih}]}{\sum_{l=0}^{m_i} \exp[\phi_l \theta_v+\beta_{il}]}.
+\begin{equation}\label{eq2}
+  \P(X_{vi}=h)=\frac{\exp[\phi_h \theta_v+\beta_{ih}]}{\sum_{l=0}^{m_i} \exp[\phi_l \theta_v+\beta_{il}]}.
 \end{equation}
-
-Here, $\phi_h$ are scoring functions for the item parameters, $\theta_v$ are the uni-dimensional person parameters, and $\beta_i$ are the item parameters. In Equation \ref{eq1}, $\omega_h$ corresponds to category parameters, whereas in Equation \ref{eq2} $\beta_{ih}$ are the item-category parameters. The meaning of these parameters will be discussed in detail below. Within the framework of these two equations, numerous models have been suggested that retain the basic properties of the Ra [...]
-
-
+Here, $\phi_h$ are scoring functions for the item parameters, $\theta_v$ are the uni-dimensional person parameters, and $\beta_i$ are the item parameters.
+In Equation \ref{eq1}, $\omega_h$ corresponds to category parameters, whereas in Equation \ref{eq2} $\beta_{ih}$ are the item-category parameters.
+The meaning of these parameters will be discussed in detail below.
+Within the framework of these two equations, numerous models have been suggested that retain the basic properties of the Rasch model so that \acronym{CML} estimation can be applied.
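
Read as code, Equation 2 is a softmax over the response categories. A minimal R sketch (the function name p_cat is ours, not part of \eRm):

    # Minimal sketch of Equation 2: category probabilities for one person/item.
    # theta: person parameter; beta: item-category parameters beta_{i0..im};
    # phi: scoring function, here phi_h = h as in the RSM/PCM case below.
    p_cat <- function(theta, beta, phi = seq_along(beta) - 1) {
      num <- exp(phi * theta + beta)
      num / sum(num)
    }
    p_cat(theta = 0.5, beta = c(0, -0.3, -1.1))   # probabilities for h = 0, 1, 2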
+%
+%
+%
 \subsection{Representation of extended Rasch models}
 \label{Rep}
 For the ordinary Rasch model for dichotomous items, Equation \ref{eq1} reduces to
-\begin{equation}
-\label{eq:rasch}
-  P(X_{vi}=1)=\frac{\exp(\theta_v - \beta_i)}{1+\exp(\theta_v-\beta_i)}.
+\begin{equation}\label{eq:rasch}
+  \P(X_{vi}=1)=\frac{\exp(\theta_v - \beta_i)}{1+\exp(\theta_v-\beta_i)}.
 \end{equation}
-The main assumptions, which hold as well for the generalizations presented in this paper, are: uni-dimensionality of the latent trait, sufficiency of the raw score, local independence, and parallel item characteristic curves (ICCs). Corresponding explanations can be found, e.g., in \citet{Fisch:74} and mathematical derivations and proofs in \citet{Fisch:95a}.
-
-\begin{figure}[hbt]
-\centering
+The main assumptions, which hold as well for the generalizations presented in this paper, are: uni-dimensionality of the latent trait, sufficiency of the raw score, local independence, and parallel item characteristic curves (\acronym{ICC}s).
+Corresponding explanations can be found, e.g., in \citet{Fisch:74} and mathematical derivations and proofs in \citet{Fisch:95a}.
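
For the dichotomous case this is the standard logistic function, so the response probability of Equation \ref{eq:rasch} is a one-liner in R:

    # P(X_vi = 1) = plogis(theta_v - beta_i)
    plogis(0.5 - c(-1, 0, 1))   # one person (theta = 0.5), three items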
+\begin{figure}[hbt]\centering%
 \includegraphics[height=60mm, width=40mm]{modelhierarchy.pdf}
-\caption{\label{fig1} Model hierarchy}
+\caption{Model hierarchy}
+\label{fig1}
 \end{figure}
 
-For dichotomous items, \citet{Scheib:72} proposed the (even more restricted) linear logistic test model (LLTM), later formalized by \citet{Fisch:73}, by splitting up the item parameters into the linear combination
+For dichotomous items, \citet{Scheib:72} proposed the (even more restricted) linear logistic test model \acronym{(LLTM)}, later formalized by \citet{Fisch:73}, by splitting up the item parameters into the linear combination
 
 \begin{equation}
 \label{eq4}
   \beta_i=\sum_{j=1}^p w_{ij} \eta_j.
 \end{equation}
 
-\citet{Scheib:72} explained the dissolving process of items in a test for logics (``Mengenrechentest") by so-called ``cognitive operations" $\eta_j$ such as negation, disjunction, conjunction, sequence, intermediate result, permutation, and material. Note that the weights $w_{ij}$ for item $i$ and operation $j$ have to be fixed a priori. Further elaborations about the cognitive operations can be found in \citet[p.~361ff.]{Fisch:74}. Thus, from this perspective the LLTM is more parsimonou [...]
-
-Though, there exists another way to look at the LLTM: A generalization of the basic Rasch model in terms of repeated measures and group contrasts. It should be noted that both types of reparameterization also apply to the linear rating scale model (LRSM) and the linear partial credit model (LPCM) with respect to the basic rating scale model (RSM) and the partial credit model (PCM) presented below. Concerning the LLTM, the possibility to use it as a generalization of the Rasch model for r [...]
-
-At this point we will focus on a simple polytomous generalization of the Rasch model, the RSM \citep{And:78}, where each item $I_i$ must have the same number of categories. Pertaining to Equation \ref{eq1}, $\phi_h$ may be set to $h$ with $h=0,...,m$. Since in the RSM the number of item categories is constant, $m$ is used instead of $m_i$. Hence, it follows that
-
-\begin{equation}
-\label{eq5}
-    P(X_{vi}=h)=\frac{\exp[h(\theta_v+\beta_i)+\omega_h]}{\sum_{l=0}^m \exp[l(\theta_v+ \beta_i)+\omega_l]},
+\citet{Scheib:72} explained the dissolving process of items in a test for logics (``Mengenrechentest'') by so-called ``cognitive operations'' $\eta_j$ such as negation, disjunction, conjunction, sequence, intermediate result, permutation, and material.
+Note that the weights $w_{ij}$ for item $i$ and operation $j$ have to be fixed a priori.
+Further elaborations about the cognitive operations can be found in \citet[p.~361ff.]{Fisch:74}.
+Thus, from this perspective the \acronym{LLTM} is more parsimonious than the Rasch model.
+
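
Equation 4 is a plain matrix product of fixed weights and basic parameters; a hedged sketch with made-up values:

    # Equation 4 as a matrix product (illustrative values only):
    W    <- matrix(c(1, 0, 1,
                     0, 1, 1), nrow = 3)   # fixed weights w_ij: 3 items, 2 operations
    eta  <- c(0.8, -0.4)                   # basic parameters eta_j
    beta <- as.vector(W %*% eta)           # item parameters beta_i
    beta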
+However, there exists another way to look at the \acronym{LLTM}: as a generalization of the basic Rasch model in terms of repeated measures and group contrasts.
+It should be noted that both types of reparameterization also apply to the linear rating scale model \acronym{(LRSM)} and the linear partial credit model \acronym{(LPCM)} with respect to the basic rating scale model \acronym{(RSM)} and the partial credit model \acronym{(PCM)} presented below.
+Concerning the \acronym{LLTM}, the possibility to use it as a generalization of the Rasch model for repeated measurements was already introduced by \citet{Fisch:74}.
+Over the intervening years this suggestion has been further elaborated.
+\citet{Fisch:95b} discussed certain design matrices which will be presented in Section \ref{sec:design} and on the basis of examples in Section \ref{sec:pack}.
+
+At this point we will focus on a simple polytomous generalization of the Rasch model, the \acronym{RSM} \citep{And:78}, where each item $I_i$ must have the same number of categories.
+Pertaining to Equation \ref{eq1}, $\phi_h$ may be set to $h$ with $h=0,\,\ldots,\,m$.
+Since in the \acronym{RSM} the number of item categories is constant, $m$ is used instead of $m_i$.
+Hence, it follows that
+\begin{equation}\label{eq5}
+  \P(X_{vi}=h)=\frac{\exp[h(\theta_v+\beta_i)+\omega_h]}{\sum_{l=0}^m \exp[l(\theta_v+ \beta_i)+\omega_l]},
 \end{equation}
-
-with $k$ item parameters $\beta_1,...,\beta_k$ and $m+1$ category parameters $\omega_0,...,\omega_m$. This parameterization causes a scoring of the response categories $C_h$ which is constant over the single items. Again, the item parameters can be split up in a linear combination as in Equation \ref{eq4}. This leads to the LRSM proposed by \citet{FiPa:91}.
-
-Finally, the PCM developed by \citet{Mast:82} and its linear extension, the LPCM \citep{FiPo:94}, are presented. The PCM assigns one parameter $\beta_{ih}$ to each $I_i \times C_h$ combination for $h=0,...,m_i$. Thus, the constant scoring property must not hold over the items and in addition, the items can have different numbers of response categories denoted by $m_i$. Therefore, the PCM can be regarded as a generalization of the RSM and the probability for a response of person $v$ on ca [...]
-
-\begin{equation}
-\label{eq6}
-    P(X_{vih}=1)=\frac{\exp[h\theta_v + \beta_{ih}]}{\sum_{l=0}^{m_i}\exp[l\theta_v + \beta_{il}]}.
+with $k$ item parameters $\beta_1,\,\ldots,\,\beta_k$ and $m+1$ category parameters $\omega_0,\,\ldots,\,\omega_m$.
+This parameterization causes a scoring of the response categories $C_h$ which is constant over the single items.
+Again, the item parameters can be split up in a linear combination as in Equation \ref{eq4}.
+This leads to the \acronym{LRSM} proposed by \citet{FiPa:91}.
+
+Finally, the \acronym{PCM} developed by \citet{Mast:82} and its linear extension, the \acronym{LPCM} \citep{FiPo:94}, are presented.
+The \acronym{PCM} assigns one parameter $\beta_{ih}$ to each $I_i \times C_h$ combination for $h=0,\,\ldots,\,m_i$.
+Thus, the constant scoring property need not hold across items and, in addition, the items can have different numbers of response categories, denoted by $m_i$.
+Therefore, the \acronym{PCM} can be regarded as a generalization of the \acronym{RSM} and the probability for a response of person $v$ on category $h$ (item $i$) is defined as
+\begin{equation}\label{eq6}
+  \P(X_{vih}=1)=\frac{\exp[h\theta_v + \beta_{ih}]}{\sum_{l=0}^{m_i}\exp[l\theta_v + \beta_{il}]}.
 \end{equation}
-
-It is obvious that (\ref{eq6}) is a simplification of (\ref{eq2}) in terms of $\phi_h = h$. As for the LLTM and the LRSM, the LPCM is defined by reparameterizing the item parameters of the basic model, i.e.,
-
-\begin{equation}
-\label{eq:lpcmeta}
+It is obvious that (\ref{eq6}) is a simplification of (\ref{eq2}) in terms of $\phi_h = h$.
+As for the \acronym{LLTM} and the \acronym{LRSM}, the \acronym{LPCM} is defined by reparameterizing the item parameters of the basic model, i.e.,
+\begin{equation}\label{eq:lpcmeta}
   \beta_{ih}=\sum_{j=1}^p w_{ihj}\eta_j.
 \end{equation}
-
-These six models constitute a hierarchical order as displayed in Figure \ref{fig1}. This hierarchy is the base for a unified CML approach presented in the next section. It is outlined again that the linear extension models can be regarded either as generalizations or as more restrictive formulations pertaining to the underlying base model. The hierarchy for the basic model is straightforward: The RM allows only items with two categories, thus each item is represented by one parameter $\b [...]
-
-To conclude, the most general model is the LPCM. All other models can be considered as simplifications of Equation \ref{eq6} combined with Equation \ref{eq:lpcmeta}. As a consequence, once an estimation procedure is established for the LPCM, this approach can be used for any of the remaining models. This is what we quote as \textit{unified CML approach}. The corresponding likelihood equations follow in Section \ref{sec:cml}.
-
+These six models constitute a hierarchical order as displayed in Figure \ref{fig1}.
+This hierarchy is the base for a unified \acronym{CML} approach presented in the next section.
+It is outlined again that the linear extension models can be regarded either as generalizations or as more restrictive formulations pertaining to the underlying base model.
+The hierarchy for the basic model is straightforward: The \acronym{RM} allows only items with two categories, thus each item is represented by one parameter $\beta_i$.
+The \acronym{RSM} allows for more than two (ordinal) categories each represented by a category parameter $\omega_h$.
+Due to identifiability issues, $\omega_0$ and $\omega_1$ are restricted to 0.
+Hence, the \acronym{RM} can be seen as a special case of the \acronym{RSM} whereas the \acronym{RSM}, in turn, is a special case of the \acronym{PCM}.
+The latter model assigns the parameter $\beta_{ih}$ to each $I_i \times C_h$ combination.
+
+To conclude, the most general model is the \acronym{LPCM}.
+All other models can be considered as simplifications of Equation \ref{eq6} combined with Equation \ref{eq:lpcmeta}.
+As a consequence, once an estimation procedure is established for the \acronym{LPCM}, this approach can be used for any of the remaining models.
+This is what we quote as \textit{unified \acronym{CML} approach}.
+The corresponding likelihood equations follow in Section \ref{sec:cml}.
+%
+%
+%
 \subsection{The concept of virtual items}
 \label{sec:design}
-When operating with longitudinal models, the
-main research question  is whether an individual's test
-performance changes over time. The most intuitive way would be to
-look at the shift in ability $\theta_v$ across time points. Such
-models are presented e.g. in \citet{Mi:85}, \citet{Glas:1992}, and
-discussed by \citet{Ho:95}.
-
-Yet there exists another look onto time dependent changes, as presented in \citet[p~158ff.]{Fisch:95b}: The
-person parameters are fixed over time and instead of them the item
-parameters change. The basic idea is that one item $I_i$ is presented at two different times to the same person $S_v$
-is regarded as a pair of \textit{virtual items}. Within the framework of extended Rasch models, any change in $\theta_v$ occuring between the testing occasions can be described without loss of generality as a change of the item parameters, instead of describing change in terms of the person parameter. Thus, with only two measurement points, $I_i$ with the corresponding parameter $\beta_i$ generates two virtual items $I_r$ and $I_s$ with associated item parameters $\beta^{\ast}_r$ and $\b [...]
-
-Correspondingly, for each measurement point $t$ we have a vector of
-\textit{virtual item parameters} $\boldsymbol{\beta}^{\ast(t)}$ of
-length $k$. These are linear reparameterizations of the original
-$\boldsymbol{\beta}^{(t)}$, and thus the CML approach can be used
-for estimation. In general, for a simple LLTM with two measurement points the design
-matrix $\boldsymbol{W}$ is of the form as given in Table \ref{tab1}.
-
-\begin{table}
-\centering
-\[
-\begin{array}{c|c|rrrr|r}
-& & \eta_1 & \eta_2 & \hdots & \eta_k & \eta_{k+1}\\
-\hline
-\textrm{Time 1} & \beta_1^{\ast(1)} & 1 & 0 & 0 & 0 & 0\\
-& \beta_2^{\ast(1)} & 0 & 1 & 0 & 0 & 0\\
-& \vdots        &   &   & \ddots& & \vdots\\
-& \beta_{k}^{\ast(1)} & 1 & 0 & 0 & 1 & 0\\
-\hline
-\textrm{Time 2} & \beta_{k+1}^{\ast(2)} & 1 & 0 & 0 & 0 & 1\\
-& \beta_{k+2}^{\ast(2)} & 0 & 1 & 0 & 0 & 1\\
-& \vdots        &   &   & \ddots& & \vdots\\
-& \beta_{2k}^{\ast(2)} & 1 & 0 & 0 & 1 & 1\\
-\end{array}
-\]
-\caption{\label{tab1}A design matrix for an LLTM with two timepoints.}
+When operating with longitudinal models, the main research question is whether an individual's test performance changes over time.
+The most intuitive way would be to look at the shift in ability $\theta_v$ across time points.
+Such models are presented, e.g., in \citet{Mi:85}, \citet{Glas:1992}, and discussed by \citet{Ho:95}.
+
+Yet there exists another way to look at time-dependent changes, as presented in \citet[p~158ff.]{Fisch:95b}: The person parameters are fixed over time and instead the item parameters change.
+The basic idea is that an item $I_i$ presented at two different times to the same person $S_v$ is regarded as a pair of \textit{virtual items}.
+Within the framework of extended Rasch models, any change in $\theta_v$ occurring between the testing occasions can be described without loss of generality as a change of the item parameters, instead of describing change in terms of the person parameter.
+Thus, with only two measurement points, $I_i$ with the corresponding parameter $\beta_i$ generates two virtual items $I_r$ and $I_s$ with associated item parameters $\beta^{\ast}_r$ and $\beta^{\ast}_s$.
+For the first measurement point $\beta^{\ast}_r=\beta_i$, whereas for the second $\beta^{\ast}_s=\beta_i+\tau$.
+In this linear combination the $\beta^{\ast}$-parameters are composed additively by means of the real item parameters $\beta$ and the treatment effects $\tau$.
+This concept extends to an arbitrary number of time points or testing occasions.
+
+Correspondingly, for each measurement point $t$ we have a vector of \textit{virtual item parameters} $\bm{\beta}^{\ast(t)}$ of length $k$.
+These are linear reparameterizations of the original $\bm{\beta}^{(t)}$, and thus the \acronym{CML} approach can be used for estimation.
+In general, for a simple \acronym{LLTM} with two measurement points the design matrix $\bm{W}$ is of the form given in Table \ref{tab1}.
+\begin{table}\centering%
+  $\begin{array}{c|c|rrrr|r}
+  & & \eta_1 & \eta_2 & \hdots & \eta_k & \eta_{k+1}\\
+  \hline
+  \textrm{Time 1} & \beta_1^{\ast(1)} & 1 & 0 & 0 & 0 & 0\\
+  & \beta_2^{\ast(1)} & 0 & 1 & 0 & 0 & 0\\
+  & \vdots        &   &   & \ddots& & \vdots\\
+  & \beta_{k}^{\ast(1)} & 0 & 0 & 0 & 1 & 0\\
+  \hline
+  \textrm{Time 2} & \beta_{k+1}^{\ast(2)} & 1 & 0 & 0 & 0 & 1\\
+  & \beta_{k+2}^{\ast(2)} & 0 & 1 & 0 & 0 & 1\\
+  & \vdots        &   &   & \ddots& & \vdots\\
+  & \beta_{2k}^{\ast(2)} & 0 & 0 & 0 & 1 & 1\\
+  \end{array}$
+  \caption{A design matrix for an \acronym{LLTM} with two time points.}
+  \label{tab1}
 \end{table}
 
-The parameter vector $\boldsymbol{\beta}^{\ast(1)}$ represents the
-item parameters for the first test occasion,
-$\boldsymbol{\beta}^{\ast(2)}$ the parameters for the second
-occasion. It might be of interest whether these vectors differ. The
-corresponding trend contrast is $\eta_{k+1}$. Due to this contrast,
-the number of original $\beta$-parameters is doubled by introducing
-the $2k$ virtual item parameters. If we assume a constant shift for
-all item parameters, it is only necessary to estimate
-$\hat{\boldsymbol{\eta}}'=(\hat{\eta}_1,...,\hat{\eta}_{k+1})$
-where $\hat{\eta}_{k+1}$ gives the amount of shift. Since according to (\ref{eq4}), the vector
-$\hat{\boldsymbol{\beta}}^\ast$ is just a linear combination of
-$\hat{\boldsymbol{\eta}}$.
-
-As mentioned in the former section, when using models with linear
-extensions it is possible to impose group contrasts. By doing this,
-one allows that the item difficulties are different across
-subgroups. However, this is possible only for models with repeated
-measurements and virtual items since otherwise the introduction of a
-group contrast leads to overparameterization and the group effect
-cannot be estimated by using CML.
-
-Table \ref{tab2} gives an example for a repeated measurement design
-where the effect of a treatment is to be evaluated by comparing item
-difficulties regarding a control and a treatment group. The number
-of virtual parameters is doubled compared to the model matrix given
-in Table \ref{tab1}.
-
-\begin{table}[h]
-  \centering
-\[
-\begin{array}{c|c|c|rrrr|rrr}
-& & & \eta_1 & \eta_2 & \hdots & \eta_k & \eta_{k+1} & \eta_{k+2} \\
-\hline
-\textrm{Time 1} & \textrm{Group 1} & \beta_1^{\ast(1)} & 1 & 0 & 0 & 0 & 0 &  0\\
-& & \beta_2^{\ast(1)} & 0 & 1 & 0 & 0 & 0&  0\\
-& & \vdots        &   &   & \ddots& &\vdots &\vdots\\
-& & \beta_{k}^{\ast(1)} & 1 & 0 & 0 & 1 & 0 & 0\\
-\cline{2-9}
-& \textrm{Group 2} & \beta_{k+1}^{\ast(1)} & 1 & 0 & 0 & 0 & 0 & 0\\
-& & \beta_{k+2}^{\ast(1)} & 0 & 1 & 0 & 0 & 0 & 0\\
-& & \vdots        &   &   & \ddots& &\vdots & \vdots\\
-& & \beta_{2k}^{\ast(1)} & 1 & 0 & 0 & 1 & 0& 0\\
-\hline
-\textrm{Time 2} & \textrm{Group 1} & \beta_1^{\ast(2)} & 1 & 0 & 0 & 0 & 1 & 0\\
-& & \beta_2^{\ast(2)} & 0 & 1 & 0 & 0 & 1 & 0\\
-& & \vdots        &   &   & \ddots& &\vdots &\vdots\\
-& & \beta_{k}^{\ast(2)} & 1 & 0 & 0 & 1 & 1 & 0\\
-\cline{2-9}
-& \textrm{Group 2} & \beta_{k+1}^{\ast(2)} & 1 & 0 & 0 & 0 & 1 & 1\\
-& & \beta_{k+2}^{\ast(2)} & 0 & 1 & 0 & 0 & 1 & 1\\
-& & \vdots        &   &   & \ddots& &\vdots  & \vdots\\
-& & \beta_{2k}^{\ast(2)} & 1 & 0 & 0 & 1 & 1 & 1\\
-\end{array} \]
-\caption{\label{tab2} Design matrix for a repeated measurements design with treatment and control group.}
+The parameter vector $\bm{\beta}^{\ast(1)}$ represents the item parameters for the first test occasion, $\bm{\beta}^{\ast(2)}$ the parameters for the second occasion.
+It might be of interest whether these vectors differ.
+The corresponding trend contrast is $\eta_{k+1}$.
+Due to this contrast, the number of original $\beta$-parameters is doubled by introducing the $2k$ virtual item parameters.
+If we assume a constant shift for all item parameters, it is only necessary to estimate $\hat{\bm{\eta}}'=(\hat{\eta}_1,\,\ldots,\,\hat{\eta}_{k+1})$, where $\hat{\eta}_{k+1}$ gives the amount of shift.
+This suffices because, according to (\ref{eq4}), the vector $\hat{\bm{\beta}}^\ast$ is just a linear combination of $\hat{\bm{\eta}}$.
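+
+For concreteness, such a design matrix can be set up directly in \proglang{R}; the following sketch (assuming $k=3$ items and a hypothetical data matrix \code{X12} holding the responses to the $2k$ virtual items) mirrors the structure of Table \ref{tab1} and is an illustration only:
+<<eval=FALSE>>=
+k <- 3                                # assumed number of items
+W <- rbind(cbind(diag(k), 0),         # time 1: beta*(1) determined by eta_1..eta_k
+           cbind(diag(k), 1))         # time 2: adds the trend contrast eta_{k+1}
+res <- LLTM(X12, W = W, mpoints = 2)  # X12: hypothetical 0/1 data matrix
+@
+In practice, \code{W} may simply be omitted, in which case \pkg{eRm} generates a suitable (normalized) design matrix automatically.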
+
+As mentioned in the previous section, when using models with linear extensions it is possible to impose group contrasts.
+By doing this, one allows the item difficulties to differ across subgroups.
+However, this is possible only for models with repeated measurements and virtual items since otherwise the introduction of a group contrast leads to overparameterization and the group effect cannot be estimated by using \acronym{CML}.
+
+Table \ref{tab2} gives an example for a repeated measurement design where the effect of a treatment is to be evaluated by comparing item difficulties regarding a control and a treatment group.
+The number of virtual parameters is doubled compared to the model matrix given in Table \ref{tab1}.
+\begin{table}[h]\centering%
+  $\begin{array}{c|c|c|rrrr|rr}
+  & & & \eta_1 & \eta_2 & \hdots & \eta_k & \eta_{k+1} & \eta_{k+2} \\
+  \hline
+  \textrm{Time 1} & \textrm{Group 1} & \beta_1^{\ast(1)} & 1 & 0 & 0 & 0 & 0 & 0\\
+  & & \beta_2^{\ast(1)} & 0 & 1 & 0 & 0 & 0 & 0\\
+  & & \vdots        &   &   & \ddots& &\vdots &\vdots\\
+  & & \beta_{k}^{\ast(1)} & 0 & 0 & 0 & 1 & 0 & 0\\
+  \cline{2-9}
+  & \textrm{Group 2} & \beta_{k+1}^{\ast(1)} & 1 & 0 & 0 & 0 & 0 & 0\\
+  & & \beta_{k+2}^{\ast(1)} & 0 & 1 & 0 & 0 & 0 & 0\\
+  & & \vdots        &   &   & \ddots& &\vdots & \vdots\\
+  & & \beta_{2k}^{\ast(1)} & 0 & 0 & 0 & 1 & 0 & 0\\
+  \hline
+  \textrm{Time 2} & \textrm{Group 1} & \beta_1^{\ast(2)} & 1 & 0 & 0 & 0 & 1 & 0\\
+  & & \beta_2^{\ast(2)} & 0 & 1 & 0 & 0 & 1 & 0\\
+  & & \vdots        &   &   & \ddots& &\vdots &\vdots\\
+  & & \beta_{k}^{\ast(2)} & 0 & 0 & 0 & 1 & 1 & 0\\
+  \cline{2-9}
+  & \textrm{Group 2} & \beta_{k+1}^{\ast(2)} & 1 & 0 & 0 & 0 & 1 & 1\\
+  & & \beta_{k+2}^{\ast(2)} & 0 & 1 & 0 & 0 & 1 & 1\\
+  & & \vdots        &   &   & \ddots& &\vdots  & \vdots\\
+  & & \beta_{2k}^{\ast(2)} & 0 & 0 & 0 & 1 & 1 & 1\\
+  \end{array}$
+  \caption{Design matrix for a repeated measurements design with treatment and control group.}
+  \label{tab2}
 \end{table}
 
-Again, $\eta_{k+1}$ is the parameter that refers to the time
-contrast, and $\eta_{k+2}$ is a group effect within
-measurement point 2. More examples are given in Section \ref{sec:pack}
-and further explanations can be found in \citet{Fisch:95b},
-\citet{FiPo:94}, and in the software manual for the LPCM-Win program
-by \citet{FiPS:98}.
-
-By introducing the concept of virtual persons, \pkg{eRm} allows for the computation of the linear logistic test model with relaxed assumptions \citep[LLRA][]{Fisch:77}. Corresponding explanations will be given in a subsequent version of this vignette.
-
+Again, $\eta_{k+1}$ is the parameter that refers to the time contrast, and $\eta_{k+2}$ is a group effect within measurement point 2.
+More examples are given in Section \ref{sec:pack}; further explanations can be found in \citet{Fisch:95b}, \citet{FiPo:94}, and in the software manual for the LPCM-Win program by \citet{FiPS:98}.
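+
+In \pkg{eRm}, such a design requires no manual matrix setup; a sketch of a hypothetical call (with \code{X2} denoting the data matrix of the $2k$ virtual items and \code{grp} a group membership vector of length $n$) is:
+<<eval=FALSE>>=
+res2 <- LLTM(X2, mpoints = 2, groupvec = grp)  # W is generated automatically
+@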
 
+By introducing the concept of virtual persons, \pkg{eRm} allows for the computation of the linear logistic test model with relaxed assumptions \citep[\acronym{LLRA};][]{Fisch:77}.
+Corresponding explanations will be given in a subsequent version of this vignette.
+%
+%
+%
+%
 %------------------------ end extended Rasch models --------------------------
-
 \section{Estimation of item and person parameters}
 \label{sec:cml}
-
-\subsection{CML for item parameter estimation}
-The main idea behind the CML estimation is that the person's raw score $r_v=\sum_{i=1}^k x_{vi}$ is a sufficient statistic. Thus, by conditioning the likelihood onto $\boldsymbol{r}'=(r_1,...,r_n)$, the person parameters $\boldsymbol{\theta}$, which in this context are nuisance parameters, vanish from the likelihood equation, thus, leading to consistently estimated item parameters $\hat{\boldsymbol{\beta}}$.
-
-Some restrictions have to be imposed on the parameters to ensure identifiability. This can be achieved, e.g., by setting certain parameters to zero depending on the model. In the Rasch model one item parameter has to be fixed to 0. This parameter may be considered as baseline difficulty. In addition, in the RSM the category parameters $\omega_0$ and $\omega_1$ are also constrained to 0. In the PCM all parameters representing the first category, i.e. $\beta_{i0}$ with $i=1,\ldots,k$, and  [...]
-
-At this point, for the LPCM the likelihood equations with corresponding first and second order derivatives are presented (i.e. \textit{unified CML equations}). In the first version of the \pkg {eRm} package numerical approximations of the Hessian matrix are used. However, to ensure numerical accuracy and to speed up the estimation process, it is planned to implement the analytical solution as given below.
-
-The conditional log-likelihood equation for the LPCM is
+%
+%
+%
+\subsection[CML for item parameter estimation]{\protect\acronym{CML} for item parameter estimation}
+The main idea behind the \acronym{CML} estimation is that the person's raw score $r_v=\sum_{i=1}^k x_{vi}$ is a sufficient statistic.
+Thus, by conditioning the likelihood on $\bm{r}'=(r_1,\,\ldots,\,r_n)$, the person parameters $\bm{\theta}$, which in this context are nuisance parameters, vanish from the likelihood equation, leading to consistently estimated item parameters $\hat{\bm{\beta}}$.
+
+Some restrictions have to be imposed on the parameters to ensure identifiability.
+This can be achieved, e.g., by setting certain parameters to zero depending on the model.
+In the Rasch model one item parameter has to be fixed to 0.
+This parameter may be considered as baseline difficulty.
+In addition, in the \acronym{RSM} the category parameters $\omega_0$ and $\omega_1$ are also constrained to 0.
+In the \acronym{PCM} all parameters representing the first category, i.e., $\beta_{i0}$ with $i=1,\ldots,k$, and one additional item-category parameter, e.g., $\beta_{11}$, have to be fixed.
+For the linear extensions it holds that the $\beta$-parameters that are fixed within a certain condition (e.g., first measurement point, control group etc.) are also constrained in the other conditions (e.g., second measurement point, treatment group etc.).
+
+At this point, for the \acronym{LPCM} the likelihood equations with corresponding first and second order derivatives are presented (i.e., \textit{unified \acronym{CML} equations}).
+In the first version of the \pkg{eRm} package, numerical approximations of the Hessian matrix are used.
+However, to ensure numerical accuracy and to speed up the estimation process, it is planned to implement the analytical solution as given below.
+
+The conditional log-likelihood equation for the \acronym{LPCM} is
 
 \begin{equation}
 \label{eq:cmll}
     \log L_c = \sum_{i=1}^k \sum_{h=1}^{m_i} x_{+ih} \sum_{j=1}^p w_{ihj} \eta_j - \sum_{r=1}^{r_{max}} n_r \log \gamma_r.
 \end{equation}
 
-The maximal raw score is denoted by $r_{max}$ whereas the number of subjects with the same raw score is quoted as $n_r$. Alternatively, by going down to an individual level, the last sum over $r$ can be replaced by $\sum_{v=1}^n \log \gamma_{r_v}$. It is straightforward to show that the LPCM as well as the other extended Rasch models, define an exponential family  \citep{And:83}. Thus, the raw score $r_v$ is minimally sufficient for $\theta_v$ and the item totals $x_{.ih}$ are minimally  [...]
-
-Crucial expressions are the $\gamma$-terms which are known as \textit{elementary symmetric functions}. More details about these terms are given in the next section. However, in the \pkg {eRm} package the numerically stable \textit{summation algorithm} as suggested by \citet{And:72} is implemented. \citet{FiPo:94} adopted this algorithm for the LPCM and devised also the first order derivative for computing the corresponding derivative of $\log L_c$:
-
-\begin{equation}
-\label{eq:dcml}
-\frac{\partial\log L_c}{\partial\eta_a} = \sum_{i=1}^k \sum_{h=1}^{m_i} w_{iha}\left(x_{+ih} - \epsilon_{ih} \sum_{r=1}^{r_{max}} n_r \frac{ \gamma_{r}^{(i)}}{\gamma_r}\right).
+The maximal raw score is denoted by $r_{max}$, whereas the number of subjects with the same raw score is denoted by $n_r$.
+Alternatively, by going down to an individual level, the last sum over $r$ can be replaced by $\sum_{v=1}^n \log \gamma_{r_v}$.
+It is straightforward to show that the \acronym{LPCM}, as well as the other extended Rasch models, defines an exponential family \citep{And:83}.
+Thus, the raw score $r_v$ is minimally sufficient for $\theta_v$ and the item totals $x_{.ih}$ are minimally sufficient for $\beta_{ih}$.
+
+Crucial expressions are the $\gamma$-terms which are known as \textit{elementary symmetric functions}.
+More details about these terms are given in the next section.
+However, in the \pkg{eRm} package the numerically stable \textit{summation algorithm} as suggested by \citet{And:72} is implemented.
+\citet{FiPo:94} adapted this algorithm for the \acronym{LPCM} and also devised the first-order derivative needed to compute the gradient of $\log L_c$:
+\begin{equation}\label{eq:dcml}
+  \frac{\partial\log L_c}{\partial\eta_a} = \sum_{i=1}^k \sum_{h=1}^{m_i} w_{iha}\left(x_{+ih} - \epsilon_{ih} \sum_{r=1}^{r_{max}} n_r \frac{ \gamma_{r}^{(i)}}{\gamma_r}\right).
 \end{equation}
-
-It is important to mention that for the CML-representation, the multiplicative Rasch expression is used throughout equations \ref{eq1} to \ref{eq:lpcmeta}, i.e., $\epsilon_i=\exp(-\beta_i)$ for the person parameter. Therefore, $\epsilon_{ih}$ corresponds to the reparameterized item $\times$ category parameter whereas $\epsilon_{ih} > 0$. Furthermore, $\gamma_{r}^{(i)}$ are the first order derivatives of the $\gamma$-functions with respect to item $i$. The index $a$ in $\eta_a$ denotes th [...]
-
-For the second order derivative of $\log L_c$, two cases have to be distinguished: the derivatives for the off-diagonal elements and the derivatives for the main diagonal elements. The item categories with respect to the item index $i$ are coded with $h_i$, and those referring to item $l$ with $h_l$. The second order derivatives of the $\gamma$-functions with respect to items $i$ and $l$ are denoted by $\gamma_r^{(i,l)}$. The corresponding likelihood expressions are
+It is important to mention that for the \acronym{CML}-representation, the multiplicative Rasch expression is used throughout Equations \ref{eq1} to \ref{eq:lpcmeta}, i.e., $\epsilon_i=\exp(-\beta_i)$ for the item parameters.
+Therefore, $\epsilon_{ih}$ corresponds to the reparameterized item $\times$ category parameter, where $\epsilon_{ih} > 0$.
+Furthermore, $\gamma_{r}^{(i)}$ are the first-order derivatives of the $\gamma$-functions with respect to item $i$.
+The index $a$ in $\eta_a$ denotes the first derivative with respect to the $a$-th parameter.
+
+For the second order derivative of $\log L_c$, two cases have to be distinguished: the derivatives for the off-diagonal elements and the derivatives for the main diagonal elements.
+The item categories with respect to the item index $i$ are coded with $h_i$, and those referring to item $l$ with $h_l$.
+The second order derivatives of the $\gamma$-functions with respect to items $i$ and $l$ are denoted by $\gamma_r^{(i,l)}$.
+The corresponding likelihood expressions are
 \begin{align}
 \label{eq:2dcml}
 \frac{\partial\log L_c}{\partial\eta_a \eta_b} = & -\sum_{i=1}^k \sum_{h_i=1}^{m_i} w_{ih_ia}w_{ih_ib}\epsilon_{ih_i} \sum_{r=1}^{r_{max}} n_r \frac{\log \gamma_{r-h_i}}{\gamma_r}\\
@@ -323,363 +342,244 @@ for $a\neq b$, and
 \end{align}
 for $a=b$.
 
-To solve the likelihood equations with respect to $\mathbf{\hat{\eta}}$, a Newton-Raphson algorithm is applied. The update within each iteration step $s$ is performed by
-
-\begin{equation}
-\label{eq:iter}
-\boldsymbol{\hat{\eta}}_s=\boldsymbol{\hat{\eta}}_{s-1}-\mathbf{H}_{s-1}^{-1} \boldsymbol{\delta}_{s-1}.
+To solve the likelihood equations with respect to $\hat{\bm{\eta}}$, a Newton-Raphson algorithm is applied.
+The update within each iteration step $s$ is performed by
+\begin{equation}\label{eq:iter}
+  \hat{\bm{\eta}}_s=\hat{\bm{\eta}}_{s-1}-\bm{H}_{s-1}^{-1}\bm{\delta}_{s-1}.
 \end{equation}
-
-The starting values are $\boldsymbol{\hat{\eta}}_0=\mathbf{0}$.
-$\mathbf{H}_{s-1}^{-1}$ is the inverse of the Hessian matrix composed by
-the elements given in Equation \ref{eq:2dcml} and \ref{eq:2dcmlab} and
-$\boldsymbol{\delta}_{s-1}$ is the gradient at iteration $s-1$ as
-specified in Equation \ref{eq:dcml}.  The iteration stops if the
-likelihood difference $\left|\log L_c^{(s)} - \log L_c^{(s-1)}
-\right|\leq \varphi$ where $\varphi$ is a predefined (small) iteration
-limit.  Note that in the current version (\Sexpr{packageDescription("eRm", fields = "Version")})
-$\mathbf{H}$ is
-approximated numerically by using the \pkg{nlm} Newton-type algorithm
-provided in the \pkg{stats} package.  The analytical solution as given
-in Equation \ref{eq:2dcml} and \ref{eq:2dcmlab} will be implemented in
-the subsequent version of \pkg{eRm}.
-
-
-\subsection{Mathematical properties of the CML estimates}
+The starting values are $\hat{\bm{\eta}}_0=\bm{0}$.
+$\bm{H}_{s-1}^{-1}$ is the inverse of the Hessian matrix composed of the elements given in Equations \ref{eq:2dcml} and \ref{eq:2dcmlab}, and $\bm{\delta}_{s-1}$ is the gradient at iteration $s-1$ as specified in Equation \ref{eq:dcml}.
+The iteration stops if the likelihood difference $\left|\log L_c^{(s)} - \log L_c^{(s-1)} \right|\leq \varphi$ where $\varphi$ is a predefined (small) convergence limit.
+Note that in the current version (\Sexpr{packageDescription("eRm", fields = "Version")}) $\bm{H}$ is approximated numerically, using the \code{nlm()} Newton-type algorithm provided in the \pkg{stats} package.
+The analytical solution as given in Equations \ref{eq:2dcml} and \ref{eq:2dcmlab} will be implemented in a subsequent version of \pkg{eRm}.
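+
+To make the update rule concrete, the following generic \proglang{R} sketch implements Equation \ref{eq:iter} for user-supplied (hypothetical) functions \code{logL()}, \code{grad()}, and \code{hess()}; it merely illustrates the iteration scheme and is not the actual \pkg{eRm} code:
+<<eval=FALSE>>=
+newton_raphson <- function(logL, grad, hess, eta0, phi = 1e-8, maxit = 100) {
+  eta <- eta0                                    # starting values, e.g., all zeros
+  ll  <- logL(eta)
+  for (s in seq_len(maxit)) {
+    eta    <- eta - solve(hess(eta), grad(eta))  # eta_s = eta_{s-1} - H^{-1} delta
+    ll_new <- logL(eta)
+    if (abs(ll_new - ll) <= phi) break           # stop on likelihood difference
+    ll <- ll_new
+  }
+  eta
+}
+@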
+%
+%
+%
+\subsection[Mathematical properties of the CML estimates]{Mathematical properties of the \acronym{CML} estimates}
 \label{sec:mpcml}
-A variety of estimation approaches for IRT models in general  and
-for the Rasch model in particular are available: The \emph{joint
-maximum likelihood} (JML) estimation as proposed by \citet{Wright+Panchapakesan:1969}
-which is not recommended since the estimates are not consistent
-\citep[see e.g.][]{Haberman:77}. The basic reason for that is that the
-person parameters $\boldsymbol{\theta}$ are nuisance parameters; the
-larger the sample size, the larger the number of parameters.
-
-A well-known alternative is the \emph{marginal maximum likelihood}
-(MML) estimation \citep{Bock+Aitkin:1981}: A distribution $g(\theta)$ for
-the person parameters is assumed and the resulting situation
-corresponds to a mixed-effects ANOVA: Item difficulties can be
-regarded as fixed effects and person abilities as random effects.
-Thus, IRT models fit into the framework of \emph{generalized linear
-mixed models} (GLMM) as elaborated in \citet{deBoeck+Wilson:2004}. By
-integrating over the ability distribution the random nuisance
-parameters can be removed from the likelihood equations. This leads
-to consistent estimates of the item parameters. Further discussions
-of the MML approach with respect to the CML method will follow.
-
-For the sake of completeness, some other methods for the estimation
-of the item parameters are the following: \citet{CAnd:07} propose
-a Pseudo-ML approach, \citet{Molenaar:1995} and \citet{Linacre:2004} give an
-overview of various (heuristic) non-ML methods, Bayesian
-techniques can be found in \citet[Chapter 7]{BaKi:04}, and for nonparameteric approaches it is referred to \citet{LeVe:86}.
-
-However, back to CML, the main idea behind this approach is the
-assumption that the raw score $r_v$ is a minimal sufficient
-statistic for $\theta_v$. Starting from the equivalent
-multiplicative expression of Equation \ref{eq1} with
-$\xi_v=\exp(\theta_v)$ and $\epsilon_i=\exp(-\beta_i)$, i.e.,
-\begin{equation}
-\label{eq7}
-  P(X_{vi}=1)=\frac{\xi_v \epsilon_i}{1+\xi_v \epsilon_i},
+A variety of estimation approaches for \acronym{IRT} models in general and for the Rasch model in particular are available.
+The \emph{joint maximum likelihood} \acronym{(JML)} estimation as proposed by \citet{Wright+Panchapakesan:1969} is not recommended since the estimates are not consistent \citep[see e.g.][]{Haberman:77}.
+The basic reason is that the person parameters $\bm{\theta}$ are nuisance parameters; the larger the sample size, the larger the number of parameters.
+
+A well-known alternative is the \emph{marginal maximum likelihood} \acronym{(MML)} estimation \citep{Bock+Aitkin:1981}: A distribution $g(\theta)$ for the person parameters is assumed and the resulting situation corresponds to a mixed-effects \acronym{ANOVA}: Item difficulties can be regarded as fixed effects and person abilities as random effects.
+Thus, \acronym{IRT} models fit into the framework of \emph{generalized linear mixed models} \acronym{(GLMM)} as elaborated in \citet{deBoeck+Wilson:2004}.
+By integrating over the ability distribution the random nuisance parameters can be removed from the likelihood equations.
+This leads to consistent estimates of the item parameters.
+Further discussions of the \acronym{MML} approach with respect to the \acronym{CML} method will follow.
+
+For the sake of completeness, some other methods for the estimation of the item parameters are the following: \citet{CAnd:07} propose a Pseudo-\acronym{ML} approach, \citet{Molenaar:1995} and \citet{Linacre:2004} give an overview of various (heuristic) non-\acronym{ML} methods, Bayesian techniques can be found in \citet[Chapter 7]{BaKi:04}, and for non-parametric approaches we refer to \citet{LeVe:86}.
+
+However, back to \acronym{CML}, the main idea behind this approach is the assumption that the raw score $r_v$ is a minimal sufficient statistic for $\theta_v$.
+Starting from the equivalent multiplicative expression of Equation \ref{eq1} with $\xi_v=\exp(\theta_v)$ and $\epsilon_i=\exp(-\beta_i)$, i.e.,
+\begin{equation}\label{eq7}
+  \P(X_{vi}=1)=\frac{\xi_v \epsilon_i}{1+\xi_v \epsilon_i},
 \end{equation}
-the following likelihood for the response pattern $\boldsymbol{x}_v$
-for a certain subject $v$ results:
-\begin{equation}
-\label{eq8}
-  P(\boldsymbol{x}_v|\xi_v,\boldsymbol{\epsilon})=\prod_{i=1}^k \frac{(\xi_v \epsilon_i)^{x_{vi}}}{1+\xi_v \epsilon_i}=
+the following likelihood for the response pattern $\bm{x}_v$ for a certain subject $v$ results:
+\begin{equation}\label{eq8}
+  \P(\bm{x}_v|\xi_v,\bm{\epsilon})=\prod_{i=1}^k \frac{(\xi_v \epsilon_i)^{x_{vi}}}{1+\xi_v \epsilon_i}=
   \frac{{\xi_v}^{r_v} \prod_{i=1}^k {\epsilon_i}^{x_{vi}}}{\prod_{i=1}^k (1+\xi_v \epsilon_i)}.
 \end{equation}
-Using the notation $\boldsymbol{y}=(y_1,\ldots ,y_k)$ for all
-possible response patterns with $\sum_{i=1}^k y_i=r_v$,  the
-probability for a fixed raw score $r_v$ is
-\begin{equation}
-\label{eq9}
-  P(r_v|\xi_v,\boldsymbol{\epsilon})=\sum_{\boldsymbol{y}|r_v} \prod_{i=1}^k \frac{(\xi_v \epsilon_i)^{x_{vi}}}{1+\xi_v \epsilon_i}=\frac{{\theta_v}^{r_v} \sum_{\boldsymbol{y}|r_v}  \prod_{i=1}^k {\epsilon_i}^{x_{vi}}}{\prod_{i=1}^k (1+\xi_v \epsilon_i)}.
+Using the notation $\bm{y}=(y_1,\ldots,y_k)$ for all possible response patterns with $\sum_{i=1}^k y_i=r_v$, the probability for a fixed raw score $r_v$ is
+\begin{equation}\label{eq9}
+  \P(r_v|\xi_v,\bm{\epsilon})=\sum_{\bm{y}|r_v} \prod_{i=1}^k \frac{(\xi_v \epsilon_i)^{y_i}}{1+\xi_v \epsilon_i}=\frac{{\xi_v}^{r_v} \sum_{\bm{y}|r_v} \prod_{i=1}^k {\epsilon_i}^{y_i}}{\prod_{i=1}^k (1+\xi_v \epsilon_i)}.
 \end{equation}
-The crucial term with respect to numerical solutions of the
-likelihood equations is the second term in the numerator:
-\begin{equation}
-\label{eq:gamma}
-  \gamma_r(\epsilon_i) \equiv \sum_{\boldsymbol{y}|r_v} \prod_{i=1}^k {\epsilon_i}^{x_{vi}}
+The crucial term with respect to numerical solutions of the likelihood equations is the second term in the numerator:
+\begin{equation}\label{eq:gamma}
+  \gamma_r(\bm{\epsilon}) \equiv \sum_{\bm{y}|r_v} \prod_{i=1}^k {\epsilon_i}^{y_i}.
 \end{equation}
 These are the \emph{elementary symmetric functions} (of order $r$).
-An overview of efficient computational algorithms and corresponding
-simulation studies can be found in \citet{Li:94}. The \pkg{eRm}
-package uses the summation algorithm as proposed by \citet{And:72}.
+An overview of efficient computational algorithms and corresponding simulation studies can be found in \citet{Li:94}.
+The \pkg{eRm} package uses the summation algorithm as proposed by \citet{And:72}.
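+
+For the dichotomous case, the summation algorithm reduces to a simple recursion: starting from $\gamma_0=1$, the items are added one at a time via $\gamma_r \leftarrow \gamma_r + \epsilon_j \gamma_{r-1}$. A minimal \proglang{R} sketch (an illustration, not the \pkg{eRm} implementation itself) is:
+<<eval=FALSE>>=
+esf <- function(eps) {                  # eps_i = exp(-beta_i)
+  k   <- length(eps)
+  gam <- c(1, numeric(k))               # gamma_0, ..., gamma_k
+  for (j in seq_len(k))                 # add item j to the recursion
+    gam[2:(j + 1)] <- gam[2:(j + 1)] + eps[j] * gam[1:j]
+  gam
+}
+esf(exp(-c(-0.5, 0, 0.5)))              # gammas of order 0..3 for three items
+@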
 
-Finally, by collecting the different raw scores into the vector
-$\boldsymbol{r}$ the conditional probability of observing response
-pattern $\boldsymbol{x}_v$ with given raw score $r_v$ is
-\begin{equation}
-\label{eq:xraw}
-  P(\boldsymbol{x}_v|r_v,\boldsymbol{\epsilon})=\frac{P(\boldsymbol{x}_v|\xi_v,\boldsymbol{\epsilon})}{P(r_v|\xi_v,\boldsymbol{\epsilon})} \,.
+Finally, by collecting the different raw scores into the vector $\bm{r}$ the conditional probability of observing response pattern $\bm{x}_v$ with given raw score $r_v$ is
+\begin{equation}\label{eq:xraw}
+  \P(\bm{x}_v|r_v,\bm{\epsilon})=\frac{\P(\bm{x}_v|\xi_v,\bm{\epsilon})}{\P(r_v|\xi_v,\bm{\epsilon})} \,.
 \end{equation}
-By taking the product over the persons (independence  assumption),
-the (conditional) likelihood expression for the whole sample becomes
-\begin{equation}
-\label{eq:likall}
-  L(\boldsymbol{\epsilon}|\boldsymbol{r})=P(\boldsymbol{x}|\boldsymbol{r},\boldsymbol{\epsilon})=\prod_{v=1}^n \frac{\prod_{i=1}^k {\epsilon_i}^{x_{vi}}}{\gamma_{r_v}}.
+By taking the product over the persons (independence  assumption), the (conditional) likelihood expression for the whole sample becomes
+\begin{equation}\label{eq:likall}
+  L(\bm{\epsilon}|\bm{r})=\P(\bm{x}|\bm{r},\bm{\epsilon})=\prod_{v=1}^n \frac{\prod_{i=1}^k {\epsilon_i}^{x_{vi}}}{\gamma_{r_v}}.
 \end{equation}
-With respect to raw score frequencies $n_r$ and by reintroducing the
-$\beta$-parameters, (\ref{eq:likall}) can be reformulated as
-\begin{equation}
-\label{eq12a}
-  L(\boldsymbol{\beta}|\boldsymbol{r})= \frac{\exp \left(\sum_{i=1}^k x_{+i}\beta_i \right)}{\prod_{r=0}^k
-  \gamma_r^{n_r}} \,,
+With respect to raw score frequencies $n_r$ and by reintroducing the $\beta$-parameters, (\ref{eq:likall}) can be reformulated as
+\begin{equation}\label{eq12a}
+  L(\bm{\beta}|\bm{r})= \frac{\exp \left(\sum_{i=1}^k x_{+i}\beta_i \right)}{\prod_{r=0}^k\gamma_r^{n_r}}\,,
 \end{equation}
-where $x_{+i}$ are the item raw scores. It is obvious  that by
-conditioning the likelihood on the raw scores $\boldsymbol{r}$, the
-person parameters completely vanished from the expression. As a
-consequence, the parameters $\boldsymbol{\hat{\beta}}$ can be
-estimated without knowledge of the subject's abilities. This issue
-is referred as \emph{person-free item assessment} and we will
-discuss this topic within the context of specific objectivity in the
-next section.
-
-Pertaining to asymptotical issues, it can be shown that  under mild
-regularity conditions \citep{Pf:94} the CML estimates are
-consistent for $n\rightarrow \infty$ and $k$ fixed, unbiased,
-asymptotically efficient, and normally distributed
-\citep{Andersen:1970}. For the computation of a Rasch model,
-comparatively small samples are sufficient to get reliable estimates
-\citep{Fischer:1988}. Whether the MML estimates are unbiased depends
-on the correct specification of the ability distribution
-$g(\theta)$. In case of an incorrect assumption, the estimates are
-biased which is surely a drawback of this method. If $g(\theta)$ is
-specified appropriately, the CML and MML estimates are
-asymptotically equivalent \citep{Pf:94}.
-
-\citet{Fischer:1981} elaborates on the conditions for the existence and
-the uniqueness of the CML estimates. The crucial condition for the
-data matrix is that $\boldsymbol{X}$ has to be
-\emph{well-conditioned}. To introduce this issue it is convenient to
-look at a matrix which is \emph{ill-conditioned}: A matrix is
-ill-conditioned if there exists a partition of the items into two
-nonempty subsets such that all of a group of subjects responded
-correctly to items $i+1,\ldots,k$ ($\boldsymbol{X}_2$) and all of
-all other subjects failed for items $1,\ldots,i$
-($\boldsymbol{X}_3$), i.e.,
-\begin{table}[h]
-\centering
+where $x_{+i}$ are the item raw scores.
+It is obvious that by conditioning the likelihood on the raw scores $\bm{r}$, the person parameters completely vanish from the expression.
+As a consequence, the parameters $\hat{\bm{\beta}}$ can be estimated without knowledge of the subjects' abilities.
+This issue is referred to as \emph{person-free item assessment} and we will discuss this topic within the context of specific objectivity in the next section.
+
+Pertaining to asymptotic issues, it can be shown that under mild regularity conditions \citep{Pf:94} the \acronym{CML} estimates are consistent for $n\rightarrow \infty$ and fixed $k$, unbiased, asymptotically efficient, and normally distributed \citep{Andersen:1970}.
+For the computation of a Rasch model, comparatively small samples are sufficient to get reliable estimates \citep{Fischer:1988}.
+Whether the \acronym{MML} estimates are unbiased depends on the correct specification of the ability distribution $g(\theta)$.
+In case of an incorrect assumption, the estimates are biased, which is surely a drawback of this method.
+If $g(\theta)$ is specified appropriately, the \acronym{CML} and \acronym{MML} estimates are asymptotically equivalent \citep{Pf:94}.
+
+\citet{Fischer:1981} elaborates on the conditions for the existence and the uniqueness of the \acronym{CML} estimates.
+The crucial condition for the data matrix is that $\bm{X}$ has to be \emph{well-conditioned}.
+To introduce this issue it is convenient to look at a matrix which is \emph{ill-conditioned}: A matrix is ill-conditioned if there exists a partition of the items into two nonempty subsets such that one group of subjects responded correctly to all of the items $i+1,\ldots,k$ ($\bm{X}_2$) while all other subjects failed on the items $1,\ldots,i$ ($\bm{X}_3$), i.e.,
+\begin{table}[h]\centering%
 \[
-\boldsymbol{X}=
+\bm{X}=
 \left(
 \begin{array}{c|c}
-\boldsymbol{X}_1 & \boldsymbol{X}_2\\
+\bm{X}_1 & \bm{X}_2\\
 \hline
-\boldsymbol{X}_3 & \boldsymbol{X}_4\\
+\bm{X}_3 & \bm{X}_4\\
 \end{array}
 \right)
 =
 \left(
 \begin{array}{ccc|ccc}
 & & & 1 & \ldots & 1 \\
-& \boldsymbol{X}_1 & & \vdots & \ddots & \vdots \\
+& \bm{X}_1 & & \vdots & \ddots & \vdots \\
 & & & 1 & \ldots & 1 \\
 \hline
 0 & \ldots & 0 & & & \\
-\vdots & \ddots & \vdots & & \boldsymbol{X}_4 & \\
+\vdots & \ddots & \vdots & & \bm{X}_4 & \\
 0 & \ldots & 0 & & & \\
 \end{array}
 \right)
 \]
 \end{table}
 
-Thus, following the definition in \citet{Fischer:1981}: $\boldsymbol{X}$
-will be called \emph{well-conditioned} iff in every possible
-partition of the items into two nonempty subsets some subjects has
-given response 1 on some item in the first set and response 0 on
-some item in the second set. In this case a unique solution for the
-CML estimates $\boldsymbol{\hat{\beta}}$  exists.
-
-This issue is important for structurally incomplete designs which
-often  occur in practice; different subsets of items are presented
-to different groups of persons $g=1,\ldots,G$ where $G\leq n$. As a
-consequence, the likelihood values have to be computed for each
-group separately and the joint likelihood is the product over the
-single group likelihoods. Hence, the likelihood in Equation
-\ref{eq12a} becomes
-\begin{equation}
-\label{eq:glik}
-L(\boldsymbol{\beta}|\boldsymbol{r})=\prod_{g=1}^G \frac{\exp \left(\sum_{i=1}^k x_{+i}\beta_i \right)}{\prod_{r=0}^k {\gamma_{g,r}}^{n_{g,r}}}
+Thus, following the definition in \citet{Fischer:1981}: $\bm{X}$ will be called \emph{well-conditioned} iff in every possible partition of the items into two nonempty subsets some subject has given response 1 on some item in the first set and response 0 on some item in the second set.
+In this case a unique solution for the \acronym{CML} estimates $\hat{\bm{\beta}}$ exists.
+
+This issue is important for structurally incomplete designs which often occur in practice; different subsets of items are presented to different groups of persons $g=1,\ldots,G$ where $G\leq n$.
+As a consequence, the likelihood values have to be computed for each group separately and the joint likelihood is the product over the single group likelihoods.
+Hence, the likelihood in Equation \ref{eq12a} becomes
+\begin{equation}\label{eq:glik}
+  L(\bm{\beta}|\bm{r})=\prod_{g=1}^G \frac{\exp \left(\sum_{i=1}^k x_{+i}\beta_i \right)}{\prod_{r=0}^k {\gamma_{g,r}}^{n_{g,r}}}\,.
 \end{equation}
-This also implies the necessity to compute the elementary symmetric
-functions separately for each group. The \pkg{eRm} package can
-handle such structurally incomplete designs.
-
-From the elaborations above it is obvious that from an
-asymptotical point of view the CML estimates are at least as good
-as the MML estimates. In the past, computational problems (speed,
-numerical accuracy) involved in calculating the elementary symmetric
-functions limited the practical usage of the CML approach \citep[see e.g.][]{Gustafsson:1980}.
+This also implies the necessity to compute the elementary symmetric functions separately for each group.
+The \pkg{eRm} package can handle such structurally incomplete designs.
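+
+As an illustration, responses that are missing by design are simply coded as \code{NA} in the data matrix; the following sketch uses artificial data generated with \code{sim.rasch()}:
+<<eval=FALSE>>=
+set.seed(1)
+X <- sim.rasch(200, 10)          # artificial data: 200 persons, 10 items
+X[1:100, 1:2]    <- NA           # booklet 1: items 1 and 2 not presented
+X[101:200, 9:10] <- NA           # booklet 2: items 9 and 10 not presented
+res <- RM(X)                     # CML estimation on the incomplete design
+@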
+
+From the elaborations above it is obvious that from an asymptotic point of view the \acronym{CML} estimates are at least as good as the \acronym{MML} estimates.
+In the past, computational problems (speed, numerical accuracy) involved in calculating the elementary symmetric functions limited the practical usage of the \acronym{CML} approach \citep[see e.g.][]{Gustafsson:1980}.
 Nowadays, these issues are less crucial due to increased computer power.
 
-In some cases MML estimation has advantages not shared  by CML: MML
-leads to finite person parameters even for persons with zero and
-perfect raw score, and such persons are not removed from the
-estimation process \citep{Molenaar:1995}. On he other hand the
-consideration of such persons does not seem meaningful from a
-substantial point of view since the person parameters are not
-reliable anymore -- for such subjects the test is too difficult or
-too easy, respectively. Thus, due to these covering effects, a
-corresponding ability estimation is not feasible. However, if the
-research goal is to find ability distributions such persons should
-be regarded and MML can handle this.
-
-When estimates for the person parameters are of interest some care
-has to be taken if the CML method is used since person parameters
-cancel from the estimation equations. Usually, they are estimated
-(once having obtained values for the item parameters) by inserting
-$\boldsymbol{\hat{\beta}}$ (or equivalently
-$\boldsymbol{\hat{\epsilon}}$) into Equation \ref {eq8} and
-solving with respect to $\boldsymbol{\theta}$. Alternatively,
-Bayesian procedures are applicable \citep{Hoijtink+Boomsma:1995}. It is again
-pointed out that each person in the sample gets an own parameter
-even though limited by the number of different raw scores.
-
-\subsection{CML and specific objectivity}
-In general, the Rasch model can be regarded as a measurement model:
-Starting from the (nominally scaled) 0/1-data matrix
-$\boldsymbol{X}$, the person raw scores $r_v$ are on an ordinal
-level. They, in turn, are used to estimate the item parameters
-$\boldsymbol{\beta}$ which are on an interval scale provided that
-the Rasch model holds.
-
-Thus, Rasch models allow for comparisons between objects on an
-interval level. Rasch reasoned on requirements to be fulfilled such
-that a specific proposition within this context can be regarded as
-``scientific''. His conclusions were that a basic requirement is the
-``objectivity'' of comparisons \citep{Ra:61}. This claim
-contrasts assumptions met in \emph{classical test theory} (CTT). A
-major advantage of the Rasch model over CTT models is the
-\emph{sample independence} of the results. The relevant concepts in
-CTT are based on a linear model for the ``true score" leading to
-some indices, often correlation coefficients, which in turn depend
-on the observed data. This is a major drawback in CTT. According to
-\citet{Fisch:74}, sample independence in IRT models has the
-following implications:
+In some cases \acronym{MML} estimation has advantages not shared by \acronym{CML}: \acronym{MML} leads to finite person parameters even for persons with zero and perfect raw scores, and such persons are not removed from the estimation process \citep{Molenaar:1995}.
+On the other hand, the consideration of such persons does not seem meaningful from a substantive point of view since their person parameters are not reliable anymore -- for such subjects the test is too difficult or too easy, respectively.
+Thus, due to these ceiling and floor effects, a corresponding ability estimation is not feasible.
+However, if the research goal is to determine ability distributions, such persons should be retained, and \acronym{MML} is able to handle them.
+
+When estimates for the person parameters are of interest some care has to be taken if the \acronym{CML} method is used since person parameters cancel from the estimation equations.
+Usually, they are estimated (once having obtained values for the item parameters) by inserting $\hat{\bm{\beta}}$ (or equivalently $\hat{\bm{\epsilon}}$) into Equation \ref{eq8} and solving with respect to $\bm{\theta}$.
+Alternatively, Bayesian procedures are applicable \citep{Hoijtink+Boomsma:1995}.
+It is again pointed out that each person in the sample gets his or her own parameter estimate, even though the number of distinct estimates is limited by the number of different raw scores.
+%
+%
+%
+\subsection[CML and specific objectivity]{\acronym{CML} and specific objectivity}
+In general, the Rasch model can be regarded as a measurement model: Starting from the (nominally scaled) 0/1-data matrix $\bm{X}$, the person raw scores $r_v$ are on an ordinal level.
+They, in turn, are used to estimate the item parameters $\bm{\beta}$ which are on an interval scale provided that the Rasch model holds.
+
+Thus, Rasch models allow for comparisons between objects on an interval level.
+Rasch reasoned about the requirements to be fulfilled such that a specific proposition within this context can be regarded as ``scientific''.
+His conclusions were that a basic requirement is the ``objectivity'' of comparisons \citep{Ra:61}.
+This claim contrasts assumptions met in \emph{classical test theory} \acronym{(CTT)}.
+A major advantage of the Rasch model over \acronym{CTT} models is the \emph{sample independence} of the results.
+The relevant concepts in \acronym{CTT} are based on a linear model for the ``true score'' leading to some indices, often correlation coefficients, which in turn depend on the observed data.
+This is a major drawback in \acronym{CTT}.
+According to \citet{Fisch:74}, sample independence in \acronym{IRT} models has the following implications:
 \begin{itemize}
-  \item The person-specific results (i.e., essentially $\boldsymbol{\theta}$) do not depend on the assignment of a person to a certain subject group nor on the selected test items from an item pool $\Psi$.
+  \item The person-specific results (i.e., essentially $\bm{\theta}$) depend neither on the assignment of a person to a certain subject group nor on the selected test items from an item pool $\Psi$.
+  \item Changes in the skills of a person on the latent trait can be determined independently of his or her base level and independently of the selected item subset $\psi \subset \Psi$.
+  \item From both a theoretical and a practical perspective, the requirement for representativeness of the sample is obsolete in terms of a true random selection process.
 \end{itemize}
-Based on these requirements for parameter comparisons, \citet{Ra:77}
-introduced the term \emph{specific objectivity}: \emph{objective}
-because any comparison of a pair of parameters is independent of any
-other parameters or comparisons; \emph{specifically objective}
-because the comparison made was relative to some specified frame of
-reference \citep{Andrich:88}. In other words, if specific
-objectivity holds, two persons $v$ and $w$ with corresponding
-parameters $\theta_v$ and $\theta_w$, are comparable independently
-from the remaining persons in the sample and independently from the
-presented item subset $\psi$. In turn, for two items $i$ and $j$
-with parameters $\beta_i$ and $\beta_j$, the comparison of these
-items can be accomplished independently from the remaining items in
-$\Psi$ and independently from the persons in the sample.
-
-The latter is crucial since it reflects completely what is called
-sample independence. If we think not only of comparing $\beta_i$ and
-$\beta_j$ but rather to estimate these parameters, we achieve a
-point where specific objectivity requires a procedure which is able
-to provide estimates $\boldsymbol{\hat{\beta}}$ that do not
-depend on the sample. This implies that
-$\boldsymbol{\hat{\beta}}$ should be computable without the
-involvement of $\boldsymbol{\theta}$. CML estimation fulfills this requirement: By
-conditioning on the sufficient raw score vector $\boldsymbol{r}$,
-$\boldsymbol{\theta}$ disappears from the likelihood equation and
-$L(\boldsymbol{\beta}|\boldsymbol{r})$ can be solved without
-knowledge of $\boldsymbol{\theta}$. This issue is referred to as
-\emph{separability of item and person parameters} \citep[see e.g.][]{Wright+Masters:1982}. Furthermore, separability implies  that no specific distribution should be assumed neither for the person nor for the item parameters \citep{Rost:2000}. MML estimation requires such assumptions. At this point it is clear that CML estimation is
-the only estimation method within the Rasch measurement context
-fulfilling the requirement of \emph{person-free item calibration}
-and, thus, it maps the epistemological theory of specific
-objectivity to a statistical maximum likelihood framework. Note that
-strictly speaking any statistical result based on sample
-observations is sample-dependent because any result depends at least
-on the sample size \citep{Fischer:1987}. The estimation of the item
-parameters is ``sample-independent", a term indicating the fact that
-the actually obtained sample of a certain population is not of
-relevance for the statistical inference on these parameters
-\citep[][p. 23]{Kubinger:1989}.
-
+Based on these requirements for parameter comparisons, \citet{Ra:77} introduced the term \emph{specific objectivity}: \emph{objective} because any comparison of a pair of parameters is independent of any other parameters or comparisons; \emph{specifically objective} because the comparison made was relative to some specified frame of reference \citep{Andrich:88}.
+In other words, if specific objectivity holds, two persons $v$ and $w$ with corresponding parameters $\theta_v$ and $\theta_w$, are comparable independently from the remaining persons in the sample and independently from the presented item subset $\psi$.
+In turn, for two items $i$ and $j$ with parameters $\beta_i$ and $\beta_j$, the comparison of these items can be accomplished independently from the remaining items in $\Psi$ and independently from the persons in the sample.
+
+The latter is crucial since it completely reflects what is called sample independence.
+If we think not only of comparing $\beta_i$ and $\beta_j$ but rather of estimating these parameters, we reach a point where specific objectivity requires a procedure which is able to provide estimates $\hat{\bm{\beta}}$ that do not depend on the sample.
+This implies that $\hat{\bm{\beta}}$ should be computable without the involvement of $\bm{\theta}$.
+\acronym{CML} estimation fulfills this requirement: By conditioning on the sufficient raw score vector $\bm{r}$, $\bm{\theta}$ disappears from the likelihood equation and $L(\bm{\beta}|\bm{r})$ can be solved without knowledge of $\bm{\theta}$.
+This issue is referred to as \emph{separability of item and person parameters} \citep[see e.g.][]{Wright+Masters:1982}.
+Furthermore, separability implies that no specific distribution has to be assumed for either the person or the item parameters \citep{Rost:2001}.
+\acronym{MML} estimation requires such assumptions.
+At this point it is clear that \acronym{CML} estimation is the only estimation method within the Rasch measurement context fulfilling the requirement of \emph{person-free item calibration} and, thus, it maps the epistemological theory of specific objectivity to a statistical maximum likelihood framework.
+Note that strictly speaking any statistical result based on sample observations is sample-dependent because any result depends at least on the sample size \citep{Fischer:1987}.
+The estimation of the item parameters is ``sample-independent'', a term indicating the fact that the actually obtained sample of a certain population is not of relevance for the statistical inference on these parameters \citep[][p.\ 23]{Kubinger:1989}.
+%
+%
+%
 \subsection{Estimation of person parameters}
-CML estimation for person parameters is not recommended due to computational issues. The \pkg{eRm} package provides two methods for this estimation. The first is ordinary ML where the CML-based item parameters are plugged into the joint ML equation. The likelihood is optimized with respect to $\boldsymbol{\theta}$. \citet{And:95} gives a general formulation of this ML estimate with $r_v=r$ and $\theta_v=\theta$:
-\begin{equation}
-\label{eq17}
-    r - \sum_{i=1}^k \sum_{h=1}^{m_i} \frac{h \exp(h \theta+\hat{\beta}_{ih})}{\sum_{l=0}^{m_i}\exp(h \theta_v+\hat{\beta}_{il})}=0
+\acronym{CML} estimation for person parameters is not recommended due to computational issues.
+The \pkg{eRm} package provides two methods for this estimation.
+The first is ordinary \acronym{ML} where the \acronym{CML}-based item parameters are plugged into the joint \acronym{ML} equation.
+The likelihood is optimized with respect to $\bm{\theta}$.
+\citet{And:95} gives a general formulation of this \acronym{ML} estimate with $r_v=r$ and $\theta_v=\theta$:
+\begin{equation}\label{eq17}
+  r - \sum_{i=1}^k \sum_{h=1}^{m_i} \frac{h \exp(h\theta+\hat{\beta}_{ih})}{\sum_{l=0}^{m_i}\exp(l\theta+\hat{\beta}_{il})}=0
 \end{equation}
-
-\citet{Warm:1989} proposed a weighted likelihood estimation (WLE) which is more accurate compared to ML. For the dichotomous Rasch model the expression to be solved with respect to $\boldsymbol{\theta}$ is
+\citet{Warm:1989} proposed a weighted likelihood estimation \acronym{(WLE)} which is more accurate compared to \acronym{ML}.
+For the dichotomous Rasch model the expression to be solved with respect to $\bm{\theta}$ is
 \begin{equation}
-P(\theta_v|\boldsymbol{x}_v, \hat{\boldsymbol{\beta}}) \propto \frac{exp(r_v\theta_v)}{\prod_i (1+exp(\theta_v-\hat{\beta}_i)}\sum_i p_{vi}(1-p_{vi})
+  \P(\theta_v|\bm{x}_v, \hat{\bm{\beta}}) \propto \frac{\exp(r_v\theta_v)}{\prod_i \left(1+\exp(\theta_v-\hat{\beta}_i)\right)}\sum_i p_{vi}(1-p_{vi})
 \end{equation}
-Again, the item parameter vector $\hat{\boldsymbol{\beta}}$ is used from CML. This approach will implemented in a subsequent \pkg{eRm} version. Additional explanations and simulation studies regarding person parameter estimation can be found in \citet{Hoijtink+Boomsma:1995}.
-
+Again, the item parameter vector $\hat{\bm{\beta}}$ from \acronym{CML} estimation is used.
+This approach will be implemented in a subsequent \pkg{eRm} version.
+Additional explanations and simulation studies regarding person parameter estimation can be found in \citet{Hoijtink+Boomsma:1995}.
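+
+In \pkg{eRm}, the plug-in \acronym{ML} approach described above is available through \code{person.parameter()}; a minimal sketch (continuing a hypothetical fitted object \code{res} from \code{RM()}) is:
+<<eval=FALSE>>=
+ppar <- person.parameter(res)    # plugs the CML item parameters into the ML equation
+coef(ppar)                       # person parameter estimates
+@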
+%
+%
+%
+%
 %----------------- end parameter estimation -----------------
-
 \section{Testing extended Rasch models}
 \label{Gof}
-
-Testing IRT models involves two parts: First, item- and person-wise
-statistics can be examined; in particular item-fit and person-fit
-statistics. Secondly, based on CML properties, various model tests
-can be derived \citep[see][]{Glas+Verhelst:1995a,
-Glas+Verhelst:1995b}.
-
+Testing \acronym{IRT} models involves two parts: First, item- and person-wise statistics can be examined, in particular item-fit and person-fit statistics.
+Second, based on \acronym{CML} properties, various model tests can be derived \citep[see][]{Glas+Verhelst:1995a, Glas+Verhelst:1995b}.
+%
+%
+%
 \subsection{Item-fit and person-fit statistics}
-
-Commonly in IRT, items and persons are excluded  due to item-fit and
-person-fit statistics. Both are residual based measures: The
-observed data matrix $\mathbf{X}$ is compared with the model
-probability matrix $\mathbf{P}$. Computing standardized residuals
-for all observations gives the $n \times k$ residual matrix
-$\mathbf{R}$. The squared column sums correspond to item-fit
-statistics and the squared row sums to person-fit statistics both of
-which are $\chi^2$-distributed with the corresponding degrees of
-freedom. Based on these quantities unweighted (\textsl{outfit}) and
-weighted (\textsl{infit}) mean-square statistics can also be used to
-evaluate item and person fit \citep[see
-e.g.][]{Wright+Masters:1982}.
-
+Commonly in \acronym{IRT}, items and persons are excluded due to item-fit and person-fit statistics.
+Both are residual-based measures: The observed data matrix $\bm{X}$ is compared with the model probability matrix $\bm{P}$.
+Computing standardized residuals for all observations gives the $n \times k$ residual matrix $\bm{R}$.
+The squared column sums correspond to item-fit statistics and the squared row sums to person-fit statistics both of which are $\chi^2$-distributed with the corresponding degrees of freedom.
+Based on these quantities unweighted (\textsl{outfit}) and weighted (\textsl{infit}) mean-square statistics can also be used to evaluate item and person fit \citep[see e.g.][]{Wright+Masters:1982}.
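+
+In \pkg{eRm}, these statistics are computed from the person parameter object; a sketch (again assuming a hypothetical fitted object \code{res}) is:
+<<eval=FALSE>>=
+ppar <- person.parameter(res)
+itemfit(ppar)                    # item-fit statistics incl. outfit/infit MSQ
+personfit(ppar)                  # analogous person-fit statistics
+@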
+%
+%
+%
 \subsection{A Wald test for item elimination}
-A helpful implication of CML estimates is that subsequent test
-statistics are readily obtained and model tests are easy to carry
-out. Basically, we have to distinguish between test on item level
-and global model tests.
-
-On item level, sample independence reflects the property that by
-splitting up the sample in, e.g., two parts, the corresponding
-parameter vectors $\boldsymbol{\hat{\beta}}^{(1)}$ and
-$\boldsymbol{\hat{\beta}}^{(2)}$ should be the same. Thus,  when
-we want to achieve Rasch model fit those items have to be
-eliminated from the test which differ in the subsamples. This
-important issue in test calibration can be examined, e.g., by using
-a graphical model test. \citet{FiSch:70} propose a $N(0,1)$-distributed
-test statistic which compares the item parameters for two subgroups:
-\begin{equation}
-\label{eq:wald}
+A helpful implication of \acronym{CML} estimates is that subsequent test statistics are readily obtained and model tests are easy to carry out.
+Basically, we have to distinguish between tests on item level and global model tests.
+
+On item level, sample independence reflects the property that, by splitting up the sample in, e.g., two parts, the corresponding parameter vectors $\hat{\bm{\beta}}^{(1)}$ and $\hat{\bm{\beta}}^{(2)}$ should be the same.
+Thus, when we want to achieve Rasch model fit, those items which differ between the subsamples have to be eliminated from the test.
+This important issue in test calibration can be examined, e.g., by using a graphical model test.
+\citet{FiSch:70} propose a $\mathcal{N}(0,\,1)$-distributed test statistic which compares the item parameters for two subgroups:
+\begin{equation}\label{eq:wald}
   z=\frac{\beta_i^{(1)}-\beta_i^{(2)}}{\sqrt{Var_i^{(1)}+Var_i^{(2)}}}
 \end{equation}
-The variance term in the denominator is based on Fisher's function of ``information in the sample".
-However, as \citet{Glas+Verhelst:1995a} point out
-discussing their Wald-type test that this term can be extracted directly
-from the variance-covariance matrix of the CML estimates. This Wald approach is provided in \pkg{eRm} by means of the function \code{Waldtest()}.
-
+The variance term in the denominator is based on Fisher's function of ``information in the sample''.
+However, as \citet{Glas+Verhelst:1995a} point out when discussing their Wald-type test, this term can be extracted directly from the variance-covariance matrix of the \acronym{CML} estimates.
+This Wald approach is provided in \pkg{eRm} by means of the function \code{Waldtest()}.
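+
+A hypothetical call, splitting the sample at the median raw score, is:
+<<eval=FALSE>>=
+Waldtest(res, splitcr = "median")  # res: a hypothetical fitted Rasch model
+@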
+%
+%
+%
 \subsection{Andersen's likelihood-ratio test}
-In the \pkg {eRm} package the likelihood ratio test statistic $LR$, initially proposed by \citet{And:73} is computed for the RM, the RSM, and the PCM. For the models with linear extensions, $LR$ has to be computed separately for each measurement point and subgroup.
+In the \pkg{eRm} package the likelihood ratio test statistic $LR$, initially proposed by \citet{And:73}, is computed for the \acronym{RM}, the \acronym{RSM}, and the \acronym{PCM}.
+For the models with linear extensions, $LR$ has to be computed separately for each measurement point and subgroup.
 \begin{equation}
 \label{eq15}
-LR = 2\left(\sum_{g=1}^G \log L_c(\boldsymbol{\hat{\eta}}_g;\boldsymbol{X}_g)-\log L_c(\boldsymbol{\hat{\eta}};\boldsymbol{X})\right)
+LR = 2\left(\sum_{g=1}^G \log L_c(\hat{\bm{\eta}}_g;\bm{X}_g)-\log L_c(\hat{\bm{\eta}};\bm{X})\right)
 \end{equation}
-The underlying principle of this test statistic is that of \textit{subgroup homogeneity} in Rasch models: for arbitrary disjoint subgroups $g=1,...,G$ the parameter estimates $\boldsymbol{\hat{\eta}}_g$ have to be the same. $LR$ is asymptotically $\chi^2$-distributed with $df$ equal to the number of parameters estimated in the subgroups minus the number of parameters in the total data set. For the sake of computational efficiency, the \pkg {eRm} package performs a person raw score median [...]
-
-\subsection{Nonparametric (``exact'') Tests}
+The underlying principle of this test statistic is that of \textit{subgroup homogeneity} in Rasch models: for arbitrary disjoint subgroups $g=1,\,\ldots,\,G$ the parameter estimates $\hat{\bm{\eta}}_g$ have to be the same.
+$LR$ is asymptotically $\chi^2$-distributed with $df$ equal to the number of parameters estimated in the subgroups minus the number of parameters in the total data set.
+For the sake of computational efficiency, the \pkg{eRm} package performs a person raw score median split into two subgroups.
+In addition, a graphical model test \citep{Ra:60} based on these estimates is produced by plotting $\hat{\bm{\beta}}^{(1)}$ against $\hat{\bm{\beta}}^{(2)}$.
+Thus, critical items (i.e., those lying far from the diagonal) can be identified and eliminated.
+Further elaborations and additional test statistics for polytomous Rasch models can be found, e.g., in \citet{Glas+Verhelst:1995a}.
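+
+A sketch of the corresponding calls (for a hypothetical fitted object \code{res}) is:
+<<eval=FALSE>>=
+lr <- LRtest(res, splitcr = "median", se = TRUE)
+lr                               # LR statistic, df, and p-value
+plotGOF(lr)                      # plots beta^(1) against beta^(2)
+@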
+
+\subsection{Non-parametric (``quasi-exact'') Tests}
 Based on the package \pkg{RaschSampler} by
 \citet{Verhelst+Hatzinger+Mair:2007} several Rasch model tests as
 proposed by \citet{Ponocny:2001} are provided.
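+
+For example, Ponocny's $T_1$ statistic can be requested via \code{NPtest()}; a sketch with a hypothetical 0/1 data matrix \code{X} is:
+<<eval=FALSE>>=
+NPtest(as.matrix(X), n = 500, method = "T1")  # 500 sampled matrices
+@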
 
-\subsection{Martin-L\"of Test}
-Applying the LR principle to subsets of items, Martin-L\"of \citep[1973, see][]{Glas+Verhelst:1995a} suggested a statistic to
+\subsection{Martin-Löf Test}
+Applying the \acronym{LR} principle to subsets of items, Martin-Löf \citep[1973, see][]{Glas+Verhelst:1995a} suggested a statistic to
 evaluate if two groups of items are homogeneous, i.e.,
 to test the unidimensionality axiom.
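+
+In \pkg{eRm}, this test is provided by \code{MLoef()}; a hypothetical call using a median split of the items is:
+<<eval=FALSE>>=
+MLoef(res, splitcr = "median")   # Martin-Löf test, median raw score split of items
+@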
 %-------------------------- end goodness-of-fit ------------------
@@ -687,184 +587,127 @@ to test the unidimensionality axiom.
 %---------------------------- APPLIED SECTION ----------------------------
 \section{The eRm package and application examples}
 \label{sec:pack}
-The underlying idea of the \pkg {eRm} package is to provide a user-friendly
-flexible tool to compute extended Rasch models. This implies, amongst others,
-an automatic generation of the design matrix $\mathbf{W}$. However, in order to
-test specific hypotheses the user may specify $\mathbf{W}$ allowing the package
-to be flexible enough for computing IRT-models beyond their regular applications.
-In the following subsections, various examples are provided pertaining to different model and design
-matrix scenarios. Due to intelligibility matters, the artificial data sets are kept rather small. A detailed description in German of applications of various extendend Rasch models using the \pkg{eRm} package can be found in \citet{Poinstingl+Mair+Hatzinger:07}.
+The underlying idea of the \pkg{eRm} package is to provide a user-friendly flexible tool to compute extended Rasch models.
+This implies, amongst others, an automatic generation of the design matrix $\bm{W}$.
+However, in order to test specific hypotheses, the user may specify $\bm{W}$, making the package flexible enough to compute \acronym{IRT} models beyond their regular applications.
+In the following subsections, various examples are provided pertaining to different model and design matrix scenarios.
+For the sake of intelligibility, the artificial data sets are kept rather small.
+A detailed description (in German) of applications of various extended Rasch models using the \pkg{eRm} package can be found in \citet{Poinstingl+Mair+Hatzinger:07}.
 
 \subsection{Structure of the eRm package}
-Embedding \pkg{eRm} into the flexible framework of \proglang{R} is a
-crucial benefit over existing stand-alone programs like WINMIRA
-\citep{Davier:1998}, LPCM-WIN \citep{FiPS:98}, and others.
-
-Another important issue in the development phase was that the
-package should be flexible enough to allow for CML compatible
-polytomous generalizations of the basic Rasch model such as the RSM
-and the PCM. In addition, by introducing a design matrix concept
-linear extensions of these basic models should be applicable. This
-approach resulted in including the LLTM, the LRSM and the LPCM as
-the most general model into the \pkg{eRm} package. For the latter
-model the CML estimation was implemented which can be used for the
-remaining models as well. A corresponding
-graphical representation is given in Figure \ref{fig:body}.
-
-\begin{figure}[hbt]
-\begin{center}
-    \includegraphics[width=13.7cm, height=6.5cm]{UCML.jpg}
-    \caption{\label{fig:body}Bodywork of the \pkg{eRm} routine}
-\end{center}
+Embedding \pkg{eRm} into the flexible framework of \proglang{R} is a crucial benefit over existing stand-alone programs like WINMIRA \citep{Davier:1998}, LPCM-WIN \citep{FiPS:98}, and others.
+
+Another important issue in the development phase was that the package should be flexible enough to allow for \acronym{CML}-compatible polytomous generalizations of the basic Rasch model such as the \acronym{RSM} and the \acronym{PCM}.
+In addition, by introducing a design matrix concept, linear extensions of these basic models should be applicable.
+This approach resulted in the inclusion of the \acronym{LLTM}, the \acronym{LRSM}, and the \acronym{LPCM}, the most general of these models, in the \pkg{eRm} package.
+For the latter model, \acronym{CML} estimation was implemented, which can be used for the remaining models as well.
+A corresponding graphical representation is given in Figure \ref{fig:body}.
+\begin{figure}[hbt]\centering%
+  \includegraphics[width=157mm, keepaspectratio=true]{UCML}%
+  \caption{Bodywork of the \pkg{eRm} routine}%
+  \label{fig:body}%
 \end{figure}
 
-An important benefit of the package with respect to linearly
-extended models is that for certain models the design matrix
-$\boldsymbol{W}$ can be generated automatically \citep[LPCM-WIN,][]{FiPS:98} also allows for specifying design matrices but in
-case of more complex models this can become a tedious task and the
-user must have a thorough understanding of establishing proper
-design structures). For repeated measurement models time contrasts
-in the \pkg{eRm} can be simply specified by defining the number of
-measurement points, i.e., {\tt mpoints}. To regard group contrasts
-like, e.g., treatment and control groups, a corresponding vector
-({\tt groupvec}) can be specified that denotes which person belongs
-to which group. However, $\boldsymbol{W}$ can also be defined by the
-user.
-
-A recently added feature of the routine is the option to allow for
-structurally missing values. This is required, e.g., in situations
-when different subsets of items are presented to different groups of
-subjects as described in Section \ref{sec:mpcml}. These person groups
-are identified automatically: In the data matrix $\boldsymbol{X}$,
-those items which are not presented to a certain subject are
-declared as \code{NA}s, as usual in \proglang{R}.
-
-After solving the CML equations by the Newton-Raphson method, the
-output of the routine consists of the ``basic" parameter estimates
-$\boldsymbol{\hat{\eta}}$, the corresponding variance-covariance
-matrix, and consequently the vector with the standard errors.
-Furthermore, the ordinary item parameter estimates
-$\boldsymbol{\hat{\beta}}$ are computed by using the linear
-transformation
-$\boldsymbol{\hat{\beta}}=\boldsymbol{W}\boldsymbol{\hat{\eta}}$.
-For ordinary Rasch models these basic parameters correspond to the
-item easiness. For the RM, the RSM, and the PCM, however, we display
-$\boldsymbol{\hat{\eta}}$ as $\boldsymbol{-\hat{\eta}}$, i.e., as difficulty.
-It has to be mentioned that the CML equation is
-solved with the restriction that one item parameter has to be fixed
-to zero (we use
- $\beta_1=0$). For the sake of interpretability, the resulting
-estimates $\boldsymbol{\hat{\beta}}$ can easily be transformed
-into ``sum-zero" restricted $\boldsymbol{\hat{\beta}^*}$ by
-applying
+An important benefit of the package with respect to linearly extended models is that for certain models the design matrix $\bm{W}$ can be generated automatically (LPCM-WIN \citep{FiPS:98} also allows for specifying design matrices, but in the case of more complex models this can become a tedious task and the user must have a thorough understanding of how to establish proper design structures).
+For repeated measurement models, time contrasts in \pkg{eRm} can simply be specified by defining the number of measurement points, i.e., the argument \code{mpoints}.
+To account for group contrasts, e.g., treatment and control groups, a corresponding vector (\code{groupvec}) can be specified that denotes which person belongs to which group.
+However, $\bm{W}$ can also be defined by the user.
+
+A recently added feature of the routine is the option to allow for structurally missing values.
+This is required, e.g., in situations when different subsets of items are presented to different groups of subjects as described in Section \ref{sec:mpcml}.
+These person groups are identified automatically: In the data matrix $\bm{X}$, those items which are not presented to a certain subject are declared as \code{NA}s, as usual in \proglang{R}.
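+A minimal sketch of such an incomplete design (rows and columns are assumed for illustration only):
+<<eval=FALSE>>=
+## hypothetical design: items 4 and 5 are not presented to the
+## first 10 subjects, so these cells are structural NAs
+X.inc <- raschdat1
+X.inc[1:10, 4:5] <- NA
+res.inc <- RM(X.inc)
+@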
+
+After solving the \acronym{CML} equations by the Newton-Raphson method, the output of the routine consists of the ``basic'' parameter estimates $\hat{\bm{\eta}}$, the corresponding variance-covariance matrix, and consequently the vector with the standard errors.
+Furthermore, the ordinary item parameter estimates $\hat{\bm{\beta}}$ are computed by using the linear transformation $\hat{\bm{\beta}}=\bm{W}\hat{\bm{\eta}}$.
+For ordinary Rasch models these basic parameters correspond to the item easiness.
+For the \acronym{RM}, the \acronym{RSM}, and the \acronym{PCM}, however, we display $\hat{\bm{\eta}}$ as $-\hat{\bm{\eta}}$, i.e., as difficulty.
+It has to be mentioned that the \acronym{CML} equation is solved under the restriction that one item parameter is fixed to zero (we use $\beta_1=0$).
+For the sake of interpretability, the resulting estimates $\hat{\bm{\beta}}$ can easily be transformed into ``sum-zero'' restricted estimates $\hat{\bm{\beta}}^*$ by applying
 $\hat{\beta}_i^*=\hat{\beta}_i-\sum_{j=1}^{k}\hat{\beta}_j/k$.
 This transformation is also used for the graphical model test.
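+As a minimal sketch (assuming a fitted model object \code{res} that stores the easiness parameters in its \code{betapar} component):
+<<eval=FALSE>>=
+## "sum-zero" restricted item parameters from the beta_1 = 0 solution
+b      <- res$betapar
+b.star <- b - mean(b)
+@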
-
+%
+%
+%
 \subsection{Example 1: Rasch model}
-We start the example section
-with a  simple Rasch model based on a $100 \times 30$ data matrix.
-First, we estimate the item parameters using the function
-\code{RM()} and then the person parameters with
-\code{person.parameters()}.
-
+We start the example section with a simple Rasch model based on a $100 \times 30$ data matrix.
+First, we estimate the item parameters using the function \code{RM()} and then the person parameters with \code{person.parameter()}.
 <<>>=
-library(eRm)
-data(raschdat1)
+library("eRm")
 res.rasch <- RM(raschdat1)
 pres.rasch <- person.parameter(res.rasch)
 @
-
 Then we use Andersen's LR-test for goodness-of-fit with the mean split criterion:
 <<>>=
-lrres.rasch <- LRtest(res.rasch, splitcr = "mean", se = TRUE)
+lrres.rasch <- LRtest(res.rasch, splitcr = "mean")
 lrres.rasch
 @
-
-We see that the model fits and a graphical  representation of this
-result (subset of items only) is given in Figure \ref{fig:GOF} by means
-of a goodness-of-fit plot with confidence ellipses.
-
-\begin{figure}[hbt]
-\begin{center}
-<<fig = TRUE>>=
+We see that the model fits; a graphical representation of this result (for a subset of items only) is given in Figure \ref{fig:GOF} by means of a goodness-of-fit plot with confidence ellipses.
+<<plotGOF-lrres-rasch, eval=FALSE, fig=FALSE, results=hide>>=
 plotGOF(lrres.rasch, beta.subset=c(14,5,18,7,1), tlab="item", conf=list(ia=FALSE,col="blue",lty="dotted"))
 @
-\caption{\label{fig:GOF} Goodness-of-fit plot for some items with confidence ellipses.}
-\end{center}
+\begin{figure}[hbt]\centering%
+<<plotGOF-lrres-rasch-plot, echo=FALSE, fig=TRUE>>=
+<<plotGOF-lrres-rasch>>
+@
+\caption{Goodness-of-fit plot for some items with confidence ellipses.}
+\label{fig:GOF}
 \end{figure}
 
 To be able to draw confidence ellipses, \code{se = TRUE} must be set when computing the LR-test.
-
-\subsection{Example 2: LLTM as a restricted Rasch model}
-As mentioned in Section \ref{Rep}, also the models with the linear extensions on
-the item parameters can be seen as special cases of their underlying basic model.
-In fact, the LLTM as presented below and following the original idea by \citet{Scheib:72},
-is a restricted RM, i.e. the number of estimated parameters is smaller compared to a Rasch model. The data matrix
-$\mathbf{X}$ consists of $n=15$ persons and $k=5$ items. Furthermore, we specify a design matrix $\mathbf{W}$ (following Equation \ref{eq4}) with specific weight elements $w_{ij}$.
-
+%
+%
+%
+\subsection[Example 2: LLTM as a restricted Rasch model]{Example 2: \acronym{LLTM} as a restricted Rasch model}
+As mentioned in Section \ref{Rep}, the models with linear extensions on the item parameters can also be seen as special cases of their underlying basic model.
+In fact, the \acronym{LLTM} as presented below, following the original idea by \citet{Scheib:72}, is a restricted \acronym{RM}, i.e., the number of estimated parameters is smaller than in the corresponding Rasch model.
+The data matrix $\bm{X}$ consists of $n=15$ persons and $k=5$ items.
+Furthermore, we specify a design matrix $\bm{W}$ (following Equation \ref{eq4}) with specific weight elements $w_{ij}$.
 <<>>=
-data(lltmdat2)
 W <- matrix(c(1,2,1,3,2,2,2,1,1,1),ncol=2)
 res.lltm <- LLTM(lltmdat2, W)
 summary(res.lltm)
 @
-
-The \code{summary()} method provides point estimates and standard
-errors for the basic parameters and for the resulting item
-parameters. Note that item parameters in \pkg{eRm} are always
-estimated as easiness parameters according to equations \ref{eq1}
-and \ref{eq2} but not \ref{eq:rasch}. If the sign is switched, the
-user gets difficulty parameters (the standard errors remain the
-same, of course). However,
-all plotting functions \code{plotGOF}, \code{plotICC},
-\code{plotjointICC}, and \code{plotPImap}, as well as the function
-\code{thresholds} display the difficulty parameters. The same applies
-for the basic parameters $\eta$ in the output of the RM, RSM, and PCM.
-
-\subsection{Example 3: RSM and PCM}
-Again, we provide an artificial data set now with $n=300$ persons and $k=4$ items;
-each of them with $m+1=3$ categories. We start with the estimation of an RSM and, subsequently,
-we calculate the corresponding category-intersection parameters using the function \code{thresholds()}.
-
+The \code{summary()} method provides point estimates and standard errors for the basic parameters and for the resulting item parameters.
+Note that item parameters in \pkg{eRm} are always estimated as easiness parameters according to equations \ref{eq1} and \ref{eq2} but not \ref{eq:rasch}.
+If the sign is switched, the user gets difficulty parameters (the standard errors remain the same, of course).
+However, all plotting functions \code{plotGOF}, \code{plotICC}, \code{plotjointICC}, and \code{plotPImap}, as well as the function \code{thresholds}, display the difficulty parameters.
+The same applies for the basic parameters $\eta$ in the output of the \acronym{RM}, \acronym{RSM}, and \acronym{PCM}.
+%
+%
+%
+\subsection[Example 3: RSM and PCM]{Example 3: \protect\acronym{RSM} and \protect\acronym{PCM}}
+Again, we provide an artificial data set, now with $n=300$ persons and $k=4$ items, each with $m+1=3$ categories.
+We start with the estimation of an \acronym{RSM} and, subsequently, we calculate the corresponding category-intersection parameters using the function \code{thresholds()}.
 <<>>=
 data(pcmdat2)
 res.rsm <- RSM(pcmdat2)
 thresholds(res.rsm)
 @
-
-The location parameter is basically the item difficulty and the thesholds are the points in the
-ICC plot given in Figure \ref{fig:ICC} where the category curves intersect:
-
-<<fig = FALSE>>=
+The location parameter is basically the item difficulty and the thresholds are the points in the \acronym{ICC} plot given in Figure \ref{fig:ICC} where the category curves intersect:
+<<plotICC-res-rsm, eval=FALSE, fig=FALSE, results=hide>>=
 plotICC(res.rsm, mplot=TRUE, legpos=FALSE,ask=FALSE)
 @
-
-\begin{figure}[hbt]
-\begin{center}
-<<fig = TRUE, echo=FALSE>>=
-plotICC(res.rsm, mplot=TRUE, legpos=FALSE,ask=FALSE)
+\begin{figure}[hbt]\centering%
+<<plotICC-res-rsm-plot, echo=FALSE, fig=TRUE>>=
+<<plotICC-res-rsm>>
 @
-\caption{\label{fig:ICC} ICC plot for an RSM.}
-\end{center}
+\caption{\acronym{ICC} plot for an \acronym{RSM}.}
+\label{fig:ICC}
 \end{figure}
 
-The RSM restricts the threshold distances to be the same across all items.
-This strong assumption can be relaxed using a PCM. The results are represented in a person-item map
-(see Figure \ref{fig:PImap}).
-
-<<fig=FALSE>>=
+The \acronym{RSM} restricts the threshold distances to be the same across all items.
+This strong assumption can be relaxed using a \acronym{PCM}.
+The results are represented in a person-item map (see Figure \ref{fig:PImap}).
+<<plotPImap-res-pcm, eval=FALSE, fig=FALSE, results=hide>>=
 res.pcm <- PCM(pcmdat2)
 plotPImap(res.pcm, sorted = TRUE)
 @
-
-\begin{figure}[hbt]
-\begin{center}
-<<fig=TRUE,echo=FALSE>>=
-res.pcm <- PCM(pcmdat2)
-plotPImap(res.pcm, sorted = TRUE)
+\begin{figure}[hbt]\centering%
+<<plotPImap-res-pcm-plot, echo=FALSE, fig=TRUE>>=
+<<plotPImap-res-pcm>>
 @
-\caption{\label{fig:PImap} Person-Item map for a PCM.}
-\end{center}
+\caption{Person-Item map for a \acronym{PCM}.}
+\label{fig:PImap}
 \end{figure}
 
 After estimating the person parameters we can check the item-fit statistics.
@@ -872,72 +715,66 @@ After estimating the person parameters we can check the item-fit statistics.
 pres.pcm <- person.parameter(res.pcm)
 itemfit(pres.pcm)
 @
-
-A likelihood ratio test comparing the RSM and the PCM indicates that the PCM provides a better fit.
-%Since none of the items is significant we can conclude that the data fit the PCM.
-
+A likelihood ratio test comparing the \acronym{RSM} and the \acronym{PCM} indicates that the \acronym{PCM} provides a better fit.
+%Since none of the items is significant we can conclude that the data fit the \acronym{PCM}.
 <<>>=
 lr <- 2 * (res.pcm$loglik - res.rsm$loglik)
 df <- res.pcm$npar - res.rsm$npar
 pvalue <- 1 - pchisq(lr, df)
 cat("LR statistic: ", lr, "  df =", df, "  p =", pvalue, "\n")
 @
-
-
-\subsection{An LPCM for repeated measurements in different groups}
-The most complex example refers to an LPCM with two measurement points.
+%
+%
+%
+\subsection[An LPCM for repeated measurements in different groups]{An \protect\acronym{LPCM} for repeated measurements in different groups}
+The most complex example refers to an \acronym{LPCM} with two measurement points.
+In addition, it is of interest whether the treatment has an effect.
-The corresponding contrast is the last column in $\mathbf{W}$ below.
+The corresponding contrast is the last column in $\bm{W}$ below.
 
-First, the data matrix $\mathbf{X}$ is specified. We assume an artificial test consisting of $k=3$ items
-which was presented twice to the subjects. The first 3 columns in $\mathbf{X}$ correspond
-to the first test occasion, whereas the last 3 to the second occasion.
+First, the data matrix $\bm{X}$ is specified.
+We assume an artificial test consisting of $k=3$ items which was presented twice to the subjects.
+The first 3 columns in $\bm{X}$ correspond to the first test occasion, whereas the last 3 to the second occasion.
 Generally, the first $k$ columns correspond to the first test occasion, the next $k$ columns to the second, and so on.
-In total, there are $n=20$ subjects. Among these, the first 10 persons belong to the first group (e.g., control),
-and the next 10 persons to the second group (e.g., treatment). This is specified
-by a group vector:
-
+In total, there are $n=20$ subjects.
+Among these, the first 10 persons belong to the first group (e.g., control), and the next 10 persons to the second group (e.g., treatment).
+This is specified by a group vector:
 <<>>=
-data(lpcmdat)
 grouplpcm <- rep(1:2, each = 10)
 @
-
-Again, $\boldsymbol{W}$ is generated automatically. In general, for such designs
-the generation of $\boldsymbol{W}$ consists first of the item contrasts,
-followed by the time contrasts and finally by the group main effects except for
-the first measurement point (due to identifiability issues, as already described).
-
+Again, $\bm{W}$ is generated automatically.
+In general, for such designs the generation of $\bm{W}$ consists first of the item contrasts, followed by the time contrasts and finally by the group main effects except for the first measurement point (due to identifiability issues, as already described).
 <<>>=
 reslpcm <- LPCM(lpcmdat, mpoints = 2, groupvec = grouplpcm, sum0 = FALSE)
 model.matrix(reslpcm)
 @
-
 The parameter estimates are the following:
-
-<<echo = FALSE>>=
-reslpcm
+<<>>=
+coef(reslpcm, parm="eta")
 @
-
-Testing whether the $\eta$-parameters equal 0 is mostly not of relevance for those
-parameters referring to the items (in this example $\eta_1,...,\eta_8$).
-But for the remaining contrasts, $H_0: \eta_9=0$ (implying no general time effect)
-can not be rejected ($p=.44$), whereas hypothesis $H_0: \eta_{10}=0$ has to be rejected
-($p=.004$) when applying a $z$-test.
+Testing whether the $\eta$-parameters equal 0 is mostly not of relevance for the parameters referring to the items (in this example $\eta_1,\,\ldots,\,\eta_8$).
+But for the remaining contrasts, $H_0: \eta_9=0$ (implying no general time effect) cannot be rejected ($p=.44$), whereas the hypothesis $H_0: \eta_{10}=0$ has to be rejected ($p=.004$) when applying a $z$-test.
 This suggests that there is a significant treatment effect over the measurement points.
-If a user wants to perform additional tests such as a Wald test for the equivalence
-of two $\eta$-parameters, the \code{vcov} method can be applied to get the
-variance-covariance matrix.
-
+If a user wants to perform additional tests such as a Wald test for the equivalence of two $\eta$-parameters, the \code{vcov} method can be applied to get the variance-covariance matrix.
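+A minimal sketch of such a test (a hypothetical contrast between $\eta_9$ and $\eta_{10}$):
+<<eval=FALSE>>=
+## z-test for H0: eta_9 = eta_10 based on the vcov method
+eta <- coef(reslpcm, parm = "eta")
+V   <- vcov(reslpcm)
+z   <- (eta[9] - eta[10]) / sqrt(V[9, 9] + V[10, 10] - 2 * V[9, 10])
+2 * pnorm(-abs(z))   # two-sided p-value
+@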
+%
+%
+%
+%
+%
 \section{Additional topics}
-
-This section will be extended successively with new developments and
-components which do not directly relate to the modeling core of
-\pkg{eRm} but may prove to be useful add-ons.
-
+This section will be extended successively with new developments and components which do not directly relate to the modeling core of \pkg{eRm} but may prove to be useful add-ons.
+%
+%
+%
 \subsection{The eRm simulation module}
-A recent \pkg{eRm} development is the implementation of a simulation module to generate 0-1 matrices for different Rasch scenarios. In this article we give a brief overview about the functionality and for more detailed descriptions (within the context of model testing) it is referred to \citet{Mair:2006} and \citet{Suarez+Glas:2003}.
-
-For each scenario the user has the option either to assign $\boldsymbol{\theta}$ and $\boldsymbol{\beta}$ as vectors to the simulation function (e.g. by drawing parameters from a uniform distribution) or to let the function draw the parameters from a $N(0,1)$ distribution. The first scenario is the simulation of Rasch homogenous data by means of the function \code{sim.rasch()}. The parameter values are plugged into equation \ref{eq:rasch} and it results the matrix $\mathbf{P}$ of model p [...]
+A recent \pkg{eRm} development is the implementation of a simulation module to generate 0-1 matrices for different Rasch scenarios.
+In this article we give a brief overview of the functionality; for more detailed descriptions (within the context of model testing) we refer to \citet{Mair:2006} and \citet{Suarez+Glas:2003}.
+
+For each scenario the user has the option either to assign $\bm{\theta}$ and $\bm{\beta}$ as vectors to the simulation function (e.g., by drawing parameters from a uniform distribution) or to let the function draw the parameters from a $\mathcal{N}(0,1)$ distribution.
+The first scenario is the simulation of Rasch-homogeneous data by means of the function \code{sim.rasch()}.
+The parameter values are plugged into equation \ref{eq:rasch}, resulting in the $n \times k$ matrix $\bm{P}$ of model probabilities.
+An element $p_{vi}$ indicates the probability that subject $v$ solves item $i$.
+In a second step the matrix $\bm{P}$ has to be transformed into the 0-1 data matrix $\bm{X}$.
+The recommended way to achieve this is to draw another random number $p^{\star}_{vi}$ from a uniform distribution in $[0;1]$ and perform the transformation according to the following rule:
 \begin{equation*}
 x_{vi} = \left\{
  \begin{array}{rl}
@@ -945,65 +782,85 @@ x_{vi} = \left\{
   0 & \text{if } p^{\star}_{vi} > p_{vi}\\
  \end{array} \right.
 \end{equation*}
-Alternatively, the user can specify a fixed cutpoint $p^{\star}:=p^{\star}_{vi}$ (e.g. $p^{\star} = 0.5$) and make the decision according to the same rule. This option is provided by means of the \code{cutpoint} argument. Caution is advised when using this deterministic option since this leads likely to ill-conditioned data matrices.
+Alternatively, the user can specify a fixed cutpoint $p^{\star}:=p^{\star}_{vi}$ (e.g., $p^{\star} = 0.5$) and make the decision according to the same rule.
+This option is provided by means of the \code{cutpoint} argument.
+Caution is advised when using this deterministic option since it is likely to lead to ill-conditioned data matrices.
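+A minimal sketch (the parameter vectors are assumed choices):
+<<eval=FALSE>>=
+## simulate a 100 x 10 Rasch-homogeneous 0-1 matrix
+X.sim <- sim.rasch(persons = rnorm(100), items = rnorm(10))
+@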
 
-The second scenario in this module regards the violation of the parallel ICC assumption which leads to the two-parameter logistic model (2-PL) proposed by \citet{Birnbaum:1968}:
-\begin{equation}
-\label{eq:2pl}
-  P(X_{vi}=1)=\frac{\exp(\alpha_i(\theta_v - \beta_i))}{1+\exp(\alpha_i(\theta_v-\beta_i))}.
+The second scenario in this module concerns the violation of the parallel \acronym{ICC} assumption, which leads to the two-parameter logistic model (2-\acronym{PL}) proposed by \citet{Birnbaum:1968}:
+\begin{equation}\label{eq:2pl}
+  \P(X_{vi}=1)=\frac{\exp(\alpha_i(\theta_v - \beta_i))}{1+\exp(\alpha_i(\theta_v-\beta_i))}.
 \end{equation}
-The parameter $\alpha_i$ denotes the item discrimination which for the Rasch model is 1 across all items. Thus, each item score gets a weight and the raw scores are not sufficient anymore. The function for simulating 2-PL data is \code{sim.2pl()} and if $\boldsymbol{\alpha}$ is not specified by the user by means of the argument \code{discrim}, the discrimination parameters are drawn from a log-normal distribution. The reasons for using this particular kind of distribution are the followi [...]
-ns of the dispersion parameter $\sigma^2$. A value of $\sigma^2 = .50$ already denotes a strong violation. The lower $\sigma^2$, the closer the values lie around 1. In this case the $\alpha_i$ are close to the Rasch slopes.
-
-Using the function \code{sim.xdim()} the unidimensionality assumptions is violated. This function allows for the simulation of multidimensional Rasch models as for instance given \citet{Glas:1992} and \citet{Adams+Wilson+Wang:1997}. Multidimensionality implies that one single item measures more than one latent construct. Let us denote the number of these latent traits by $D$. Consequently, each person has a vector of ability parameters $\boldsymbol{\theta}_v$ of length $D$. These vectors [...]
- is not provided by the user, \code{sim.xdim()} generates $\mathbf{Z}$ such that each $\mathbf{z}_i$ contains only nonzero element which indicates the assigned dimension. This corresponds to the \emph{between-item multidimensional model} \citep{Adams+Wilson+Wang:1997}. However, in any case the person part of the model is $\mathbf{z}_i^T \boldsymbol{\theta}_v$ which replaces $\theta_v$ in Equation \ref{eq:rasch}.
-
-Finally, locally dependent item responses can be produced by means of the function \code{sim.locdep()}. Local dependence implies the introduction of pair-wise item correlations $\delta_{ij}$. If these correlations are constant across items, the argument \code{it.cor} can be a single value $\delta$. A value $\delta = 0$ corresponds to the Rasch model whereas $\delta = 1$ leads to the strongest violation. Alternatively, for different pair-wise item correlations, the user can specify a VC-m [...]
+The parameter $\alpha_i$ denotes the item discrimination which for the Rasch model is 1 across all items.
+Thus, each item score gets a weight and the raw scores are not sufficient anymore.
+The function for simulating 2-\acronym{PL} data is \code{sim.2pl()}; if $\bm{\alpha}$ is not specified by the user by means of the argument \code{discrim}, the discrimination parameters are drawn from a log-normal distribution.
+The reasons for using this particular kind of distribution are the following: In the case of $\alpha_i = 1$ the \acronym{ICC}s are Rasch-consistent.
+Concerning the violations, it should be possible to achieve deviations in both directions (for $\alpha_i > 0$).
+If $\alpha_i > 1$ the \acronym{ICC} is steeper than in the Rasch case and, consequently, if $\alpha_i < 1$ the \acronym{ICC} is flatter.
+This bidirectional deviation around 1 is ensured by the log-normal distribution $LN(\mu,\sigma^2)$ with $\mu = 0$.
+Since the distribution has positive support, $\alpha_i$ cannot be negative.
+The degree of model violation can be controlled by means of the dispersion parameter $\sigma^2$.
+A value of $\sigma^2 = .50$ already denotes a strong violation.
+The lower $\sigma^2$, the closer the values lie around 1; in this case the $\alpha_i$ are close to the Rasch slopes.
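+A minimal sketch (passing a single \code{discrim} value to steer the log-normal dispersion is an assumed usage):
+<<eval=FALSE>>=
+## 2-PL data; discrimination parameters drawn from a log-normal
+## distribution, larger dispersion meaning stronger violation
+X.2pl <- sim.2pl(persons = rnorm(100), items = rnorm(10), discrim = 0.25)
+@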
+
+Using the function \code{sim.xdim()}, the unidimensionality assumption is violated.
+This function allows for the simulation of multidimensional Rasch models as given, for instance, in \citet{Glas:1992} and \citet{Adams+Wilson+Wang:1997}.
+Multidimensionality implies that one single item measures more than one latent construct.
+Let us denote the number of these latent traits by $D$.
+Consequently, each person has a vector of ability parameters $\bm{\theta}_v$ of length $D$.
+These vectors are drawn from a multivariate normal distribution with mean $\bm{\mu} = \bm{0}$ and VC-matrix $\bm{\Sigma}$ of dimension $D \times D$.
+This matrix has to be specified by the user with the argument \code{Sigma}.
+In order to achieve strong model violations, very low correlations such as .01 should be provided.
+To specify to what extent item $i$ measures each of the $D$ dimensions, a corresponding vector of weights $\bm{z}_i$ of length $D$ is defined.
+If the resulting $k \times D$ matrix $\bm{Z}$ is not provided by the user, \code{sim.xdim()} generates $\bm{Z}$ such that each $\bm{z}_i$ contains exactly one nonzero element, which indicates the assigned dimension.
+This corresponds to the \emph{between-item multidimensional model} \citep{Adams+Wilson+Wang:1997}.
+However, in any case the person part of the model is $\bm{z}_i^T \bm{\theta}_v$, which replaces $\theta_v$ in Equation \ref{eq:rasch}.
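+A minimal sketch for $D=2$ dimensions (it is assumed here that \code{persons} may be given as a number, in which case the ability vectors are drawn from the specified distribution):
+<<eval=FALSE>>=
+## two nearly uncorrelated latent traits; Z is generated automatically
+Sigma  <- matrix(c(1, 0.01, 0.01, 1), nrow = 2)
+X.xdim <- sim.xdim(persons = 100, items = rnorm(10), Sigma = Sigma)
+@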
+
+Finally, locally dependent item responses can be produced by means of the function \code{sim.locdep()}.
+Local dependence implies the introduction of pair-wise item correlations $\delta_{ij}$.
+If these correlations are constant across items, the argument \code{it.cor} can be a single value $\delta$.
+A value $\delta = 0$ corresponds to the Rasch model whereas $\delta = 1$ leads to the strongest violation.
+Alternatively, for different pair-wise item correlations, the user can specify a VC-matrix $\bm{\Delta}$ of dimension $k \times k$.
+The formal representation of the corresponding \acronym{IRT} model is
 \begin{equation}
-P(X_{vi}=1|X_{vj}=x_{vj})=\frac{\exp(\theta_v - \beta_i + x_{vj}\delta_{ij})}{1+\exp(\theta_v-\beta_i + x_{vj}\delta_{ij})}.
+  \P(X_{vi}=1|X_{vj}=x_{vj})=\frac{\exp(\theta_v - \beta_i + x_{vj}\delta_{ij})}{1+\exp(\theta_v-\beta_i + x_{vj}\delta_{ij})}.
 \end{equation}
 This model was proposed by \citet{Jannarone:1986} and is suited to model locally dependent item responses.
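+A minimal sketch with a constant pair-wise correlation (the value is an assumed choice):
+<<eval=FALSE>>=
+## locally dependent responses; it.cor = 0 would reduce to the Rasch model
+X.ld <- sim.locdep(persons = rnorm(100), items = rnorm(10), it.cor = 0.5)
+@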
-
-
+%
+%
+%
+%
+%
 \section{Discussion and outlook}
 \label{sec:disc}
-
-Here we give a brief outline of future \pkg{eRm} developments. The
-CML estimation  approach, in combination with the EM-algorithm, can
-also be used to estimate \textit{mixed Rasch models} (MIRA). The
-basic idea behind such models is that the extended Rasch model holds
-within subpopulations of individuals, but with different parameter
-values for each subgroup. Corresponding elaborations are given in
-\citet{RoDa:95}.
-
-In Rasch models the item discrimination parameter $\alpha_i$ is
-always fixed  to 1 and thus it does not appear in the basic
-equation. Allowing for different discrimination parameters across
-items leads to the two-parameter logistic model as given in Equation
-\ref{eq:2pl}. In this model the raw scores are not sufficient
-statistics anymore and hence CML can not be applied. 2-PL models can
-be estimated by means of the \pkg{ltm} package \citep{Riz:06}.
-However, \citet{Verhelst+Glas:1995} formulated the one parameter
-logistic model (OPLM) where the $\alpha_i$ do not vary across the
-items but are unequal to one. The basic strategy to estimate OPLM is
-a three-step approach: First, the item parameters of the Rasch model
-are computed. Then, discrimination parameters are computed under
-certain restrictions. Finally, using these discrimination weights,
-the item parameters for the OPLM are estimated using CML. This is a
-more flexible version of the Rasch model in terms of different
-slopes.
-
-To conclude, the \pkg{eRm} package is a tool to estimate extended
-Rasch models for unidimensional traits.  The generalizations towards
-different numbers of item categories, linear extensions to allow for
-introducing item covariates and/or trend and optionally group
-contrasts are important issues when examining item behavior and
-person performances in tests.  This improves the feasibility of IRT
-models with respect to a wide variety of application areas.
-
-\bibliography{eRmvig}
-\newpage
-
-\rotatebox[origin=c]{90}{\includegraphics[width=1.1\textheight]{eRm_object_tree.pdf}}
-
+Here we give a brief outline of future \pkg{eRm} developments.
+The \acronym{CML} estimation approach, in combination with the \acronym{EM}-algorithm, can also be used to estimate \textit{mixed Rasch models} (MIRA).
+The basic idea behind such models is that the extended Rasch model holds within subpopulations of individuals, but with different parameter values for each subgroup.
+Corresponding elaborations are given in \citet{RoDa:95}.
+
+In Rasch models the item discrimination parameter $\alpha_i$ is always fixed to 1 and thus does not appear in the basic equation.
+Allowing for different discrimination parameters across items leads to the two-parameter logistic model as given in Equation \ref{eq:2pl}.
+In this model the raw scores are no longer sufficient statistics and hence \acronym{CML} cannot be applied.
+2-\acronym{PL} models can be estimated by means of the \pkg{ltm} package \citep{Riz:06}.
+However, \citet{Verhelst+Glas:1995} formulated the one-parameter logistic model (\acronym{OPLM}), where the $\alpha_i$ may differ across items but are treated as fixed constants rather than estimated.
+The basic strategy to estimate \acronym{OPLM} is a three-step approach: First, the item parameters of the Rasch model are computed.
+Then, discrimination parameters are computed under certain restrictions.
+Finally, using these discrimination weights, the item parameters for the \acronym{OPLM} are estimated using \acronym{CML}.
+This is a more flexible version of the Rasch model in terms of different slopes.
+
+To conclude, the \pkg{eRm} package is a tool to estimate extended Rasch models for unidimensional traits.
+The generalizations towards different numbers of item categories, together with the linear extensions that allow for introducing item covariates and/or trend and, optionally, group contrasts, are important issues when examining item behavior and person performance in tests.
+This improves the feasibility of \acronym{IRT} models with respect to a wide variety of application areas.
+%
+%
+%
+%
+%
+\bibliography{eRmvig}%
+\newpage%
+\rotatebox[origin=c]{90}{\includegraphics[width=1.1\textheight]{eRm_object_tree.pdf}}%
+%
+%
+%
 \end{document}
diff --git a/inst/doc/eRm.pdf b/inst/doc/eRm.pdf
index 1c389ee..bdcbaa8 100644
Binary files a/inst/doc/eRm.pdf and b/inst/doc/eRm.pdf differ
diff --git a/inst/doc/eRmvig.bib b/inst/doc/eRmvig.bib
deleted file mode 100755
index de9c02c..0000000
--- a/inst/doc/eRmvig.bib
+++ /dev/null
@@ -1,695 +0,0 @@
- at article{Ro:99,
-   author = {J. Rost},
-   year = {1999},
-   TITLE  = {Was ist aus dem Rasch-Modell geworden? [What Happened with the Rasch Model?]},
-   JOURNAL = {Psychologische Rundschau},
-   VOLUME = {50},
-   PAGES = {140--156}
-}
-
- at article{Scheib:72,
-   author = {H. Scheiblechner},
-   year = {1972},
-   TITLE  = {{Das Lernen und L\"osen komplexer Denkaufgaben. [The learning and solving of complex reasoning items.]}},
-   JOURNAL = {Zeitschrift f\"ur Experimentelle und Angewandte Psychologie},
-   VOLUME = {3},
-   PAGES = {456--506}
-}
-
- at article{And:78,
-   author = {D. Andrich},
-   year = {1978},
-   TITLE  = {A rating formulation for ordered response categories},
-   JOURNAL = {Psychometrika},
-   VOLUME = {43},
-   PAGES = {561--573}
-}
-
- at article{FiPa:91,
-   author = {G. H. Fischer and P. Parzer},
-   year = {1991},
-   TITLE  = {An extension of the rating scale model with an application to the measurement of change},
-   JOURNAL = {Psychometrika},
-   VOLUME = {56},
-   PAGES = {637--651}
-}
-
- at article{Mast:82,
-   author = {G. N. Masters},
-   year = {1982},
-   TITLE  = {A Rasch model for partial credit scoring},
-   JOURNAL = {Psychometrika},
-   VOLUME = {47},
-   PAGES = {149--174}
-}
-
- at article{FiPo:94,
-   author = {G. H. Fischer and I. Ponocny},
-   year = {1994},
-   TITLE  = {An extension of the partial credit model with an application to the measurement of change},
-   JOURNAL = {Psychometrika},
-   VOLUME = {59},
-   PAGES = {177--192}
-}
-
- at article{LeVe:86,
-   author = {J. de Leeuw and N. Verhelst},
-   year = {1986},
-   TITLE  = {Maximum likelihood estimation in generalized Rasch models},
-   JOURNAL = {Journal of Educational Statistics},
-   VOLUME = {11},
-   PAGES = {183--196}
-}
-
- at article{Ra:77,
-   author = {G. Rasch},
-   year = {1977},
-   TITLE  = {On specific objectivity: An attempt at formalising the request for generality and validity of scientific statements},
-   JOURNAL = {Danish Yearbook of Philosophy},
-   VOLUME = {14},
-   PAGES = {58--94}
-}
-
- at article{GlVe:89,
-   author = {C. A. W. Glas and N. Verhelst},
-   year = {1989},
-   TITLE  = {Extensions of the partial credit model},
-   JOURNAL = {Psychometrika},
-   VOLUME = {54},
-   PAGES = {635--659}
-}
-
- at article{Mi:85,
-   author = {R. J. Mislevy},
-   year = {1985},
-   TITLE  = {Estimation of latent group effects},
-   JOURNAL = {Journal of the American Statistical Association},
-   VOLUME = {80},
-   PAGES = {993--997}
-}
-
- at article{Li:94,
-   author = {M. Liou},
-   year = {1994},
-   TITLE  = {More on the computation of higher-order derivatives of the elementary symmetric functions in the Rasch model},
-   JOURNAL = {Applied Psychological Measurement},
-   VOLUME = {18},
-   PAGES = {53--62}
-}
-
-
- at article{And:72,
-   author = {E. B. Andersen},
-   year = {1972},
-   TITLE  = {The numerical solution of a set of conditional estimation equations},
-   JOURNAL = {Journal of the Royal Statistical Society, Series B},
-   VOLUME = {34},
-   PAGES = {42--54}
-}
-
- at article{And:73,
-   author = {E. B. Andersen},
-   year = {1973},
-   TITLE  = {A goodness of fit test for the Rasch model},
-   JOURNAL = {Psychometrika},
-   VOLUME = {38},
-   PAGES = {123--140}
-}
-
- at article{Fisch:73,
-   author = {G. H. Fischer},
-   year = {1973},
-   TITLE  = {The linear logistic test model as an instrument in educational research},
-   JOURNAL = {Acta Psychologica},
-   VOLUME = {37},
-   PAGES = {359--374}
-}
-
- at article{Riz:06,
-   author = {D. Rizopoulos},
-   year = {2006},
-   TITLE  = {\pkg{ltm}: An \proglang{R} package for latent variable modeling and item response theory analyses},
-   JOURNAL = {Journal of Statistical Software},
-   VOLUME = {17},
-   NUMBER = {5},
-   PAGES = {1--25},
-   URL = {http://www.jstatsoft.org/v17/i05/}
-}
-
- at article{Bor:06,
-   author = {D. Borsboom},
-   year = {2006},
-   TITLE  = {The attack of the psychometricians},
-   JOURNAL = {Psychometrika},
-   VOLUME = {71},
-   PAGES = {425--440}
-}
-
- at article{Kub:05,
-   author = {K. D. Kubinger},
-   year = {2005},
-   TITLE  = {Psychological test calibration using the Rasch model: Some critical suggestions on traditional approaches},
-   JOURNAL = {International Journal of Testing},
-   VOLUME = {5},
-   PAGES = {377--394}
-}
-
- at article{CAnd:07,
-   author = {C. Anderson and Z. Li and J. Vermunt},
-   year = {2007},
-   TITLE  = {Estimation of models in the Rasch family for polytomous items and multiple latent variables},
-   JOURNAL = {Journal of Statistical Software},
-   VOLUME = {20},
-   number = {6},
-   PAGES = {},
-   url = {http://www.jstatsoft.org/v20/i06/}
-}
-
- at BOOK{Ra:60,
-   AUTHOR = {Rasch, G.},
-   YEAR   = {1960},
-   TITLE  = {Probabilistic Models for some Intelligence and Attainment Tests},
-   PUBLISHER = {Danish Institute for Educational Research},
-   EDITION = {},
-   ADDRESS = {Copenhagen}
-}
-
- at BOOK{Fisch:74,
-   AUTHOR = {Fischer, G. H.},
-   YEAR   = {1974},
-   TITLE  = {Einf\"uhrung in die Theorie psychologischer Tests [Introduction to Mental Test Theory]},
-   PUBLISHER = {Huber},
-   EDITION = {},
-   ADDRESS = {Bern}
-}
-
- at BOOK{BaKi:04,
-   AUTHOR = {Baker, F. B. and Kim, S.},
-   YEAR   = {2004},
-   TITLE  = {Item Response Theory: Parameter Estimation Techniques},
-   PUBLISHER = {Dekker},
-   EDITION = {2nd},
-   ADDRESS = {New York}
-}
-
- at BOOK{FiPS:98,
-   AUTHOR = {Fischer, G. H. and Ponocny-Seliger, E.},
-   YEAR   = {1998},
-   TITLE  = {Structural Rasch Modeling: Handbook of the Usage of LPCM-WIN 1.0},
-   PUBLISHER = {ProGAMMA},
-   EDITION = {},
-   ADDRESS = {Groningen}
-}
-
-
- at INCOLLECTION{Ra:61,
-   AUTHOR = {Rasch, G.},
-   YEAR   = {1961},
-   TITLE  = {On General Laws and the Meaning of Measurement in Psychology.},
-   BOOKTITLE = {Proceedings of the IV. Berkeley Symposium on Mathematical Statistics and Probability, Vol. IV},
-   PAGES = {321--333},
-   EDITOR = {},
-   PUBLISHER = {University of California Press}, 
-   ADDRESS = {Berkeley}
-}
-
- at INCOLLECTION{Fisch:95a,
-   AUTHOR = {Fischer, G. H.},
-   YEAR   = {1995},
-   TITLE  = {Derivations of the Rasch Model},
-   BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications},
-   PAGES = {15--38},
-   EDITOR = {G.H. Fischer and I.W. Molenaar},
-   PUBLISHER = {Springer}, 
-   ADDRESS = {New York}
-}
-
- at INCOLLECTION{Fisch:95b,
-   AUTHOR = {Fischer, G. H.},
-   YEAR   = {1995},
-   TITLE  = {Linear Logistic Models for Change},
-   BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications},
-   PAGES = {157--180},
-   EDITOR = {G.H. Fischer and I.W. Molenaar},
-   PUBLISHER = {Springer}, 
-   ADDRESS = {New York}
-}
-
- at INCOLLECTION{Linacre:2004,
-   AUTHOR = {Linacre, J. M.},
-   YEAR   = {2004},
-   TITLE  = {Estimation Methods for \uppercase{R}asch Measures},
-   BOOKTITLE = {Introduction to \uppercase{R}asch Measurement},
-   PAGES = {25--48},
-   EDITOR = {E. V. {Smith Jr.} and R. M. Smith},
-   PUBLISHER = {JAM Press}, 
-   ADDRESS = {Maple Grove, MN}
-}
-
- at INCOLLECTION{And:95,
-   AUTHOR = {Andersen, E. B.},
-   YEAR   = {1995},
-   TITLE  = {Polytomous Rasch Models and their Estimation},
-   BOOKTITLE = {Rasch models: Foundations, recent developments, and applications},
-   PAGES = {271--292},
-   EDITOR = {G.H. Fischer and I.W. Molenaar},
-   PUBLISHER = {Springer}, 
-   ADDRESS = {New York}
-}
-
- at INCOLLECTION{Molenaar:1995,
-   AUTHOR = {Molenaar, I.},
-   YEAR   = {1995},
-   TITLE  = {Estimation of Item Parameters},
-   BOOKTITLE = {Rasch models: Foundations, recent developments, and applications},
-   PAGES = {39--51},
-   EDITOR = {G.H. Fischer and I.W. Molenaar},
-   PUBLISHER = {Springer}, 
-   ADDRESS = {New York}
-}
-
- at article{Bock+Aitkin:1981,
-   author = {R. D. Bock and M. Aitkin},
-   year = {1981},
-   TITLE  = {Marginal maximum likelihood estimation of item parameters: An application of an \uppercase{EM} algorithm},
-   JOURNAL = {Psychometrika},
-   VOLUME = {46},
-   PAGES = {443--459}
-}
-
- at article{Haberman:77,
-   author = {S. J. Haberman},
-   year = {1977},
-   TITLE  = {Maximum likelihood estimates in exponential response models},
-   JOURNAL = {The Annals of Statistics},
-   VOLUME = {5},
-   PAGES = {815--841}
-}
-
- at article{Wright+Panchapakesan:1969,
-   author = {B. D. Wright and N. Panchapakesan},
-   year = {1969},
-   TITLE  = {A procedure for sample-free item analysis},
-   JOURNAL = {Educational and Psychological Measurement},
-   VOLUME = {29},
-   PAGES = {23--48}
-}
-
- at BOOK{Wright+Masters:1982,
-   AUTHOR = {Wright, B. D. and Masters, G. N.},
-   YEAR   = {1982},
-   TITLE  = {Rating scale analysis: \uppercase{R}asch measurement},
-   PUBLISHER = {Mesa Press},
-   EDITION = {},
-   ADDRESS = {Chicago}
-}
-
- at BOOK{Andrich:88,
-   AUTHOR = {Andrich, D.},
-   YEAR   = {1988},
-   TITLE  = {Rasch Models for Measurement (Sage University paper series on quantitative applications in the social sciences)},
-   PUBLISHER = {Sage},
-   EDITION = {},
-   ADDRESS = {Newbury Park, CA}
-}
-
- at INCOLLECTION{FiJr:92,
-   AUTHOR = {Fisher Jr., W. P.},
-   YEAR   = {1992},
-   TITLE  = {Objectivity in Measurement: A Philosophical History of \uppercase{R}asch's Separability Theorem},
-   BOOKTITLE = {Objective Measurement: Theory into Practice, Volume 1},
-   PAGES = {29--60},
-   EDITOR = {M. Wilson},
-   PUBLISHER = {Ablex}, 
-   ADDRESS = {Norwood, NJ}
-}
-
- at INCOLLECTION{Rost:2000,
-   AUTHOR = {Rost, J.},
-   YEAR   = {2000},
-   TITLE  = {The Growing Family of \uppercase{R}asch Models},
-   BOOKTITLE = {Essays on item response theory},
-   PAGES = {25--42},
-   EDITOR = {A. Boomsma and M.A.J. van Duijn and T.A.B. Snijders},
-   PUBLISHER = {Springer}, 
-   ADDRESS = {New York}
-}
-
- at article{Fischer:1987,
-    author = {G. H. Fischer},
-    title = {Applying the principles of specific objectivity and of generalizability to the measurement of change},
-    year = {1987},
-    journal = {Psychometrika},
-    volume = {52},
-    pages = {565-587},
-}
-
- at BOOK{Davier:1998,
-   AUTHOR = {{von Davier}, M.},
-   YEAR   = {1998},
-   TITLE  = {\uppercase{WINMIRA}: A \uppercase{W}indows program for mixed \uppercase{R}asch models},
-   PUBLISHER = {IPN},
-   EDITION = {},
-   ADDRESS = {Kiel}
-}
-
- at INCOLLECTION{Kubinger:1989,
-   AUTHOR = {Kubinger, K. D.},
-   YEAR   = {1989},
-   TITLE  = {Aktueller \uppercase{S}tand und kritische \uppercase{W}\"urdigung der \uppercase{P}robabilistischen \uppercase{T}esttheorie. [\uppercase{C}urrent status and critical appreciation of probabilistic test theory]},
-   BOOKTITLE = {Moderne \uppercase{T}esttheorie: Ein Abriss samt neuesten Beitr\"agen},
-   PAGES = {19--83},
-   EDITOR = {K.D. Kubinger},
-   PUBLISHER = {Beltz}, 
-   ADDRESS = {Weinheim}
-}
-
-
- at INCOLLECTION{Glas:1992,
-   AUTHOR = {Glas, C. A. W.},
-   YEAR   = {1992},
-   TITLE  = {A Rasch Model with a Multivariate Distribution of Ability},
-   BOOKTITLE = {Objective Measurement: Theory into Practice, Volume 1},
-   PAGES = {236--258},
-   EDITOR = {M. Wilson},
-   PUBLISHER = {Ablex}, 
-   ADDRESS = {Norwood, NJ}
-}
-
- at INCOLLECTION{Ho:95,
-   AUTHOR = {Hoijtink, H.},
-   YEAR   = {1995},
-   TITLE  = {Linear and Repeated Measures Models for the Person Parameter},
-   BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications},
-   PAGES = {203--214},
-   EDITOR = {G.H. Fischer and I.W. Molenaar},
-   PUBLISHER = {Springer}, 
-   ADDRESS = {New York}
-}
-
- at article{Fischer:1981,
-   author = {G. H. Fischer},
-   year = {1981},
-   TITLE  = {On the existence and uniqueness of maximum-likelihood estimates in the \uppercase{R}asch model},
-   JOURNAL = {Psychometrika},
-   VOLUME = {46},
-   PAGES = {59--77}
-}
-
- at INCOLLECTION{Fischer:1988,
-   AUTHOR = {Fischer, G. H.},
-   YEAR   = {1988},
-   TITLE  = {Spezifische \uppercase{O}bjektvit\"at: \uppercase{E}ine wissenschaftstheoretische \uppercase{G}rundlage des \uppercase{R}asch-\uppercase{M}odells. [\uppercase{S}pecific objectivity: \uppercase{A}n epistemological foundation of the \uppercase{R}asch model.]},
-   BOOKTITLE = {Moderne Testtheorie},
-   PAGES = {87--111},
-   EDITOR = {K.D. Kubinger},
-   PUBLISHER = {Beltz}, 
-   ADDRESS = {Weinheim}
-}
-
- at INCOLLECTION{And:83,
-   AUTHOR = {Andersen, E. B.},
-   YEAR   = {1983},
-   TITLE  = {A General Latent Structure Model for Contingency Table Data},
-   BOOKTITLE = {Principals of Modern Psychological Measurement},
-   PAGES = {117--138},
-   EDITOR = {H. Wainer and S. Messik},
-   PUBLISHER = {Erlbaum}, 
-   ADDRESS = {Hillsdale, NJ}
-}
-
- at article{Andersen:1970,
-   author = {E. B. Andersen},
-   year = {1970},
-   TITLE  = {Asymptotic properties of conditional maximum likelihood estimators},
-   JOURNAL = {Journal of the Royal Statistical Society, Series B},
-   VOLUME = {32},
-   PAGES = {283--301}
-}
-
- at INCOLLECTION{Glas+Verhelst:1995b,
-   AUTHOR = {Glas, C. A. W. and Verhelst, N.},
-   YEAR   = {1995},
-   TITLE  = {Tests of Fit for Polytomous \uppercase{R}asch Models},
-   BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications},
-   PAGES = {325--352},
-   EDITOR = {G.H. Fischer and I.W. Molenaar},
-   PUBLISHER = {Springer}, 
-   ADDRESS = {New York}
-}
-
- at BOOK{deBoeck+Wilson:2004,
-   AUTHOR = {{de Boeck}, P. and Wilson, M.},
-   YEAR   = {2004},
-   TITLE  = {Explanatory item response models: A generalized linear and nonlinear approach},
-   PUBLISHER = {Springer},
-   EDITION = {},
-   ADDRESS = {New York}
-}
-
-
- at article{VedB:01,
-   author = {T. Verguts and P. {De Boeck}},
-   year = {2001},
-   TITLE  = {Some \uppercase{M}antel-\uppercase{H}aenszel tests of \uppercase{R}asch model assumptions},
-   JOURNAL = {British Journal of Mathematical and Statistical Psychology},
-   VOLUME = {54},
-   PAGES = {21--37}
-}
-
- at INCOLLECTION{Glas+Verhelst:1995a,
-   AUTHOR = {Glas, C. A. W. and Verhelst, N.},
-   YEAR   = {1995},
-   TITLE  = {Testing the \uppercase{R}asch model},
-   BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications},
-   PAGES = {69--96},
-   EDITOR = {G.H. Fischer and I.W. Molenaar},
-   PUBLISHER = {Springer}, 
-   ADDRESS = {New York}
-}
-
- at INCOLLECTION{Sm:04,
-   AUTHOR = {Smith, R. M.},
-   YEAR   = {2004},
-   TITLE  = {Fit Analysis in Latent Trait Measurement Models.},
-   BOOKTITLE = {Introduction to Rasch Measurement},
-   PAGES = {73--92},
-   EDITOR = {E. S. Smith and R. M. Smith},
-   PUBLISHER = {JAM Press}, 
-   ADDRESS = {Maple Grove, MN}
-}
-
- at INCOLLECTION{Fisch:77,
-   AUTHOR = {Fischer, G. H.},
-   YEAR   = {1977},
-   TITLE  = {Linear Logistic Trait Models: Theory and Application},
-   BOOKTITLE = {Structural Models of Thinking and Learning},
-   PAGES = {203--225},
-   EDITOR = {H. Spada and W. F. Kempf},
-   PUBLISHER = {Huber}, 
-   ADDRESS = {Bern}
-}
-
- at INCOLLECTION{RoDa:95,
-   AUTHOR = {Rost, J. and von Davier, M.},
-   YEAR   = {1995},
-   TITLE  = {Polytomous Mixed Rasch Models},
-   BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications},
-   PAGES = {371--382},
-   EDITOR = {G.H. Fischer and I.W. Molenaar},
-   PUBLISHER = {Springer}, 
-   ADDRESS = {New York}
-}
-
- at INCOLLECTION{Verhelst+Glas:1995,
-   AUTHOR = {N. Verhelst and C. A. W. Glas},
-   YEAR   = {1995},
-   TITLE  = {The one parameter logistic test model},
-   BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications},
-   PAGES = {215--238},
-   EDITOR = {G.H. Fischer and I.W. Molenaar},
-   PUBLISHER = {Springer}, 
-   ADDRESS = {New York}
-}
-
- at INCOLLECTION{Pf:94,
-   AUTHOR = {Pfanzagl, J.},
-   YEAR   = {1994},
-   TITLE  = {On Item Parameter Estimation in Certain Latent Trait Models},
-   BOOKTITLE = {Contributions to Mathematical Psychology, Psychometrics, and Methodology},
-   PAGES = {249--263},
-   EDITOR = {G.H. Fischer and D. Laming},
-   PUBLISHER = {Springer}, 
-   ADDRESS = {New York}
-}
-
- at article{Gustafsson:1980,
-   author = {J. Gustafsson},
-   year = {1980},
-   TITLE  = {Testing and obtaining fit of data to the \uppercase{R}asch model},
-   JOURNAL = {British Journal of Mathematical and Statistical Psychology},
-   VOLUME = {33},
-   PAGES = {205--233}
-}
-
- at Manual{R:06,
-       title        = {R: A Language and Environment for Statistical
-                       Computing},
-       author       = {{R Development Core Team}},
-       organization = {R Foundation for Statistical Computing},
-       address      = {Vienna, Austria},
-       year         = 2007,
-       note         = {{ISBN} 3-900051-07-0},
-       url          = {http://www.R-project.org}
-     }
-     
- at article{Mair+Hatzinger:2007,
-   author = {P. Mair and R. Hatzinger},
-   year = {2007},
-   TITLE  = {Extended \uppercase{R}asch Modeling: The e\uppercase{R}m package for the application of \uppercase{IRT} models in \uppercase{R}},
-   JOURNAL = {Journal of Statistical Software},
-   VOLUME = {20},
-   NUMBER = {9},
-   PAGES = {1--20}
-}
-
- at article{Warm:1989,
-   author = {T. A. Warm},
-   year = {1989},
-   TITLE  = {Weighted likelihood estimation of ability in item response theory},
-   JOURNAL = {Psychometrika},
-   VOLUME = {54},
-   PAGES = {427--450}
-}
-
- at article{Ponocny:2001,
-   author = {I. Ponocny},
-   year = {2001},
-   TITLE  = {Nonparametric goodness-of-fit tests for the \uppercase{R}asch model},
-   JOURNAL = {Psychometrika},
-   VOLUME = {66},
-   PAGES = {437--460}
-}
-
- at INCOLLECTION{Birnbaum:1968,
-   AUTHOR = {Birnbaum, A.},
-   YEAR   = {1968},
-   TITLE  = {Some latent trait models and their use in inferring an examinee's ability},
-   BOOKTITLE = {Statistical theories of mental test scores},
-   PAGES = {395--479},
-   EDITOR = {F. M. Lord and M. R. Novick},
-   PUBLISHER = {Addison-Wesley}, 
-   ADDRESS = {Reading, MA}
-}
-
- at article{Verhelst+Hatzinger+Mair:2007,
-   author = {N. Verhelst and R. Hatzinger and P. Mair},
-   year = {2007},
-   TITLE  = {The \uppercase{R}asch sampler},
-   JOURNAL = {Journal of Statistical Software},
-   VOLUME = {20},
-   NUMBER = {4},
-   PAGES = {1--14}
-}
-
- at article{FiSch:70,
-   author = {G. H. Fischer and H. H. Scheiblechner},
-   year = {1970},
-   TITLE  = {Algorithmen und \uppercase{P}rogramme f\"ur das probabilistische \uppercase{T}estmodell von \uppercase{R}asch. [\uppercase{A}lgorithms and programs for \uppercase{R}asch's probabilistic test model.]},
-   JOURNAL = {Psychologische Beitr\"age},
-   VOLUME = {12},
-   PAGES = {23--51}
-}
-
- at article{Suarez+Glas:2003,
-   author = {J. C. Su\'arez-Falc\'on and C. A. W. Glas},
-   year = {2003},
-   TITLE  = {Evaluation of global testing procedures for item fit to the \uppercase{R}asch model},
-   JOURNAL = {British Journal of Mathematical and Statistical Psychology},
-   VOLUME = {56},
-   PAGES = {127--143}
-}
-
- at article{Adams+Wilson+Wang:1997,
-   author = {R. J. Adams and M. Wilson and W. C. Wang},
-   year = {1997},
-   TITLE  = {The multidimensional random coefficients multinomial logit model},
-   JOURNAL = {Applied Psychological Measurement},
-   VOLUME = {21},
-   PAGES = {1--23}
-}
-
- at article{Jannarone:1986,
-   author = {R. J. Jannarone},
-   year = {1986},
-   TITLE  = {Conjunctive item response theory model kernels},
-   JOURNAL = {Psychometrika},
-   VOLUME = {51},
-   PAGES = {357--373}
-}
-
- at article{Mair+Hatzinger:2007b,
-   author = {P. Mair and R. Hatzinger},
-   year = {2007},
-   TITLE  = {\uppercase{CML} based estimation of extended \uppercase{R}asch models with the e\uppercase{R}m package in \uppercase{R}},
-   JOURNAL = {Psychology Science},
-   VOLUME = {49},
-   PAGES = {26--43}
-}
-
- at INCOLLECTION{Hoijtink+Boomsma:1995,
-   AUTHOR = {H. Hoijtink and A. Boomsma},
-   YEAR   = {1995},
-   TITLE  = {On person parameter estimation in the dichotomous \uppercase{R}asch model},
-   BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications},
-   PAGES = {53--68},
-   EDITOR = {G.H. Fischer and I.W. Molenaar},
-   PUBLISHER = {Springer}, 
-   ADDRESS = {New York}
-}
-
-
- at BOOK{Poinstingl+Mair+Hatzinger:07,
-   AUTHOR = {Poinstingl, H. and Mair, P. and Hatzinger, R.},
-   YEAR   = {2007},
-   TITLE  = {Manual zum \uppercase{S}oftwarepackage e\uppercase{R}m: Anwendung des \uppercase{R}asch-\uppercase{M}odells},
-   PUBLISHER = {Pabst Science Publishers},
-   EDITION = {},
-   ADDRESS = {Lengerich}
-}
-
- at MastersThesis{Mair:2006,
-   Author = {P. Mair},
-   School = {Department of Psychology, University of Vienna},
-   Title = {Simulation Studies for Goodness-of-Fit Statistics in Item Response Theory},
-   Year = {2006}
-}
-
diff --git a/inst/doc/index.html.old b/inst/doc/index.html.old
deleted file mode 100755
index 408101d..0000000
--- a/inst/doc/index.html.old
+++ /dev/null
@@ -1,10 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
-<html><head><title>R: eRm vignettes</title>
-<link rel="stylesheet" type="text/css" href="../../R.css">
-</head><body>
-<h2>Vignettes of package eRm </h2>
-<dl>
-<dt><a href="eRmvig.pdf">eRmvig.pdf</a>:
-<dd> eRm Basics
-</dl>
-</body></html>
diff --git a/inst/doc/jss.bst b/inst/doc/jss.bst
deleted file mode 100755
index a5b0e78..0000000
--- a/inst/doc/jss.bst
+++ /dev/null
@@ -1,1647 +0,0 @@
-%%
-%% This is file `jss.bst',
-%% generated with the docstrip utility.
-%%
-%% The original source files were:
-%%
-%% merlin.mbs  (with options: `ay,nat,nm-rvx,keyxyr,dt-beg,yr-par,note-yr,tit-qq,bt-qq,atit-u,trnum-it,vol-bf,volp-com,num-xser,isbn,issn,edpar,pp,ed,xedn,xand,etal-it,revdata,eprint,url,url-blk,doi,nfss')
-%% ----------------------------------------
-%% *** Journal of Statistical Software ***
-%% 
-%% Copyright 1994-2004 Patrick W Daly
- % ===============================================================
- % IMPORTANT NOTICE:
- % This bibliographic style (bst) file has been generated from one or
- % more master bibliographic style (mbs) files, listed above.
- %
- % This generated file can be redistributed and/or modified under the terms
- % of the LaTeX Project Public License Distributed from CTAN
- % archives in directory macros/latex/base/lppl.txt; either
- % version 1 of the License, or any later version.
- % ===============================================================
- % Name and version information of the main mbs file:
- % \ProvidesFile{merlin.mbs}[2004/02/09 4.13 (PWD, AO, DPC)]
- %   For use with BibTeX version 0.99a or later
- %-------------------------------------------------------------------
- % This bibliography style file is intended for texts in ENGLISH
- % This is an author-year citation style bibliography. As such, it is
- % non-standard LaTeX, and requires a special package file to function properly.
- % Such a package is    natbib.sty   by Patrick W. Daly
- % The form of the \bibitem entries is
- %   \bibitem[Jones et al.(1990)]{key}...
- %   \bibitem[Jones et al.(1990)Jones, Baker, and Smith]{key}...
- % The essential feature is that the label (the part in brackets) consists
- % of the author names, as they should appear in the citation, with the year
- % in parentheses following. There must be no space before the opening
- % parenthesis!
- % With natbib v5.3, a full list of authors may also follow the year.
- % In natbib.sty, it is possible to define the type of enclosures that is
- % really wanted (brackets or parentheses), but in either case, there must
- % be parentheses in the label.
- % The \cite command functions as follows:
- %   \citet{key} ==>>                Jones et al. (1990)
- %   \citet*{key} ==>>               Jones, Baker, and Smith (1990)
- %   \citep{key} ==>>                (Jones et al., 1990)
- %   \citep*{key} ==>>               (Jones, Baker, and Smith, 1990)
- %   \citep[chap. 2]{key} ==>>       (Jones et al., 1990, chap. 2)
- %   \citep[e.g.][]{key} ==>>        (e.g. Jones et al., 1990)
- %   \citep[e.g.][p. 32]{key} ==>>   (e.g. Jones et al., p. 32)
- %   \citeauthor{key} ==>>           Jones et al.
- %   \citeauthor*{key} ==>>          Jones, Baker, and Smith
- %   \citeyear{key} ==>>             1990
- %---------------------------------------------------------------------
-
-ENTRY
-  { address
-    archive
-    author
-    booktitle
-    chapter
-    collaboration
-    doi
-    edition
-    editor
-    eid
-    eprint
-    howpublished
-    institution
-    isbn
-    issn
-    journal
-    key
-    month
-    note
-    number
-    numpages
-    organization
-    pages
-    publisher
-    school
-    series
-    title
-    type
-    url
-    volume
-    year
-  }
-  {}
-  { label extra.label sort.label short.list }
-INTEGERS { output.state before.all mid.sentence after.sentence after.block }
-FUNCTION {init.state.consts}
-{ #0 'before.all :=
-  #1 'mid.sentence :=
-  #2 'after.sentence :=
-  #3 'after.block :=
-}
-STRINGS { s t}
-FUNCTION {output.nonnull}
-{ 's :=
-  output.state mid.sentence =
-    { ", " * write$ }
-    { output.state after.block =
-        { add.period$ write$
-          newline$
-          "\newblock " write$
-        }
-        { output.state before.all =
-            'write$
-            { add.period$ " " * write$ }
-          if$
-        }
-      if$
-      mid.sentence 'output.state :=
-    }
-  if$
-  s
-}
-FUNCTION {output}
-{ duplicate$ empty$
-    'pop$
-    'output.nonnull
-  if$
-}
-FUNCTION {output.check}
-{ 't :=
-  duplicate$ empty$
-    { pop$ "empty " t * " in " * cite$ * warning$ }
-    'output.nonnull
-  if$
-}
-FUNCTION {fin.entry}
-{ add.period$
-  write$
-  newline$
-}
-
-FUNCTION {new.block}
-{ output.state before.all =
-    'skip$
-    { after.block 'output.state := }
-  if$
-}
-FUNCTION {new.sentence}
-{ output.state after.block =
-    'skip$
-    { output.state before.all =
-        'skip$
-        { after.sentence 'output.state := }
-      if$
-    }
-  if$
-}
-FUNCTION {add.blank}
-{  " " * before.all 'output.state :=
-}
-
-FUNCTION {date.block}
-{
-  new.block
-}
-
-FUNCTION {not}
-{   { #0 }
-    { #1 }
-  if$
-}
-FUNCTION {and}
-{   'skip$
-    { pop$ #0 }
-  if$
-}
-FUNCTION {or}
-{   { pop$ #1 }
-    'skip$
-  if$
-}
-FUNCTION {non.stop}
-{ duplicate$
-   "}" * add.period$
-   #-1 #1 substring$ "." =
-}
-
-STRINGS {z}
-FUNCTION {remove.dots}
-{ 'z :=
-  ""
-  { z empty$ not }
-  { z #1 #1 substring$
-    z #2 global.max$ substring$ 'z :=
-    duplicate$ "." = 'pop$
-      { * }
-    if$
-  }
-  while$
-}
-FUNCTION {new.block.checkb}
-{ empty$
-  swap$ empty$
-  and
-    'skip$
-    'new.block
-  if$
-}
-FUNCTION {field.or.null}
-{ duplicate$ empty$
-    { pop$ "" }
-    'skip$
-  if$
-}
-FUNCTION {emphasize}
-{ duplicate$ empty$
-    { pop$ "" }
-    { "\emph{" swap$ * "}" * }
-  if$
-}
-FUNCTION {bolden}
-{ duplicate$ empty$
-    { pop$ "" }
-    { "\textbf{" swap$ * "}" * }
-  if$
-}
-FUNCTION {tie.or.space.prefix}
-{ duplicate$ text.length$ #3 <
-    { "~" }
-    { " " }
-  if$
-  swap$
-}
-
-FUNCTION {capitalize}
-{ "u" change.case$ "t" change.case$ }
-
-FUNCTION {space.word}
-{ " " swap$ * " " * }
- % Here are the language-specific definitions for explicit words.
- % Each function has a name bbl.xxx where xxx is the English word.
- % The language selected here is ENGLISH
-FUNCTION {bbl.and}
-{ "and"}
-
-FUNCTION {bbl.etal}
-{ "et~al." }
-
-FUNCTION {bbl.editors}
-{ "eds." }
-
-FUNCTION {bbl.editor}
-{ "ed." }
-
-FUNCTION {bbl.edby}
-{ "edited by" }
-
-FUNCTION {bbl.edition}
-{ "edition" }
-
-FUNCTION {bbl.volume}
-{ "volume" }
-
-FUNCTION {bbl.of}
-{ "of" }
-
-FUNCTION {bbl.number}
-{ "number" }
-
-FUNCTION {bbl.nr}
-{ "no." }
-
-FUNCTION {bbl.in}
-{ "in" }
-
-FUNCTION {bbl.pages}
-{ "pp." }
-
-FUNCTION {bbl.page}
-{ "p." }
-
-FUNCTION {bbl.eidpp}
-{ "pages" }
-
-FUNCTION {bbl.chapter}
-{ "chapter" }
-
-FUNCTION {bbl.techrep}
-{ "Technical Report" }
-
-FUNCTION {bbl.mthesis}
-{ "Master's thesis" }
-
-FUNCTION {bbl.phdthesis}
-{ "Ph.D. thesis" }
-
-MACRO {jan} {"January"}
-
-MACRO {feb} {"February"}
-
-MACRO {mar} {"March"}
-
-MACRO {apr} {"April"}
-
-MACRO {may} {"May"}
-
-MACRO {jun} {"June"}
-
-MACRO {jul} {"July"}
-
-MACRO {aug} {"August"}
-
-MACRO {sep} {"September"}
-
-MACRO {oct} {"October"}
-
-MACRO {nov} {"November"}
-
-MACRO {dec} {"December"}
-
-MACRO {acmcs} {"ACM Computing Surveys"}
-
-MACRO {acta} {"Acta Informatica"}
-
-MACRO {cacm} {"Communications of the ACM"}
-
-MACRO {ibmjrd} {"IBM Journal of Research and Development"}
-
-MACRO {ibmsj} {"IBM Systems Journal"}
-
-MACRO {ieeese} {"IEEE Transactions on Software Engineering"}
-
-MACRO {ieeetc} {"IEEE Transactions on Computers"}
-
-MACRO {ieeetcad}
- {"IEEE Transactions on Computer-Aided Design of Integrated Circuits"}
-
-MACRO {ipl} {"Information Processing Letters"}
-
-MACRO {jacm} {"Journal of the ACM"}
-
-MACRO {jcss} {"Journal of Computer and System Sciences"}
-
-MACRO {scp} {"Science of Computer Programming"}
-
-MACRO {sicomp} {"SIAM Journal on Computing"}
-
-MACRO {tocs} {"ACM Transactions on Computer Systems"}
-
-MACRO {tods} {"ACM Transactions on Database Systems"}
-
-MACRO {tog} {"ACM Transactions on Graphics"}
-
-MACRO {toms} {"ACM Transactions on Mathematical Software"}
-
-MACRO {toois} {"ACM Transactions on Office Information Systems"}
-
-MACRO {toplas} {"ACM Transactions on Programming Languages and Systems"}
-
-MACRO {tcs} {"Theoretical Computer Science"}
-FUNCTION {bibinfo.check}
-{ swap$
-  duplicate$ missing$
-    {
-      pop$ pop$
-      ""
-    }
-    { duplicate$ empty$
-        {
-          swap$ pop$
-        }
-        { swap$
-          pop$
-        }
-      if$
-    }
-  if$
-}
-FUNCTION {bibinfo.warn}
-{ swap$
-  duplicate$ missing$
-    {
-      swap$ "missing " swap$ * " in " * cite$ * warning$ pop$
-      ""
-    }
-    { duplicate$ empty$
-        {
-          swap$ "empty " swap$ * " in " * cite$ * warning$
-        }
-        { swap$
-          pop$
-        }
-      if$
-    }
-  if$
-}
-FUNCTION {format.eprint}
-{ eprint duplicate$ empty$
-    'skip$
-    { "\eprint"
-      archive empty$
-        'skip$
-        { "[" * archive * "]" * }
-      if$
-      "{" * swap$ * "}" *
-    }
-  if$
-}
-FUNCTION {format.url}
-{ url empty$
-    { "" }
-    { "\urlprefix\url{" url * "}" * }
-  if$
-}
-
-STRINGS  { bibinfo}
-INTEGERS { nameptr namesleft numnames }
-
-FUNCTION {format.names}
-{ 'bibinfo :=
-  duplicate$ empty$ 'skip$ {
-  's :=
-  "" 't :=
-  #1 'nameptr :=
-  s num.names$ 'numnames :=
-  numnames 'namesleft :=
-    { namesleft #0 > }
-    { s nameptr
-      "{vv~}{ll}{ jj}{ f{}}"
-      format.name$
-      remove.dots
-      bibinfo bibinfo.check
-      't :=
-      nameptr #1 >
-        {
-          namesleft #1 >
-            { ", " * t * }
-            {
-              "," *
-              s nameptr "{ll}" format.name$ duplicate$ "others" =
-                { 't := }
-                { pop$ }
-              if$
-              t "others" =
-                {
-                  " " * bbl.etal emphasize *
-                }
-                { " " * t * }
-              if$
-            }
-          if$
-        }
-        't
-      if$
-      nameptr #1 + 'nameptr :=
-      namesleft #1 - 'namesleft :=
-    }
-  while$
-  } if$
-}
-FUNCTION {format.names.ed}
-{
-  'bibinfo :=
-  duplicate$ empty$ 'skip$ {
-  's :=
-  "" 't :=
-  #1 'nameptr :=
-  s num.names$ 'numnames :=
-  numnames 'namesleft :=
-    { namesleft #0 > }
-    { s nameptr
-      "{f{}~}{vv~}{ll}{ jj}"
-      format.name$
-      remove.dots
-      bibinfo bibinfo.check
-      't :=
-      nameptr #1 >
-        {
-          namesleft #1 >
-            { ", " * t * }
-            {
-              "," *
-              s nameptr "{ll}" format.name$ duplicate$ "others" =
-                { 't := }
-                { pop$ }
-              if$
-              t "others" =
-                {
-
-                  " " * bbl.etal emphasize *
-                }
-                { " " * t * }
-              if$
-            }
-          if$
-        }
-        't
-      if$
-      nameptr #1 + 'nameptr :=
-      namesleft #1 - 'namesleft :=
-    }
-  while$
-  } if$
-}
-FUNCTION {format.key}
-{ empty$
-    { key field.or.null }
-    { "" }
-  if$
-}
-
-FUNCTION {format.authors}
-{ author "author" format.names
-    duplicate$ empty$ 'skip$
-    { collaboration "collaboration" bibinfo.check
-      duplicate$ empty$ 'skip$
-        { " (" swap$ * ")" * }
-      if$
-      *
-    }
-  if$
-}
-FUNCTION {get.bbl.editor}
-{ editor num.names$ #1 > 'bbl.editors 'bbl.editor if$ }
-
-FUNCTION {format.editors}
-{ editor "editor" format.names duplicate$ empty$ 'skip$
-    {
-      " " *
-      get.bbl.editor
-   "(" swap$ * ")" *
-      *
-    }
-  if$
-}
-FUNCTION {format.isbn}
-{ isbn "isbn" bibinfo.check
-  duplicate$ empty$ 'skip$
-    {
-      new.block
-      "ISBN " swap$ *
-    }
-  if$
-}
-
-FUNCTION {format.issn}
-{ issn "issn" bibinfo.check
-  duplicate$ empty$ 'skip$
-    {
-      new.block
-      "ISSN " swap$ *
-    }
-  if$
-}
-
-FUNCTION {format.doi}
-{ doi "doi" bibinfo.check
-  duplicate$ empty$ 'skip$
-    {
-      new.block
-      "\doi{" swap$ * "}" *
-    }
-  if$
-}
-FUNCTION {format.note}
-{
- note empty$
-    { "" }
-    { note #1 #1 substring$
-      duplicate$ "{" =
-        'skip$
-        { output.state mid.sentence =
-          { "l" }
-          { "u" }
-        if$
-        change.case$
-        }
-      if$
-      note #2 global.max$ substring$ * "note" bibinfo.check
-    }
-  if$
-}
-
-FUNCTION {format.title}
-{ title
-  "title" bibinfo.check
-  duplicate$ empty$ 'skip$
-    {
-      "\enquote{" swap$ *
-      add.period$ "}" *
-    }
-  if$
-}
-FUNCTION {end.quote.btitle}
-{ booktitle empty$
-    'skip$
-    { before.all 'output.state := }
-  if$
-}
-FUNCTION {format.full.names}
-{'s :=
- "" 't :=
-  #1 'nameptr :=
-  s num.names$ 'numnames :=
-  numnames 'namesleft :=
-    { namesleft #0 > }
-    { s nameptr
-      "{vv~}{ll}" format.name$
-      't :=
-      nameptr #1 >
-        {
-          namesleft #1 >
-            { ", " * t * }
-            {
-              s nameptr "{ll}" format.name$ duplicate$ "others" =
-                { 't := }
-                { pop$ }
-              if$
-              t "others" =
-                {
-                  " " * bbl.etal emphasize *
-                }
-                {
-                  numnames #2 >
-                    { "," * }
-                    'skip$
-                  if$
-                  bbl.and
-                  space.word * t *
-                }
-              if$
-            }
-          if$
-        }
-        't
-      if$
-      nameptr #1 + 'nameptr :=
-      namesleft #1 - 'namesleft :=
-    }
-  while$
-}
-
-FUNCTION {author.editor.key.full}
-{ author empty$
-    { editor empty$
-        { key empty$
-            { cite$ #1 #3 substring$ }
-            'key
-          if$
-        }
-        { editor format.full.names }
-      if$
-    }
-    { author format.full.names }
-  if$
-}
-
-FUNCTION {author.key.full}
-{ author empty$
-    { key empty$
-         { cite$ #1 #3 substring$ }
-          'key
-      if$
-    }
-    { author format.full.names }
-  if$
-}
-
-FUNCTION {editor.key.full}
-{ editor empty$
-    { key empty$
-         { cite$ #1 #3 substring$ }
-          'key
-      if$
-    }
-    { editor format.full.names }
-  if$
-}
-
-FUNCTION {make.full.names}
-{ type$ "book" =
-  type$ "inbook" =
-  or
-    'author.editor.key.full
-    { type$ "proceedings" =
-        'editor.key.full
-        'author.key.full
-      if$
-    }
-  if$
-}
-
-FUNCTION {output.bibitem}
-{ newline$
-  "\bibitem[{" write$
-  label write$
-  ")" make.full.names duplicate$ short.list =
-     { pop$ }
-     { * }
-   if$
-  "}]{" * write$
-  cite$ write$
-  "}" write$
-  newline$
-  ""
-  before.all 'output.state :=
-}
-
-FUNCTION {n.dashify}
-{
-  't :=
-  ""
-    { t empty$ not }
-    { t #1 #1 substring$ "-" =
-        { t #1 #2 substring$ "--" = not
-            { "--" *
-              t #2 global.max$ substring$ 't :=
-            }
-            {   { t #1 #1 substring$ "-" = }
-                { "-" *
-                  t #2 global.max$ substring$ 't :=
-                }
-              while$
-            }
-          if$
-        }
-        { t #1 #1 substring$ *
-          t #2 global.max$ substring$ 't :=
-        }
-      if$
-    }
-  while$
-}
-
-FUNCTION {word.in}
-{ bbl.in capitalize
-  " " * }
-
-FUNCTION {format.date}
-{ year "year" bibinfo.check duplicate$ empty$
-    {
-      "empty year in " cite$ * "; set to ????" * warning$
-       pop$ "????"
-    }
-    'skip$
-  if$
-  extra.label *
-  before.all 'output.state :=
-  " (" swap$ * ")" *
-}
-FUNCTION {format.btitle}
-{ title "title" bibinfo.check
-  duplicate$ empty$ 'skip$
-    {
-      emphasize
-    }
-  if$
-}
-FUNCTION {either.or.check}
-{ empty$
-    'pop$
-    { "can't use both " swap$ * " fields in " * cite$ * warning$ }
-  if$
-}
-FUNCTION {format.bvolume}
-{ volume empty$
-    { "" }
-    { bbl.volume volume tie.or.space.prefix
-      "volume" bibinfo.check * *
-      series "series" bibinfo.check
-      duplicate$ empty$ 'pop$
-        { swap$ bbl.of space.word * swap$
-          emphasize * }
-      if$
-      "volume and number" number either.or.check
-    }
-  if$
-}
-FUNCTION {format.number.series}
-{ volume empty$
-    { number empty$
-        { series field.or.null }
-        { series empty$
-            { number "number" bibinfo.check }
-            { output.state mid.sentence =
-                { bbl.number }
-                { bbl.number capitalize }
-              if$
-              number tie.or.space.prefix "number" bibinfo.check * *
-              bbl.in space.word *
-              series "series" bibinfo.check *
-            }
-          if$
-        }
-      if$
-    }
-    { "" }
-  if$
-}
-
-FUNCTION {format.edition}
-{ edition duplicate$ empty$ 'skip$
-    {
-      output.state mid.sentence =
-        { "l" }
-        { "t" }
-      if$ change.case$
-      "edition" bibinfo.check
-      " " * bbl.edition *
-    }
-  if$
-}
-INTEGERS { multiresult }
-FUNCTION {multi.page.check}
-{ 't :=
-  #0 'multiresult :=
-    { multiresult not
-      t empty$ not
-      and
-    }
-    { t #1 #1 substring$
-      duplicate$ "-" =
-      swap$ duplicate$ "," =
-      swap$ "+" =
-      or or
-        { #1 'multiresult := }
-        { t #2 global.max$ substring$ 't := }
-      if$
-    }
-  while$
-  multiresult
-}
-FUNCTION {format.pages}
-{ pages duplicate$ empty$ 'skip$
-    { duplicate$ multi.page.check
-        {
-          bbl.pages swap$
-          n.dashify
-        }
-        {
-          bbl.page swap$
-        }
-      if$
-      tie.or.space.prefix
-      "pages" bibinfo.check
-      * *
-    }
-  if$
-}
-FUNCTION {format.journal.pages}
-{ pages duplicate$ empty$ 'pop$
-    { swap$ duplicate$ empty$
-        { pop$ pop$ format.pages }
-        {
-          ", " *
-          swap$
-          n.dashify
-          "pages" bibinfo.check
-          *
-        }
-      if$
-    }
-  if$
-}
-FUNCTION {format.journal.eid}
-{ eid "eid" bibinfo.check
-  duplicate$ empty$ 'pop$
-    { swap$ duplicate$ empty$ 'skip$
-      {
-          ", " *
-      }
-      if$
-      swap$ *
-      numpages empty$ 'skip$
-        { bbl.eidpp numpages tie.or.space.prefix
-          "numpages" bibinfo.check * *
-          " (" swap$ * ")" * *
-        }
-      if$
-    }
-  if$
-}
-FUNCTION {format.vol.num.pages}
-{ volume field.or.null
-  duplicate$ empty$ 'skip$
-    {
-      "volume" bibinfo.check
-    }
-  if$
-  bolden
-  number "number" bibinfo.check duplicate$ empty$ 'skip$
-    {
-      swap$ duplicate$ empty$
-        { "there's a number but no volume in " cite$ * warning$ }
-        'skip$
-      if$
-      swap$
-      "(" swap$ * ")" *
-    }
-  if$ *
-  eid empty$
-    { format.journal.pages }
-    { format.journal.eid }
-  if$
-}
-
-FUNCTION {format.chapter.pages}
-{ chapter empty$
-    'format.pages
-    { type empty$
-        { bbl.chapter }
-        { type "l" change.case$
-          "type" bibinfo.check
-        }
-      if$
-      chapter tie.or.space.prefix
-      "chapter" bibinfo.check
-      * *
-      pages empty$
-        'skip$
-        { ", " * format.pages * }
-      if$
-    }
-  if$
-}
-
-FUNCTION {bt.enquote}
-{ duplicate$ empty$ 'skip$
-  { "\enquote{" swap$ *
-    non.stop
-      { ",} " * }
-      { "}, " * }
-    if$
-  }
-  if$
-}
-FUNCTION {format.booktitle}
-{
-  booktitle "booktitle" bibinfo.check
-  bt.enquote
-}
-FUNCTION {format.in.ed.booktitle}
-{ format.booktitle duplicate$ empty$ 'skip$
-    {
-      editor "editor" format.names.ed duplicate$ empty$ 'pop$
-        {
-          " " *
-          get.bbl.editor
-          "(" swap$ * "), " *
-          * swap$
-          * }
-      if$
-      word.in swap$ *
-    }
-  if$
-}
-FUNCTION {format.thesis.type}
-{ type duplicate$ empty$
-    'pop$
-    { swap$ pop$
-      "t" change.case$ "type" bibinfo.check
-    }
-  if$
-}
-FUNCTION {format.tr.number}
-{ number "number" bibinfo.check
-  type duplicate$ empty$
-    { pop$ bbl.techrep }
-    'skip$
-  if$
-  "type" bibinfo.check
-  swap$ duplicate$ empty$
-    { pop$ "t" change.case$ }
-    { tie.or.space.prefix * * }
-  if$
-}
-FUNCTION {format.article.crossref}
-{
-  word.in
-  " \cite{" * crossref * "}" *
-}
-FUNCTION {format.book.crossref}
-{ volume duplicate$ empty$
-    { "empty volume in " cite$ * "'s crossref of " * crossref * warning$
-      pop$ word.in
-    }
-    { bbl.volume
-      capitalize
-      swap$ tie.or.space.prefix "volume" bibinfo.check * * bbl.of space.word *
-    }
-  if$
-  " \cite{" * crossref * "}" *
-}
-FUNCTION {format.incoll.inproc.crossref}
-{
-  word.in
-  " \cite{" * crossref * "}" *
-}
-FUNCTION {format.org.or.pub}
-{ 't :=
-  ""
-  address empty$ t empty$ and
-    'skip$
-    {
-      t empty$
-        { address "address" bibinfo.check *
-        }
-        { t *
-          address empty$
-            'skip$
-            { ", " * address "address" bibinfo.check * }
-          if$
-        }
-      if$
-    }
-  if$
-}
-FUNCTION {format.publisher.address}
-{ publisher "publisher" bibinfo.warn format.org.or.pub
-}
-
-FUNCTION {format.organization.address}
-{ organization "organization" bibinfo.check format.org.or.pub
-}
-
-FUNCTION {article}
-{ output.bibitem
-  format.authors "author" output.check
-  author format.key output
-  format.date "year" output.check
-  date.block
-  format.title "title" output.check
-  new.block
-  crossref missing$
-    {
-      journal
-      "journal" bibinfo.check
-      emphasize
-      "journal" output.check
-      format.vol.num.pages output
-    }
-    { format.article.crossref output.nonnull
-      format.pages output
-    }
-  if$
-  format.issn output
-  format.doi output
-  new.block
-  format.note output
-  format.eprint output
-  format.url output
-  fin.entry
-}
-FUNCTION {book}
-{ output.bibitem
-  author empty$
-    { format.editors "author and editor" output.check
-      editor format.key output
-    }
-    { format.authors output.nonnull
-      crossref missing$
-        { "author and editor" editor either.or.check }
-        'skip$
-      if$
-    }
-  if$
-  format.date "year" output.check
-  date.block
-  format.btitle "title" output.check
-  crossref missing$
-    { format.bvolume output
-      new.block
-      format.number.series output
-      new.sentence
-      format.publisher.address output
-    }
-    {
-      new.block
-      format.book.crossref output.nonnull
-    }
-  if$
-  format.edition output
-  format.isbn output
-  format.doi output
-  new.block
-  format.note output
-  format.eprint output
-  format.url output
-  fin.entry
-}
-FUNCTION {booklet}
-{ output.bibitem
-  format.authors output
-  author format.key output
-  format.date "year" output.check
-  date.block
-  format.title "title" output.check
-  new.block
-  howpublished "howpublished" bibinfo.check output
-  address "address" bibinfo.check output
-  format.isbn output
-  format.doi output
-  new.block
-  format.note output
-  format.eprint output
-  format.url output
-  fin.entry
-}
-
-FUNCTION {inbook}
-{ output.bibitem
-  author empty$
-    { format.editors "author and editor" output.check
-      editor format.key output
-    }
-    { format.authors output.nonnull
-      crossref missing$
-        { "author and editor" editor either.or.check }
-        'skip$
-      if$
-    }
-  if$
-  format.date "year" output.check
-  date.block
-  format.btitle "title" output.check
-  crossref missing$
-    {
-      format.bvolume output
-      format.chapter.pages "chapter and pages" output.check
-      new.block
-      format.number.series output
-      new.sentence
-      format.publisher.address output
-    }
-    {
-      format.chapter.pages "chapter and pages" output.check
-      new.block
-      format.book.crossref output.nonnull
-    }
-  if$
-  format.edition output
-  crossref missing$
-    { format.isbn output }
-    'skip$
-  if$
-  format.doi output
-  new.block
-  format.note output
-  format.eprint output
-  format.url output
-  fin.entry
-}
-
-FUNCTION {incollection}
-{ output.bibitem
-  format.authors "author" output.check
-  author format.key output
-  format.date "year" output.check
-  date.block
-  format.title "title" output.check
-  new.block
-  crossref missing$
-    { format.in.ed.booktitle "booktitle" output.check
-      end.quote.btitle
-      format.bvolume output
-      format.number.series output
-      format.chapter.pages output
-      new.sentence
-      format.publisher.address output
-      format.edition output
-      format.isbn output
-    }
-    { format.incoll.inproc.crossref output.nonnull
-      format.chapter.pages output
-    }
-  if$
-  format.doi output
-  new.block
-  format.note output
-  format.eprint output
-  format.url output
-  fin.entry
-}
-FUNCTION {inproceedings}
-{ output.bibitem
-  format.authors "author" output.check
-  author format.key output
-  format.date "year" output.check
-  date.block
-  format.title "title" output.check
-  new.block
-  crossref missing$
-    { format.in.ed.booktitle "booktitle" output.check
-      end.quote.btitle
-      format.bvolume output
-      format.number.series output
-      format.pages output
-      new.sentence
-      publisher empty$
-        { format.organization.address output }
-        { organization "organization" bibinfo.check output
-          format.publisher.address output
-        }
-      if$
-      format.isbn output
-      format.issn output
-    }
-    { format.incoll.inproc.crossref output.nonnull
-      format.pages output
-    }
-  if$
-  format.doi output
-  new.block
-  format.note output
-  format.eprint output
-  format.url output
-  fin.entry
-}
-FUNCTION {conference} { inproceedings }
-FUNCTION {manual}
-{ output.bibitem
-  format.authors output
-  author format.key output
-  format.date "year" output.check
-  date.block
-  format.btitle "title" output.check
-  organization address new.block.checkb
-  organization "organization" bibinfo.check output
-  address "address" bibinfo.check output
-  format.edition output
-  format.doi output
-  new.block
-  format.note output
-  format.eprint output
-  format.url output
-  fin.entry
-}
-
-FUNCTION {mastersthesis}
-{ output.bibitem
-  format.authors "author" output.check
-  author format.key output
-  format.date "year" output.check
-  date.block
-  format.btitle
-  "title" output.check
-  new.block
-  bbl.mthesis format.thesis.type output.nonnull
-  school "school" bibinfo.warn output
-  address "address" bibinfo.check output
-  format.doi output
-  new.block
-  format.note output
-  format.eprint output
-  format.url output
-  fin.entry
-}
-
-FUNCTION {misc}
-{ output.bibitem
-  format.authors output
-  author format.key output
-  format.date "year" output.check
-  date.block
-  format.title output
-  new.block
-  howpublished "howpublished" bibinfo.check output
-  format.doi output
-  new.block
-  format.note output
-  format.eprint output
-  format.url output
-  fin.entry
-}
-FUNCTION {phdthesis}
-{ output.bibitem
-  format.authors "author" output.check
-  author format.key output
-  format.date "year" output.check
-  date.block
-  format.btitle
-  "title" output.check
-  new.block
-  bbl.phdthesis format.thesis.type output.nonnull
-  school "school" bibinfo.warn output
-  address "address" bibinfo.check output
-  format.doi output
-  new.block
-  format.note output
-  format.eprint output
-  format.url output
-  fin.entry
-}
-
-FUNCTION {proceedings}
-{ output.bibitem
-  format.editors output
-  editor format.key output
-  format.date "year" output.check
-  date.block
-  format.btitle "title" output.check
-  format.bvolume output
-  format.number.series output
-  new.sentence
-  publisher empty$
-    { format.organization.address output }
-    { organization "organization" bibinfo.check output
-      format.publisher.address output
-    }
-  if$
-  format.isbn output
-  format.issn output
-  format.doi output
-  new.block
-  format.note output
-  format.eprint output
-  format.url output
-  fin.entry
-}
-
-FUNCTION {techreport}
-{ output.bibitem
-  format.authors "author" output.check
-  author format.key output
-  format.date "year" output.check
-  date.block
-  format.title
-  "title" output.check
-  new.block
-  format.tr.number emphasize output.nonnull
-  institution "institution" bibinfo.warn output
-  address "address" bibinfo.check output
-  format.doi output
-  new.block
-  format.note output
-  format.eprint output
-  format.url output
-  fin.entry
-}
-
-FUNCTION {unpublished}
-{ output.bibitem
-  format.authors "author" output.check
-  author format.key output
-  format.date "year" output.check
-  date.block
-  format.title "title" output.check
-  format.doi output
-  new.block
-  format.note "note" output.check
-  format.eprint output
-  format.url output
-  fin.entry
-}
-
-FUNCTION {default.type} { misc }
-READ
-FUNCTION {sortify}
-{ purify$
-  "l" change.case$
-}
-INTEGERS { len }
-FUNCTION {chop.word}
-{ 's :=
-  'len :=
-  s #1 len substring$ =
-    { s len #1 + global.max$ substring$ }
-    's
-  if$
-}
-FUNCTION {format.lab.names}
-{ 's :=
-  "" 't :=
-  s #1 "{vv~}{ll}" format.name$
-  s num.names$ duplicate$
-  #2 >
-    { pop$
-      " " * bbl.etal emphasize *
-    }
-    { #2 <
-        'skip$
-        { s #2 "{ff }{vv }{ll}{ jj}" format.name$ "others" =
-            {
-              " " * bbl.etal emphasize *
-            }
-            { bbl.and space.word * s #2 "{vv~}{ll}" format.name$
-              * }
-          if$
-        }
-      if$
-    }
-  if$
-}
-
-FUNCTION {author.key.label}
-{ author empty$
-    { key empty$
-        { cite$ #1 #3 substring$ }
-        'key
-      if$
-    }
-    { author format.lab.names }
-  if$
-}
-
-FUNCTION {author.editor.key.label}
-{ author empty$
-    { editor empty$
-        { key empty$
-            { cite$ #1 #3 substring$ }
-            'key
-          if$
-        }
-        { editor format.lab.names }
-      if$
-    }
-    { author format.lab.names }
-  if$
-}
-
-FUNCTION {editor.key.label}
-{ editor empty$
-    { key empty$
-        { cite$ #1 #3 substring$ }
-        'key
-      if$
-    }
-    { editor format.lab.names }
-  if$
-}
-
-FUNCTION {calc.short.authors}
-{ type$ "book" =
-  type$ "inbook" =
-  or
-    'author.editor.key.label
-    { type$ "proceedings" =
-        'editor.key.label
-        'author.key.label
-      if$
-    }
-  if$
-  'short.list :=
-}
-
-FUNCTION {calc.label}
-{ calc.short.authors
-  short.list
-  "("
-  *
-  year duplicate$ empty$
-  short.list key field.or.null = or
-     { pop$ "" }
-     'skip$
-  if$
-  *
-  'label :=
-}
-
-FUNCTION {sort.format.names}
-{ 's :=
-  #1 'nameptr :=
-  ""
-  s num.names$ 'numnames :=
-  numnames 'namesleft :=
-    { namesleft #0 > }
-    { s nameptr
-      "{vv{ } }{ll{ }}{  f{ }}{  jj{ }}"
-      format.name$ 't :=
-      nameptr #1 >
-        {
-          "   "  *
-          namesleft #1 = t "others" = and
-            { "zzzzz" * }
-            { t sortify * }
-          if$
-        }
-        { t sortify * }
-      if$
-      nameptr #1 + 'nameptr :=
-      namesleft #1 - 'namesleft :=
-    }
-  while$
-}
-
-FUNCTION {sort.format.title}
-{ 't :=
-  "A " #2
-    "An " #3
-      "The " #4 t chop.word
-    chop.word
-  chop.word
-  sortify
-  #1 global.max$ substring$
-}
-FUNCTION {author.sort}
-{ author empty$
-    { key empty$
-        { "to sort, need author or key in " cite$ * warning$
-          ""
-        }
-        { key sortify }
-      if$
-    }
-    { author sort.format.names }
-  if$
-}
-FUNCTION {author.editor.sort}
-{ author empty$
-    { editor empty$
-        { key empty$
-            { "to sort, need author, editor, or key in " cite$ * warning$
-              ""
-            }
-            { key sortify }
-          if$
-        }
-        { editor sort.format.names }
-      if$
-    }
-    { author sort.format.names }
-  if$
-}
-FUNCTION {editor.sort}
-{ editor empty$
-    { key empty$
-        { "to sort, need editor or key in " cite$ * warning$
-          ""
-        }
-        { key sortify }
-      if$
-    }
-    { editor sort.format.names }
-  if$
-}
-FUNCTION {presort}
-{ calc.label
-  label sortify
-  "    "
-  *
-  type$ "book" =
-  type$ "inbook" =
-  or
-    'author.editor.sort
-    { type$ "proceedings" =
-        'editor.sort
-        'author.sort
-      if$
-    }
-  if$
-  #1 entry.max$ substring$
-  'sort.label :=
-  sort.label
-  *
-  "    "
-  *
-  title field.or.null
-  sort.format.title
-  *
-  #1 entry.max$ substring$
-  'sort.key$ :=
-}
-
-ITERATE {presort}
-SORT
-STRINGS { last.label next.extra }
-INTEGERS { last.extra.num number.label }
-FUNCTION {initialize.extra.label.stuff}
-{ #0 int.to.chr$ 'last.label :=
-  "" 'next.extra :=
-  #0 'last.extra.num :=
-  #0 'number.label :=
-}
-FUNCTION {forward.pass}
-{ last.label label =
-    { last.extra.num #1 + 'last.extra.num :=
-      last.extra.num int.to.chr$ 'extra.label :=
-    }
-    { "a" chr.to.int$ 'last.extra.num :=
-      "" 'extra.label :=
-      label 'last.label :=
-    }
-  if$
-  number.label #1 + 'number.label :=
-}
-FUNCTION {reverse.pass}
-{ next.extra "b" =
-    { "a" 'extra.label := }
-    'skip$
-  if$
-  extra.label 'next.extra :=
-  extra.label
-  duplicate$ empty$
-    'skip$
-    { "{\natexlab{" swap$ * "}}" * }
-  if$
-  'extra.label :=
-  label extra.label * 'label :=
-}
-EXECUTE {initialize.extra.label.stuff}
-ITERATE {forward.pass}
-REVERSE {reverse.pass}
-FUNCTION {bib.sort.order}
-{ sort.label
-  "    "
-  *
-  year field.or.null sortify
-  *
-  "    "
-  *
-  title field.or.null
-  sort.format.title
-  *
-  #1 entry.max$ substring$
-  'sort.key$ :=
-}
-ITERATE {bib.sort.order}
-SORT
-FUNCTION {begin.bib}
-{ preamble$ empty$
-    'skip$
-    { preamble$ write$ newline$ }
-  if$
-  "\begin{thebibliography}{" number.label int.to.str$ * "}" *
-  write$ newline$
-  "\newcommand{\enquote}[1]{``#1''}"
-  write$ newline$
-  "\providecommand{\natexlab}[1]{#1}"
-  write$ newline$
-  "\providecommand{\url}[1]{\texttt{#1}}"
-  write$ newline$
-  "\providecommand{\urlprefix}{URL }"
-  write$ newline$
-  "\expandafter\ifx\csname urlstyle\endcsname\relax"
-  write$ newline$
-  "  \providecommand{\doi}[1]{doi:\discretionary{}{}{}#1}\else"
-  write$ newline$
-  "  \providecommand{\doi}{doi:\discretionary{}{}{}\begingroup \urlstyle{rm}\Url}\fi"
-  write$ newline$
-  "\providecommand{\eprint}[2][]{\url{#2}}"
-  write$ newline$
-}
-EXECUTE {begin.bib}
-EXECUTE {init.state.consts}
-ITERATE {call.type$}
-FUNCTION {end.bib}
-{ newline$
-  "\end{thebibliography}" write$ newline$
-}
-EXECUTE {end.bib}
-%% End of customized bst file
-%%
-%% End of file `jss.bst'.
diff --git a/man/IC.Rd b/man/IC.Rd
old mode 100755
new mode 100644
index 21907d7..980940c
--- a/man/IC.Rd
+++ b/man/IC.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{IC}
 \alias{IC}
 \alias{IC.ppar}
@@ -28,18 +29,14 @@ The function \code{IC} returns an object of class \code{ICr} containing:
     \code{\link{LRtest.Rm}}
 }
 \examples{
-
 #IC's for Rasch model
-data(raschdat2)
 res <- RM(raschdat2)             #Rasch model
 pres <- person.parameter(res)    #Person parameters
 IC(pres)
 
 #IC's for RSM
-data(rsmdat)
 res <- RSM(rsmdat)
 pres <- person.parameter(res)
 IC(pres)
-
 }
 \keyword{models}
diff --git a/man/LLRA.Rd b/man/LLRA.Rd
old mode 100755
new mode 100644
index 64d7426..21c0a99
--- a/man/LLRA.Rd
+++ b/man/LLRA.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{LLRA}
 \alias{LLRA}
 \alias{print.llra}
@@ -60,8 +61,7 @@ Additional arguments to be passed to and from other methods.
   improvement). Currently only data matrices are supported as arguments.    
 }
 \value{
-Returns an object of class \code{"llra"} (also inheriting from class
-\code{"eRm"}) containing
+Returns an object of class \code{'llra'} (also inheriting from class \code{'eRm'}) containing
 
 \item{loglik}{Conditional log-likelihood.}
 \item{iter}{Number of iterations.}
@@ -97,9 +97,7 @@ Hatzinger, R. and Rusch, T. (2009) IRT models with relaxed assumptions
 in eRm: A manual-like instruction. \emph{Psychology Science Quarterly}, \bold{51},
 pp. 87--120, \url{http://erm.r-forge.r-project.org/psq_1_2009_06_87-120.pdf}
 }
-\author{
-Thomas Rusch
-}
+\author{Thomas Rusch}
 \section{Warning}{A warning is printed that the first two categories
   for polytomous items are equated to save parameters. See Hatzinger and
   Rusch (2009) for a justification why this is valid also from a substantive
@@ -110,27 +108,30 @@ S3 methods \code{\link{summary.llra}} and \code{\link{plotTR}} and
 \code{\link{plotGR}} for plotting. 
 }
 \examples{
-    ##Example 6 from Hatzinger & Rusch (2009)
-    data("llradat3")
-    groups <- c(rep("TG",30),rep("CG",30))
-    llra1 <- LLRA(llradat3,mpoints=2,groups=groups)
-    llra1
+##Example 6 from Hatzinger & Rusch (2009)
+groups <- c(rep("TG",30),rep("CG",30))
+llra1 <- LLRA(llradat3,mpoints=2,groups=groups)
+llra1
 
-    ##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4
-    ##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6
-    ##categories respectively.
 \dontrun{
-    data("llraDat2")
-    dats <- llraDat2[1:20]
-    groups <- llraDat2$group
-    tps <- 4
-    ex2 <- LLRA(dats,mpoints=tps,groups=groups) #baseline CG
-    #baseline TG1
-    ex2a <- LLRA(dats,mpoints=tps,groups=groups,baseline="TG1") #baseline TG1
-    ex2
-    summary(ex2)
-    summary(ex2a)
-    plotGR(ex2)
-    plotTR(ex2)
-}
+##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4
+##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6
+##categories respectively.
+dats <- llraDat2[1:20]
+groups <- llraDat2$group
+tps <- 4
+
+#baseline CG
+ex2 <- LLRA(dats,mpoints=tps,groups=groups) 
+
+#baseline TG1
+ex2a <- LLRA(dats,mpoints=tps,groups=groups,baseline="TG1") 
+
+#summarize results
+summary(ex2)
+summary(ex2a)
+
+#plotting
+plotGR(ex2)
+plotTR(ex2)}
 }
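
For orientation, a minimal sketch of the wide data layout LLRA() expects, with one block of items per time point. The data matrix and group vector below are made up for illustration, and the time-point-major column ordering (all items at t1 first, then all items at t2) is the assumed convention:

    library(eRm)
    set.seed(1)
    ## hypothetical data: 3 dichotomous items observed at 2 time points,
    ## 60 persons; assumed column order: items at t1, then items at t2
    X <- matrix(sample(0:1, 60 * 6, replace = TRUE), nrow = 60)
    groups <- rep(c("TG", "CG"), each = 30)  # made-up treatment/control split
    fit <- LLRA(X, mpoints = 2, groups = groups)
    summary(fit)
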
diff --git a/man/LLTM.Rd b/man/LLTM.Rd
old mode 100755
new mode 100644
index 11eb14d..31f9cfc
--- a/man/LLTM.Rd
+++ b/man/LLTM.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{LLTM}
 \alias{LLTM}
 \title{Estimation of linear logistic test models}
@@ -70,21 +71,16 @@ with the eRm package in R. Psychology Science, 49, 26-43.
 %\note{}
 \seealso{\code{\link{LRSM}},\code{\link{LPCM}}}
 \examples{
-
 #LLTM for 2 measurement points
 #100 persons, 2*15 items, W generated automatically
-data(lltmdat1)
 res1 <- LLTM(lltmdat1, mpoints = 2)
-print(res1)
+res1
 summary(res1)
 
 #Reparameterized Rasch model as LLTM (more parsimonious)
-data(lltmdat2)
 W <- matrix(c(1,2,1,3,2,2,2,1,1,1),ncol=2)              #design matrix
 res2 <- LLTM(lltmdat2, W = W)
-print(res2)
+res2
 summary(res2)
-
 }
-
 \keyword{models}
diff --git a/man/LPCM.Rd b/man/LPCM.Rd
old mode 100755
new mode 100644
index b3de4dd..4684c86
--- a/man/LPCM.Rd
+++ b/man/LPCM.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{LPCM}
 \alias{LPCM}
 %- Also NEED an '\alias' for EACH other topic documented here.
@@ -39,7 +40,7 @@ LPCM(X, W , mpoints = 1, groupvec = 1, se = TRUE, sum0 = TRUE,
   \code{model.matrix}, \code{vcov},\code{summary}, \code{logLik}, \code{person.parameters}.
 }
 \value{
-  Returns on object of class \code{eRm} containing:
+  Returns an object of class \code{'eRm'} containing:
 
   \item{loglik}{Conditional log-likelihood.}
   \item{iter}{Number of iterations.}
@@ -60,22 +61,19 @@ LPCM(X, W , mpoints = 1, groupvec = 1, se = TRUE, sum0 = TRUE,
 Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations,
 Recent Developments, and Applications. Springer.
 
-Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
+Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The \pkg{eRm} package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
 
-Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43.
+Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the \pkg{eRm} package in R. Psychology Science, 49, 26-43.
 }
 \author{Patrick Mair, Reinhold Hatzinger}
 %\note{}
 \seealso{\code{\link{LRSM}},\code{\link{LLTM}}}
 \examples{
-
 #LPCM for two measurement points and two subject groups
 #20 subjects, 2*3 items
-data(lpcmdat)
 G <- c(rep(1,10),rep(2,10))                   #group vector
 res <- LPCM(lpcmdat, mpoints = 2, groupvec = G)
-print(res)
+res
 summary(res)
 }
-
 \keyword{models}
diff --git a/man/LRSM.Rd b/man/LRSM.Rd
old mode 100755
new mode 100644
index 572c777..350b84b
--- a/man/LRSM.Rd
+++ b/man/LRSM.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{LRSM}
 \alias{LRSM}
 %- Also NEED an '\alias' for EACH other topic documented here.
@@ -38,7 +39,7 @@ LRSM(X, W , mpoints = 1, groupvec = 1, se = TRUE, sum0 = TRUE,
   \code{model.matrix}, \code{vcov},\code{summary}, \code{logLik}, \code{person.parameters}.
 }
 \value{
-  Returns on object of class \code{eRm} containing:
+  Returns an object of class \code{'eRm'} containing:
 
   \item{loglik}{Conditional log-likelihood.}
   \item{iter}{Number of iterations.}
@@ -59,22 +60,19 @@ LRSM(X, W , mpoints = 1, groupvec = 1, se = TRUE, sum0 = TRUE,
 Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations,
 Recent Developments, and Applications. Springer.
 
-Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
+Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The \pkg{eRm} package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
 
-Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43.
+Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the \pkg{eRm} package in R. Psychology Science, 49, 26-43.
 }
 \author{Patrick Mair, Reinhold Hatzinger}
 %\note{}
 \seealso{\code{\link{LLTM}},\code{\link{LPCM}}}
 \examples{
-
 #LRSM for two measurement points
 #20 subjects, 2*3 items, W generated automatically,
 #first parameter set to 0, no standard errors computed.
 
-data(lrsmdat)
 res <- LRSM(lrsmdat, mpoints = 2, groupvec = 1, sum0 = FALSE, se = FALSE)
-print(res)
+res
 }
-
 \keyword{models}
diff --git a/man/LRtest.Rd b/man/LRtest.Rd
old mode 100755
new mode 100644
index a7437b9..cccbaf9
--- a/man/LRtest.Rd
+++ b/man/LRtest.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{LRtest}
 \alias{LRtest.Rm}
 \alias{LRtest}
@@ -5,90 +6,68 @@
 \alias{summary.LR}
 \alias{plotGOF}
 \alias{plotGOF.LR}
-%- Also NEED an '\alias' for EACH other topic documented here.
 \title{Computation of Andersen's LR-test.}
-\description{This LR-test is based on subject subgroup splitting.
-}
-\usage{
-\method{LRtest}{Rm}(object, splitcr = "median", se = FALSE)
-\method{plotGOF}{LR}(x, beta.subset = "all", main="Graphical Model Check",
-   xlab = NULL, ylab = NULL, tlab = "item",
-   ylim = c(-3, 3), xlim = c(-3, 3), type = "p", pos = "4",
-   conf = NULL, ctrline = NULL, ...)
-%\method{print}{LR}(x,...)
-%\method{summary}{LR}(object,...)
-}
-%- maybe also 'usage' for other objects documented here.
+\description{This LR-test is based on subject subgroup splitting.}
+\usage{\method{LRtest}{Rm}(object, splitcr = "median", se = TRUE)
+
+\method{plotGOF}{LR}(x, beta.subset = "all", main = "Graphical Model Check", xlab, ylab,
+    tlab = "item", xlim, ylim, type = "p", pos = 4, conf = NULL, ctrline = NULL,
+    asp = 1, x_axis = TRUE, y_axis = TRUE, set_par = TRUE, reset_par = TRUE, \dots)}
 \arguments{
-  \item{object}{Object of class \code{Rm}.}
-  \item{splitcr}{Split criterion for subject raw score splitting. \code{all.r} corresponds to a
-  full raw score split, \code{median} uses the median as split criterion, \code{mean} performs a mean-split.
-  Optionally \code{splitcr} can also be a vector which assigns each person to a
-  certain subgroup (e.g., following an external criterion). This vector can be numeric, character or a factor.}
-  \item{se}{If \code{TRUE} standard errors for beta's are computed.}
-
-%Arguments for \code{plotGOF}:
-  \item{x}{Object of class \code{LR}. Also used for visualizing the fit of single items.}
+  \item{object}{Object of class \code{"Rm"}.}
+  \item{splitcr}{Split criterion for subject raw score splitting.
+    \code{"all.r"} corresponds to a full raw score split, \code{"median"} uses the median as split criterion, \code{"mean"} performs a mean split.
+    Optionally \code{splitcr} can also be a vector which assigns each person to a certain subgroup (e.g., following an external criterion).
+    This vector can be numeric, character or a factor.}
+  \item{se}{controls computation of standard errors in the submodels (default: \code{TRUE}).}
+
+  \item{x}{Object of class \code{"LR"}. Also used for visualizing the fit of single items.}
   \item{beta.subset}{If \code{"all"}, all items are plotted. Otherwise numeric subset vector can be specified.}
-  \item{main}{Main title of the plot.}
-  \item{xlab}{Label on x-axis, default gives name of \code{splitcr} and level.}
-  \item{ylab}{Label on y-axis, default gives name of \code{splitcr} and level.}
-  \item{tlab}{Specification of item labels: \code{"item"} prints the item names, \code{"number"} gives integers
-       corresponding to order of the beta parameters, if \code{"none"} no labels are printed.
-       \code{"identify"} allows for an interactive labelling. Initially no labels are printed, after clicking
-       close to an item point the corresponding label is added. The identification process is terminated by clicking
-       the second button and selecting 'Stop' from the menu, or from the 'Stop' menu on the graphics window.
-       For more information and basic operation see \code{\link{identify}}.
-       }
-  \item{xlim}{Limits on x-axis.}
-  \item{ylim}{Limits on y-axis.}
-  \item{type}{Plotting type.(see \code{\link{plot}})}
-  \item{pos}{Position of the item label (see \code{\link{text}})}
-  \item{conf}{for plotting confidence ellipses for the item parameters. If \code{conf=NULL}
-             (the default) no ellipses are drawn. Otherwise, \code{conf} must be
-             specified as a list with optional elements: \code{gamma}, is
-             the confidence level (numeric), \code{col} and \code{lty},
-             colour and linetype (see \code{\link{par}}), \code{which} (numeric index vector) specifying for which
-             items ellipses are drawn (must be a subset of \code{beta.subset}),
-             and \code{ia}, logical, if the ellipses are to be drawn interactively (cf.
-             \code{tlab="identify"} above). If \code{conf} is specified as a an empty list, %\code{conf=list()},
-             the default values \code{conf=list(gamma=0.95, col="red", lty="dashed", ia=FALSE)}
-             will be used. See example below. To use \code{conf}, the LR object \code{x} has
-             to be generated using the option \code{se=TRUE} in \code{LRtest()}.
-             For specification of \code{col} and \code{which} see Details and Examples below.}
-  \item{ctrline}{for plotting confidence bands (control lines, cf.eg.Wright and Stone, 1999).
-             If \code{ctrline=NULL}
-             (the default) no lines are drawn. Otherwise, \code{ctrline} must be
-             specified as a list with optional elements: \code{gamma}, is
-             the confidence level (numeric), \code{col} and \code{lty}, colour and linetype (see \code{\link{par}}).
-             If \code{ctrline} is specified as \code{ctrline=list()},
-             the default values \code{conf=list(gamma=0.95, col="blue", lty="solid")}
-             will be used. See examples below. To use \code{ctrline}, the LR object \code{x} has
-             to be generated using the option \code{se=TRUE} in \code{LRtest()}.
-             }
-  \item{...}{Additional parameters.}
-
+  \item{main}{Title of the plot.}
+  \item{xlab}{Label on \eqn{x}{x}-axis, default gives name of \code{splitcr} and level.}
+  \item{ylab}{Label on \eqn{y}{y}-axis, default gives name of \code{splitcr} and level.}
+  \item{tlab}{Specification of item labels: \code{"item"} prints the item names, \code{"number"} gives integers corresponding to order of the beta parameters, if \code{"none"} no labels are printed.
+    \code{"identify"} allows for an interactive labelling.
+    Initially no labels are printed, after clicking close to an item point the corresponding label is added.
+    The identification process is terminated by clicking the second button and selecting 'Stop' from the menu, or from the 'Stop' menu on the graphics window.
+    For more information and basic operation see \code{\link{identify}}.}
+  \item{xlim}{Limits on \eqn{x}{x}-axis.}
+  \item{ylim}{Limits on \eqn{y}{y}-axis.}
+  \item{type}{Plotting type (see \code{\link[graphics]{plot}}).}
+  \item{pos}{Position of the item label (see \code{\link[graphics]{text}}).}
+  \item{conf}{for plotting confidence ellipses for the item parameters.
+    If \code{conf = NULL} (the default) no ellipses are drawn.
+    Otherwise, \code{conf} must be specified as a list with optional elements: \code{gamma}, the confidence level (numeric); \code{col} and \code{lty}, color and linetype (see \code{\link[graphics:par]{par}}); \code{which} (numeric index vector), specifying for which items ellipses are drawn (must be a subset of \code{beta.subset}); and \code{ia}, logical, whether the ellipses are to be drawn interactively (cf. \code{tlab = "identify"} above).
+    For details about the default behavior, if \code{conf} is specified as an empty list, see Details and Examples below.
+    To use \code{conf}, the LR object \code{x} has to be generated using the option \code{se = TRUE} in \code{LRtest()}.
+    For specification of \code{col} and \code{which} see Details and Examples below.}
+  \item{ctrline}{for plotting confidence bands (control lines, cf., e.g., Wright and Stone, 1999).
+    If \code{ctrline = NULL} (the default) no lines are drawn.
+    Otherwise, \code{ctrline} must be specified as a list with optional elements: \code{gamma}, the confidence level (numeric); \code{col} and \code{lty}, color and linetype (see \code{\link[graphics:par]{par}}).
+    If \code{ctrline} is specified as \code{ctrline = list()}, the default values \code{ctrline = list(gamma = 0.95, col = "blue", lty = "solid")} will be used.
+    See examples below.
+    To use \code{ctrline}, the LR object \code{x} has to be generated using the option \code{se = TRUE} in \code{LRtest()}.}
+  \item{asp}{sets the \eqn{y/x}{y/x} ratio of the plot (see \code{\link[graphics]{plot.window}}).}
+  \item{x_axis}{if \code{TRUE}, the \eqn{x}{x}-axis will be plotted.}
+  \item{y_axis}{if \code{TRUE}, the \eqn{y}{y}-axis will be plotted.}
+  \item{set_par}{if \code{TRUE}, graphical parameters will be set by the function to optimize the plot's appearance. Unless \code{reset_par = FALSE}, these will be reset to the previous \code{\link[graphics:par]{par}} settings.}
+  \item{reset_par}{if \code{TRUE}, graphical parameters will be reset to defaults via \code{\link[graphics:par]{par}()} after plotting (only if \code{set_par = TRUE}). To make adjustments \emph{after} using \code{plotGOF}, this reset can be switched off. Note that the changed graphical parameters will remain in place unless they are redefined (using \code{\link[graphics:par]{par}()}) or the device is closed.}
+  \item{...}{additional parameters.}
 }
-\details{If the data set contains missing values and \code{mean} or \code{median} is specified as splitcriterion,
-         means or medians are calculated for each missing value subgroup and consequently used for raw score splitting.
-
-         When using interactive selection for both labelling of single points (\code{tlab = "identify"} and
-         drawing confidence ellipses at certain points (\code{ia = TRUE}) then first all plotted points are labelled
-         and afterwards all ellipses are generated. Both identification processes can be terminated
-         by clicking the second (right) mouse button and selecting `Stop' from the menu, or from the `Stop'
-         menu on the graphics window.
-
-         Using the specification \code{which} in allows for selectively drawing ellipses for
-         certain items only, e.g., \code{which=1:3} draws ellipses for items 1 to 3 (as long as they are included
-         in \code{beta.subset}). The default is drawing ellipses for all items. 
-         The element \code{col} in the \code{conf} list can either be a single colour
-         specification such as \code{"blue"} or a vector with colour specifications for all items.
-         The length must be the same as the number of ellipses to be drawn. For colour specification
-         a palette can be set up using standard palettes (e.g. \code{\link{rainbow}}) or palettes from
-         the \code{colorspace} or \code{RColorBrewer} package. An example is given below.
-         
-
-         \code{summary} and \code{print} methods are available for objects of class \code{LR}.
+\details{
+If the data set contains missing values and \code{mean} or \code{median} is specified as split criterion, means or medians are calculated for each missing value subgroup and consequently used for raw score splitting.
+
+When using interactive selection for both labelling of single points (\code{tlab = "identify"}) and drawing confidence ellipses at certain points (\code{ia = TRUE}), first all plotted points are labelled and afterwards all ellipses are generated.
+Both identification processes can be terminated by clicking the second (right) mouse button and selecting `Stop' from the menu, or from the `Stop' menu on the graphics window.
+
+Using the specification \code{which} allows for selectively drawing ellipses for certain items only, e.g., \code{which = 1:3} draws ellipses for items 1 to 3 (as long as they are included in \code{beta.subset}).
+The default is drawing ellipses for all items.
+The element \code{col} in the \code{conf} list can either be a single color specification such as \code{"blue"} or a vector with color specifications for all items.
+The length must be the same as the number of ellipses to be drawn.
+For color specification a palette can be set up using standard palettes (e.g., \code{\link{rainbow}}) or palettes from the \code{colorspace} or \code{RColorBrewer} packages.
+An example is given below.
+
+\code{summary} and \code{print} methods are available for objects of class \code{LR}.
 }
 \value{
 \code{LRtest} returns an object of class \code{LR} containing:
@@ -107,52 +86,69 @@
 \references{
 Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations, Recent Developments, and Applications. Springer.
 
-Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
+Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The \pkg{eRm} package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
 
-Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43.
+Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the \pkg{eRm} package in R. Psychology Science, 49, 26-43.
 
-Wright, B.D.,  and Stone, M.H. (1999). Measurement essentials. Wide Range Inc., Wilmington.
-        (\url{http://www.rasch.org/measess/me-all.pdf} 28Mb).
+Wright, B.D., and Stone, M.H. (1999). Measurement essentials. Wide Range Inc., Wilmington. (\url{http://www.rasch.org/measess/me-all.pdf} 28Mb).
 }
-
-\author{Patrick Mair, Reinhold Hatzinger}
-%\note{}
-\seealso{\code{\link{Waldtest}}}
+\author{Patrick Mair, Reinhold Hatzinger, Marco J. Maier}
+\seealso{\code{\link{Waldtest}}, \code{\link{MLoef}}}
 \examples{
+# the object used is the result of running ... RM(raschdat1)
+res <- raschdat1_RM_fitted       # see ? raschdat1_RM_fitted
 
 # LR-test on dichotomous Rasch model with user-defined split
-splitvec <- sample(1:3, 100, replace = TRUE)
-data(raschdat1)
-res <- RM(raschdat1)
+splitvec <- sample(1:2, 100, replace = TRUE)
 lrres <- LRtest(res, splitcr = splitvec)
 lrres
 summary(lrres)
 
 \dontrun{
-# goodness-of-fit plot with interactive labelling of items
-plotGOF(lrres, tlab = "identify")
-}
+# goodness-of-fit plot with interactive labelling of items w/o standard errors
+plotGOF(lrres, tlab = "identify")}
+
+# LR-test with a full raw-score split
+X <- sim.rasch(1000, -2:2, seed = 5)
+res2 <- RM(X)
+full_lrt <- LRtest(res2, splitcr = "all.r")
+full_lrt
 
+\dontrun{
 # LR-test with mean split, standard errors for beta's
-lrres2 <- LRtest(res, split = "mean", se = TRUE)
+lrres2 <- LRtest(res, split = "mean")}
+
+# to save computation time, the results are loaded from raschdat1_RM_lrres2
+lrres2 <- raschdat1_RM_lrres2                    # see ?raschdat1_RM_lrres2
 
 # goodness-of-fit plot
 # additional 95 percent control line with user specified style
-plotGOF(lrres2, ctrline=list(gamma=0.95, col="red", lty="dashed"))
-
+plotGOF(lrres2, ctrline = list(gamma = 0.95, col = "red", lty = "dashed"))
 
 # goodness-of-fit plot for items 1, 14, 24, and 25
 # additional 95 percent confidence ellipses, default style
-plotGOF(lrres2, beta.subset=c(14,25,24,1), conf=list())
+plotGOF(lrres2, beta.subset = c(14, 25, 24, 1), conf = list())
 
+\dontrun{
 # goodness-of-fit plot for items 1, 14, 24, and 25
 # for items 1 and 24 additional 95 percent confidence ellipses
-# using colours for these 2 items from the colorspace package
-\dontrun{ 
-library(colorspace)
-colors<-rainbow_hcl(2)
-plotGOF(lrres2, beta.subset=c(14,25,24,1), conf=list(which=c(1,14), col=colors))
+# using colors for these 2 items from the colorspace package
+library("colorspace")
+my_colors <- rainbow_hcl(2)
+plotGOF(lrres2, beta.subset = c(14, 25, 24, 1),
+        conf = list(which = c(1, 14), col = my_colors))}
+
+# first, save current graphical parameters in an object
+old_par <- par(mfrow = c(1, 2), no.readonly = TRUE)
+# plots
+plotGOF(lrres2, ctrline = list(gamma = 0.95, col = "red", lty = "dashed"),
+  xlim = c(-3, 3), x_axis = FALSE, set_par = FALSE)
+axis(1, seq(-3, 3, .5))
+
+plotGOF(lrres2, conf = list(), xlim = c(-3, 3), x_axis = FALSE, set_par = FALSE)
+axis(1, seq(-3, 3, .5))
+text(-2, 2, labels = "Annotation")
+# reset graphical parameters
+par(old_par)
 }
-}
-
 \keyword{models}
diff --git a/man/MLoef.Rd b/man/MLoef.Rd
old mode 100755
new mode 100644
index 33b3a2b..5b4a9a3
--- a/man/MLoef.Rd
+++ b/man/MLoef.Rd
@@ -1,43 +1,36 @@
+\encoding{UTF-8}
 \name{MLoef}
 \alias{MLoef}
 \alias{print.MLoef}
 \alias{summary.MLoef}
-\title{Computation of Martin-Loef's LR-Test}
-\description{This LR-Test is based on item subgroup splitting.}
-\usage{
-MLoef(robj, splitcr = "median")
-}
+\title{Martin-Löf's Likelihood-Ratio-Test}
+\description{This Likelihood-Ratio-Test is based on item subgroup splitting.}
+\usage{MLoef(robj, splitcr = "median")}
 \arguments{
-  \item{robj}{Object of class \code{Rm}.}
-  \item{splitcr}{Split criterion to define the item groups.
-    \code{"median"} and \code{"mean"} split items in two groups based on their
-    items' raw scores. \code{splitcr} can also be a vector of length k (where k
-    denotes the number of items) that takes two or more distinct values to
-    define groups used for the Martin-Loef Test.}
+  \item{robj}{
+    An object of class \code{'Rm'}.
+  }
+  \item{splitcr}{
+    Split criterion to define the item groups.
+    \code{"median"} and \code{"mean"} split items in two groups based on their items' raw scores.\cr%
+    \code{splitcr} can also be a vector of length \eqn{k}{k} (where \eqn{k}{k} denotes the number of items) that takes two or more distinct values to define groups used for the Martin-Löf Test.
+  }
 }
 \details{
-  This function implements a generalization of the Martin-Loef test for polytomous
-  items as proposed by Christensen, Bjorner, Kreiner & Petersen (2002), but does
-  currently not allow for missing values.
-    % The function can handle missing values, as long as every subject has at
-    % least 2 valid responses in each group of items.
+  This function implements a generalization of the Martin-Löf test for polytomous items as proposed by Christensen, Bjørner, Kreiner & Petersen (2002), but currently does not allow for missing values.
 
-  If the split criterion is \code{"median"} or \code{"mean"} and one or more items'
-  raw scores are equal the median resp. mean, \code{MLoef} will assign those items
-  to the lower raw score group. \code{summary.MLoef} gives detailed information
-  about the allocation of all items.
+  If the split criterion is \code{"median"} or \code{"mean"} and one or more items' raw scores equal the median or mean, respectively, \code{MLoef} will assign those items to the lower raw score group.
+  \code{summary.MLoef} gives detailed information about the allocation of all items.
 
-  \code{summary} and \code{print} methods are available for objects of class
-  \code{MLoef}.
+  \code{summary} and \code{print} methods are available for objects of class \code{'MLoef'}.
 
-  An 'exaxt' version of the Martin-Loef test for binary items is implemented
-  in the function \code{\link{NPtest}}.
+  An \sQuote{exact} version of the Martin-Löf test for binary items is implemented in the \code{\link[eRm:NPtest]{NPtest}} function.
 }
 \value{
   \code{MLoef} returns an object of class \code{MLoef} containing:
     \item{LR}{LR-value}
-    \item{df}{degrees of freedom of the test statistic}
-    \item{p.value}{p-value of the test}
+    \item{df}{degrees of freedom}
+    \item{p.value}{\emph{p}-value of the test}
     \item{fullModel}{the overall Rasch model}
     \item{subModels}{a list containing the submodels}
     \item{Lf}{log-likelihood of the full model}
@@ -49,17 +42,16 @@ MLoef(robj, splitcr = "median")
     \item{call}{the matched call}
 }
 \references{
-Christensen, K. B., Bjorner, J. B., Kreiner S. & Petersen J. H. (2002). Testing unidimensionality in polytomous Rasch models. \emph{Psychometrika, (67)}4, 563--574.
+Christensen, K. B., Bjørner, J. B., Kreiner S. & Petersen J. H. (2002). Testing unidimensionality in polytomous Rasch models. \emph{Psychometrika, (67)}4, 563--574.
 
 Fischer, G. H., and Molenaar, I. (1995). \emph{Rasch Models -- Foundations, Recent Developments, and Applications.} Springer.
 
 Rost, J. (2004). \emph{Lehrbuch Testtheorie -- Testkonstruktion.} Bern: Huber.
 }
-\author{Marco Maier, Reinhold Hatzinger}
-%\note{}
+\author{Marco J. Maier, Reinhold Hatzinger}
 \seealso{\code{\link{LRtest}}, \code{\link{Waldtest}}}
 \examples{
-# Martin-Loef-test on dichotomous Rasch model using "median" and a user-defined
+# Martin-Löf-test on dichotomous Rasch model using "median" and a user-defined
 # split vector. Note that group indicators can be of character and/or numeric.
 splitvec <- c(1, 1, 1, "x", "x", "x", 0, 0, 1, 0)
 
diff --git a/man/NPtest.Rd b/man/NPtest.Rd
old mode 100755
new mode 100644
index 248187a..198dc5d
--- a/man/NPtest.Rd
+++ b/man/NPtest.Rd
@@ -1,180 +1,215 @@
-\name{NPtest}
-\Rdversion{1.1}
+\encoding{UTF-8}
+\name{NonparametricTests}
+\alias{NonparametricTests}
 \alias{NPtest}
-\title{function to perform nonparametric Rasch model tests}
-\description{A variety of nonparametric tests as proposed by Ponocny(2001) and an 'exact' version of the Martin-Loef test are implemented. The function operates on
-    random binary matrices that have been generated using an
-    MCMC algorithm (Verhelst, 2008) from the RaschSampler package (Hatzinger, Mair, and Verhelst, 2009).
-}
-\usage{
-NPtest(obj, n=NULL, method = "T1", ...)
+\title{A Function to Perform Nonparametric Rasch Model Tests}
+\description{A variety of nonparametric tests as proposed by Ponocny (2001), Koller and Hatzinger (2012), and an \sQuote{exact} version of the Martin-Löf test are implemented. The function operates on random binary matrices that have been generated using an \acronym{MCMC} algorithm (Verhelst, 2008) from the \pkg{RaschSampler} package (Hatzinger, Mair, and Verhelst, 2009).%
 }
+\usage{NPtest(obj, n = NULL, method = "T1", \dots)}
 \arguments{
   \item{obj}{
-     A binary data matrix (or data frame) or
-     an object containing the output from the \code{\link[RaschSampler]{RaschSampler}} package.
+    A binary data matrix (or data frame) or an object containing the output from the \pkg{\link[eRm:RaschSampler]{RaschSampler}} package.
   }
   \item{n}{
-     If \code{obj} is a matrix or a data frame, \code{n} n is the number of sampled matrices
-     (default is 500)
+    If \code{obj} is a matrix or a data frame, \code{n} is the number of sampled matrices (default is 500)
   }
   \item{method}{
-     One of the test statistics. See details below.
+     One of the test statistics. See Details below.
   }
   \item{\dots}{
-     Further arguments for specifying the statistics functions. See details below.
+     Further arguments according to \code{method}.
+     See Details below.
+     Additionally, the sampling routine can be controlled by specifying \code{burn_in}, \code{step}, and \code{seed} (for details see below and \code{\link[eRm:rsctrl]{rsctrl}}).
+     A summary of the sampling object may be obtained using the option \code{RSinfo = TRUE}.
   }
 }
 \details{
-     The function uses the  \code{\link[RaschSampler]{RaschSampler}} package. On input the user has to supply
-     either a binary data matrix or a RaschSampler output object. If the input is a data matrix, the RaschSampler
-     is called with default values (i.e., \code{rsctrl(burn_in = 256, n_eff = n, step = 32)}, see \code{\link[RaschSampler]{rsctrl}}),
-     where \code{n} may be specified by the user (otherwise it is 500). The starting values for the random number generators are chosen
-     randomly using system time.
-     Methods other than those listed below can easily be implemented using the RaschSampler package directly.
-
-     The currently implemented methods (following Ponocny's notation of \emph{T}-statistics) and their options are:
-   \describe{
-     \item{\bold{T1:}}{\code{method = "T1"}, no further option}\cr
-     Checks for local dependence via increased inter-item correlations. For all item pairs
-     cases are counted with equal responses on both items.
-
-     \item{\bold{T2:}}{\code{method = "T2", idx = NULL, stat = "var"}}\cr
-     \code{idx} \ldots vector of indexes specifying items which define a subscale, e.g., \code{idx = c(1, 5, 7)}\cr
-     \code{stat} \ldots one of \code{"var"} (variance), \code{"mad1"} (mean absolute deviation),
-     \code{"mad2"} (median absolute deviation), \code{"range"} (range)\cr
-     Checks for local dependence within model deviating subscales via increased
-     dispersion of subscale person rawscores.
-
-     \item{\bold{T4:}}{\code{method = "T4", idx = NULL, group = NULL, alternative = "high"}}\cr
-     \code{idx} \ldots vector of indexes specifying items which define a subscale, e.g., \code{idx = c(1, 5, 7)}\cr
-     \code{group} \ldots logical vector defining a subject group, e.g., \code{group = (age >= 15 && age < 30)}\cr
-     \code{alternative} \ldots one of \code{"high"} or \code{"low"}. Specifies the alternative hypothesis.\cr
-     Checks for group anomalies (DIF) via too high (low) raw scores on item(s) for specified group.
-
-     \item{\bold{T7:}}{\code{method = "T7", idx = NULL}}\cr
-     \code{idx} \ldots vector of indexes specifying items which define a subscale, e.g., \code{idx = c(1, 5, 7)}\cr
-     Checks for lower discrimination (2PL) in item subscale via counting cases with response 1 on more
-     difficult and 0 on easier items. The test is global for the subscale, i.e. all subscale items are evaluated
-     using a single statistic.
-
-     \item{\bold{T7a:}}{\code{method = "T7a", idx = NULL}}\cr
-     \code{idx} \ldots vector of indexes specifying items to investigate, e.g., \code{idx = c(1, 5, 7)}\cr
-     Checks for lower discrimination (2PL) of an item compared to another item  via counting cases with response 1 on more
-     difficult and 0 on easier item. The test is performed pairwise, i.e. a statistic is calculated for each item pair.
-
-     \item{\bold{T10:}}{\code{method = "T10", splitcr="median"}}\cr
-     \code{splitcr} \ldots split criterion for subject raw score splitting. \code{"median"} uses the median as split criterion,
-     \code{"mean"} performs a mean-split. Optionally \code{splitcr} can also be a vector which assigns each person to a
-     one of two subgroups (e.g., following an external criterion). This vector can be numeric, character, logical or a factor.\cr
-     Gobal test for subgroup-invariance. Checks for different item difficulties in two subgroups (for details see Ponocny, 2001).
-
-     \item{\bold{T11:}}{\code{method = "T11"}, no further option}\cr
-     Gobal test for local dependence. The statistic calculates the sum of absolute deviations between the observed inter-item correlations
-     and the expected correlations.
-   }
-
-    The 'exact' version of the \bold{Martin-Loef} statistic is specified via \code{method = "MLoef"} and optionally \code{splitcr}
-    (see \code{\link{MLoef}}).
+  The function uses the \pkg{\link[eRm:RaschSampler]{RaschSampler}} package, which is now packaged with \pkg{eRm} for convenience.
+  It can, of course, still be accessed and downloaded separately via CRAN.
+  
+  As an input the user has to supply either a binary data matrix or a \pkg{RaschSampler} output object.
+  If the input is a data matrix, the \pkg{RaschSampler} is called with default values (i.e., \code{rsctrl(burn_in = 256, n_eff = n, step = 32)}, see \code{\link[eRm:rsctrl]{rsctrl}}), where \code{n} corresponds to \code{n_eff} (the default number of sampled matrices is 500).
+  By default, the starting values for the random number generators (\code{seed}) are chosen randomly using system time.
+  Methods other than those listed below can easily be implemented using the \pkg{RaschSampler} package directly.
+
+  The currently implemented methods (following Ponocny's notation of \eqn{T}{T}-statistics) and their options are:
+  \describe{
+    \item{\eqn{T_1}{T_1}:}{\code{method = "T1"}\cr%
+      Checks for local dependence via increased inter-item correlations.
+      For all item pairs, cases are counted with equal responses on both items.
+    }
+    \item{\eqn{T_{1m}}{T_1m}:}{\code{method = "T1m"}\cr%
+      Checks for multidimensionality via decreased inter-item correlations.
+      For all item pairs, cases are counted with equal responses on both items.
+    }
+    \item{\eqn{T_{1l}}{T_1l}:}{\code{method = "T1l"}\cr%
+      Checks for learning.
+      For all item pairs, cases are counted with response pattern (1,1).
+    }
+    \item{\eqn{T_{md}}{T_md}:}{\code{method = "Tmd", idx1, idx2}\cr%
+      \code{idx1} and \code{idx2} are vectors of indices specifying items which define two subscales, e.g., \code{idx1 = c(1, 5, 7)} and \code{idx2 = c(3, 4, 6)}\cr%
+      Checks for multidimensionality based on correlations of person raw scores for the subscales.
+    }
+    \item{\eqn{T_2}{T_2}:}{\code{method = "T2", idx = NULL, stat = "var"}\cr%
+      \code{idx} is a vector of indices specifying items which define a subscale, e.g., \code{idx = c(1, 5, 7)}\cr%
+      \code{stat} defines the used statistic as a character object which can be: \code{"var"} (variance), \code{"mad1"} (mean absolute deviation), \code{"mad2"} (median absolute deviation), or \code{"range"} (range)\cr%
+      Checks for local dependence within model deviating subscales via increased dispersion of subscale person rawscores.
+    }
+    \item{\eqn{T_{2m}}{T_2m}:}{\code{method = "T2m", idx = NULL, stat = "var"}\cr%
+      \code{idx} is a vector of indices specifying items which define a subscale, e.g., \code{idx = c(1, 5, 7)}\cr%
+      \code{stat} defines the used statistic as a character object which can be: \code{"var"} (variance), \code{"mad1"} (mean absolute deviation), \code{"mad2"} (median absolute deviation), \code{"range"} (range)\cr%
+      Checks for multidimensionality within model deviating subscales via decreased dispersion of subscale person rawscores.
+    }
+    \item{\eqn{T_4}{T_4}:}{\code{method = "T4", idx = NULL, group = NULL, alternative = "high"}\cr%
+      \code{idx} is a vector of indices specifying items which define a subscale, e.g., \code{idx = c(1, 5, 7)}\cr%
+      \code{group} is a logical vector defining a subject group, e.g., \code{group = ((age >= 20) & (age < 30))}\cr%
+      \code{alternative} specifies the alternative hypothesis and can be: \code{"high"} or \code{"low"}.\cr%
+      Checks for group anomalies (\acronym{DIF}) via too high (low) raw scores on item(s) for specified group.
+    }
+%  removed in version 0.14-5
+%     \item{\bold{T7:}}{\code{method = "T7", idx = NULL}}\cr
+%     \code{idx} \ldots vector of indices specifying items which define a subscale, e.g., \code{idx = c(1, 5, 7)}\cr
+%     Checks for lower discrimination (2PL) in item subscale via counting cases with response 1 on more
+%     difficult and 0 on easier items. The test is global for the subscale, i.e. all subscale items are evaluated
+%     using a single statistic.
+%
+%     \item{\bold{T7a:}}{\code{method = "T7a", idx = NULL}}\cr
+%     \code{idx} \ldots vector of indices specifying items to investigate, e.g., \code{idx = c(1, 5, 7)}\cr
+%     Checks for lower discrimination (2PL) of an item compared to another item  via counting cases with response 1 on more
+%     difficult and 0 on easier item. The test is performed pairwise, i.e. a statistic is calculated for each item pair.
+%     # Examples
+%     ##---- T7, T7a --------------------------------------------------
+%     # simultaenous test for all items in subscale
+%     t7<-NPtest(rmat,method="T7",idx=1:3)
+%     t7
+%
+%     # test for item-pairs
+%     t7a<-NPtest(rmat,method="T7a",idx=c(1,3,5)) # test for item-pairs
+%     t7a
+
+    \item{\eqn{T_{10}}{T_10}:}{\code{method = "T10", splitcr = "median"}\cr%
+      \code{splitcr} defines the split criterion for subject raw score splitting.
+      \code{"median"} uses the median as split criterion, \code{"mean"} performs a mean-split.
+      Optionally, \code{splitcr} can also be a vector which assigns each person to one of two subgroups (e.g., following an external criterion).
+      This vector can be numeric, character, logical, or a factor.\cr%
+      Global test for subgroup-invariance.
+      Checks for different item difficulties in two subgroups (for details see Ponocny, 2001).
+    }
+    \item{\eqn{T_{11}}{T_11}:}{\code{method = "T11"}\cr%
+      Global test for local dependence.
+      The statistic calculates the sum of absolute deviations between the observed inter-item correlations and the expected correlations.
+    }
+    \item{\eqn{T_{pbis}}{T_pbis}:}{\code{method = "Tpbis", idxt, idxs}\cr
+      Test for discrimination.
+      The statistic calculates a point-biserial correlation for a test item (specified via \code{idxt}) with the person raw scores for a subscale of the test sum (specified via \code{idxs}).
+      If the correlation is too low, the test item shows different discrimination compared to the items of the subscale.
+    }
+    \item{\emph{Martin-Löf}}{%
+      The \sQuote{exact} version of the \emph{Martin-Löf} statistic is specified via \code{method = "MLoef"} and optionally \code{splitcr} (see \code{\link{MLoef}}).%
+    }
+  }
 }
 \value{
-Depends on the method used. For each method a list is returned. The returned objects are of class
-\code{T1obj}, \code{T2obj}, \code{T4obj}, \code{T7obj}, \code{T7aobj}, \code{T10obj}, \code{T11obj} corresponding to the method used.
-The main output element is \code{prop} giving the one-sided p-value, i.e., the number of statistics from the sampled matrices which are equal
-or exceed the statistic based on the observed data. For \emph{T1} and \emph{T7a} \code{prop} is a vector.
-For the \emph{Martin-Loef} test the returned object is of class \code{MLobj}. Besides other elements, it contains a \code{prop} vector and \code{MLres}, the output
-object from the asymptotic Martin-Loef test on the input data.
-}
-\references{
-Ponocny, I. (2001) Nonparametric goodness-of-fit tests for the rasch model. Psychometrika,  Volume 66, Number 3\cr
-Verhelst, N. D. (2008) An Efficient MCMC Algorithm to Sample Binary
-Matrices with Fixed Marginals. Psychometrika, Volume 73, Number 4\cr
-Verhelst, N. D., Hatzinger, R., and Mair, P. (2007) The Rasch Sampler, Journal of Statistical Software, Vol. 20, Issue 4, Feb 2007
+Depending on the \code{method} argument, a list is returned which has one of the following classes:
+\code{'T1obj'}, \code{'T1mobj'}, \code{'T1lobj'}, \code{'Tmdobj'}, \code{'T2obj'}, \code{'T2mobj'}, \code{'T4obj'}, \code{'T10obj'}, \code{'T11obj'}, or \code{'Tpbisobj'}.
+
+The main output element is \code{prop} giving the one-sided \eqn{p}{p}-value, i.e., the number of statistics from the sampled matrices which are equal or exceed the statistic based on the observed data.
+For \eqn{T_1}{T_1}, \eqn{T_{1m}}{T_1m}, and \eqn{T_{1l}}{T_1l}, \code{prop} is a vector.
+For the Martin-Löf test, the returned object is of class \code{'MLobj'}.
+Besides other elements, it contains a \code{prop} vector and \code{MLres}, the output object from the asymptotic Martin-Löf test on the input data.
 }
-\author{
-Reinhold Hatzinger
+\note{The \pkg{RaschSampler} package is no longer required to use \code{NPtest} since \pkg{eRm} version 0.15-0.}
+\references{%
+Ponocny, I. (2001). Nonparametric goodness-of-fit tests for the Rasch model. \emph{Psychometrika, 66}(3), 437--459. \url{http://dx.doi.org/10.1007/BF02294444}
+
+Verhelst, N. D. (2008). An efficient \acronym{MCMC} algorithm to sample binary matrices with fixed marginals. \emph{Psychometrika, 73}(4), 705--728. \url{http://dx.doi.org/10.1007/s11336-008-9062-3}
+
+Verhelst, N., Hatzinger, R., & Mair, P. (2007). The Rasch sampler. \emph{Journal of Statistical Software, 20}(4), 1--14. \url{http://www.jstatsoft.org/v20/i04}
+
+Koller, I., & Hatzinger, R. (2013). Nonparametric tests for the Rasch model: Explanation, development, and application of quasi-exact tests for small samples. \emph{Interstat, 11}, 1--16. \url{http://interstat.statjournals.net/YEAR/2013/abstracts/1311002.php}
+
+Koller, I., Maier, M. J., & Hatzinger, R. (2015). An empirical power analysis of quasi-exact tests for the Rasch model: Measurement invariance in small samples. \emph{Methodology, 11}(2), 45--54. \url{http://dx.doi.org/10.1027/1614-2241/a000090}
 }
-%\note{
-%Maybe notes appear here
+\author{Reinhold Hatzinger}
+%\seealso{
+%  \code{\link{RaschSampler}}
 %}
-\seealso{
-    \code{\link[RaschSampler]{RaschSampler}}
-}
 \examples{
 ### Preparation:
 
 # data for examples below
-data(raschdat1)
-X<-raschdat1
+X <- raschdat1
 
 # generate 100 random matrices based on original data matrix
-rmat<-rsampler(X,rsctrl(burn_in=100, n_eff=100, seed=123))
+rmat <- rsampler(X, rsctrl(burn_in = 100, n_eff = 100, seed = 123))
 
 ## the following examples can also directly be used by setting
 ## rmat <- raschdat1
 ## without calling rsampler() first, e.g.,
-t1<-NPtest(raschdat1, n=100, method="T1")
+t1 <- NPtest(raschdat1, n = 100, method = "T1")
+
 
-### Examples:
+### Examples ###################################################################
 
-##---- T1 ------------------------------------------------------
-t1<-NPtest(rmat,method="T1")
+###--- T1 ----------------------------------------------------------------------
+t1 <- NPtest(rmat, method = "T1")
 # choose a different alpha for selecting displayed values
-print(t1,alpha=0.01)
+print(t1, alpha = 0.01)
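+
+###--- T1m, T1l -----------------------------------------------------------------
+# minimal sketches for the T1m (multidimensionality) and T1l (learning)
+# variants described in the Details section; both need no further options
+t1m <- NPtest(rmat, method = "T1m")
+t1m
+
+t1l <- NPtest(rmat, method = "T1l")
+t1l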
 
 
-##---- T2 ------------------------------------------------------
-t21<-NPtest(rmat,method="T2",idx=1:5) # default is variance
+###--- T2 ----------------------------------------------------------------------
+t21 <- NPtest(rmat, method = "T2", idx = 1:5, burn_in = 100, step = 20,
+              seed = 7654321, RSinfo = TRUE)
+# default stat is variance
 t21
 
-t22<-NPtest(rmat,method="T2",idx=c(1,22,5,27,6,9,11),stat="mad1")
+t22 <- NPtest(rmat, method = "T2", stat = "mad1",
+              idx = c(1, 22, 5, 27, 6, 9, 11))
 t22
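+
+###--- T2m, Tmd -----------------------------------------------------------------
+# sketches for the multidimensionality variants described in Details;
+# the subscale indices used here are for illustration only
+t2m <- NPtest(rmat, method = "T2m", idx = 1:5, stat = "var")
+t2m
+
+tmd <- NPtest(rmat, method = "Tmd", idx1 = c(1, 5, 7), idx2 = c(3, 4, 6))
+tmd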
 
-##---- T4 ------------------------------------------------------
-age<-sample(20:90, 100, replace=TRUE)
-# group must be a logical vector
-#   (value of TRUE is used for group selection)
-age<-age<30
-t41<-NPtest(rmat,method="T4",idx=1:3,group=age)
+
+###--- T4 ----------------------------------------------------------------------
+age <- sample(20:90, 100, replace = TRUE)
+# group MUST be a logical vector
+# (value of TRUE is used for group selection)
+age <- age < 30
+t41 <- NPtest(rmat, method = "T4", idx = 1:3, group = age)
 t41
 
-sex<-gl(2,50)
-# group can also be a logical expression  (generating a vector)
-t42<-NPtest(rmat,method="T4",idx=c(1,4,5,6),group=sex==1)
+sex <- gl(2, 50)
+# group can also be a logical expression (generating a vector)
+t42 <- NPtest(rmat, method = "T4", idx = c(1, 4, 5, 6), group = sex == 1)
 t42
 
-##---- T7, T7a --------------------------------------------------
-# simultaenous test for all items in subscale
-t7<-NPtest(rmat,method="T7",idx=1:3)
-t7
-
-# test for item-pairs
-t7a<-NPtest(rmat,method="T7a",idx=c(1,3,5)) # test for item-pairs
-t7a
 
-##---- T10 ------------------------------------------------------
-t101<-NPtest(rmat,method="T10")  # default split criterion is "median"
+###--- T10 ---------------------------------------------------------------------
+t101 <- NPtest(rmat, method = "T10")       # default split criterion is "median"
 t101
 
-split<-runif(100)
-t102<-NPtest(rmat,method="T10",splitcr=split>0.5)
+\dontrun{
+split <- runif(100)
+t102 <- NPtest(rmat, method = "T10", splitcr = split > 0.5)
 t102
 
-t103<-NPtest(rmat,method="T10",splitcr=sex)
-t103
+t103 <- NPtest(rmat, method = "T10", splitcr = sex)
+t103}
 
-##---- T11 ------------------------------------------------------
-t11<-NPtest(rmat,method="T11")
+
+###--- T11 ---------------------------------------------------------------------
+t11 <- NPtest(rmat, method = "T11")
 t11
 
-##---- Martin-Loef ----------------------------------------------
+
+###--- Tpbis -------------------------------------------------------------------
+tpb <- NPtest(X[, 1:5], method = "Tpbis", idxt = 1, idxs = 2:5)
+tpb
+
+
+###--- Martin-Löf --------------------------------------------------------------
 \dontrun{
 # takes a while ...
-data(raschdat1)
-split<-rep(1:3, each=10)
-NPtest(raschdat1, n=100, method="MLoef", splitcr=split)
-}
-
+split <- rep(1:3, each = 10)
+NPtest(raschdat1, n = 100, method = "MLoef", splitcr = split)}
 }
 \keyword{htest}
 \keyword{nonparametric}
diff --git a/man/PCM.Rd b/man/PCM.Rd
old mode 100755
new mode 100644
index abcce1a..7c59393
--- a/man/PCM.Rd
+++ b/man/PCM.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{PCM}
 \alias{PCM}
 %- Also NEED an '\alias' for EACH other topic documented here.
@@ -46,9 +47,9 @@ PCM(X, W, se = TRUE, sum0 = TRUE, etaStart)
 Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations,
 Recent Developments, and Applications. Springer.
 
-Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
+Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The \pkg{eRm} package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
 
-Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43.
+Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the \pkg{eRm} package in R. Psychology Science, 49, 26-43.
 }
 \author{Patrick Mair, Reinhold Hatzinger}
 %\note{}
@@ -57,7 +58,6 @@ Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch model
 \examples{
 
 ##PCM with 10 subjects, 3 items
-data(pcmdat)
 res <- PCM(pcmdat)
 res
 summary(res)                #eta and beta parameters with CI
diff --git a/man/RM.Rd b/man/RM.Rd
old mode 100755
new mode 100644
index 799b848..c165447
--- a/man/RM.Rd
+++ b/man/RM.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{RM}
 \alias{RM}
 %- Also NEED an '\alias' for EACH other topic documented here.
@@ -45,35 +46,30 @@ RM(X, W, se = TRUE, sum0 = TRUE, etaStart)
 Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations,
 Recent Developments, and Applications. Springer.
 
-Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
+Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The \pkg{eRm} package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
 
-Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43.
+Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the \pkg{eRm} package in R. Psychology Science, 49, 26-43.
 }
 \author{Patrick Mair, Reinhold Hatzinger}
 %\note{}
 \seealso{\code{\link{RSM}},\code{\link{PCM}}, \code{\link{LRtest}}, \code{\link{Waldtest}}
 }
 \examples{
-
 # Rasch model with beta.1 restricted to 0
-data(raschdat1)
 res <- RM(raschdat1, sum0 = FALSE)
-print(res)
+res
 summary(res)
 res$W                                       #generated design matrix
 
 # Rasch model with sum-0 beta restriction; no standard errors computed
 res <- RM(raschdat1, se = FALSE, sum0 = TRUE)
-print(res)
+res
 summary(res)
 res$W                                       #generated design matrix
 
 #Rasch model with missing values
-data(raschdat2)
 res <- RM(raschdat2)
-print(res)
+res
 summary(res)
-
 }
-
 \keyword{models}
diff --git a/man/RSM.Rd b/man/RSM.Rd
old mode 100755
new mode 100644
index fa71764..c0233bc
--- a/man/RSM.Rd
+++ b/man/RSM.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{RSM}
 \alias{RSM}
 %- Also NEED an '\alias' for EACH other topic documented here.
@@ -24,7 +25,7 @@ RSM(X, W, se = TRUE, sum0 = TRUE, etaStart)
   \code{vcov}, \code{summary}, \code{logLik}, \code{person.parameters}, \code{plotICC}, \code{LRtest}.
 }
 \value{
-  Returns an object of class \code{Rm, eRm} and contains the log-likelihood value,
+  Returns an object of class \code{'Rm'}, \code{'eRm'} containing the log-likelihood value,
   the parameter estimates and their standard errors.
 
   \item{loglik}{Conditional log-likelihood.}
@@ -45,9 +46,9 @@ RSM(X, W, se = TRUE, sum0 = TRUE, etaStart)
 Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations,
 Recent Developments, and Applications. Springer.
 
-Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
+Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The \pkg{eRm} package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
 
-Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43.
+Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the \pkg{eRm} package in R. Psychology Science, 49, 26-43.
 }
 \author{Patrick Mair, Reinhold Hatzinger}
 %\note{}
@@ -55,13 +56,10 @@ Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch model
 \seealso{\code{\link{RM}},\code{\link{PCM}},\code{\link{LRtest}}
 }
 \examples{
-
 ##RSM with 10 subjects, 3 items
-data(rsmdat)
 res <- RSM(rsmdat)
 res
 summary(res)                            #eta and beta parameters with CI
 thresholds(res)                         #threshold parameters
 }
-
 \keyword{models}
diff --git a/man/RSctr.Rd b/man/RSctr.Rd
new file mode 100644
index 0000000..3902896
--- /dev/null
+++ b/man/RSctr.Rd
@@ -0,0 +1,46 @@
+\encoding{UTF-8}
+\name{RSctr}
+\alias{RSctr}
+\title{Control Object}
+\description{
+  The object of class \code{RSctr} represents the control parameter
+  specification for the sampling function \code{\link{rsampler}}.
+}
+\value{
+   A legitimate \code{\link{RSctr}} object is a list with components
+  \item{burn_in}{
+      the number of matrices to be sampled to
+      come close to a stationary distribution.
+      }
+  \item{n_eff}{
+      the number of effective matrices, i.e.,
+      the number of matrices
+      to be generated by the sampling function \code{\link{rsampler}}.
+      }
+  \item{step}{
+      controls the number of void matrices generated in the burn-in
+      process and when effective matrices are generated (see the note
+      in \code{\link{rsctrl}}). }
+  \item{seed}{
+      is the seed for the random number generator.
+      If the value of \code{seed} equals zero, a seed is generated
+      by the sampling function \code{\link{rsampler}}
+      }
+  \item{tfixed}{
+      \code{TRUE} or \code{FALSE}. \code{tfixed = TRUE} has no effect
+      if the input matrix is not square,
+      i.e., all matrix elements are considered free (unrestricted).
+      If the input matrix is square and \code{tfixed = TRUE},
+      the main diagonal of the matrix is considered fixed.
+      }
+}
+\section{Generation}{
+      This object is returned from function
+      \code{rsctrl}.
+}
+\section{Methods}{
+      This class has a method for the generic \code{summary}
+      function.
+}
+\seealso{\code{\link{rsctrl}} }
+\keyword{misc}
diff --git a/man/RSmpl.Rd b/man/RSmpl.Rd
new file mode 100644
index 0000000..c28e7cf
--- /dev/null
+++ b/man/RSmpl.Rd
@@ -0,0 +1,47 @@
+\encoding{UTF-8}
+\name{RSmpl}
+\alias{RSmpl}
+\alias{RSmplext}
+\title{Sample Objects}
+\description{
+  The objects of class \code{RSmpl} and \code{RSmplext} contain
+  the original input matrix, the generated (encoded) random matrices, and
+  some information about the sampling process.
+}
+\value{
+   A list of class \code{RSmpl} or \code{RSmplext} with components
+  \item{n}{number of rows of the input matrix}
+  \item{k}{number of columns of the input matrix}
+  \item{inpmat}{the input matrix}
+  \item{tfixed}{\code{TRUE}, if diagonals of \code{inpmat} are fixed}
+  \item{burn_in}{length of the burn in process}
+  \item{n_eff}{number of generated matrices (effective matrices)}
+  \item{step}{controls the number of void matrices generated in the burn-in
+              process and when effective matrices are generated (see the note
+              in \code{\link{rsctrl}}). }
+  \item{seed}{starting value for the random number generator}
+  \item{n_tot}{number of matrices in \code{outvec}.}
+  \item{outvec}{vector of encoded random matrices}
+  \item{ier}{error code (see below)}
+}
+\note{By default, all generated matrices plus
+      the original matrix (in position 1) are contained in
+      \code{outvec}, thus \code{n_tot = n_eff + 1}. If
+      the original matrix is not in \code{outvec} then
+      \code{n_tot = n_eff}.\cr\cr
+      If \code{ier} is 0, no error was detected. Otherwise use
+      the error function \code{rserror(ier)} to obtain some information.\cr\cr
+      For saving and loading objects
+      of class \code{RSmpl} or \code{RSmplext}
+      see the example in \code{\link{rsextrobj}}.
+}
+\section{Generation}{
+      These classes of objects are returned from
+      \code{rsampler} and \code{rsextrobj}.
+}
+\section{Methods}{
+      Both classes have methods for the generic \code{summary}
+      function.
+}
+\seealso{\code{\link{rsampler}}, \code{\link{rsextrobj}} }
+\keyword{misc}
diff --git a/man/RaschSampler.Rd b/man/RaschSampler.Rd
new file mode 100644
index 0000000..1b9e1d6
--- /dev/null
+++ b/man/RaschSampler.Rd
@@ -0,0 +1,75 @@
+\encoding{UTF-8}
+\name{RaschSampler}
+\alias{RaschSampler}
+%\docType{package}
+\title{Rasch Sampler Package}
+\description{
+The package implements an \acronym{MCMC} algorithm for sampling
+binary matrices with fixed margins complying with the Rasch model.
+Its stationary distribution is uniform. The algorithm also allows
+for square matrices with fixed diagonal.\cr
+
+Parameter estimates in the Rasch model only depend on the marginal totals of
+the data matrix that is used for the estimation. From this it follows that, if the
+model is valid, all binary matrices with the same marginals as the observed one
+are equally likely. For any statistic of the data matrix, one can approximate
+the null distribution, i.e., the distribution if the Rasch model is valid, by taking
+a random sample from the collection of equally likely data matrices and constructing
+the observed distribution of the statistic.
+One can then simply determine the exceedence probability of the statistic in the
+observed sample, and thus construct a non-parametric test of the Rasch model.
+The main purpose of this package is the implementation of a methodology to build nonparametric
+tests for the Rasch model. \cr
+
+In the context of social network theories, the structure of binary asymmetric
+relations is studied: for example,
+person \eqn{a}{a} esteems person \eqn{b}{b}, which corresponds to a 1 in cell \eqn{(a, b)}{(a, b)}
+of the associated adjacency matrix. If one wants to study
+the distribution of a statistic defined on the adjacency matrix and conditional
+on the marginal totals, one has to exclude the diagonal cells from consideration, i.e.,
+by keeping the diagonal cells fixed at an arbitrary value. The \pkg{RaschSampler} package
+implements an appropriate option, thus it can also be used for sampling random adjacency
+matrices with given marginal totals.
+}
+\details{
+\tabular{ll}{%
+Package:\tab RaschSampler\cr
+Type:   \tab Package\cr
+Version:\tab 0.8-6\cr
+Date:   \tab 2012-07-03\cr
+License:\tab GNU GPL 2, June 1991\cr%
+}
+The user has to supply a binary input matrix. After defining appropriate control
+parameters using \code{\link{rsctrl}} the sampling function \code{\link{rsampler}}
+may be called to obtain an object of class \code{\link{RSmpl}} which contains the
+generated random matrices in encoded form. After defining an appropriate function
+to operate on a binary matrix (e.g., calculate a statistic such as \code{\link{phi.range}})
+the application of this function to the sampled matrices is performed
+using \code{\link{rstats}}. Prior to applying the user defined function, \code{\link{rstats}}
+decodes the matrices packed in the \code{\link{RSmpl}}-object.\cr
+
+The package also defines a utility function \code{\link{rsextrobj}} for extracting certain parts from
+the \code{\link{RSmpl}}-object resulting in an object of class \code{\link{RSmplext}}.
+Both types of objects can be saved and reloaded for later use.\cr
+
+Summary methods are available to print information on these objects, as well as
+on the control object \code{\link{RSctr}} which is obtained from using
+\code{\link{rsctrl}} containing the specification for the sampling routine.
+
+}
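+\examples{
+## a minimal sketch of the workflow described in Details; the control values
+## follow the defaults mentioned above, and seed = 0 lets rsampler choose one
+ctr  <- rsctrl(burn_in = 256, n_eff = 100, step = 32, seed = 0)
+mats <- rsampler(raschdat1, ctr)
+summary(mats)
+## apply a user-defined matrix statistic (here: phi.range) to the samples
+rstats(mats, phi.range)
+}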
+\author{Reinhold Hatzinger, Patrick Mair, Norman D. Verhelst}
+\references{
+Verhelst, N. D. (2008). An efficient \acronym{MCMC} algorithm to sample binary matrices with fixed marginals. \emph{Psychometrika, 73}(4), 705--728.\cr
+Verhelst, N., Hatzinger, R., & Mair, P. (2007). The Rasch sampler. \emph{Journal of Statistical Software, 20}(4), 1--14.
+}
+\note{The current implementation allows for data matrices up to 4096 rows and 128 columns.
+      This can be changed by setting \code{nmax} and \code{kmax} in \code{RaschSampler.f90}
+      to values which are a power of 2. These values should also be changed in \code{rserror.R}.
+
+      For convenience, we reuse the Fortran code of package version 0.8-1 which circumvents the
+      compiler bug in Linux distributions of GCC 4.3. The following note from package version 0.8-3
+      is thus obsolete:
+      In case of compilation errors (due to a bug in Linux distributions of GCC 4.3) please use
+      \code{RaschSampler.f90} from package version 0.8-1 and change \code{nmax} and \code{kmax}
+      accordingly (or use GCC 4.4).}
+\keyword{package}
diff --git a/man/SepRel.Rd b/man/SepRel.Rd
new file mode 100644
index 0000000..eb85378
--- /dev/null
+++ b/man/SepRel.Rd
@@ -0,0 +1,78 @@
+\encoding{UTF-8}
+\name{Separation Reliability}
+\alias{SepRel}
+\alias{print.eRm_SepRel}
+\alias{summary.eRm_SepRel}
+%
+%
+%
+\title{Person Separation Reliability}
+%
+\description{%
+  This function calculates the proportion of person variance that is not due to error.
+  The concept of person separation reliability is very similar to reliability indices such as Cronbach's \eqn{\alpha}.
+}
+%
+%
+%
+\usage{SepRel(pobject)
+
+\method{print}{eRm_SepRel}(x, \dots)
+
+\method{summary}{eRm_SepRel}(object, \dots)}
+%
+\arguments{
+  \item{pobject}{Object of class \code{ppar} (see \code{\link{person.parameter}}).}
+  \item{x}{Object of class \code{eRm_SepRel}.}
+  \item{object}{Object of class \code{eRm_SepRel}.}
+  \item{\dots}{Further arguments.}
+}
+%
+%
+%
+\details{
+  Returns the person separation reliability \eqn{\frac{\mathrm{SSD}-\mathrm{MSE}}{\mathrm{SSD}}}{(SSD-MSE)/SSD} where SSD is the squared standard deviation and MSE the mean squared error.
+  \subsection{Caveats}{%
+    Please note that the concept of \emph{reliability} and associated problems are fundamentally different between \acronym{IRT} and \acronym{CTT} (Classical Test Theory).
+    Separation reliability is more like a workaround to make the \dQuote{change} from \acronym{CTT} to \acronym{IRT} easier for users by providing something \dQuote{familiar.}
+    Hence, we recommend not to put too much emphasis on this particular measure and use it with caution.
+  }
+  \subsection{Varying results in different programs}{%
+    If you compare the separation reliability obtained using \pkg{eRm} with values by other software, you will find that they are most likely not equal.
+    This has a couple of reasons, one of the most important is the employed estimation method.
+    
+    \pkg{eRm} uses a conditional maximum likelihood (\acronym{CML}) framework and handles missing values as separate groups during the estimation of item parameters.
+    Person parameters are computed in a second step using unconditional or joint maximum likelihood (\acronym{UML} or \acronym{JML}) estimation with item parameters assumed to be known from the first step.
+    Other programs might do \acronym{JML} to estimate item and person parameters at the same time, or employ marginal maximum likelihood (\acronym{MML}) to estimate item parameters, assuming a certain distribution for person parameters.
+    In the latter case, person parameters might be obtained by various methods like \acronym{EAP}, \acronym{MAP}, \ldots.
+    Even \acronym{CML}-based programs yield different values, for example, if they use Warm's weighted maximum likelihood estimation (\acronym{WLE}) to compute person parameters in the second step.
+    
+    The bottom line is that, since there is no \dQuote{definitive} solution to this problem, you will end up with different values under different circumstances.
+    This is another reason to take results and implications with a grain of salt.
+  }
+}
+%
+%
+%
+\value{\code{SepRel} returns a list object of class \code{eRm_SepRel} containing:
+  \item{sep.rel}{the person separation reliability,}
+  \item{SSD.PS}{the squared standard deviation (i.e., total person variability),}
+  \item{MSE}{the mean square measurement error (i.e., model error variance).}
+}
+%
+%
+%
+\references{%
+  Wright, B.D., and Stone, M.H. (1999). \emph{Measurement essentials.} Wide Range Inc., Wilmington. (\url{http://www.rasch.org/measess/me-all.pdf} 28Mb).%
+}
+\author{Original code by Adrian Brügger (\email{Adrian.Bruegger@imu.unibe.ch}), adapted by Marco J. Maier}
+%\note{}
+%
+%
+%
+\examples{# Compute Separation Reliability for a Rasch Model:
+pers <- person.parameter(RM(raschdat1))
+res <- SepRel(pers)
+res
+summary(res)
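+
+# the returned components allow a hand check of the formula from Details
+# (a sketch; component names as listed in the Value section)
+(res$SSD.PS - res$MSE) / res$SSD.PS    # equals res$sep.rel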
+}
diff --git a/man/Waldtest.Rd b/man/Waldtest.Rd
old mode 100755
new mode 100644
index 562498c..6af8291
--- a/man/Waldtest.Rd
+++ b/man/Waldtest.Rd
@@ -1,64 +1,56 @@
-\name{Waldtest}
-\alias{Waldtest}
-\alias{Waldtest.Rm}
-\alias{print.wald}
-%- Also NEED an '\alias' for EACH other topic documented here.
-\title{Item-Specific Wald Test}
-\description{Performs a Wald test on item-level by splitting subjects into subgroups.
-}
-\usage{
-\method{Waldtest}{Rm}(object, splitcr = "median")
-\method{print}{wald}(x,...)
-}
-%- maybe also 'usage' for other objects documented here.
-\arguments{
-  \item{object}{Object of class \code{RM}.}
-  \item{splitcr}{Split criterion for subject raw score splitting. \code{median}
-  uses the median as split criterion, \code{mean} performs a mean-split.
-  Optionally \code{splitcr} can also be a dichotomous vector which assigns each person to a
-  certain subgroup (e.g., following an external criterion). This vector can be numeric, character or a factor. }
-  \item{x}{Object of class \code{wald}.}
-  \item{...}{Further arguments passed to or from other methods. They are ignored in this function.}
-}
-\details{Items are eliminated if they not have the same number of categories in each subgroup.
-To avoid this problem, for RSM and PCM it is considered to use a random or another user-defined split.
-If the data set contains missing values and \code{mean} or \code{median} is specified as splitcriterion,
-means or medians are calculated for each missing value subgroup and consequently used for raw score splitting.}
-\value{
-Returns an object of class \code{wald} containing:
-  \item{coef.table}{Data frame with test statistics, z- and p-values.}
-  \item{betapar1}{Beta parameters for first subgroup}
-  \item{se.beta1}{Standard errors for first subgroup}
-  \item{betapar2}{Beta parameters for second subgroup}
-  \item{se.beta2}{Standard errors for second subgroup}
-  \item{se.beta2}{Standard errors for second subgroup}
-  \item{spl.gr}{Names and levels for \code{splitcr}.}
-  \item{call}{The matched call.}
-}
-\references{
-Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations,
-Recent Developements, and Applications. Springer.
-
-Fischer, G. H., and Scheiblechner, H. (1970). Algorithmen und Programme fuer das
-probabilistische Testmodell von Rasch [Algorithms and programs for Rasch's
-probabilistic test model]. Psychologische Beitraege, 12, 23-51.
-}
-\author{Patrick Mair, Reinhold Hatzinger}
-%\note{}
-\seealso{\code{\link{LRtest}}}
-\examples{
-
-#Wald test for Rasch model with user-defined subject split
-data(raschdat2)
-res <- RM(raschdat2)
-splitvec <- sample(1:2,25,replace=TRUE)
-Waldtest(res, splitcr = splitvec)
-
-#Wald test for RSM eliminates 4 items (with median split)
-data(rsmdat)
-res <- RSM(rsmdat)
-Waldtest(res)
-
-}
-
-\keyword{models}
+\encoding{UTF-8}
+\name{Waldtest}
+\alias{Waldtest}
+\alias{Waldtest.Rm}
+\alias{print.wald}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Item-Specific Wald Test}
+\description{Performs a Wald test on item-level by splitting subjects into subgroups.
+}
+\usage{
+\method{Waldtest}{Rm}(object, splitcr = "median")
+\method{print}{wald}(x,...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{object}{Object of class \code{RM}.}
+  \item{splitcr}{Split criterion for subject raw score splitting. \code{median}
+  uses the median as split criterion, \code{mean} performs a mean-split.
+  Optionally \code{splitcr} can also be a dichotomous vector which assigns each person to a
+  certain subgroup (e.g., following an external criterion). This vector can be numeric, character or a factor. }
+  \item{x}{Object of class \code{wald}.}
+  \item{...}{Further arguments passed to or from other methods. They are ignored in this function.}
+}
+\details{Items are eliminated if they do not have the same number of categories in each subgroup.
+To avoid this problem, for RSM and PCM one should consider using a random or another user-defined split.
+If the data set contains missing values and \code{mean} or \code{median} is specified as split criterion,
+means or medians are calculated for each missing value subgroup and consequently used for raw score splitting.}
+\value{
+Returns an object of class \code{wald} containing:
+  \item{coef.table}{Data frame with test statistics, z- and p-values.}
+  \item{betapar1}{Beta parameters for first subgroup}
+  \item{se.beta1}{Standard errors for first subgroup}
+  \item{betapar2}{Beta parameters for second subgroup}
+  \item{se.beta2}{Standard errors for second subgroup}
+  \item{spl.gr}{Names and levels for \code{splitcr}.}
+  \item{call}{The matched call.}
+}
+\references{
+Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations,
+Recent Developments, and Applications. Springer.
+
+Fischer, G. H., and Scheiblechner, H. (1970). Algorithmen und Programme für das
+probabilistische Testmodell von Rasch [Algorithms and programs for Rasch's
+probabilistic test model]. Psychologische Beiträge, 12, 23-51.
+}
+\author{Patrick Mair, Reinhold Hatzinger}
+%\note{}
+\seealso{\code{\link{LRtest}}, \code{\link{MLoef}}}
+\examples{
+#Wald test for Rasch model with user-defined subject split
+res <- RM(raschdat2)
+splitvec <- sample(1:2,25,replace=TRUE)
+Waldtest(res, splitcr = splitvec)
+}
+\keyword{models}
diff --git a/man/anova.eRm.Rd b/man/anova.eRm.Rd
new file mode 100644
index 0000000..b97a283
--- /dev/null
+++ b/man/anova.eRm.Rd
@@ -0,0 +1,53 @@
+\encoding{UTF-8}
+\name{Analysis of Deviances}
+\alias{anova.eRm}
+\alias{print.eRm_anova}
+\title{Analysis of Deviances for Rasch Models}
+\description{Performs likelihood ratio tests against the model with the largest number of parameters.}
+\usage{\method{anova}{eRm}(object, \dots)
+
+\method{print}{eRm_anova}(x, \dots)}
+\arguments{
+  \item{object}{
+    The first model object; further models to be tested against it follow, separated by commas.
+  }
+  \item{x}{
+    An object of class \code{"eRm_anova"}.
+  }
+  \item{\dots}{
+    Further models to test with \code{anova.eRm()}.
+  }
+}
+\details{
+The \code{anova} method is quite flexible and, as long as the data used are identical, all models except the \acronym{LLRA} can be tested against each other.
+Regardless of the order in which models are specified, they will always be sorted by the number of parameters in decreasing order.
+If \eqn{\geq3}{>= 3} models are passed to the method, all models will be tested against the first model (i.e., the one with the largest number of parameters).
+}
+\value{
+  \code{anova.eRm} returns a list object of class \code{eRm_anova} containing:
+    \item{calls}{function calls of the different models (character).}
+    \item{statistics}{the analysis of deviances table (columns are \code{LLs}: conditional log-likelihoods, \code{dev}: deviances, \code{npar}: number of parameters, \code{LR}: likelihood ratio statistics, \code{df}: degrees of freedom, \code{p}: \eqn{p}{p}-values).}
+}
+\section{Warning}{Although there is a check for identical data matrices, the models have to be nested for the likelihood ratio test to work.
+You have to ensure that this is the case; otherwise, results will be invalid.
+
+\acronym{LLRA}s cannot be tested with other models (\acronym{RM}, \acronym{LLTM}, \acronym{RSM}, \ldots); for more information see \code{\link{anova.llra}}.}
+%\references{xxx}
+\author{Marco J. Maier}
+\seealso{\code{\link{anova.llra}}, \code{\link{anova}}}
+\examples{
+### dichotomous data
+dmod1 <- RM(lltmdat1)
+dmod2 <- LLTM(lltmdat1, mpoints = 2)
+anova(dmod1, dmod2)
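+# hand check of the LR statistic (a sketch): twice the difference of the
+# conditional log-likelihoods ($loglik) of the two fitted models
+2 * (dmod1$loglik - dmod2$loglik)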
+
+### polytomous data
+pmod1 <- RSM(rsmdat)
+pmod2 <- PCM(rsmdat)
+anova(pmod1, pmod2)
+
+W <- cbind(rep(c(1,0), each=9), rep(c(0,1), each=9))
+W
+pmod3 <- LPCM(rsmdat, W)
+anova(pmod3, pmod1, pmod2) # note that models are sorted by npar
+}
diff --git a/man/anova.llra.Rd b/man/anova.llra.Rd
old mode 100755
new mode 100644
index cf039c4..04ed27d
--- a/man/anova.llra.Rd
+++ b/man/anova.llra.Rd
@@ -1,17 +1,14 @@
+\encoding{UTF-8}
 \name{anova.llra}
 \alias{anova.llra}
-\alias{anova.llra.default}
 
-
-\title{Analysis of Deviance for Linear Logistic Models with Relaxed Assumptions
-}
-\description{Compute an analysis of deviance table for one or more LLRA.
-}
+\title{Analysis of Deviance for Linear Logistic Models with Relaxed Assumptions}
+\description{Compute an analysis of deviance table for one or more LLRA.}
 \usage{
 \method{anova}{llra}(object, ...)
 }
 \arguments{
-  \item{object, ... }{objects of class "llra", typically the result of a
+  \item{object, ... }{Objects of class "llra", typically the result of a
     call to \code{\link{LLRA}}.
     }
 }
@@ -30,9 +27,7 @@ deviance to the df for each row based on the asymptotic Chi^2-Distribution of th
 \value{
 An object of class \code{"anova"} inheriting from class \code{"data.frame"}.
 }
-\author{
-Thomas Rusch
-}
+\author{Thomas Rusch}
 \section{Warning:}{
 The comparison between two or more models by \code{anova} will only be valid
 if they are fitted to the same dataset and if the models are nested. The
@@ -41,25 +36,21 @@ function does not check if that is the case.
 \seealso{
 The model fitting function \code{\link{LLRA}}.
 }
-\examples{
-    \dontrun{
-    ##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4
-    ##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6
-    ##categories respectively.    
-    data("llraDat2")
+\examples{\dontrun{
+##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4
+##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6
+##categories respectively.
 
-    #fit LLRA
-    ex2 <- LLRA(llraDat2[,1:20],mpoints=4,groups=llraDat2[,21])
+#fit LLRA
+ex2 <- LLRA(llraDat2[,1:20],mpoints=4,groups=llraDat2[,21])
 
-    #Imposing a linear trend for items 2 and 3 using collapse_W 
-    collItems2 <- list(c(32,37,42),c(33,38,43))
-    newNames2 <- c("trend.I2","trend.I3")
-    Wnew <- collapse_W(ex2$W,collItems2,newNames2)
+#Imposing a linear trend for items 2 and 3 using collapse_W 
+collItems2 <- list(c(32,37,42),c(33,38,43))
+newNames2 <- c("trend.I2","trend.I3")
+Wnew <- collapse_W(ex2$W,collItems2,newNames2)
 
-    #Estimating LLRA with the linear trend for item 2 and 3
-    ex2new <- LLRA(llraDat2[1:20],W=Wnew,mpoints=4,groups=llraDat2[21])
+#Estimating LLRA with the linear trend for item 2 and 3
+ex2new <- LLRA(llraDat2[1:20],W=Wnew,mpoints=4,groups=llraDat2[21])
 
-    #comparing models with likelihood ratio test
-    anova(ex2,ex2new)
-}
-}
+#comparing models with likelihood ratio test
+anova(ex2,ex2new)}}
diff --git a/man/build_W.Rd b/man/build_W.Rd
old mode 100755
new mode 100644
index 120f96e..bbeac60
--- a/man/build_W.Rd
+++ b/man/build_W.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{build_W}
 \alias{build_W}
 \alias{build_catdes}
@@ -62,9 +63,7 @@ Hatzinger, R. and Rusch, T. (2009) IRT models with relaxed assumptions
 in eRm: A manual-like instruction. \emph{Psychology Science Quarterly},
   \bold{51}, pp. 87--120, \url{http://erm.r-forge.r-project.org/psq_1_2009_06_87-120.pdf}
 }
-\author{
-Thomas Rusch
-}
+\author{Thomas Rusch}
 \section{Warning }{A warning is printed that the first two categories
   for polytomous items are equated.} 
 
@@ -72,19 +71,17 @@ Thomas Rusch
 This function is used for automatic generation of the design matrix in \code{\link{LLRA}}.
 }
 \examples{
-    ##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4
-    ##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6
-    ##categories respectively.
-    data("llraDat2")
-    llraDat2a <- matrix(unlist(llraDat2[1:20]),ncol=4)
-    groupvec <-rep(1:3*5,each=20)
-    W <- build_W(llraDat2a,nitems=5,mpoints=4,grp_n=c(10,20,40),groupvec=groupvec,itmgrps=1:5)
+##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4
+##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6
+##categories respectively.
+llraDat2a <- matrix(unlist(llraDat2[1:20]),ncol=4)
+groupvec <- rep(1:3*5, each=20)
+W <- build_W(llraDat2a,nitems=5,mpoints=4,grp_n=c(10,20,40),groupvec=groupvec,itmgrps=1:5)
 
-    #There are 55 parameters
-    dim(W)
+#There are 55 parameters
+dim(W)
 
-    #Estimating LLRA by specifiying W
-    \dontrun{
-    ex2W <- LLRA(llraDat2[1:20],W=W,mpoints=4,groups=llraDat2[21])
-    }
+\dontrun{
+#Estimating LLRA by specifying W
+ex2W <- LLRA(llraDat2[1:20],W=W,mpoints=4,groups=llraDat2[21])}
 }
\ No newline at end of file
diff --git a/man/collapse_W.Rd b/man/collapse_W.Rd
old mode 100755
new mode 100644
index 9106779..251686a
--- a/man/collapse_W.Rd
+++ b/man/collapse_W.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{collapse_W}
 \alias{collapse_W}
 
@@ -45,32 +46,27 @@ Hatzinger, R. and Rusch, T. (2009) IRT models with relaxed assumptions
 in eRm: A manual-like instruction. \emph{Psychology Science Quarterly},
   \bold{51}, pp. 87--120, \url{http://erm.r-forge.r-project.org/psq_1_2009_06_87-120.pdf}
 }
-\author{
-Thomas Rusch
-}
+\author{Thomas Rusch}
 \seealso{
 The function to build design matrices from scratch, \code{\link{build_W}}.
 }
 \examples{
-    ##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4
-    ##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6
-    ##categories respectively.    
-    data("llraDat2")
-    llraDat2a <- matrix(unlist(llraDat2[1:20]),ncol=4)
-    groupvec <-rep(1:3*5,each=20)
-    W <- build_W(llraDat2a, nitems=5, mpoints=4, grp_n=c(10,20,40), groupvec=groupvec,
-           itmgrps=1:5)
-   
-    #There are 55 parameters to be estimated
-    dim(W)
+##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4
+##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6
+##categories respectively.    
+llraDat2a <- matrix(unlist(llraDat2[1:20]),ncol=4)
+groupvec <- rep(1:3*5, each=20)
+W <- build_W(llraDat2a, nitems=5, mpoints=4, grp_n=c(10,20,40), groupvec=groupvec, itmgrps=1:5)
+
+#There are 55 parameters to be estimated
+dim(W)
 
-    #Imposing a linear trend for the second item ,i.e. parameters in
-    #columns 32, 37  and 42 need to be
-    #collapsed into a single column. 
-    collItems1 <- list(c(32,37,42))
-    newNames1 <- c("trend.I2")
-    Wstar1 <- collapse_W(W,collItems1)
+#Imposing a linear trend for the second item, i.e., parameters in
+#columns 32, 37 and 42 need to be collapsed into a single column.
+collItems1 <- list(c(32,37,42))
+newNames1 <- c("trend.I2")
+Wstar1 <- collapse_W(W,collItems1)
 
-    #53 parameters need to be estimated
-    dim(Wstar1)
+#53 parameters need to be estimated
+dim(Wstar1)
 }
\ No newline at end of file
diff --git a/man/eRm-package.Rd b/man/eRm-package.Rd
old mode 100755
new mode 100644
index 85d6fbf..f64dd51
--- a/man/eRm-package.Rd
+++ b/man/eRm-package.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{eRm-package}
 \alias{eRm-package}
 \alias{eRm}
@@ -13,16 +14,17 @@ and its linear extension (LPCM). The parameters are estimated by conditional max
 likelihood (CML). Missing values are allowed in the data matrix. Additional features
 are the estimation of the person parameters, LR-Model test, item-specific Wald test,
 Martin-Loef test, nonparametric Monte-Carlo tests,
-itemfit and personfit statistics, various ICC plots. An eRm platform is provided at
-http://r-forge.r-project.org/projects/erm/.
+itemfit and personfit statistics, various ICC plots.
+
+An \pkg{eRm} platform is provided at \url{http://r-forge.r-project.org/projects/erm/}.
 }
-\details{
-\tabular{ll}{
-Package: \tab eRm\cr
-Type: \tab Package\cr
-Version: \tab 0.14-0\cr
-Date: \tab 2011-06-05\cr
-License: \tab GPL\cr
+\details{%
+\tabular{ll}{%
+Package: \tab{}eRm\cr%
+Type:    \tab{}Package\cr%
+Version: \tab{}\Sexpr[stage=build]{packageDescription("eRm")$Version}\cr%
+Date:    \tab{}\Sexpr[stage=build]{packageDescription("eRm")$Date}\cr%
+License: \tab{}\Sexpr[stage=build]{packageDescription("eRm")$License}\cr%
 }
 The basic input units for the functions are the person-item matrix X and the design matrix W.
 Missing values in X are coded with \code{NA}.
@@ -51,8 +53,9 @@ and \code{LLRA} are objects of class \code{eRm}. For a detailled overview of all
 classes defined in the package and the functions depending on them see the package's vignette.
 
 We acknowledge Julian Gilbey for writing the \code{plotPWmap} function, Kathrin Gruber
-for the function \code{plotDIF}, and Thomas Rusch for \code{LLRA} and related utilities.
-The \code{eRm} package contains functions from the packages \code{sna}, \code{gtools} and \code{ROCR}.
+for the function \code{plotDIF}, and Thomas Rusch for \code{LLRA}, related utilities and
+functionality to calculate and plot item and test information.
+The \pkg{eRm} package contains functions from the packages \pkg{sna}, \pkg{gtools} and \pkg{ROCR}.
 Thanks to Carter T. Butts, Gregory R. Warnes, and Tobias Sing et al.
 }
 \note{The fitting engine by default is \code{\link{nlm}} unless changed to \code{\link{optim}}.
@@ -60,19 +63,18 @@ Thanks to Carter T. Butts, Gregory R. Warnes, and Tobias Sing et al.
       \code{fitctrl <- "nlm"} or \code{fitctrl <- "optim"}.}
 
 
-\author{Patrick Mair, Reinhold Hatzinger, Marco Maier, and others
+\author{Patrick Mair, Reinhold Hatzinger, Marco J. Maier, and others.
 
-Maintainer: Patrick Mair <patrick.mair at wu.ac.at>
+Maintainer: Patrick Mair (\email{mair at fas.harvard.edu})%
 }
 \references{
 Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations,
 Recent Developments, and Applications. Springer.
 
-Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for
+Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The \pkg{eRm} package for
 the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
 
 Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models
-with the eRm package in R. Psychology Science, 49, 26-43.
+with the \pkg{eRm} package in R. Psychology Science, 49, 26-43.
 }
 \keyword{models}
-
diff --git a/man/eRm.data.Rd b/man/eRm.data.Rd
new file mode 100644
index 0000000..43b4265
--- /dev/null
+++ b/man/eRm.data.Rd
@@ -0,0 +1,43 @@
+\encoding{UTF-8}
+\name{eRm.data}
+\alias{eRm.data}
+\alias{raschdat1}
+\alias{raschdat1_RM_fitted}
+\alias{raschdat1_RM_plotDIF}
+\alias{raschdat1_RM_lrres2}
+\alias{raschdat2}
+\alias{raschdat3}
+\alias{raschdat4}
+\alias{lltmdat1}
+\alias{lltmdat2}
+\alias{pcmdat}
+\alias{pcmdat2}
+\alias{lpcmdat}
+\alias{rsmdat}
+\alias{lrsmdat}
+\docType{data}
+\title{Data for Computing Extended Rasch Models}
+\description{Artificial data sets for computing extended Rasch models.}
+\usage{raschdat1
+raschdat2
+raschdat3
+raschdat4
+
+lltmdat1
+lltmdat2
+
+rsmdat
+
+lrsmdat
+
+pcmdat
+pcmdat2
+
+lpcmdat
+
+raschdat1_RM_fitted
+raschdat1_RM_plotDIF
+raschdat1_RM_lrres2}
+\format{Numeric matrices with subjects as rows, items as columns, missing values as \code{NA}.}
+\details{\code{raschdat1_RM_fitted} is the resulting object of \code{RM(raschdat1)} and is used in examples to reduce computation time. For the generation of \code{raschdat1_RM_plotDIF} see the \code{\dontrun{}} example code of \code{\link{plotDIF}}. \code{raschdat1_RM_lrres2} results from \code{LRtest(RM(raschdat1), split = "mean")}.}
+\keyword{datasets}
diff --git a/man/gofIRT.Rd b/man/gofIRT.Rd
old mode 100755
new mode 100644
index 624f7a0..6dcfbf2
--- a/man/gofIRT.Rd
+++ b/man/gofIRT.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{gofIRT}
 \alias{gofIRT}
 \alias{gofIRT.ppar}
@@ -47,14 +48,11 @@ Mair, P., Reise, S. P., and Bentler, P. M. (2008). IRT goodness-of-fit using app
     \code{\link{itemfit.ppar}},\code{\link{personfit.ppar}},\code{\link{LRtest}}
 }
 \examples{
-
 #Goodness-of-fit for a Rasch model
-data(raschdat1)
 res <- RM(raschdat1)
 pres <- person.parameter(res)
 gof.res <- gofIRT(pres)
 gof.res
 summary(gof.res)
-
 }
 \keyword{models}
diff --git a/man/item_info.Rd b/man/item_info.Rd
new file mode 100644
index 0000000..66698db
--- /dev/null
+++ b/man/item_info.Rd
@@ -0,0 +1,51 @@
+\encoding{UTF-8}
+\name{item_info}
+\alias{item_info}
+\alias{i_info}
+
+\title{Calculate Item Information for 'eRm' objects
+}
+\description{
+Calculates Samejima's (1969) information for all items
+}
+\usage{
+item_info(ermobject, theta = seq(-5, 5, 0.01))
+
+i_info(hvec, itembeta, theta)
+}
+\arguments{
+  \item{ermobject}{An object of class 'eRm'.
+  } 
+  \item{theta}{Supporting or sampling points on the latent trait.
+  }
+  \item{hvec}{Number of categories of a single item.
+  }
+  \item{itembeta}{Cumulative item category parameters for a single item.
+  }
+}
+\details{The function \code{item_info} calculates information of the
+  whole set of items in the 'eRm' object. The function \code{i_info}
+  does the same for a single item (and is called by \code{item_info}).  
+}
+\value{
+  Returns a list (\code{i_info}) or a list of lists (where each element
+  corresponds to an item, \code{item_info}) and contains
+   \item{c.info}{Matrix of category information in columns for the
+    different theta values in rows.}
+   \item{i.info}{Vector of item information for the
+    different theta values.}
+}
+\references{
+Samejima, F. (1969) Estimation of latent ability using a response
+pattern of graded scores. \emph{Psychometric Monographs}, \bold{17}.  
+}
+\author{Thomas Rusch} 
+\seealso{
+The function to calculate the test information, \code{\link{test_info}}
+and the plot function \code{\link{plotINFO}}.
+}
+\examples{
+res <- PCM(pcmdat)
+info <- item_info(res)
+plotINFO(res,type="item")
+}
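
To make the documented return structure concrete, a short sketch using pcmdat; it assumes the list layout described under \value above (one element per item, each holding c.info and i.info):

res  <- PCM(pcmdat)
info <- item_info(res, theta = seq(-4, 4, length.out = 9))
info[[1]]$i.info   # item information of item 1 at the nine theta values
info[[1]]$c.info   # category information, one column per category
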
diff --git a/man/itemfit.ppar.Rd b/man/itemfit.ppar.Rd
old mode 100755
new mode 100644
index 0e20621..2331dda
--- a/man/itemfit.ppar.Rd
+++ b/man/itemfit.ppar.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{itemfit.ppar}
 \alias{itemfit.ppar}
 \alias{itemfit}
@@ -66,7 +67,6 @@ Rasch Measurement Transactions, 1990, 3:4 p.84-5
 \examples{
 
 # Rasch model, estimation of item and person parameters
-data(raschdat2)
 res <- RM(raschdat2)
 p.res <- person.parameter(res)
 
diff --git a/man/llra.datprep.Rd b/man/llra.datprep.Rd
old mode 100755
new mode 100644
index 27714b0..641df8d
--- a/man/llra.datprep.Rd
+++ b/man/llra.datprep.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{llra.datprep}
 \alias{llra.datprep}
 
@@ -43,9 +44,7 @@ Returns a list with the components
 \item{assign.vec}{The assignment vector.}
 \item{grp_n}{A vector of the number of subjects in each group.}
 }
-\author{
-Reinhold Hatzinger
-}
+\author{Reinhold Hatzinger}
 \seealso{
 The function that uses this is \code{\link{LLRA}}. The values from
   \code{llra.datprep} can be passed to \code{\link{build_W}}.
diff --git a/man/llraDat1.Rd b/man/llraDat1.Rd
old mode 100755
new mode 100644
index 1ae4167..1a16809
--- a/man/llraDat1.Rd
+++ b/man/llraDat1.Rd
@@ -1,42 +1,40 @@
+\encoding{UTF-8}
 \name{llraDat1}
 \alias{llraDat1}
 \docType{data}
-\title{An Artifical LLRA Data Set
-}
-\description{
-Artificial data set of 5 items, 5 time points and 5 groups for LLRA.
-}
-\usage{data(llraDat1)}
+\title{An Artificial LLRA Data Set}
+\description{Artificial data set of 5 items, 5 time points and 5 groups for LLRA.}
+\usage{llraDat1}
 \format{
   A data frame with 150 observations of 26 variables.
- \itemize{
- \item{t1.I1}{ Answers to item 1 at time point 1}
- \item{t1.I2}{ Answers to item 2 at time point 1}
- \item{t1.I3}{ Answers to item 3 at time point 1}
- \item{t1.I4}{ Answers to item 4 at time point 1}
- \item{t1.I5}{ Answers to item 5 at time point 1}
- \item{t2.I1}{ Answers to item 1 at time point 2}
- \item{t2.I2}{ Answers to item 2 at time point 2}
- \item{t2.I3}{ Answers to item 3 at time point 2}
- \item{t2.I4}{ Answers to item 4 at time point 2}
- \item{t2.I5}{ Answers to item 5 at time point 2}
- \item{t3.I1}{ Answers to item 1 at time point 3}
- \item{t3.I2}{ Answers to item 2 at time point 3}
- \item{t3.I3}{ Answers to item 3 at time point 3}
- \item{t3.I4}{ Answers to item 4 at time point 3}
- \item{t3.I5}{ Answers to item 5 at time point 3}
- \item{t4.I1}{ Answers to item 1 at time point 4}
- \item{t4.I2}{ Answers to item 2 at time point 4}
- \item{t4.I3}{ Answers to item 3 at time point 4}
- \item{t4.I4}{ Answers to item 4 at time point 4}
- \item{t4.I5}{ Answers to item 5 at time point 4}
- \item{t5.I1}{ Answers to item 1 at time point 5}
- \item{t5.I2}{ Answers to item 2 at time point 5}
- \item{t5.I3}{ Answers to item 3 at time point 5}
- \item{t5.I4}{ Answers to item 4 at time point 5}
- \item{t5.I5}{ Answers to item 5 at time point 5}
- \item{groups}{ The group membership}
-}
+  \describe{
+    \item{\code{t1.I1}}{Answers to item 1 at time point 1}
+    \item{\code{t1.I2}}{Answers to item 2 at time point 1}
+    \item{\code{t1.I3}}{Answers to item 3 at time point 1}
+    \item{\code{t1.I4}}{Answers to item 4 at time point 1}
+    \item{\code{t1.I5}}{Answers to item 5 at time point 1}
+    \item{\code{t2.I1}}{Answers to item 1 at time point 2}
+    \item{\code{t2.I2}}{Answers to item 2 at time point 2}
+    \item{\code{t2.I3}}{Answers to item 3 at time point 2}
+    \item{\code{t2.I4}}{Answers to item 4 at time point 2}
+    \item{\code{t2.I5}}{Answers to item 5 at time point 2}
+    \item{\code{t3.I1}}{Answers to item 1 at time point 3}
+    \item{\code{t3.I2}}{Answers to item 2 at time point 3}
+    \item{\code{t3.I3}}{Answers to item 3 at time point 3}
+    \item{\code{t3.I4}}{Answers to item 4 at time point 3}
+    \item{\code{t3.I5}}{Answers to item 5 at time point 3}
+    \item{\code{t4.I1}}{Answers to item 1 at time point 4}
+    \item{\code{t4.I2}}{Answers to item 2 at time point 4}
+    \item{\code{t4.I3}}{Answers to item 3 at time point 4}
+    \item{\code{t4.I4}}{Answers to item 4 at time point 4}
+    \item{\code{t4.I5}}{Answers to item 5 at time point 4}
+    \item{\code{t5.I1}}{Answers to item 1 at time point 5}
+    \item{\code{t5.I2}}{Answers to item 2 at time point 5}
+    \item{\code{t5.I3}}{Answers to item 3 at time point 5}
+    \item{\code{t5.I4}}{Answers to item 4 at time point 5}
+    \item{\code{t5.I5}}{Answers to item 5 at time point 5}
+    \item{\code{groups}}{The group membership}
+  }
 }
 \details{
 This is a data set as described in Hatzinger and Rusch (2009). 5 items
@@ -56,6 +54,6 @@ pp. 87--120,
 \url{http://erm.r-forge.r-project.org/psq_1_2009_06_87-120.pdf}
 }
 \examples{
-data(llraDat1)
+llraDat1
 }
 \keyword{datasets}
diff --git a/man/llraDat2.Rd b/man/llraDat2.Rd
old mode 100755
new mode 100644
index 1e0a970..27a3c88
--- a/man/llraDat2.Rd
+++ b/man/llraDat2.Rd
@@ -1,37 +1,35 @@
+\encoding{UTF-8}
 \name{llraDat2}
 \alias{llraDat2}
 \docType{data}
-\title{An Artifical LLRA Data Set
-}
-\description{
-Artificial data set of 70 subjects with 5 items, 4 time points and 3 groups for LLRA.
-}
-\usage{data(llraDat2)}
+\title{An Artificial LLRA Data Set}
+\description{Artificial data set of 70 subjects with 5 items, 4 time points and 3 groups for LLRA.}
+\usage{llraDat2}
 \format{
-   A data frame with 70 observations of 21 variables.
- \itemize{
- \item{t1.I1}{ Answers to item 1 at time point 1}
- \item{t1.I2}{ Answers to item 2 at time point 1}
- \item{t1.I3}{ Answers to item 3 at time point 1}
- \item{t1.I4}{ Answers to item 4 at time point 1}
- \item{t1.I5}{ Answers to item 5 at time point 1}
- \item{t2.I1}{ Answers to item 1 at time point 2}
- \item{t2.I2}{ Answers to item 2 at time point 2}
- \item{t2.I3}{ Answers to item 3 at time point 2}
- \item{t2.I4}{ Answers to item 4 at time point 2}
- \item{t2.I5}{ Answers to item 5 at time point 2}
- \item{t3.I1}{ Answers to item 1 at time point 3}
- \item{t3.I2}{ Answers to item 2 at time point 3}
- \item{t3.I3}{ Answers to item 3 at time point 3}
- \item{t3.I4}{ Answers to item 4 at time point 3}
- \item{t3.I5}{ Answers to item 5 at time point 3}
- \item{t4.I1}{ Answers to item 1 at time point 4}
- \item{t4.I2}{ Answers to item 2 at time point 4}
- \item{t4.I3}{ Answers to item 3 at time point 4}
- \item{t4.I4}{ Answers to item 4 at time point 4}
- \item{t4.I5}{ Answers to item 5 at time point 4}
- \item{groups}{ The group membership}
-}
+  A data frame with 70 observations of 21 variables.
+  \describe{
+    \item{\code{t1.I1}}{Answers to item 1 at time point 1}
+    \item{\code{t1.I2}}{Answers to item 2 at time point 1}
+    \item{\code{t1.I3}}{Answers to item 3 at time point 1}
+    \item{\code{t1.I4}}{Answers to item 4 at time point 1}
+    \item{\code{t1.I5}}{Answers to item 5 at time point 1}
+    \item{\code{t2.I1}}{Answers to item 1 at time point 2}
+    \item{\code{t2.I2}}{Answers to item 2 at time point 2}
+    \item{\code{t2.I3}}{Answers to item 3 at time point 2}
+    \item{\code{t2.I4}}{Answers to item 4 at time point 2}
+    \item{\code{t2.I5}}{Answers to item 5 at time point 2}
+    \item{\code{t3.I1}}{Answers to item 1 at time point 3}
+    \item{\code{t3.I2}}{Answers to item 2 at time point 3}
+    \item{\code{t3.I3}}{Answers to item 3 at time point 3}
+    \item{\code{t3.I4}}{Answers to item 4 at time point 3}
+    \item{\code{t3.I5}}{Answers to item 5 at time point 3}
+    \item{\code{t4.I1}}{Answers to item 1 at time point 4}
+    \item{\code{t4.I2}}{Answers to item 2 at time point 4}
+    \item{\code{t4.I3}}{Answers to item 3 at time point 4}
+    \item{\code{t4.I4}}{Answers to item 4 at time point 4}
+    \item{\code{t4.I5}}{Answers to item 5 at time point 4}
+    \item{\code{groups}}{The group membership}
+  }
 }
 \details{
 This is a data set as described in Hatzinger and Rusch (2009). 5 items
@@ -49,6 +47,6 @@ pp. 87--120,
 \url{http://erm.r-forge.r-project.org/psq_1_2009_06_87-120.pdf}
 }
 \examples{
-data(llraDat2)
+llraDat2
 }
 \keyword{datasets}
diff --git a/man/llradat3.Rd b/man/llradat3.Rd
old mode 100755
new mode 100644
index 26cbf1d..393b6f2
--- a/man/llradat3.Rd
+++ b/man/llradat3.Rd
@@ -1,23 +1,23 @@
+\encoding{UTF-8}
 \name{llradat3}
 \alias{llradat3}
 \docType{data}
-\title{An Artifical LLRA Data Set
-}
+\title{An Artificial LLRA Data Set}
 \description{
 Artificial data set of 3 items, 2 time points and 2 groups for LLRA. It
 is example 6 from Hatzinger and Rusch (2009).
 }
-\usage{data(llradat3)}
+\usage{llradat3}
 \format{
   A data frame with 60 observations of 6 variables.
- \itemize{
- \item{V1}{ Answers to item 1 at time point 1}
- \item{V2}{ Answers to item 2 at time point 1}
- \item{V3}{ Answers to item 3 at time point 1}
- \item{V4}{ Answers to item 1 at time point 2}
- \item{V5}{ Answers to item 2 at time point 2}
- \item{V6}{ Answers to item 3 at time point 2}
-}
+  \describe{
+    \item{\code{V1}}{Answers to item 1 at time point 1}
+    \item{\code{V2}}{Answers to item 2 at time point 1}
+    \item{\code{V3}}{Answers to item 3 at time point 1}
+    \item{\code{V4}}{Answers to item 1 at time point 2}
+    \item{\code{V5}}{Answers to item 2 at time point 2}
+    \item{\code{V6}}{Answers to item 3 at time point 2}
+  }
 }
 \details{
 This is a data set as described in Hatzinger and Rusch (2009).
@@ -29,6 +29,6 @@ pp. 87--120,
 \url{http://erm.r-forge.r-project.org/psq_1_2009_06_87-120.pdf}
 }
 \examples{
-data(llradat3)
+llradat3
 }
 \keyword{datasets}
diff --git a/man/person.parameter.Rd b/man/person.parameter.Rd
old mode 100755
new mode 100644
index 69dde8b..e799153
--- a/man/person.parameter.Rd
+++ b/man/person.parameter.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{person.parameter}
 \alias{person.parameter}
 \alias{person.parameter.eRm}
@@ -19,13 +20,13 @@ as AIC, BIC, and cAIC based on unconditional log-likelihood.}
 \method{print}{ppar}(x, ...)
 \method{plot}{ppar}(x, xlab = "Person Raw Scores",
    ylab = "Person Parameters (Theta)", main = NULL, ...)
-\method{coef}{ppar}(object, ...)
+\method{coef}{ppar}(object, extrapolated = TRUE, ...)
 \method{logLik}{ppar}(object, ...)
 \method{confint}{ppar}(object, parm, level = 0.95, ...)
 }
 
 \arguments{
-  \item{object}{Object of class \code{eRm} in \code{person.parameter} and object of class \code{ppar} in \code{IC}.}
+  \item{object}{Object of class \code{'eRm'} in \code{person.parameter} and object of class \code{ppar} in \code{IC}.}
 
 Arguments for \code{print} and \code{plot} methods:
   \item{x}{Object of class \code{ppar}.}
@@ -34,6 +35,9 @@ Arguments for \code{print} and \code{plot} methods:
   \item{main}{Title of the plot.}
   \item{...}{Further arguments to be passed to or from other methods. They are ignored in this function.}
 
+Arguments for the \code{coef} method:
+  \item{extrapolated}{logical; if \code{TRUE} (the default), extrapolated estimates are returned for raw scores 0 and k, otherwise those entries are set to \code{NA}.}
+
 Arguments for \code{confint}:
   \item{parm}{Parameter specification (ignored).}
   \item{level}{Alpha-level.}
@@ -70,9 +74,9 @@ The function \code{logLik} returns an object of class \code{loglik.ppar} contain
 Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations,
 Recent Developments, and Applications. Springer.
 
-Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
+Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The \pkg{eRm} package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
 
-Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43.
+Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the \pkg{eRm} package in R. Psychology Science, 49, 26-43.
 }
 \author{Patrick Mair, Reinhold Hatzinger}
 %\note{}
@@ -80,23 +84,18 @@ Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch model
     \code{\link{itemfit.ppar}},\code{\link{personfit.ppar}}
 }
 \examples{
-
 #Person parameter estimation of a rating scale model
-data(rsmdat)
 res <- RSM(rsmdat)
 pres <- person.parameter(res)
-print(pres)
+pres
 summary(pres)
 plot(pres)
 
 #Person parameter estimation for a Rasch model with missing values
-data(raschdat2)
 res <- RM(raschdat2, se = FALSE) #Rasch model without standard errors
 pres <- person.parameter(res)
-print(pres)                      #person parameters
+pres                             #person parameters
 summary(pres)
 logLik(pres)                     #log-likelihood of person parameter estimation
-
-
 }
 \keyword{models}
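
A brief illustration of the new extrapolated argument of the coef method documented above; a sketch, reusing raschdat2 from the example:

res  <- RM(raschdat2)
pres <- person.parameter(res)
coef(pres)                        # includes extrapolated estimates for raw scores 0 and k
coef(pres, extrapolated = FALSE)  # the same positions are returned as NA
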
diff --git a/man/phi.range.Rd b/man/phi.range.Rd
new file mode 100644
index 0000000..fcffdac
--- /dev/null
+++ b/man/phi.range.Rd
@@ -0,0 +1,16 @@
+\encoding{UTF-8}
+\name{phi.range}
+\alias{phi.range}
+\title{Example User Function}
+\description{Calculates the \eqn{R_\phi} statistic, i.e., the range of the inter-column correlations (\eqn{\phi}-coefficients) for a binary matrix.}
+\usage{phi.range(mat)}
+\arguments{\item{mat}{a binary matrix}}
+\value{The range of the inter-column correlations}
+\examples{
+ctr <- rsctrl(burn_in = 10, n_eff = 5, step=10, seed = 123, tfixed = FALSE)
+mat <- matrix(sample(c(0,1), 50, replace = TRUE), nr = 10)
+rso <- rsampler(mat, ctr)
+rso_st <- rstats(rso,phi.range)
+print(unlist(rso_st))
+}
+\keyword{misc}
diff --git a/man/plotDIF.Rd b/man/plotDIF.Rd
old mode 100755
new mode 100644
index 5f09aef..49edbe2
--- a/man/plotDIF.Rd
+++ b/man/plotDIF.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{plotDIF}
 \alias{plotDIF}
 \title{
@@ -53,7 +54,7 @@ If \code{TRUE} a legend is provided by default.
   \item{legpos}{
 Position of the legend with possible values  \code{"bottomright"}, \code{"bottom"}, \code{"bottomleft"}, \code{"left"},
 \code{"topleft"}, \code{"top"}, \code{"topright"}, \code{"right"} and \code{"center"}. The default value for the legend
-is \code{"bottomright"}.
+is \code{"bottomleft"}.
 }
   \item{...}{
 Further options to be passed to \code{plot}.
@@ -76,40 +77,42 @@ Further options to be passed to \code{plot}.
 \code{plotCI} returns a list containing the confidence limits of each group in each \code{LRtest} object.
 }
 
-\author{
-Kathrin Gruber, Reinhold Hatzinger
-}
-
-\seealso{
-\code{\link{LRtest}}, \code{\link{confint.threshold}}, \code{\link{thresholds}}
-}
+\author{Kathrin Gruber, Reinhold Hatzinger}
+\seealso{\code{\link{LRtest}}, \code{\link{confint.threshold}}, \code{\link{thresholds}}}
 \examples{
+# the object used is the result of running RM(raschdat1)
+res <- raschdat1_RM_fitted     # see ? raschdat1_RM_fitted
 
-splitvec <- sample(1:3, 100, replace = TRUE)
-data(raschdat1)
-res <- RM(raschdat1)
-
+\dontrun{
 # LR-test on dichotomous Rasch model with user-defined split
-lrres <- LRtest(res, splitcr = splitvec, se = TRUE)
-# LR-test with mean split, standard errors for beta's
-lrres2 <- LRtest(res, split = "mean", se = TRUE)
-RMplotCI <- list(lrres, lrres2)
+splitvec <- rep(1:2, each = 50)
+lrres <- LRtest(res, splitcr = splitvec)
+
+# LR-test with mean split
+lrres2 <- LRtest(res, split = "mean")
+
+# combination of LRtest-objects in a list
+RMplotCI <- list(lrres, lrres2)}
+
+# the object raschdat1_RM_plotDIF is the result of the computations outlined
+# above and is loaded to save computation time. See ?raschdat1_RM_plotDIF
+RMplotCI <- raschdat1_RM_plotDIF
 
 # Confidence intervals plot with default assumptions
 plotDIF(RMplotCI)
 
 # Confidence intervals plot with Bonferroni correction
-plotDIF(RMplotCI, gamma = 1 - (0.05/10))
+plotDIF(RMplotCI, gamma = (1 - (0.05/10)))
 
 # Confidence intervals plot for an item subset
-plotDIF(RMplotCI, item.subset=1:6)
+plotDIF(RMplotCI, item.subset = 1:6)
 
 # with user defined group color and legend
-plotDIF(RMplotCI, col=c("red","blue"), leg=TRUE)
+plotDIF(RMplotCI, col = c("red", "blue"), leg = TRUE, legpos = "bottomright")
 
 # with names for the splitobjects
-plotDIF(RMplotCI, col=c("red","blue"), leg=TRUE,
-splitnames=c(paste(rep("User",3),1:3,sep=" "), paste(rep("Mean",2),1:2, sep=" ")))
+plotDIF(RMplotCI, col = c("red", "blue"), leg = TRUE, legpos = "bottomright",
+        splitnames = c(paste("User", 1:2), paste(rep("Mean", 2), 1:2)))
 }
 
 \keyword{models}
diff --git a/man/plotGR.Rd b/man/plotGR.Rd
old mode 100755
new mode 100644
index 98307c3..24d272f
--- a/man/plotGR.Rd
+++ b/man/plotGR.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{plotGR}
 \alias{plotGR}
 
@@ -28,9 +29,7 @@ baseline.
 Currently, this function only works for a full item x treatment x
 timepoints LLRA. Collapsed effects will not be displayed properly. 
 }
-\author{
-Thomas Rusch
-}
+\author{Thomas Rusch}
 \seealso{
 The plot method for trend effects \code{\link{plotTR}}.
 }
@@ -39,19 +38,16 @@ The plot method for trend effects \code{\link{plotTR}}.
   data matrix will not be displayed correctly.
 }
 \examples{
-    ##Example 6 from Hatzinger & Rusch (2009)
-    data("llradat3")
-    groups <- c(rep("TG",30),rep("CG",30))
-    llra1 <- LLRA(llradat3,mpoints=2,groups=groups)
-    summary(llra1)
-    plotGR(llra1)
+##Example 6 from Hatzinger & Rusch (2009)
+groups <- c(rep("TG",30),rep("CG",30))
+llra1 <- LLRA(llradat3,mpoints=2,groups=groups)
+summary(llra1)
+plotGR(llra1)
 
-    ##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4
-    ##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6
-    ##categories respectively.
-   \dontrun{  
-    data("llraDat2")
-    ex2 <- LLRA(llraDat2[1:20],mpoints=4,groups=llraDat2[21])
-    plotGR(ex2)
-   }
-}
\ No newline at end of file
+\dontrun{  
+##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4
+##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6
+##categories respectively.
+ex2 <- LLRA(llraDat2[1:20],mpoints=4,groups=llraDat2[21])
+plotGR(ex2)}
+}
diff --git a/man/plotICC.Rd b/man/plotICC.Rd
old mode 100755
new mode 100644
index 740de54..e3c0cee
--- a/man/plotICC.Rd
+++ b/man/plotICC.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{plotICC}
 \alias{plotICC}
 \alias{plotICC.Rm}
@@ -13,7 +14,7 @@
    col = NULL, lty = 1, legpos = "left", ask = TRUE, ...)
 \method{plotjointICC}{dRm}(object, item.subset = "all", legend = TRUE,
    xlim = c(-4, 4), ylim = c(0, 1), xlab = "Latent Dimension",
-   ylab = "Probability to Solve", lty = 1, legpos = "left",
+   ylab = "Probability to Solve", lty = 1, legpos = "topleft",
    main="ICC plot",col=NULL,...)
 }
 \arguments{
@@ -92,7 +93,6 @@
 \seealso{\code{\link{plotGOF}}}
 \examples{
 # Rating scale model, ICC plot for all items
-data(rsmdat)
 rsm.res <- RSM(rsmdat)
 thresholds(rsm.res)
 plotICC(rsm.res)
@@ -102,7 +102,6 @@ plotICC(rsm.res, item.subset = 1:4, mplot = TRUE, legpos = FALSE)
 
 # Rasch model for items 1 to 8 from raschdat1
 # empirical ICCs displaying relative frequencies (default settings)
-data(raschdat1)
 rm8.res <- RM(raschdat1[,1:8])
 plotICC(rm8.res, empICC=list("raw"))
 
@@ -117,7 +116,6 @@ plotICC(rm8.res, empICC = list("kernel",smooth=3))
 plotICC(rm8.res, item.subset=c(2,3,7,8), empICC=list("raw"), empCI=list())
 
 # Joint ICC plot for items 2, 6, 8, and 15 for a Rasch model
-data(raschdat1)
 res <- RM(raschdat1)
 plotjointICC(res, item.subset = c(2,6,8,15), legpos = "left")
 }
diff --git a/man/plotINFO.Rd b/man/plotINFO.Rd
new file mode 100644
index 0000000..b2697dd
--- /dev/null
+++ b/man/plotINFO.Rd
@@ -0,0 +1,26 @@
+\encoding{UTF-8}
+\name{plotINFO}
+\alias{plotINFO}
+\title{Plot Information For \code{'eRm'} objects
+}
+\description{Calculates and plots the individual or summed item information by Samejima (1969)}
+\usage{plotINFO(ermobject, type = "both", theta = seq(-6, 6, length.out = 1001L), \dots)}
+\arguments{
+  \item{ermobject}{An object of class \code{'eRm'}.}
+  \item{type}{A string denoting the type of information to be plotted. Currently supports \code{"item"}, \code{"test"} and \code{"both"} (default).}
+  \item{theta}{Supporting or sampling points on the latent trait.}
+  \item{...}{%
+    Further arguments.
+    \code{xlab} sets the label of the \eqn{x}{x} axis.
+    \code{ylabI} and \code{ylabT} control the labeling of the item or test information plot.
+    \code{mainI} and \code{mainT} set the titles for item/test information plots.
+    \code{legpos} defines the positioning of the legend, as in \code{\link[eRm]{plotICC}}.
+  }
+}
+\references{Samejima, F. (1969) Estimation of latent ability using a response pattern of graded scores. \emph{Psychometric Monographs}, \bold{17}.}
+\author{Thomas Rusch}
+\seealso{The function to calculate the item or test information, \code{\link[eRm]{item_info}} and \code{\link[eRm]{test_info}}.}
+\examples{
+res <- PCM(pcmdat)
+plotINFO(res)
+}
diff --git a/man/plotPImap.Rd b/man/plotPImap.Rd
old mode 100755
new mode 100644
index 5aee890..e3bdd6c
--- a/man/plotPImap.Rd
+++ b/man/plotPImap.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{plotPImap}
 \alias{plotPImap}
 %- Also NEED an '\alias' for EACH other topic documented here.
@@ -51,12 +52,11 @@ plotPImap(object, item.subset = "all", sorted = FALSE,
 \references{Bond, T.G., and Fox Ch.M. (2007) Applying the Rasch Model. Fundamental Measurement in the Human Sciences.
 2nd Edition. Lawrence Erlbaum Associates.
 }
-\author{Patrick Mair, Reinhold Hatzinger, patches from Julian Gilbey and Marco Maier}
+\author{Patrick Mair, Reinhold Hatzinger, patches from Julian Gilbey and Marco J. Maier}
 %\note{}
 %\seealso{}
 \examples{
-data(pcmdat)
-res<-PCM(pcmdat)
+res <- PCM(pcmdat)
 plotPImap(res, sorted=TRUE)
 }
 \keyword{models}
diff --git a/man/plotPWmap.Rd b/man/plotPWmap.Rd
old mode 100755
new mode 100644
index 6c13d1b..4de49b9
--- a/man/plotPWmap.Rd
+++ b/man/plotPWmap.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{plotPWmap}
 \alias{plotPWmap}
 \title{Pathway Map}
@@ -79,32 +80,21 @@ plotPWmap(object, pmap = FALSE, imap=TRUE,
 }
 %\value{}
 \references{
-  Bond T.G., Fox C.M. (2007) \emph{Applying the Rasch Model: Fundamental
-  Measurement in the Human Sciences} (2nd ed.) chapter 3, Lawrence
-  Erlbaum Associates, Inc.
+  Bond T.G., Fox C.M. (2007) \emph{Applying the Rasch Model: Fundamental Measurement in the Human Sciences} (2nd ed.) chapter 3, Lawrence Erlbaum Associates, Inc.
 
-  Linacre J.M., Wright B.D. (1994) Dichotomous Infit and
-  Outfit Mean-Square Fit Statistics / Chi-Square Fit Statistics.
-  \emph{Rasch Measurement Transactions} \bold{8:2} p. 350,
-  \url{http://www.rasch.org/rmt/rmt82a.htm}
+  Linacre J.M., Wright B.D. (1994) Dichotomous Infit and Outfit Mean-Square Fit Statistics / Chi-Square Fit Statistics. \emph{Rasch Measurement Transactions} \bold{8:2} p. 350, \url{http://www.rasch.org/rmt/rmt82a.htm}
 
-  Linacre J.M. (2002) What do Infit and Outfit, Mean-square and
-  Standardized mean?  \emph{Rasch Measurement Transactions} \bold{16:2}
-  p. 878, \url{http://www.rasch.org/rmt/rmt162f.htm}
-
-  Wright B.D., Masters G.N. (1990) Computation of OUTFIT and INFIT
-  Statistics.  \emph{Rasch Measurement Transactions} \bold{3:4}
-  p. 84--85, \url{http://www.rasch.org/rmt/rmt34e.htm}
+  Linacre J.M. (2002) What do Infit and Outfit, Mean-square and Standardized mean? \emph{Rasch Measurement Transactions} \bold{16:2} p. 878, \url{http://www.rasch.org/rmt/rmt162f.htm}
 
+  Wright B.D., Masters G.N. (1990) Computation of OUTFIT and INFIT Statistics. \emph{Rasch Measurement Transactions} \bold{3:4} p. 84--85, \url{http://www.rasch.org/rmt/rmt34e.htm}
 }
 \author{Julian Gilbey}
 %\note{}
 %\seealso{}
 \examples{
-data(pcmdat)
-res<-PCM(pcmdat)
-pparm<-person.parameter(res)
-plotPWmap(res, pp=pparm)
-plotPWmap(res, pp=pparm, pmap=TRUE)
+res <- PCM(pcmdat)
+pparm <- person.parameter(res)
+plotPWmap(res, pp = pparm)
+plotPWmap(res, pp = pparm, pmap = TRUE)
 }
 \keyword{models}
diff --git a/man/plotTR.Rd b/man/plotTR.Rd
old mode 100755
new mode 100644
index 011a2c1..5a11b1f
--- a/man/plotTR.Rd
+++ b/man/plotTR.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{plotTR}
 \alias{plotTR}
 
@@ -25,9 +26,7 @@ plotTR(object, ...)
   Currently, this function only works for a full item x treatment x
   timepoints LLRA. Collapsed effects will not be displayed properly. 
 }
-\author{
-Thomas Rusch
-}
+\author{Thomas Rusch}
 \seealso{
   The plot method for treatment effects \code{"plotGR"}.
 }
@@ -36,19 +35,16 @@ Thomas Rusch
   data matrix will not be displayed correctly.
 }
 \examples{
-    ##Example 6 from Hatzinger & Rusch (2009)
-    data("llradat3")
-    groups <- c(rep("TG",30),rep("CG",30))
-    llra1 <- LLRA(llradat3,mpoints=2,groups=groups)
-    summary(llra1)
-    plotTR(llra1)
+##Example 6 from Hatzinger & Rusch (2009)
+groups <- c(rep("TG",30),rep("CG",30))
+llra1 <- LLRA(llradat3,mpoints=2,groups=groups)
+summary(llra1)
+plotTR(llra1)
 
-    ##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4
-    ##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6
-    ##categories respectively.
-   \dontrun{ 
-    data("llraDat2")
-    ex2 <- LLRA(llraDat2[1:20],mpoints=4,groups=llraDat2[21])
-    plotTR(ex2)
- }
+\dontrun{ 
+##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4
+##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6
+##categories respectively.
+ex2 <- LLRA(llraDat2[1:20],mpoints=4,groups=llraDat2[21])
+plotTR(ex2)}
 }
diff --git a/man/predict.ppar.Rd b/man/predict.ppar.Rd
old mode 100755
new mode 100644
index e214347..4671437
--- a/man/predict.ppar.Rd
+++ b/man/predict.ppar.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{predict.ppar}
 \alias{predict.ppar}
 
@@ -27,12 +28,9 @@ Returns data matrix based on model probabilities
 }
 
 \examples{
-
 #Model-based data matrix for RSM
-data(raschdat2)
 res <- RM(raschdat2)
 pres <- person.parameter(res)
 predict(pres)
-
 }
 \keyword{models}
diff --git a/man/print.eRm.Rd b/man/print.eRm.Rd
old mode 100755
new mode 100644
index bed420d..c34bf55
--- a/man/print.eRm.Rd
+++ b/man/print.eRm.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{print.eRm}
 \alias{print.eRm}
 \alias{summary.eRm}
@@ -8,7 +9,7 @@
 \alias{confint.eRm}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{Methods for extended Rasch models}
-\description{Several methods for objects of class \code{eRm}.}
+\description{Several methods for objects of class \code{'eRm'}.}
 \usage{
 \method{print}{eRm}(x, ...)
 \method{summary}{eRm}(object, ...)
@@ -50,15 +51,13 @@ The methods below are extractor functions and return various quantities:
 \author{Patrick Mair, Reinhold Hatzinger}
 
 \examples{
-data(raschdat1)
 res <- RM(raschdat1)
-print(res)
+res
 summary(res)
 coef(res)
 vcov(res)
 model.matrix(res)
 logLik(res)
 }
-
 \keyword{models}
 
diff --git a/man/raschdat.Rd b/man/raschdat.Rd
deleted file mode 100755
index 32b876e..0000000
--- a/man/raschdat.Rd
+++ /dev/null
@@ -1,30 +0,0 @@
-\name{raschdat1}
-\alias{raschdat1}
-\alias{raschdat2}
-\alias{lltmdat1}
-\alias{lltmdat2}
-\alias{pcmdat}
-\alias{pcmdat2}
-\alias{lpcmdat}
-\alias{rsmdat}
-\alias{lrsmdat}
-
-\docType{data}
-\title{Data for Computing Extended Rasch Models}
-\description{Artificial data sets for computing extended Rasch models.
-}
-\usage{data(raschdat1)}
-\format{Numeric matrices with subjects as rows, items as columns, missing values as \code{NA}.
-}
-\examples{
-data(raschdat1)
-data(raschdat2)
-data(lltmdat1)
-data(lltmdat2)
-data(pcmdat)
-data(pcmdat2)
-data(lpcmdat)
-data(rsmdat)
-data(lrsmdat)
-}
-\keyword{datasets}
diff --git a/man/rsampler.Rd b/man/rsampler.Rd
new file mode 100644
index 0000000..e0726eb
--- /dev/null
+++ b/man/rsampler.Rd
@@ -0,0 +1,57 @@
+\encoding{UTF-8}
+\name{rsampler}
+\alias{rsampler}
+\title{Sampling Binary Matrices}
+\description{%
+  The function implements an \acronym{MCMC} algorithm for sampling binary matrices with fixed margins that comply with the Rasch model.
+  Its stationary distribution is uniform.
+  The algorithm also allows for square matrices with fixed diagonal.
+}
+\usage{
+rsampler(inpmat, controls = rsctrl())
+}
+\arguments{
+  \item{inpmat}{A binary (data) matrix with \eqn{n}{n} rows and \eqn{k}{k} columns.}
+  \item{controls}{An object of class \code{\link{RSctr}}. If not specified, the default parameters as returned by function \code{\link{rsctrl}} are used.}
+}
+\details{
+  \code{rsampler} is a wrapper function for a Fortran routine to generate binary random matrices based
+   on an input matrix.
+   On output the generated binary matrices are integer encoded. For further
+   processing of the generated matrices use the function \code{\link{rstats}}.
+}
+\value{
+   A list of class \code{\link{RSmpl}} with components
+  \item{n}{number of rows of the input matrix}
+  \item{k}{number of columns of the input matrix}
+  \item{inpmat}{the input matrix}
+  \item{tfixed}{\code{TRUE}, if diagonals of \code{inpmat} are fixed}
+  \item{burn_in}{length of the burn in process}
+  \item{n_eff}{number of generated matrices (effective matrices)}
+  \item{step}{controls the number of void matrices generated in the burn-in
+                process and when effective matrices are generated (see note
+                in \code{\link{rsctrl}}). }
+  \item{seed}{starting value for the random number generator}
+  \item{n_tot}{number of matrices in \code{outvec}, \code{n_tot = n_eff + 1}}
+  \item{outvec}{vector of encoded random matrices}
+  \item{ier}{error code}
+}
+\references{%
+Verhelst, N. D. (2008). An Efficient \acronym{MCMC} Algorithm to Sample Binary Matrices with Fixed Marginals. \emph{Psychometrika, 73} (4)%
+}
+\author{Reinhold Hatzinger, Norman Verhelst}
+\note{%
+  An element of \code{outvec} is a four byte (or 32 bits) integer.
+  The matrices to be output are stored bitwise (some bits are unused, since a whole number of integers is used for every row of a matrix).
+  So the number of integers per row needed equals \eqn{(k+31)/32}{(k+31)/32} (integer division), which is one to four in the present implementation since the number of columns and rows must not exceed 128 and 4096, respectively.
+
+  The summary method (\code{\link{summary.RSmpl}}) prints information on the content of the output object.
+}
+\seealso{\code{\link{rsctrl}}, \code{\link{rstats}} }
+\examples{
+data(xmpl)
+ctr<-rsctrl(burn_in=10, n_eff=5, step=10, seed=0, tfixed=FALSE)
+res<-rsampler(xmpl,ctr)
+summary(res)
+}
+\keyword{misc}
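
As an aside on the storage note in rsampler.Rd above, the integers-per-row formula can be checked with a few lines of plain R (a sketch; the helper name ints_per_row is illustrative, not part of the package):

# one 32-bit integer holds up to 32 columns of a matrix row,
# so a row with k columns needs (k + 31) %/% 32 integers
ints_per_row <- function(k) (k + 31) %/% 32
ints_per_row(30)    # 1, e.g. for the 30-item xmpl data
ints_per_row(128)   # 4, at the current 128-column limit
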
diff --git a/man/rsctrl.Rd b/man/rsctrl.Rd
new file mode 100644
index 0000000..d303fb7
--- /dev/null
+++ b/man/rsctrl.Rd
@@ -0,0 +1,84 @@
+\encoding{UTF-8}
+\name{rsctrl}
+\alias{rsctrl}
+\title{Controls for the Sampling Function}
+\description{
+
+  Various parameters that control aspects of
+  the random generation of binary matrices.
+}
+\usage{
+rsctrl(burn_in = 100, n_eff = 100, step = 16, seed = 0, tfixed = FALSE)
+}
+\arguments{
+  \item{burn_in}{
+      the number of sampled matrices to
+      come close to a stationary distribution.
+      The default is \code{burn_in = 100}.
+      (The actual number is \code{2 * burn_in * step}.)
+      }
+  \item{n_eff}{
+    the number of effective matrices, i.e., the number of matrices to be generated by the sampling function \code{\link{rsampler}}.
+    \code{n_eff} must be positive and not larger than 8191 (\eqn{2^{13}-1}{(2^13) - 1}).
+    The default is \code{n_eff = 100}.
+  }
+  \item{step}{controls the number of void matrices generated in the burn-in
+                process and when effective matrices are generated (see note
+                below). The default is \code{step = 16}. }
+  \item{seed}{
+      is the indicator for the seed of the random number generator. 
+      Its value must be in the range 0 to 2147483646 (\eqn{2^{31}-2}{(2^31) - 2}).
+      If the value of seed equals zero, a seed is generated
+      by the sampling function \code{\link{rsampler}}
+      (dependent on the system's clock) and its value is returned
+      in the output. If seed is not equal to zero, its 
+      value is used as the seed of the random number generator.
+      In that case its value is unaltered at output.
+      The default is \code{seed = 0}.
+      }
+  \item{tfixed}{logical; specifies whether in case of a quadratic input
+      matrix the diagonal is considered fixed (see note below).
+      The default is \code{tfixed = FALSE}.
+      }
+}
+
+\value{
+  A list of class \code{RSctr} with components
+ \code{burn_in}, \code{n_eff}, \code{step},
+ \code{seed}, \code{tfixed}.
+}
+\note{
+   If one of the components is incorrectly specified,
+   the error function \code{rserror}
+   is called and some information is printed. The output object
+   will not be defined.\cr\cr
+   The specification of \code{step} controls the sampling algorithm as follows:
+   if, e.g., \code{burn_in = 10}, \code{n_eff = 5}, and \code{step = 2},
+   then during the burn-in period \code{step * burn_in = 2 * 10}
+   matrices are generated. After that, \code{n_eff * step = 5 * 2} matrices
+   are generated and every second matrix of these last ten is returned from
+   \code{\link{rsampler}}.\cr\cr
+   \code{tfixed} has no effect if the input matrix is not quadratic,
+      i.e., all matrix elements are considered free (unrestricted).
+      If the input matrix is quadratic, and \code{tfixed = TRUE},
+      the main diagonal of the matrix is considered as fixed.
+      On return from \code{\link{rsampler}} all diagonal elements
+      of the generated matrices are set to zero.
+      This specification applies, e.g.,
+      to analyzing square incidence matrices
+      representing binary asymmetric relations
+      in social network theory.\cr\cr
+   The summary method (\code{\link{summary.RSctr}}) prints
+   the current definitions. \cr
+}
+\seealso{\code{\link{rsampler}} }
+\examples{
+ctr <- rsctrl(n_eff = 1, seed = 987654321)  # specify new controls
+summary(ctr)
+
+\dontrun{
+# incorrect specifications will lead to an error
+ctr2 <- rsctrl(step = -3, n_eff = 10000)}
+}
+
+\keyword{misc}
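
To see the step arithmetic from the note above in action, a minimal R sketch (assuming the RaschSampler routines bundled with eRm; the component names n_eff and n_tot are as documented for rsampler):

ctr <- rsctrl(burn_in = 10, n_eff = 5, step = 2, seed = 123)
mat <- matrix(sample(c(0, 1), 50, replace = TRUE), nrow = 10)
rso <- rsampler(mat, ctr)
rso$n_eff   # 5 effective matrices are kept (every second of the last ten)
rso$n_tot   # 6: the input matrix is stored in position 1
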
diff --git a/man/rsextrmat.Rd b/man/rsextrmat.Rd
new file mode 100644
index 0000000..8f7c80e
--- /dev/null
+++ b/man/rsextrmat.Rd
@@ -0,0 +1,29 @@
+\encoding{UTF-8}
+\name{rsextrmat}
+\alias{rsextrmat}
+\title{Extracting a Matrix}
+\description{
+  Convenience function to extract a matrix.
+}
+\usage{
+rsextrmat(RSobj, mat.no = 1)
+}
+\arguments{
+  \item{RSobj}{object as obtained from using \code{rsampler} or \code{rsextrobj}}
+  \item{mat.no}{number of the matrix to extract from the sample object.}
+}
+\value{
+   One of the matrices (either the original or a sampled matrix)
+}
+\seealso{\code{\link{rsampler}}, \code{\link{rsextrobj}}, \code{\link{rstats}}}
+\examples{
+ctr <- rsctrl(burn_in = 10, n_eff = 3, step=10, seed = 0, tfixed = FALSE)
+mat <- matrix(sample(c(0,1), 50, replace = TRUE), nr = 10)
+all_m <- rsampler(mat, ctr)
+summary(all_m)
+
+# extract the third sampled matrix (here the fourth)
+third_m <- rsextrmat(all_m, 4)
+head(third_m)
+}
+\keyword{misc}
diff --git a/man/rsextrobj.Rd b/man/rsextrobj.Rd
new file mode 100644
index 0000000..987a23f
--- /dev/null
+++ b/man/rsextrobj.Rd
@@ -0,0 +1,70 @@
+\encoding{UTF-8}
+\name{rsextrobj}
+\alias{rsextrobj}
+\title{Extracting Encoded Sample Matrices}
+\description{
+  Utility function to extract some of the generated matrices, still in encoded form.
+}
+\usage{
+rsextrobj(RSobj, start = 1, end = 8192)
+}
+\arguments{
+  \item{RSobj}{object as obtained from using \code{rsampler}}
+  \item{start}{number of the matrix to start with. When specifying 1
+              (the default value) the original input matrix is
+               included in the output object.
+              }
+  \item{end}{last matrix to be extracted. If \code{end}
+             is not specified, all matrices from \code{RSobj}
+             are extracted (the maximal value is 8192, see
+             \code{rsctrl}). If \code{end} is larger than
+             the number of matrices stored in \code{RSobj},
+             \code{end} is set to the highest possible value
+             (i.e., \code{n_tot}).
+            }
+}
+\value{
+   A list of class \code{\link{RSmpl}} with components
+  \item{n}{number of rows of the input matrix}
+  \item{k}{number of columns of the input matrix}
+  \item{inpmat}{the input matrix}
+  \item{tfixed}{\code{TRUE}, if diagonals of \code{inpmat} are fixed}
+  \item{burn_in}{length of the burn in process}
+  \item{n_eff}{number of generated matrices (effective matrices)}
+  \item{step}{controls the number of void matrices generated in the burn-in
+              process and when effective matrices are generated (see note
+              in \code{\link{rsctrl}}). }
+  \item{seed}{starting value for the random number generator}
+  \item{n_tot}{number of matrices in \code{outvec}.}
+  \item{outvec}{vector of encoded random matrices}
+  \item{ier}{error code}
+}
+\note{By default, all generated matrices plus
+      the original matrix (in position 1) are contained in
+      \code{outvec}, thus \code{n_tot = n_eff + 1}. If
+      the original matrix is not in \code{outvec} then
+      \code{n_tot = n_eff}.\cr
+      For saving and loading objects
+      of class \code{RSobj} see the example below.
+
+      For extracting a decoded (directly usable) matrix use \code{\link{rsextrmat}}.
+}
+\seealso{\code{\link{rsampler}}, \code{\link{rsextrmat}} }
+\examples{
+ctr <- rsctrl(burn_in = 10, n_eff = 3, step=10, seed = 0, tfixed = FALSE)
+mat <- matrix(sample(c(0,1), 50, replace = TRUE), nr = 10)
+all_m <- rsampler(mat, ctr)
+summary(all_m)
+
+some_m <- rsextrobj(all_m, 1, 2)
+summary(some_m)
+
+\dontrun{
+save(some_m, file = "some.RSobj.RData")
+rm(some_m)
+ls()
+
+load("some.RSobj.RData")
+summary(some_m)}
+}
+\keyword{misc}
diff --git a/man/rstats.Rd b/man/rstats.Rd
new file mode 100644
index 0000000..60f86cd
--- /dev/null
+++ b/man/rstats.Rd
@@ -0,0 +1,67 @@
+\encoding{UTF-8}
+\name{rstats}
+\alias{rstats}
+\title{Calculating Statistics for the Sampled Matrices}
+\description{
+   This function is used to calculate user defined statistics for the
+   (original and) sampled matrices. A user defined function has to
+   be provided.
+}
+\usage{
+rstats(RSobj, userfunc, ...)
+}
+\arguments{
+  \item{RSobj}{object as obtained from using \code{\link{rsampler}}
+               or \code{\link{rsextrobj}} }
+  \item{userfunc}{a user defined function which performs operations
+     on the (original and) sampled matrices. The first argument in the definition
+     of the user function must be an object of type matrix.}
+  \item{...}{further arguments, that are passed to the user function}
+}
+\value{
+      A list of objects as specified in the user supplied function
+}
+\note{The encoded matrices that are contained in the
+      input object \code{RSobj} are decoded and passed to the user function in turn.
+      If \code{RSobj} is not an object obtained from either \code{\link{rsampler}}
+      or \code{\link{rsextrobj}}, or if
+      no user function is specified, an error message is printed.
+      A simple user function, \code{\link{phi.range}}, is included in
+      the RaschSampler package for demonstration purposes.\cr
+
+      \code{rstats} can be used to obtain the 0/1 values for any
+      of the sampled matrices (see second example below). Please note
+      that the output from the user function is stored in a list where
+      the number of components corresponds to the number of matrices passed
+      to the user function (see third example).
+}
+\seealso{\code{\link{rsampler}}, \code{\link{rsextrobj}} }
+\examples{
+ctr <- rsctrl(burn_in = 10, n_eff = 5, step=10, seed = 12345678, tfixed = FALSE)
+mat <- matrix(sample(c(0,1), 50, replace = TRUE), nr = 10)
+rso <- rsampler(mat, ctr)
+rso_st <- rstats(rso,phi.range)
+unlist(rso_st)
+
+# extract the third generated matrix
+# (here, the first is the input matrix)
+# and decode it into rsmat
+
+rso2 <- rsextrobj(rso,4,4)
+summary(rso2)
+rsmat <- rstats(rso2, function(x) matrix(x, nr = rso2$n))
+print(rsmat[[1]])
+
+# extract only the first r rows of the third generated matrix
+
+mat <- function(x, nr, r = 3){
+  m <- matrix(x, nr = nr)
+  m[1:r,]
+}
+rsmat2 <- rstats(rso2, mat, nr=rso$n, r = 3)
+print(rsmat2[[1]])
+
+# apply a user function to the decoded object
+print(phi.range(rsmat[[1]]))
+}
+\keyword{misc}
diff --git a/man/sim.2pl.Rd b/man/sim.2pl.Rd
old mode 100755
new mode 100644
index 218e3ab..763cc28
--- a/man/sim.2pl.Rd
+++ b/man/sim.2pl.Rd
@@ -1,52 +1,47 @@
+\encoding{UTF-8}
 \name{sim.2pl}
+%
 \alias{sim.2pl}
-
-\title{Simulation of 2-pl data}
-\description{This utility function returns a 0-1 matrix violating the
-  parallel ICC assumption in the Rasch model.
-}
-\usage{
-sim.2pl(persons, items, discrim = 0.25, seed = NULL,
-   cutpoint = "randomized")
-}
-
+%
+%
+%
+\title{Simulation of 2-PL Data}
+\description{This utility function returns a 0-1 matrix violating the parallel ICC assumption in the Rasch model.}
+\usage{sim.2pl(persons, items, discrim = 0.25, seed = NULL, cutpoint = "randomized")}
+%
 \arguments{
-  \item{persons}{Either a vector of person parameters or an integer indicating
-  the number of persons (see details).}
+  \item{persons}{Either a vector of person parameters or an integer indicating the number of persons (see details).}
   \item{items}{Either a vector of item parameters or an integer indicating the number of items (see details).}
   \item{discrim}{Standard deviation on the log scale.}
   \item{seed}{A seed for the random number generated can be set.}
-  \item{cutpoint}{Either \code{"randomized"} for a randomized tranformation of the model probability matrix into the model 0-1 matrix or an integer value between 0 and 1 (see details).}
+  \item{cutpoint}{Either \code{"randomized"} for a randomized transformation of the model probability matrix into the model 0-1 matrix or an integer value between 0 and 1 (see details).}
 }
 
-\details{If \code{persons} and/or \code{items} (using single integers) are specified to
-determine the number of subjects or items, the corresponding
-parameter vector is drawn from N(0,1). The \code{cutpoint} argument refers to the
-transformation of the theoretical probabilities into a 0-1 data matrix. A randomized
-assingment implies that for each cell an additional random number is drawn.
-If the model probability is larger than this value, the person gets 1 on this particular
-item, if smaller, 0 is assigned. Alternatively, a numeric probability cutpoint can be
-assigned and the 0-1 scoring is carried out according to the same rule.
-
-The \code{discrim} argument can be specified either as a vector of length \code{items}
-defining the item discrimination parameters in the 2-PL (e.g., \code{c(1,1,0.5,1,1.5)}),
-or as a single value. In that case, the discrimination parameters are drawn from a lognormal
-distribution with
-\code{meanlog = 0}, where the specified
-value in \code{discrim} refers to the standard deviation on the log-scale.
-The larger the values, the stronger the degree of Rasch violation. Reasonable values are up to 0.5.
+\details{%
+If \code{persons} and/or \code{items} (using single integers) are specified to determine the number of subjects or items, the corresponding parameter vector is drawn from N(0,1).
+The \code{cutpoint} argument refers to the transformation of the theoretical probabilities into a 0-1 data matrix.
+A randomized assignment implies that for each cell an additional random number is drawn.
+If the model probability is larger than this value, the person gets 1 on this particular item, if smaller, 0 is assigned.
+Alternatively, a numeric probability cutpoint can be assigned and the 0-1 scoring is carried out according to the same rule.
+
+The \code{discrim} argument can be specified either as a vector of length \code{items} defining the item discrimination parameters in the 2-PL (e.g., \code{c(1,1,0.5,1,1.5)}), or as a single value.
+In that case, the discrimination parameters are drawn from a lognormal distribution with \code{meanlog = 0}, where the specified value in \code{discrim} refers to the standard deviation on the log-scale.
+The larger the values, the stronger the degree of Rasch violation.
+Reasonable values are up to 0.5.
 If 0, the data are Rasch homogeneous.
 }
-
+%
+%
+%
 \references{
 Su\'arez-Falc\'on, J. C., & Glas, C. A. W. (2003). Evaluation of global testing procedures for
    item fit to the Rasch model. British Journal of Mathematical and Statistical Psychology,
    56, 127-143.
 }
-
+%
 \seealso{\code{\link{sim.rasch}}, \code{\link{sim.locdep}}, \code{\link{sim.xdim}}}
+%
 \examples{
-
 #simulating 2-PL data
 #500 persons, 10 items, sdlog = 0.30, randomized cutpoint
 X <- sim.2pl(500, 10, discrim = 0.30)
@@ -57,5 +52,5 @@ dpar <- runif(50, 0, 2)
 ipar <- runif(50, -1.5, 1.5)
 X <- sim.2pl(500, ipar, dpar, cutpoint = 0.5)
 }
-
+%
 \keyword{models}
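
The randomized-cutpoint rule from the details section can be sketched in a few lines of base R; this mimics the documented rule rather than calling sim.2pl's internals, and the matrices are purely illustrative:

set.seed(1)
P <- matrix(runif(12), nrow = 4)   # stand-in model probabilities
U <- matrix(runif(12), nrow = 4)   # one additional random draw per cell
X <- (P > U) * 1L                  # score 1 where the model probability is larger
X
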
diff --git a/man/sim.locdep.Rd b/man/sim.locdep.Rd
old mode 100755
new mode 100644
index fbf83fa..345d291
--- a/man/sim.locdep.Rd
+++ b/man/sim.locdep.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{sim.locdep}
 \alias{sim.locdep}
 
diff --git a/man/sim.rasch.Rd b/man/sim.rasch.Rd
old mode 100755
new mode 100644
index c2497a9..4f68311
--- a/man/sim.rasch.Rd
+++ b/man/sim.rasch.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{sim.rasch}
 \alias{sim.rasch}
 
diff --git a/man/sim.xdim.Rd b/man/sim.xdim.Rd
old mode 100755
new mode 100644
index 3c64977..efdf5c4
--- a/man/sim.xdim.Rd
+++ b/man/sim.xdim.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{sim.xdim}
 \alias{sim.xdim}
 
diff --git a/man/stepwiseIt.Rd b/man/stepwiseIt.Rd
old mode 100755
new mode 100644
index a6d951e..256c0d1
--- a/man/stepwiseIt.Rd
+++ b/man/stepwiseIt.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{stepwiseIt}
 \alias{stepwiseIt}
 \alias{stepwiseIt.eRm}
@@ -9,7 +10,8 @@
 criteria: itemfit, Wald test, Andersen's LR-test
 }
 \usage{
-\method{stepwiseIt}{eRm}(object, criterion = list("itemfit"), alpha = 0.05, verbose = TRUE, maxstep = NA)
+\method{stepwiseIt}{eRm}(object, criterion = list("itemfit"), alpha = 0.05,
+           verbose = TRUE, maxstep = NA)
 }
 
 \arguments{
diff --git a/man/summary.RSctr.Rd b/man/summary.RSctr.Rd
new file mode 100644
index 0000000..ed7087c
--- /dev/null
+++ b/man/summary.RSctr.Rd
@@ -0,0 +1,20 @@
+\encoding{UTF-8}
+\name{summary.RSctr}
+\alias{summary.RSctr}
+\title{Summary Method for Control Objects}
+\description{
+  Prints the current definitions for the sampling function.
+}
+\usage{
+\method{summary}{RSctr}(object, ...)
+}
+\arguments{
+  \item{object}{ object of class \code{RSctr} as obtained from \code{\link{rsctrl}} }
+  \item{\dots}{ potential further arguments (ignored) }
+}
+\seealso{ \code{\link{rsctrl}} }
+\examples{
+   ctr <- rsctrl(n_eff = 1, seed = 123123123)  # specify controls
+   summary(ctr)
+}
+\keyword{misc}
diff --git a/man/summary.RSmpl.Rd b/man/summary.RSmpl.Rd
new file mode 100644
index 0000000..855d98e
--- /dev/null
+++ b/man/summary.RSmpl.Rd
@@ -0,0 +1,32 @@
+\encoding{UTF-8}
+\name{summary.RSmpl}
+\alias{summary.RSmpl}
+\alias{summary.RSmplext}
+\title{Summary Methods for Sample Objects}
+
+\description{
+  Prints a summary list for sample objects of class \code{\link{RSmpl}}
+  and \code{\link{RSmplext}}.
+}
+\usage{
+\method{summary}{RSmpl}(object, ...)
+\method{summary}{RSmplext}(object, ...)
+}
+\arguments{
+  \item{object}{object as obtained from \code{rsampler} or \code{rsextrobj} }
+  \item{\dots}{ potential further arguments (ignored) }
+}
+\details{
+  Describes the status of a sample object.
+}
+\seealso{\code{\link{rsampler}}, \code{\link{rsextrobj}} }
+\examples{
+ctr <- rsctrl(burn_in = 10, n_eff = 3, step=10, seed = 0, tfixed = FALSE)
+mat <- matrix(sample(c(0,1), 50, replace = TRUE), nr = 10)
+all_m <- rsampler(mat, ctr)
+summary(all_m)
+
+some_m <- rsextrobj(all_m, 1, 2)
+summary(some_m)
+}
+\keyword{misc}
diff --git a/man/summary.llra.Rd b/man/summary.llra.Rd
old mode 100755
new mode 100644
index cd7eb00..b50f30a
--- a/man/summary.llra.Rd
+++ b/man/summary.llra.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{summary.llra}
 \alias{summary.llra}
 \alias{print.summary.llra}
@@ -8,7 +9,7 @@
 \code{summary} method for class \code{"llra"}
 }
 \usage{
-\method{summary}{llra}(object, gamma, ...)
+\method{summary}{llra}(object, level, ...)
 
 \method{print}{summary.llra}(x, ...)
 }
@@ -19,7 +20,7 @@
   \item{x}{an object of class "summary.llra", usually, a result of a call
     to \code{summary.llra}.
   }
-   \item{gamma}{The level of confidence for the confidence
+   \item{level}{The level of confidence for the confidence
    intervals. Default is 0.95.}
     \item{\dots}{further arguments passed to or from other methods.
     }
@@ -38,32 +39,27 @@ them nicely.
 
   \item{ci}{The upper and lower confidence interval borders.}
 }
-\author{
-Thomas Rusch
-}
+\author{Thomas Rusch}
 
 \seealso{
 The model fitting function \code{\link{LLRA}}.
 }
 \examples{
-    ##Example 6 from Hatzinger & Rusch (2009)
-    data("llradat3")
-    groups <- c(rep("TG",30),rep("CG",30))
-    llra1 <- LLRA(llradat3,mpoints=2,groups=groups)
-    summary(llra1)
+##Example 6 from Hatzinger & Rusch (2009)
+groups <- c(rep("TG",30),rep("CG",30))
+llra1 <- LLRA(llradat3,mpoints=2,groups=groups)
+summary(llra1)
 
-    ##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4
-    ##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6
-    ##categories respectively.
-    \dontrun{
-    data("llraDat2")
-    ex2 <- LLRA(llraDat2[1:20],mpoints=4,llraDat2[21])
-    sumEx2 <- summary(ex2, gamma=0.95)
+\dontrun{
+##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4
+##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6
+##categories respectively.
+ex2 <- LLRA(llraDat2[1:20],mpoints=4,llraDat2[21])
+sumEx2 <- summary(ex2, level=0.95)
 
-    #print the summary
-    sumEx2
+#print a summary
+sumEx2
 
-    #get confidence intervals
-    sumEx2$ci
-}
+#get confidence intervals
+sumEx2$ci}
 }
diff --git a/man/test_info.Rd b/man/test_info.Rd
new file mode 100644
index 0000000..b3296e4
--- /dev/null
+++ b/man/test_info.Rd
@@ -0,0 +1,42 @@
+\encoding{UTF-8}
+\name{test_info}
+\alias{test_info}
+
+\title{Calculate Test Information for \code{eRm} Objects}
+\description{
+Calculates the information of a test or a scale as the sum of Samejima's (1969) information for all items.
+}
+\usage{
+test_info(ermobject, theta=seq(-5,5,0.01))
+}
+\arguments{
+  \item{ermobject}{An object of class \code{'eRm'}.
+  } 
+  \item{theta}{Supporting or sampling points on the latent trait.
+  }
+}
+\details{The function \code{test_info} calculates the test or scale information of the
+  whole set of items in the \code{'eRm'} object. 
+}
+\value{
+  Returns the vector of test information for all values of theta.  
+}
+\references{
+Samejima, F. (1969) Estimation of latent ability using a response
+pattern of graded scores. \emph{Psychometric Monographs}, \bold{17}.  
+}
+\author{Thomas Rusch} 
+\seealso{
+The function to calculate the item information, \code{\link{item_info}},
+and the plot function \code{\link{plotINFO}}.
+
+}
+\examples{
+res <- PCM(pcmdat)
+tinfo <- test_info(res)
+plotINFO(res, type="test")
+}
+
+
+
diff --git a/man/thresholds.Rd b/man/thresholds.Rd
old mode 100755
new mode 100644
index 94ac389..11e51f0
--- a/man/thresholds.Rd
+++ b/man/thresholds.Rd
@@ -1,3 +1,4 @@
+\encoding{UTF-8}
 \name{thresholds}
 \alias{thresholds}
 \alias{thresholds.eRm}
@@ -46,9 +47,7 @@ Andrich, D. (1978). Application of a psychometric rating model to ordered catego
       \code{\link{plotICC.Rm}}
 }
 \examples{
-
 #Threshold parameterization for a rating scale model
-data(rsmdat)
 res <- RSM(rsmdat)
 th.res <- thresholds(res)
 th.res
@@ -56,7 +55,6 @@ confint(th.res)
 summary(th.res)
 
 #Threshold parameters for a PCM with ICC plot
-data(pcmdat)
 res <- PCM(pcmdat)
 th.res <- thresholds(res)
 th.res
@@ -64,12 +62,9 @@ plotICC(res)
 
 #Threshold parameters for a LPCM:
 #Block 1: t1, g1; Block 2: t1, g2; ...; Block 6: t2,g3
-
-data(lpcmdat)
 G <- c(rep(1,7),rep(2,7),rep(3,6)) # group vector for 3 groups
 res <- LPCM(lpcmdat, mpoints = 2, groupvec = G)
 th.res <- thresholds(res)
 th.res
-
 }
 \keyword{models}
diff --git a/man/xmpl.Rd b/man/xmpl.Rd
new file mode 100644
index 0000000..1bc9b06
--- /dev/null
+++ b/man/xmpl.Rd
@@ -0,0 +1,26 @@
+\encoding{UTF-8}
+\name{xmpl}
+\alias{xmpl}
+\alias{xmplbig}
+\docType{data}
+\title{Example Data}
+\description{
+  Fictitious data sets: matrices with binary responses.
+}
+\usage{data(xmpl)}
+\format{
+  The format of \code{xmpl} is:\cr
+  300 rows (referring to subjects) \cr
+   30 columns (referring to items) \cr
+
+  The format of \code{xmplbig} is:\cr
+  4096 rows (referring to subjects) \cr
+   128 columns (referring to items) \cr
+  \code{xmplbig} has the maximum dimensions that the RaschSampler package
+  can handle currently.
+}
+\examples{
+data(xmpl)
+print(head(xmpl))
+}
+\keyword{datasets}
diff --git a/src/RaschSampler.f90 b/src/RaschSampler.f90
new file mode 100644
index 0000000..0547815
--- /dev/null
+++ b/src/RaschSampler.f90
@@ -0,0 +1,550 @@
+subroutine sampler(n,k,inputmat,tfixed,burn_in,n_eff,step,seed,outputvec,ier)
+
+      ! sample binary matrices with given marginals.
+      ! input: n = number of rows (integer*4)
+      !        k = number of columns (integer*4)
+      !        inputmat: input binary matrix (n*k) (integer*4)
+      !        tfixed: main diagonal is fixed if true; has an effect only if n.eq.k
+      !        step: if the matrix #i is an effective matrix, then the next effective matrix is #(i+step)
+      !        burn_in: number of burn_in matrices in units of step
+      !        n_eff: number of effective matrices, to be written in the output vector
+      ! I/O    seed (integer*4). Seed of the random generator
+      !          if seed <> 0: seed is unaltered and |seed| is used as the seed of the random generator.
+      !          if seed.eq.0: a seed is generated from the system clock, and its value is returned.
+      !          ATTENTION: currently seed.eq.0 is deactivated and only seed<>0 is allowed
+      !                    (see lines 103-113) (rh 2006-10-25)
+      ! output:outputvec (integer*4 vector): n_eff binary matrices, stored bitwise in the following way:
+      !           if(k<=32) one row of a matrix is stored in one position (number) of four bytes, the first element
+      !                     is bit 0, the second is bit 1, etc.
+      !           if(32<k<=64) two positions are used per row: the first 32 in position 1, the remainder in position 2,
+      !                     again starting with element 33 in bit 0, element 34 in bit 1, etc.
+      !           unused bits are set to zero.
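+      !           example: for k<=32, a row starting (1,0,1,1,0,...) sets bits 0, 2, and 3, i.e. the integer 13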
+      !        ier: output error code
+      !             ier = 0: O.K.
+      !                   1: n > nmax = 1024 = 2**10   !!!!!! changed to 2**12 in 0.8-3
+      !                   2: k > kmax = 64 = 2**6      !!!!!! changed to 2**7  in 0.8-3
+      !                   4: n_eff > n_effmax = 8191 = 2**13 - 1
+      !                   8: burn_in < 0
+      !                  16: step <= 0
+      !                1-31: sums of the foregoing codes
+      !                  32: input matrix contains values other than one or zero
+      !                  64: the input matrix has a Guttman form
+      ! if tfixed and n.eq.k, the main diagonal is considered as fixed.
+      ! if tfixed and n!=k, the case is treated as a rectangular matrix without constraints
+      ! the Markov chain transition matrix used is Q**step
+
+      integer(kind=4), dimension(n*(k+31)/32*(n_eff+1)),intent(out)::outputvec
+      integer(kind=4), intent(out)     :: ier
+      integer(kind=4), intent(in)      :: n,k,burn_in,n_eff,step
+      integer(kind=4), dimension(n,k),intent(in) :: inputmat
+      integer(kind=4)                 :: seed
+      logical(kind=4), intent(in)     :: tfixed
+
+      character(len=10)               :: timevec
+
+      !integer(kind=4),parameter       :: nmax=1024,kmax=64,n_effmax=8191  !!!!!! kmax changed to 2**7 nmax changed to 2**12
+      integer(kind=4),parameter       :: nmax=4096,kmax=128,n_effmax=8191
+      integer(kind=4), allocatable    :: a(:),b(:),aold(:),bold(:),a_kol(:),b_kol(:),iwork(:)
+      integer(kind=4)                 :: i,j,m,kk2,kk3,it,krand,k2,k3,nhex,k2old,k3old,nhexold,n_tot
+      integer(kind=4)                 :: x1,x2  ! x1 and x2 are reserved for the random generator
+      integer(kind=4)                 :: words_per_row, offset
+      integer(kind=4),allocatable     :: hexmat(:,:),hexmatold(:,:)
+
+      real(kind=4)                    :: tijd
+
+      logical(kind=1),parameter       :: t=.true.,f=.false.
+      logical(kind=1),allocatable     :: t_in(:,:)
+      logical(kind=1),dimension(3,3)  :: hexa,hexb
+      logical(kind=1),allocatable     :: twa(:),twb(:),tw(:),tng(:),tngold(:),col1(:),col2(:) !tng = non-guttman pair
+      logical(kind=1)                 :: t_eff,tfixnow
+
+      data hexa/f,f,t,t,f,f,f,t,f/,hexb/f,t,f,f,f,t,t,f,f/
+
+      !check error codes 1, 2, 4, 8, and 16
+      ier=0
+      if(n.le.0 .or. n.gt.nmax)ier=ier+1
+      if(k.le.0 .or. k.gt.kmax)ier=ier+2
+      if(n_eff.le.0 .or. n_eff.gt.n_effmax)ier=ier+4
+      if(burn_in.lt.0)ier=ier+8
+      if(step.le.0)ier=ier+16
+      if(ier.ne.0)return
+
+      ! allocate the necessary arrays
+
+      kk2=k*(k-1)/2
+      allocate(t_in(n,k))
+      allocate(a(kk2),b(kk2),aold(kk2),bold(kk2),a_kol(kk2),b_kol(kk2))
+      allocate(twa(n),twb(n),tw(n),tng(kk2),tngold(kk2),col1(n),col2(n))
+      allocate (iwork(n))
+
+      tfixnow=tfixed
+      if(n.ne.k)tfixnow=.false.
+      if(tfixnow)then
+        kk3=kk2*(k-2)/3
+        allocate(hexmat(3,kk3),hexmatold(3,kk3))
+      endif
+
+      ! check error code 32
+      ier=count(inputmat.gt.1)+count(inputmat.lt.0)
+      if(ier.ne.0)then
+        ier=32
+        return
+      endif
+
+      ! copy input matrix to t_in
+      !!!t_in=inputmat
+      !!!replaced by
+      t_in=btest(inputmat,0)
+      if(tfixnow) then
+        forall (i=1:n) t_in(i,i)=f
+      endif
+!      !select seed for random number generation
+!      if(seed.eq.0)then
+!        call date_and_time(TIME=timevec)
+!        read(timevec,'(f10.3)')tijd
+!        x1=tijd*1000.
+!        x1=0.
+!        x1=x1+536870911 ! =x1 + 2**29 - 1
+!        seed=x1
+!      else
+!        x1=abs(seed)
+!      endif
+      x1=abs(seed) ! added from upper else clause (to be removed if random seed enabled)
+      x2=x1
+      call rand1(x1)
+      krand=1+mod(k,25)  ! KRAND selects a random generator (used in RAND_INTEGER42)
+
+      !fill the arrays a_kol, b_kol, a, b, and b_pairs for the input matrices t_in
+      !determine the weight k2 (= #neighbour column pairs of the input matrix)
+      it=0
+      do i=2,k
+        do j=1,i-1
+          it=it+1
+          a_kol(it)=i
+          b_kol(it)=j
+          call findab(t_in(1:n,i),t_in(1:n,j),i,j,a(it),b(it))
+          tng(it)=a(it)*b(it).gt.0
+        end do
+      end do
+      k2=count(tng(1:kk2))
+      k3=k2
+      if(tfixnow)then
+        call hexagon
+        if(nhex.gt.0)k3=k2+1
+      endif
+      ! check on the Guttman condition (error code 64)
+      if(k3.eq.0)then
+        ier=64
+        return
+      endif
+
+      ! pack the input matrix and put it in the outputvector
+      offset=0 ! offset is the number of words defined until now
+      words_per_row=(k+31)/32
+      call pack_matrix(outputvec) ! offset is updated within the subroutine
+
+      ! determine the total number of matrices to be generated
+      n_tot=burn_in+n_eff
+      ! do the sampling
+      do i=1,n_tot
+        t_eff=i.gt.burn_in
+        do j=1,step  ! generate step matrices before computing the statistic
+          call rand_integer42(it,k3,x1,x2,krand)
+          !{*** from here to ***} only applies to alternating hexagons
+          if(k3.gt.k2.and.it.eq.k3)then  ! there are restrictions on the main diagonal
+                                         ! and there exists at least one alternating hexagon (k3>k2)
+                                         ! an alternating hexagon has to be changed (it=k3)
+            ! first save necessary elements for Metropolis-Hastings
+            k2old=k2
+            k3old=k3
+            aold=a
+            bold=b
+            nhexold=nhex
+            hexmatold(:,1:nhex)=hexmat(:,1:nhex)
+            tngold=tng
+            ! make a new matrix by switching one alternating hexagon
+            call rand_integer42(it,nhex,x1,x2,krand)
+            call make_matrix3(it)
+            call update_pairs3(hexmat(1,it),hexmat(2,it),hexmat(3,it))
+            call update_pairs3(hexmat(2,it),hexmat(1,it),hexmat(3,it))
+            call update_pairs3(hexmat(3,it),hexmat(1,it),hexmat(2,it))
+            call hexagon
+            k2=count(tng(1:kk2))
+            k3=k2+1 ! there is at least one alternating hexagon, viz. the one that just changed into its complement
+            if(k3old.lt.k3) then ! check if process possibly remains in the same state (Metropolis-Hastings)
+              call rand_integer42(m,k3,x1,x2,krand)
+              if(m.gt.k3old)then ! process remains in the same state
+                call make_matrix3(it) ! this restores the matrix
+                k3=k3old
+                k2=k2old
+                a=aold
+                b=bold
+                nhex=nhexold
+                hexmat(:,1:nhex)=hexmatold(:,1:nhex)
+                tng=tngold
+              endif
+            endif
+            cycle
+          endif
+          !***}
+          call pick_a_pair(it)
+          col1=t_in(1:n,a_kol(it))
+          col2=t_in(1:n,b_kol(it))
+          k2old=k2
+          k3old=k3
+          aold=a
+          bold=b
+          tngold=tng
+          if(tfixnow)then
+            nhexold=nhex
+            hexmatold(:,1:nhex)=hexmat(:,1:nhex)
+          endif
+          call make_matrix(it)
+          call update_pairs(a_kol(it),b_kol(it))
+          call update_pairs(b_kol(it),a_kol(it))
+          k2=count(tng(1:kk2))
+          k3=k2
+          if(tfixnow)then
+            call hexagon
+            if(nhex.gt.0)k3=k3+1
+          endif
+          if(k3old.lt.k3)then  ! apply Metropolis-Hastings
+            call rand_integer42(m,k3,x1,x2,krand)
+            if(m.gt.k3old)then ! process remains in the current state
+              t_in(1:n,a_kol(it))=col1(1:n)
+              t_in(1:n,b_kol(it))=col2(1:n)
+              k2=k2old
+              k3=k3old
+              a=aold
+              b=bold
+              tng=tngold
+              if(tfixnow)then
+                nhex=nhexold
+                hexmat(:,1:nhex)=hexmatold(:,1:nhex)
+              endif
+            endif
+          endif
+        end do
+        ! if this is an 'effective' sample, pack it and store it in outputvec
+        if(t_eff)call pack_matrix(outputvec)
+      end do ! here ends the sampling procedure
+
+      deallocate(a,b,aold,bold,a_kol,b_kol,iwork,twa,twb,tw,tng,tngold,col1,col2,t_in)
+      if(tfixnow) deallocate(hexmat,hexmatold)
+
+
+      contains
+
+      subroutine pack_matrix(vec)
+        integer(kind=4) vec(*)
+        integer(kind=4) :: i,j,ib,ie,it,iw
+        do i=1,n
+          ib=1
+          do iw=1,words_per_row
+            offset=offset+1
+            vec(offset)=0
+            ie=min(ib+31,k)
+            it=-1
+            do j=ib,ie
+              it=it+1
+              if(.not.t_in(i,j))cycle
+              vec(offset)=ibset(vec(offset),it)
+            end do
+            ib=ie+1
+          end do
+        end do
+      end subroutine pack_matrix
+
+      subroutine findab(ta,tb,i,j,a,b)
+ !!!!!  logical(kind=1):: ta(n),tb(n)
+        logical(kind=1):: ta(n),tb(n),test(n)
+        integer(kind=4)::a,b,i,j
+        tw=(ta.neqv.tb)
+        if(tfixnow)then
+          tw(i)=.false.
+          tw(j)=.false.
+        endif
+             test=ta.and.tw
+             a=count(test)
+             test=tb.and.tw
+             b=count(test)
+ !!!!!       a=count(ta.and.tw)
+ !!!!!       b=count(tb.and.tw)
+      end subroutine findab
+
+
+      subroutine pick_a_pair(it)
+        integer(kind=4),intent(out)::it
+        integer(kind=4) ::i,m
+        call rand_integer42(it,k2,x1,x2,krand)
+        m=count(tng(1:it))
+        if(m.eq.it)return
+        do i=it+1,kk2
+          if(.not.tng(i))cycle
+          m=m+1
+          if(m.eq.it)then
+            it=i
+            return
+          endif
+        end do
+      end subroutine pick_a_pair
+
+      subroutine make_matrix(it)
+        integer(kind=4),intent(in)::it
+        integer(kind=4)           ::m,i,j,ii,jj
+!!!!!       logical(kind=1),allocatable     :: test(:)
+        logical(kind=1) :: test(n)
+
+        ii=a_kol(it)
+        jj=b_kol(it)
+        if(a(it)*b(it).eq.1)then ! columns ii and jj contain a single tetrad.
+                                 ! no sampling is necessary: the tetrad is complemented
+          j=0
+          do i=1,n
+            if(tfixnow)then
+              if(i.eq.ii)cycle
+              if(i.eq.jj)cycle
+            endif
+            if(t_in(i,ii).eqv.t_in(i,jj))cycle
+            j=j+1
+            t_in(i,ii)=t_in(i,jj)
+            t_in(i,jj)=.not.t_in(i,ii)
+            if(j.eq.2)return
+          end do
+        endif
+        do                     ! a random binomial operation is applied
+          ! copy the two selected columns into the logical vectors twa and twb
+          twa(1:n)=t_in(1:n,ii)
+          twb(1:n)=t_in(1:n,jj)
+          m=a(it)+b(it)
+          call combine(m,a(it),iwork,tw) ! generate a random combination of a(it) objects out of m
+          ! insert the combination into the vectors twa and twb
+          j=0
+          do i=1,n
+            if(tfixnow)then
+              if(i.eq.ii)cycle
+              if(i.eq.jj)cycle
+            endif
+            if(twa(i).eqv.twb(i))cycle
+            j=j+1
+            if(tw(j))then
+              twa(i)=.true.
+              twb(i)=.false.
+            else
+              twa(i)=.false.
+              twb(i)=.true.
+            endif
+            if(j.eq.m)exit
+          end do
+          ! check whether matrix has changed
+ !!!!!
+ !!!!!    test=twa
+          test=twa(1:n).eqv.t_in(1:n,ii)
+          m=count(test)
+ !!!!!         m=count(twa(1:n).eqv.t_in(1:n,ii))
+          if(m.ne.n)exit ! the matrix has changed
+        end do  ! the matrix has not changed; a new combination is tried.
+        ! the changes are inserted in the matrix t_in
+        t_in(1:n,ii)=twa(1:n)
+        t_in(1:n,jj)=twb(1:n)
+
+      end subroutine make_matrix
+
+      subroutine make_matrix3(it)
+        integer(kind=4), intent(in)::it
+        integer(kind=4)            ::i,j,ii,jj
+        do i=1,2
+          ii=hexmat(i,it)
+          do j=i+1,3
+            jj=hexmat(j,it)
+            t_in(ii,jj)=.not.t_in(ii,jj)
+            t_in(jj,ii)=.not.t_in(jj,ii)
+          end do
+        end do
+      end subroutine make_matrix3
+
+      subroutine combine(n,k,ix,tx)
+        ! generate a random combination of k objects out of n
+        ! the result is stored in the logical n-vector tx
+        ! ix is a working array
+        integer(kind=4) ::n,k,kk,ii,iu,nnu
+        integer(kind=4) ::ix(n)
+        logical(kind=1) ::tx(n)
+
+        ix(1:n)=(/(ii,ii=1,n)/)
+        tx(1:n)=.false.
+        nnu=n
+        kk=min(k,n-k)
+        do ii=1,kk
+          call rand_integer42(iu,nnu,x1,x2,krand)
+          tx(ix(iu))=.true.
+          if(iu.lt.nnu)ix(iu:nnu-1)=ix(iu+1:nnu)
+          nnu=nnu-1
+        end do
+        if(kk.lt.k)tx=.not.tx
+      end subroutine combine
+
+      subroutine update_pairs(i,j)
+        integer(kind=4),intent(in) :: i,j
+        integer(kind=4):: jt,m
+        do m=1,i-1
+          if(m.eq.j)cycle
+          jt=(i-1)*(i-2)/2+m
+          call findab(t_in(1:n,i),t_in(1:n,m),i,m,a(jt),b(jt))
+          tng(jt)=a(jt)*b(jt).gt.0
+        end do
+        do m=i+1,k
+          if(m.eq.j)cycle
+          jt=(m-1)*(m-2)/2+i
+          call findab(t_in(1:n,m),t_in(1:n,i),m,i,a(jt),b(jt))
+          tng(jt)=a(jt)*b(jt).gt.0
+        end do
+      end subroutine update_pairs
+
+      subroutine update_pairs3(i,j,l)
+        integer(kind=4),intent(in) :: i,j,l
+        integer(kind=4):: jt,m
+        do m=1,i-1
+          if(m.eq.j)cycle
+          if(m.eq.l)cycle
+          jt=(i-1)*(i-2)/2+m
+          call findab(t_in(1:n,i),t_in(1:n,m),i,m,a(jt),b(jt))
+          tng(jt)=a(jt)*b(jt).gt.0
+        end do
+        do m=i+1,k
+          if(m.eq.j)cycle
+          if(m.eq.l)cycle
+          jt=(m-1)*(m-2)/2+i
+          call findab(t_in(1:n,m),t_in(1:n,i),m,i,a(jt),b(jt))
+          tng(jt)=a(jt)*b(jt).gt.0
+        end do
+      end subroutine update_pairs3
+
+      subroutine hexagon
+        logical(kind=1),dimension(3,3):: c
+        integer(kind=4),dimension(3)  :: v
+        integer(kind=4)               :: i,j,m
+        nhex=0
+        do i=1,n-2
+          v(1)=i
+          do j=i+1,n-1
+            if(t_in(i,j).eqv.t_in(j,i))cycle
+            v(2)=j
+            do m=j+1,n
+              v(3)=m
+              c=t_in(v,v)
+              if(all(c.eqv.hexa).or.all(c.eqv.hexb))then
+                nhex=nhex+1
+                hexmat(1:3,nhex)=v
+              endif
+            end do
+          end do
+        end do
+      end subroutine hexagon
+
+      subroutine rand1(x)
+        ! see Ripley, B.D., Stochastic Simulation. New-York:Wiley, 1987, pp. 37-39. (generator 4)
+        integer(kind=4) x,p,q,r,a,b,c
+
+        data a,b,c,p/127773,16807,2836,2147483647/     ! generator 4
+        q=x/a
+        r=mod(x,a)
+        x=b*r-c*q
+        if(x.lt.0)x=x+p
+      end subroutine rand1
+
+      subroutine rand_integer42(iu,a,x1,x2,k)
+        ! draw a uniformly distributed integer from {1, 2, ..., A}
+        ! the rejection rate is (P - BOUND)/P
+        ! where P equals 2**31-2 (= the number of different values RAND1 can take)
+        ! and BOUND equals A*(P/A)
+        ! example: for A = 1000, the rejection rate is 3.008E-7
+        ! Notice that zero as result of the draw leads to rejection (see !***)
+        ! The routine calls RAND2 with the K-th set of coefficients
+
+        integer(kind=4) iu,a,x1,x2,bound,k
+        integer(kind=4),parameter :: p = 2147483646
+                            ! P is the number of values X can take: 2**31 - 2,
+                            ! because 0 is excluded
+        bound=(p/a)*a
+        do
+          call rand2(x1,x2,k)
+          if(x2.eq.0)cycle
+          if(x2.le.bound)exit        !*** LE not LT
+        end do
+        iu=1+mod(x2,a)
+
+      end subroutine rand_integer42
+
+      subroutine rand2(x1,x2,k)
+
+        ! Random number generators MRG (multiple recursive generator)
+        ! Lih-Yuan Deng and Dennis K.J. Lin (2000).Random Number Generation for the New Century,
+        ! The American Statistician, vol 54, no. 2, pp. 145-150
+        ! To compute the formulae, the method in the referenced article is used. B(K) is the table as published
+        ! A(K)=P/B(K) and C(K)=P-A(K)*B(K), P = 2**31 -1
+
+        integer (kind=4)::k
+        integer (kind=4)::x1,x2,p,q,r,y
+        integer (kind=4), dimension(25)::b,c,a
+
+        data b/26403,33236,36673,40851,43693,27149,33986,36848,40961, &
+               44314,29812,34601,37097,42174,44530,30229,36098,37877, &
+               42457,45670,31332,36181,39613,43199,46338/
+        data a/81334,64613,58557,52568,49149,79099,63187,58279,52427, &
+               48460,72034,62064,57888,50919,48225,71040,59490,56696, &
+               50580,47021,68539,59353,54211,49711,46343/
+        data c/22045, 5979,22786,28279,16390,24896,10265,19055,21300, &
+               27207, 6039, 7183,12511,25741,24397,15487,13627, 9255, &
+                8587,34577,19699,32754,23304,18158,41713/
+        data p/2147483647/
+
+        q=x1/a(k)
+        r=mod(x1,a(k))
+        y=b(k)*r-c(k)*q
+        if(y.ge.x2-p)then
+          y=y-x2
+        else
+          y=y+(p-x2)
+        endif
+        if(y.lt.0)y=y+p
+        x1=x2
+        x2=y
+
+      end subroutine rand2
+
+      end subroutine sampler
+
+
+
+      subroutine unpack(vec,words_per_row,t_out,n,k)
+
+
+      integer(kind=4) words_per_row,n,k
+      integer(kind=4),dimension(n*words_per_row)::vec
+      integer(kind=4) i,j,it,ib,ie,ioff,iw ! iw declared explicitly (was implicitly typed)
+      ! matrix t_in is not needed
+      !logical(kind=1),dimension(n,k) :: t_in
+      integer(kind=4),dimension(n,k) :: t_out
+
+      t_out=0 ! initialize t_out
+      ioff=0
+      do i=1,n
+        ib=1
+
+        do iw=1,words_per_row
+          ioff=ioff+1
+          ie=min(ib+31,k)
+          it=-1
+          do j=ib,ie
+            it=it+1
+            !t_in(i,j)=btest(vec(ioff),it)
+            !replace the preceding statement by
+            if(btest(vec(ioff),it)) t_out(i,j)=1
+          end do
+          ib=ie+1
+        end do
+
+      end do
+
+      end subroutine unpack
diff --git a/src/components.c b/src/components.c
old mode 100755
new mode 100644
diff --git a/src/components.h b/src/components.h
old mode 100755
new mode 100644
diff --git a/src/geodist.c b/src/geodist.c
old mode 100755
new mode 100644
diff --git a/src/geodist.h b/src/geodist.h
old mode 100755
new mode 100644
diff --git a/vignettes/UCML.pdf b/vignettes/UCML.pdf
new file mode 100644
index 0000000..0b4d2ee
Binary files /dev/null and b/vignettes/UCML.pdf differ
diff --git a/vignettes/eRm.Rnw b/vignettes/eRm.Rnw
new file mode 100644
index 0000000..6570817
--- /dev/null
+++ b/vignettes/eRm.Rnw
@@ -0,0 +1,866 @@
+%\VignetteIndexEntry{eRm Basics}
+\SweaveOpts{keep.source=FALSE}
+\documentclass[10pt,nojss,nofooter,fleqn]{jss}
+
+\usepackage[utf8]{inputenx}
+
+\usepackage[noae]{Sweave}
+
+\usepackage{amsmath,amssymb,amsfonts}
+\usepackage{lmodern}
+\usepackage[nosf,nott,notextcomp,largesmallcaps,easyscsl]{kpfonts}
+\usepackage{booktabs}
+\usepackage{bm}
+\usepackage{microtype}
+
+\makeatletter%
+\let\P\@undefined%
+\makeatother%
+\DeclareMathOperator{\P}{P}
+
+\newcommand{\gnuR}{\proglang{R}}
+\newcommand{\eRm}{\pkg{eRm}}
+\newcommand{\ie}{i.\,e.}
+\newcommand{\eg}{e.\,g.}
+
+\newcommand{\acronym}[1]{\textsc{\lowercase{#1}}} % from Rd.sty
+
+\author{Patrick Mair\\Wirtschaftsuniversität Wien\And%
+Reinhold Hatzinger\\Wirtschaftsuniversität Wien\And%
+Marco J.\ Maier\\Wirtschaftsuniversität Wien}
+\Plainauthor{Patrick Mair, Reinhold Hatzinger, Marco J. Maier}
+
+\title{Extended Rasch Modeling: The \gnuR\ Package \eRm}
+\Plaintitle{Extended Rasch Modeling: The R Package eRm}
+\Shorttitle{The \gnuR\ Package \eRm}
+
+\Abstract{\noindent%
+This package vignette is an update and extension of the papers published in the Journal of Statistical Software (special issue on Psychometrics, volume 20) and Psychology Science \citep{Mair+Hatzinger:2007, Mair+Hatzinger:2007b}.
+Since the publication of these papers, various extensions and additional features have been incorporated into the package.
+
+We start with a methodological introduction to extended Rasch models followed by a general program description and application topics.
+The package allows for the computation of simple Rasch models, rating scale models, partial credit models and linear extensions thereof.
+Incorporation of such linear structures allows for modeling the effects of covariates and enables the analysis of repeated categorical measurements.
+Item parameter estimation is performed using \acronym{CML}; for the person parameters we use joint \acronym{ML}.
+These estimation routines work for incomplete data matrices as well.
+Based on these estimators, item-wise and global (parametric and non-parametric) goodness-of-fit statistics are described and various plots are presented.}
+%%% ADD: LLRA
+%%% ADD: NP-Tests
+
+\Keywords{\eRm\ Package, Rasch Model (\acronym{RM}), \acronym{LLTM}, \acronym{RSM}, \acronym{LRSM}, \acronym{PCM}, \acronym{LPCM}, \acronym{LLRA}, \acronym{CML} estimation}
+
+\begin{document}
+%
+%
+%
+%
+%
+%\citep{RuschMaierHatzinger:2013:LLRA} %%% LLRA Proceedings
+%\citep{HatzingerRusch:2009:IRTwLLRA} %%% PSQ
+%\citep{Ponocny:2002:ApplicabilitysomeIRT}
+%
+%
+%
+%
+%
+\section{Introduction}
+\citet{Ro:99} claimed in his article that ``even though the Rasch model has been existing for such a long time, 95\% of the current tests in psychology are still constructed by using methods from classical test theory'' (p.\ 140).
+Basically, he gives the following reasons why the Rasch model \acronym{(RM)} is rarely used: The Rasch model in its original form \citep{Ra:60}, which was limited to dichotomous items, is arguably too restrictive for practical testing purposes.
+Thus, researchers should focus on extended Rasch models.
+In addition, Rost argues that there is a lack of user-friendly software for the computation of such models.
+Hence, there is a need for a comprehensive, user-friendly software package.
+Corresponding recent discussions can be found in \citet{Kub:05} and \citet{Bor:06}.
+
+In addition to the basic \acronym{RM}, the models that can be computed with the \eRm\ package are: the linear logistic test model \citep{Scheib:72}, the rating scale model \citep{And:78}, the linear rating scale model \citep{FiPa:91}, the partial credit model \citep{Mast:82}, and the linear partial credit model \citep{GlVe:89,FiPo:94}.
+These models and their main characteristics are presented in Section \ref{sec:erm}.
+A more recent addition to \eRm\ has been the linear logistic test model with relaxed assumptions \citep{Fisch:95b,FischPonocny:95}, which provides a very flexible framework with a wide range of applications.
+%%% ADD: ref to sec
+
+Concerning estimation of parameters, all models have an important feature in common: Conditional maximum likelihood \acronym{(CML)} estimation, which leads to separability of item and person parameters.
+Item parameters $\beta$ can be estimated without estimating the person parameters $\theta$ by conditioning the likelihood on the sufficient person raw score.
+\acronym{CML} estimation is described in Section \ref{sec:cml}.
+
+Several diagnostic tools and tests to evaluate model fit are presented in Section \ref{Gof}.
+
+In Section \ref{sec:pack}, the corresponding implementation in \gnuR\ \citep{gnuR} is described by means of several examples.
+The \eRm\ package uses a design matrix approach which allows the user to reparameterize the item parameters in order to model common characteristics of the items, or to impose repeated measurement designs as well as group contrasts.
+By combining these types of contrasts, item parameters may differ over time with respect to certain subgroups.
+To illustrate the flexibility of \eRm, some examples are given to show how suitable design matrices can be constructed.
+%
+%
+%
+%
+%----------------- end introduction ----------------
+\section{Extended Rasch models}
+\label{sec:erm}
+%
+%
+%
+\subsection{General expressions}
+Shortly after the first publication of the basic Rasch model \citep{Ra:60}, the author worked on polytomous generalizations which can be found in \citet{Ra:61}.
+\citet{And:95} derived the representations below which are based on Rasch's general expression for polytomous data.
+The data matrix is denoted as $\bm{X}$ with the persons $v$ in the rows and items $i$ in the columns.
+In total there are $v=1,\,\ldots,\,n$ persons and $i=1,\,\ldots,\,k$ items.
+A single element in the data matrix $\bm{X}$ is expressed as $x_{vi}$.
+Furthermore, each item $i$ has a certain number of response categories, denoted by $h=0,\,\ldots,\,m_i$.
+The corresponding probability of response $h$ on item $i$ can be derived in terms of the following two expressions \citep{And:95}:
+\begin{equation}\label{eq1}
+  \P(X_{vi}=h)=\frac{\exp[\phi_h(\theta_v+\beta_i)+\omega_h]}{\sum_{l=0}^{m_i} \exp[\phi_l (\theta_v+\beta_i)+\omega_l]} %%%Q: X_vi or x_vi?
+\end{equation}
+or
+\begin{equation}\label{eq2}
+  \P(X_{vi}=h)=\frac{\exp[\phi_h \theta_v+\beta_{ih}]}{\sum_{l=0}^{m_i} \exp[\phi_l \theta_v+\beta_{il}]}.
+\end{equation}
+Here, $\phi_h$ are scoring functions for the item parameters, $\theta_v$ are the uni-dimensional person parameters, and $\beta_i$ are the item parameters.
+In Equation \ref{eq1}, $\omega_h$ corresponds to category parameters, whereas in Equation \ref{eq2} $\beta_{ih}$ are the item-category parameters.
+The meaning of these parameters will be discussed in detail below.
+Within the framework of these two equations, numerous models have been suggested that retain the basic properties of the Rasch model so that \acronym{CML} estimation can be applied.
+%
+%
+%
+\subsection{Representation of extended Rasch models}
+\label{Rep}
+For the ordinary Rasch model for dichotomous items, Equation \ref{eq1} reduces to
+\begin{equation}\label{eq:rasch}
+  \P(X_{vi}=1)=\frac{\exp(\theta_v - \beta_i)}{1+\exp(\theta_v-\beta_i)}.
+\end{equation}
+The main assumptions, which hold as well for the generalizations presented in this paper, are: uni-dimensionality of the latent trait, sufficiency of the raw score, local independence, and parallel item characteristic curves (\acronym{ICC}s).
+Corresponding explanations can be found, e.g., in \citet{Fisch:74} and mathematical derivations and proofs in \citet{Fisch:95a}.
+\begin{figure}[hbt]\centering%
+\includegraphics[height=60mm, width=40mm]{modelhierarchy.pdf}
+\caption{Model hierarchy}
+\label{fig1}
+\end{figure}
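+
+As a minimal illustration of the dichotomous \acronym{RM} (a sketch, not evaluated here), the model can be fitted to a 0/1 data matrix such as the artificial \code{xmpl} data shipped with \eRm:
+<<eval=FALSE>>=
+library("eRm")
+data("xmpl")       # 300 x 30 binary example matrix
+res <- RM(xmpl)    # CML estimation of the item parameters
+summary(res)       # eta/beta estimates with standard errors
+@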
+
+For dichotomous items, \citet{Scheib:72} proposed the (even more restricted) linear logistic test model \acronym{(LLTM)}, later formalized by \citet{Fisch:73}, by splitting up the item parameters into the linear combination
+
+\begin{equation}
+\label{eq4}
+  \beta_i=\sum_{j=1}^p w_{ij} \eta_j.
+\end{equation}
+
+\citet{Scheib:72} explained the dissolving process of items in a test for logics (``Mengenrechentest'') by so-called ``cognitive operations'' $\eta_j$ such as negation, disjunction, conjunction, sequence, intermediate result, permutation, and material.
+Note that the weights $w_{ij}$ for item $i$ and operation $j$ have to be fixed a priori.
+Further elaborations about the cognitive operations can be found in \citet[p.~361ff.]{Fisch:74}.
+Thus, from this perspective the \acronym{LLTM} is more parsimonious than the Rasch model.
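+
+As a sketch of Equation \ref{eq4} (not evaluated here; the weights are invented for illustration and \code{X} stands for a dichotomous 0/1 data matrix with five items), such a decomposition can be specified via a fixed weight matrix $\bm{W}$:
+<<eval=FALSE>>=
+W <- matrix(c(1,2,1,3,2,             # weights w_{i1} of operation 1 for items 1-5
+              2,1,1,1,1), ncol = 2)  # weights w_{i2} of operation 2 for items 1-5
+res <- LLTM(X, W = W)                # X: placeholder 0/1 data matrix (5 items)
+summary(res)                         # estimates of eta_1 and eta_2
+@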
+
+However, there exists another way to look at the \acronym{LLTM}: as a generalization of the basic Rasch model in terms of repeated measures and group contrasts.
+It should be noted that both types of reparameterization also apply to the linear rating scale model \acronym{(LRSM)} and the linear partial credit model \acronym{(LPCM)} with respect to the basic rating scale model \acronym{(RSM)} and the partial credit model \acronym{(PCM)} presented below.
+Concerning the \acronym{LLTM}, the possibility to use it as a generalization of the Rasch model for repeated measurements was already introduced by \citet{Fisch:74}.
+Over the intervening years this suggestion has been further elaborated.
+\citet{Fisch:95b} discussed certain design matrices which will be presented in Section \ref{sec:design} and on the basis of examples in Section \ref{sec:pack}.
+
+At this point we will focus on a simple polytomous generalization of the Rasch model, the \acronym{RSM} \citep{And:78}, where each item $I_i$ must have the same number of categories.
+Pertaining to Equation \ref{eq1}, $\phi_h$ may be set to $h$ with $h=0,\,\ldots,\,m$.
+Since in the \acronym{RSM} the number of item categories is constant, $m$ is used instead of $m_i$.
+Hence, it follows that
+\begin{equation}\label{eq5}
+  \P(X_{vi}=h)=\frac{\exp[h(\theta_v+\beta_i)+\omega_h]}{\sum_{l=0}^m \exp[l(\theta_v+ \beta_i)+\omega_l]},
+\end{equation}
+with $k$ item parameters $\beta_1,\,\ldots,\,\beta_k$ and $m+1$ category parameters $\omega_0,\,\ldots,\,\omega_m$.
+This parameterization implies a scoring of the response categories $C_h$ that is constant across the single items.
+Again, the item parameters can be split up in a linear combination as in Equation \ref{eq4}.
+This leads to the \acronym{LRSM} proposed by \citet{FiPa:91}.
+
+Finally, the \acronym{PCM} developed by \citet{Mast:82} and its linear extension, the \acronym{LPCM} \citep{FiPo:94}, are presented.
+The \acronym{PCM} assigns one parameter $\beta_{ih}$ to each $I_i \times C_h$ combination for $h=0,\,\ldots,\,m_i$.
+Thus, the constant scoring property need not hold across the items and, in addition, the items can have different numbers of response categories denoted by $m_i$.
+Therefore, the \acronym{PCM} can be regarded as a generalization of the \acronym{RSM} and the probability for a response of person $v$ on category $h$ (item $i$) is defined as
+\begin{equation}\label{eq6}
+  \P(X_{vih}=1)=\frac{\exp[h\theta_v + \beta_{ih}]}{\sum_{l=0}^{m_i}\exp[l\theta_v + \beta_{il}]}.
+\end{equation}
+It is obvious that (\ref{eq6}) is a simplification of (\ref{eq2}) in terms of $\phi_h = h$.
+As for the \acronym{LLTM} and the \acronym{LRSM}, the \acronym{LPCM} is defined by reparameterizing the item parameters of the basic model, i.e.,
+\begin{equation}\label{eq:lpcmeta}
+  \beta_{ih}=\sum_{j=1}^p w_{ihj}\eta_j.
+\end{equation}
+These six models constitute a hierarchical order as displayed in Figure \ref{fig1}.
+This hierarchy is the base for a unified \acronym{CML} approach presented in the next section.
+It is outlined again that the linear extension models can be regarded either as generalizations or as more restrictive formulations pertaining to the underlying base model.
+The hierarchy for the basic models is straightforward: the \acronym{RM} allows only items with two categories, and thus each item is represented by one parameter $\beta_i$.
+The \acronym{RSM} allows for more than two (ordinal) categories each represented by a category parameter $\omega_h$.
+Due to identifiability issues, $\omega_0$ and $\omega_1$ are restricted to 0.
+Hence, the \acronym{RM} can be seen as a special case of the \acronym{RSM} whereas, the \acronym{RSM} in turn, is a special case of the \acronym{PCM}.
+The latter model assigns the parameter $\beta_{ih}$ to each $I_i \times C_h$ combination.
+
+To conclude, the most general model is the \acronym{LPCM}.
+All other models can be considered as simplifications of Equation \ref{eq6} combined with Equation \ref{eq:lpcmeta}.
+As a consequence, once an estimation procedure is established for the \acronym{LPCM}, this approach can be used for any of the remaining models.
+This is what we quote as \textit{unified \acronym{CML} approach}.
+The corresponding likelihood equations follow in Section \ref{sec:cml}.
+%
+%
+%
+\subsection{The concept of virtual items}
+\label{sec:design}
+When operating with longitudinal models, the main research question  is whether an individual's test performance changes over time.
+The most intuitive way would be to look at the shift in ability $\theta_v$ across time points.
+Such models are presented, e.g., in \citet{Mi:85}, \citet{Glas:1992}, and discussed by \citet{Ho:95}.
+
+Yet there is another way to look at time-dependent changes, as presented in \citet[p.~158ff.]{Fisch:95b}: the person parameters are fixed over time and, instead, the item parameters change.
+The basic idea is that an item $I_i$ presented at two different time points to the same person $S_v$ is regarded as a pair of \textit{virtual items}.
+Within the framework of extended Rasch models, any change in $\theta_v$ occurring between the testing occasions can be described without loss of generality as a change of the item parameters, instead of describing change in terms of the person parameter.
+Thus, with only two measurement points, $I_i$ with the corresponding parameter $\beta_i$ generates two virtual items $I_r$ and $I_s$ with associated item parameters $\beta^{\ast}_r$ and $\beta^{\ast}_s$.
+For the first measurement point $\beta^{\ast}_r=\beta_i$, whereas for the second $\beta^{\ast}_s=\beta_i+\tau$.
+In this linear combination the $\beta^{\ast}$-parameters are composed additively by means of the real item parameters $\beta$ and the treatment effects $\tau$.
+This concept extends to an arbitrary number of time points or testing occasions.
+
+Correspondingly, for each measurement point $t$ we have a vector of \textit{virtual item parameters} $\bm{\beta}^{\ast(t)}$ of length $k$.
+These are linear reparameterizations of the original $\bm{\beta}^{(t)}$, and thus the \acronym{CML} approach can be used for estimation.
+In general, for a simple \acronym{LLTM} with two measurement points the design matrix $\bm{W}$ is of the form as given in Table \ref{tab1}.
+\begin{table}\centering%
+  $\begin{array}{c|c|rrrr|r}
+  & & \eta_1 & \eta_2 & \hdots & \eta_k & \eta_{k+1}\\
+  \hline
+  \textrm{Time 1} & \beta_1^{\ast(1)} & 1 & 0 & 0 & 0 & 0\\
+  & \beta_2^{\ast(1)} & 0 & 1 & 0 & 0 & 0\\
+  & \vdots        &   &   & \ddots& & \vdots\\
+  & \beta_{k}^{\ast(1)} & 0 & 0 & 0 & 1 & 0\\
+  \hline
+  \textrm{Time 2} & \beta_{k+1}^{\ast(2)} & 1 & 0 & 0 & 0 & 1\\
+  & \beta_{k+2}^{\ast(2)} & 0 & 1 & 0 & 0 & 1\\
+  & \vdots        &   &   & \ddots& & \vdots\\
+  & \beta_{2k}^{\ast(2)} & 0 & 0 & 0 & 1 & 1\\
+  \end{array}$
+  \caption{A design matrix for an \acronym{LLTM} with two timepoints.}
+  \label{tab1}
+\end{table}
+
+The parameter vector $\bm{\beta}^{\ast(1)}$ represents the item parameters for the first test occasion, $\bm{\beta}^{\ast(2)}$ the parameters for the second occasion.
+It might be of interest whether these vectors differ.
+The corresponding trend contrast is $\eta_{k+1}$.
+Due to this contrast, the number of original $\beta$-parameters is doubled by introducing the $2k$ virtual item parameters.
+If we assume a constant shift for all item parameters, it is only necessary to estimate $\hat{\bm{\eta}}'=(\hat{\eta}_1,\,\ldots,\,\hat{\eta}_{k+1})$ where $\hat{\eta}_{k+1}$ gives the amount of shift.
+This suffices because, according to Equation \ref{eq4}, the vector $\hat{\bm{\beta}}^\ast$ is just a linear combination of $\hat{\bm{\eta}}$.
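+
+Such a design matrix can be generated in \gnuR\ along the following lines (a sketch for illustration only):
+<<eval=FALSE>>=
+k <- 3                                     # k: hypothetical number of items
+W <- cbind(kronecker(rep(1, 2), diag(k)),  # one identity block per time point
+           rep(c(0, 1), each = k))         # trend contrast eta_{k+1}
+W
+@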
+
+As mentioned in the former section, when using models with linear extensions it is possible to impose group contrasts.
+By doing this, one allows that the item difficulties are different across subgroups.
+However, this is possible only for models with repeated measurements and virtual items since otherwise the introduction of a group contrast leads to overparameterization and the group effect cannot be estimated by using \acronym{CML}.
+
+Table \ref{tab2} gives an example for a repeated measurement design where the effect of a treatment is to be evaluated by comparing item difficulties regarding a control and a treatment group.
+The number of virtual parameters is doubled compared to the model matrix given in Table \ref{tab1}.
+\begin{table}[h]\centering%
+  $\begin{array}{c|c|c|rrrr|rrr}
+  & & & \eta_1 & \eta_2 & \hdots & \eta_k & \eta_{k+1} & \eta_{k+2} \\
+  \hline
+  \textrm{Time 1} & \textrm{Group 1} & \beta_1^{\ast(1)} & 1 & 0 & 0 & 0 & 0 &  0\\
+  & & \beta_2^{\ast(1)} & 0 & 1 & 0 & 0 & 0&  0\\
+  & & \vdots        &   &   & \ddots& &\vdots &\vdots\\
+  & & \beta_{k}^{\ast(1)} & 0 & 0 & 0 & 1 & 0 & 0\\
+  \cline{2-9}
+  & \textrm{Group 2} & \beta_{k+1}^{\ast(1)} & 1 & 0 & 0 & 0 & 0 & 0\\
+  & & \beta_{k+2}^{\ast(1)} & 0 & 1 & 0 & 0 & 0 & 0\\
+  & & \vdots        &   &   & \ddots& &\vdots & \vdots\\
+  & & \beta_{2k}^{\ast(1)} & 0 & 0 & 0 & 1 & 0 & 0\\
+  \hline
+  \textrm{Time 2} & \textrm{Group 1} & \beta_1^{\ast(2)} & 1 & 0 & 0 & 0 & 1 & 0\\
+  & & \beta_2^{\ast(2)} & 0 & 1 & 0 & 0 & 1 & 0\\
+  & & \vdots        &   &   & \ddots& &\vdots &\vdots\\
+  & & \beta_{k}^{\ast(2)} & 0 & 0 & 0 & 1 & 1 & 0\\
+  \cline{2-9}
+  & \textrm{Group 2} & \beta_{k+1}^{\ast(2)} & 1 & 0 & 0 & 0 & 1 & 1\\
+  & & \beta_{k+2}^{\ast(2)} & 0 & 1 & 0 & 0 & 1 & 1\\
+  & & \vdots        &   &   & \ddots& &\vdots  & \vdots\\
+  & & \beta_{2k}^{\ast(2)} & 0 & 0 & 0 & 1 & 1 & 1\\
+  \end{array}$
+  \caption{Design matrix for a repeated measurements design with treatment and control group.}
+  \label{tab2}
+\end{table}
+
+Again, $\eta_{k+1}$ is the parameter that refers to the time contrast, and $\eta_{k+2}$ is a group effect within measurement point 2.
+More examples are given in Section \ref{sec:pack} and further explanations can be found in \citet{Fisch:95b},
+\citet{FiPo:94}, and in the software manual for the LPCM-Win program by \citet{FiPS:98}.
+
+By introducing the concept of virtual persons, \pkg{eRm} allows for the computation of the linear logistic test model with relaxed assumptions \citep[\acronym{LLRA};][]{Fisch:77}.
+Corresponding explanations will be given in a subsequent version of this vignette.
+%
+%
+%
+%
+%------------------------ end extended Rasch models --------------------------
+\section{Estimation of item and person parameters}
+\label{sec:cml}
+%
+%
+%
+\subsection[CML for item parameter estimation]{\protect\acronym{CML} for item parameter estimation}
+The main idea behind the \acronym{CML} estimation is that the person's raw score $r_v=\sum_{i=1}^k x_{vi}$ is a sufficient statistic.
+Thus, by conditioning the likelihood on $\bm{r}'=(r_1,\,\ldots,\,r_n)$, the person parameters $\bm{\theta}$, which in this context are nuisance parameters, vanish from the likelihood equation, leading to consistent estimates of the item parameters $\hat{\bm{\beta}}$.
+
+Some restrictions have to be imposed on the parameters to ensure identifiability.
+This can be achieved, e.g., by setting certain parameters to zero depending on the model.
+In the Rasch model one item parameter has to be fixed to 0.
+This parameter may be considered as baseline difficulty.
+In addition, in the \acronym{RSM} the category parameters $\omega_0$ and $\omega_1$ are also constrained to 0.
+In the \acronym{PCM} all parameters representing the first category, i.e., $\beta_{i0}$ with $i=1,\ldots,k$, and one additional item-category parameter, e.g., $\beta_{11}$ have to be fixed.
+For the linear extensions it holds that the $\beta$-parameters that are fixed within a certain condition (e.g., first measurement point, control group etc.) are also constrained in the other conditions (e.g., second measurement point, treatment group etc.).
+
+At this point, for the \acronym{LPCM} the likelihood equations with corresponding first and second order derivatives are presented (i.e., \textit{unified \acronym{CML} equations}).
+In the first version of the \pkg {eRm} package numerical approximations of the Hessian matrix are used.
+However, to ensure numerical accuracy and to speed up the estimation process, it is planned to implement the analytical solution as given below.
+
+The conditional log-likelihood equation for the \acronym{LPCM} is
+
+\begin{equation}
+\label{eq:cmll}
+    \log L_c = \sum_{i=1}^k \sum_{h=1}^{m_i} x_{+ih} \sum_{j=1}^p w_{ihj} \eta_j - \sum_{r=1}^{r_{max}} n_r \log \gamma_r.
+\end{equation}
+
+The maximal raw score is denoted by $r_{max}$, whereas the number of subjects with raw score $r$ is denoted by $n_r$.
+Alternatively, by going down to an individual level, the last sum over $r$ can be replaced by $\sum_{v=1}^n \log \gamma_{r_v}$.
+It is straightforward to show that the \acronym{LPCM}, as well as the other extended Rasch models, defines an exponential family \citep{And:83}.
+Thus, the raw score $r_v$ is minimally sufficient for $\theta_v$ and the item totals $x_{+ih}$ are minimally sufficient for $\beta_{ih}$.
+
+Crucial expressions are the $\gamma$-terms which are known as \textit{elementary symmetric functions}.
+More details about these terms are given in the next section.
+However, in the \pkg {eRm} package the numerically stable \textit{summation algorithm} as suggested by \citet{And:72} is implemented.
+\citet{FiPo:94} adopted this algorithm for the \acronym{LPCM} and devised also the first order derivative for computing the corresponding derivative of $\log L_c$:
+\begin{equation}\label{eq:dcml}
+  \frac{\partial\log L_c}{\partial\eta_a} = \sum_{i=1}^k \sum_{h=1}^{m_i} w_{iha}\left(x_{+ih} - \epsilon_{ih} \sum_{r=1}^{r_{max}} n_r \frac{ \gamma_{r}^{(i)}}{\gamma_r}\right).
+\end{equation}
+It is important to mention that for the \acronym{CML}-representation, the multiplicative Rasch expression is used throughout Equations \ref{eq1} to \ref{eq:lpcmeta}, i.e., $\epsilon_i=\exp(-\beta_i)$ for the item parameter.
+Therefore, $\epsilon_{ih}$ corresponds to the reparameterized item $\times$ category parameter, with $\epsilon_{ih} > 0$.
+Furthermore, $\gamma_{r}^{(i)}$ are the first order derivatives of the $\gamma$-functions with respect to item $i$.
+The index $a$ in $\eta_a$ indicates that the first derivative is taken with respect to the $a$-th parameter.
+
+For the second order derivative of $\log L_c$, two cases have to be distinguished: the derivatives for the off-diagonal elements and the derivatives for the main diagonal elements.
+The item categories with respect to the item index $i$ are coded with $h_i$, and those referring to item $l$ with $h_l$.
+The second order derivatives of the $\gamma$-functions with respect to items $i$ and $l$ are denoted by $\gamma_r^{(i,l)}$.
+The corresponding likelihood expressions are
+\begin{align}
+\label{eq:2dcml}
+\frac{\partial\log L_c}{\partial\eta_a \eta_b} = & -\sum_{i=1}^k \sum_{h_i=1}^{m_i} w_{ih_ia}w_{ih_ib}\epsilon_{ih_i} \sum_{r=1}^{r_{max}} n_r \frac{\log \gamma_{r-h_i}}{\gamma_r}\\
+& -\sum_{i=1}^k \sum_{h_i=1}^{m_i} \sum_{l=1}^k \sum_{h_l=1}^{m_l} w_{ih_ia}w_{lh_lb} \left[\epsilon_{ih_i} \epsilon_{lh_l} \left( \sum_{r=1}^{r_{max}} n_r \frac{\gamma_{r}^{(i)}\gamma_{r}^{(l)}}{\gamma_r^2} - \sum_{r=1}^{r_{max}} n_r \frac{\gamma_{r}^{(i,l)}}{\gamma_r}\right)\right]
+\notag
+\end{align}
+for $a\neq b$, and
+\begin{align}
+\label{eq:2dcmlab}
+\frac{\partial\log L_c}{\partial\eta_a^2} = & -\sum_{i=1}^k \sum_{h_i=1}^{m_i} w_{ih_ia}^2 \epsilon_{ih_i} \sum_{r=1}^{r_{max}} n_r \frac{\log \gamma_{r-h_i}}{\gamma_r}\\
+& -\sum_{i=1}^k \sum_{h_i=1}^{m_i} \sum_{l=1}^k \sum_{h_l=1}^{m_l} w_{ih_ia}w_{lh_la}\epsilon_{ih_i} \epsilon_{lh_l}\sum_{r=1}^{r_{max}} n_r \frac{\gamma_{r-h_i}^{(i)}\gamma_{r-h_l}^{(l)}}{\gamma_r^2}
+\notag
+\end{align}
+for $a=b$.
+
+To solve the likelihood equations with respect to $\hat{\bm{\eta}}$, a Newton-Raphson algorithm is applied.
+The update within each iteration step $s$ is performed by
+\begin{equation}\label{eq:iter}
+  \hat{\bm{\eta}}_s=\hat{\bm{\eta}}_{s-1}-\bm{H}_{s-1}^{-1}\bm{\delta}_{s-1}.
+\end{equation}
+The starting values are $\hat{\bm{\eta}}_0=\bm{0}$.
+$\bm{H}_{s-1}^{-1}$ is the inverse of the Hessian matrix composed of the elements given in Equations \ref{eq:2dcml} and \ref{eq:2dcmlab}, and $\bm{\delta}_{s-1}$ is the gradient at iteration $s-1$ as specified in Equation \ref{eq:dcml}.
+The iteration stops if the likelihood difference $\left|\log L_c^{(s)} - \log L_c^{(s-1)} \right|\leq \varphi$, where $\varphi$ is a predefined (small) convergence criterion.
+Note that in the current version (\Sexpr{packageDescription("eRm", fields = "Version")}) $\bm{H}$ is approximated numerically by using the \code{nlm} Newton-type algorithm provided in the \pkg{stats} package.
+The analytical solution as given in Equation \ref{eq:2dcml} and \ref{eq:2dcmlab} will be implemented in the subsequent version of \pkg{eRm}.
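+
+Schematically, the iteration of Equation \ref{eq:iter} can be sketched as follows (pseudo-code only; \code{cloglik()}, \code{gradient()}, and \code{hessian()} are hypothetical placeholders for Equations \ref{eq:cmll}, \ref{eq:dcml}, and \ref{eq:2dcml}/\ref{eq:2dcmlab}, with \code{p} and \code{phi} as in the text):
+<<eval=FALSE>>=
+eta <- rep(0, p)                  # starting values eta_0 = 0; p: number of etas
+repeat {
+  eta_new <- eta - solve(hessian(eta), gradient(eta))    # Newton-Raphson update
+  if (abs(cloglik(eta_new) - cloglik(eta)) <= phi) break # convergence criterion
+  eta <- eta_new
+}
+@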
+%
+%
+%
+\subsection[Mathematical properties of the CML estimates]{Mathematical properties of the \acronym{CML} estimates}
+\label{sec:mpcml}
+A variety of estimation approaches for \acronym{IRT} models in general and for the Rasch model in particular are available: the \emph{joint maximum likelihood} \acronym{(JML)} estimation as proposed by \citet{Wright+Panchapakesan:1969}, which is not recommended since the estimates are not consistent \citep[see e.g.][]{Haberman:77}.
+The basic reason is that the person parameters $\bm{\theta}$ are nuisance parameters; the larger the sample size, the larger the number of parameters.
+
+A well-known alternative is the \emph{marginal maximum likelihood} \acronym{(MML)} estimation \citep{Bock+Aitkin:1981}: A distribution $g(\theta)$ for the person parameters is assumed and the resulting situation corresponds to a mixed-effects \acronym{ANOVA}: Item difficulties can be regarded as fixed effects and person abilities as random effects.
+Thus, \acronym{IRT} models fit into the framework of \emph{generalized linear mixed models} \acronym{(GLMM)} as elaborated in \citet{deBoeck+Wilson:2004}.
+By integrating over the ability distribution the random nuisance parameters can be removed from the likelihood equations.
+This leads to consistent estimates of the item parameters.
+Further discussions of the \acronym{MML} approach with respect to the \acronym{CML} method will follow.
+
+For the sake of completeness, some other methods for the estimation of the item parameters are the following: \citet{CAnd:07} propose a Pseudo-\acronym{ML} approach, \citet{Molenaar:1995} and \citet{Linacre:2004} give an overview of various (heuristic) non-\acronym{ML} methods, Bayesian techniques can be found in \citet[Chapter 7]{BaKi:04}, and for non-parametric approaches we refer to \citet{LeVe:86}.
+
+However, back to \acronym{CML}, the main idea behind this approach is the assumption that the raw score $r_v$ is a minimal sufficient statistic for $\theta_v$.
+Starting from the equivalent multiplicative expression of Equation \ref{eq1} with $\xi_v=\exp(\theta_v)$ and $\epsilon_i=\exp(-\beta_i)$, i.e.,
+\begin{equation}\label{eq7}
+  \P(X_{vi}=1)=\frac{\xi_v \epsilon_i}{1+\xi_v \epsilon_i},
+\end{equation}
+the following likelihood for the response pattern $\bm{x}_v$ for a certain subject $v$ results:
+\begin{equation}\label{eq8}
+  \P(\bm{x}_v|\xi_v,\bm{\epsilon})=\prod_{i=1}^k \frac{(\xi_v \epsilon_i)^{x_{vi}}}{1+\xi_v \epsilon_i}=
+  \frac{{\xi_v}^{r_v} \prod_{i=1}^k {\epsilon_i}^{x_{vi}}}{\prod_{i=1}^k (1+\xi_v \epsilon_i)}.
+\end{equation}
+Using the notation $\bm{y}=(y_1,\ldots ,y_k)$ for all possible response patterns with $\sum_{i=1}^k y_i=r_v$,  the probability for a fixed raw score $r_v$ is
+\begin{equation}\label{eq9}
+  \P(r_v|\xi_v,\bm{\epsilon})=\sum_{\bm{y}|r_v} \prod_{i=1}^k \frac{(\xi_v \epsilon_i)^{y_{i}}}{1+\xi_v \epsilon_i}=\frac{{\xi_v}^{r_v} \sum_{\bm{y}|r_v} \prod_{i=1}^k {\epsilon_i}^{y_{i}}}{\prod_{i=1}^k (1+\xi_v \epsilon_i)}.
+\end{equation}
+The crucial term with respect to numerical solutions of the likelihood equations is the second term in the numerator:
+\begin{equation}\label{eq:gamma}
+  \gamma_{r_v}(\bm{\epsilon}) \equiv \sum_{\bm{y}|r_v} \prod_{i=1}^k {\epsilon_i}^{y_{i}}.
+\end{equation}
+These are the \emph{elementary symmetric functions} (of order $r_v$).
+An overview of efficient computational algorithms and corresponding simulation studies can be found in \citet{Li:94}.
+The \pkg{eRm} package uses the summation algorithm as proposed by \citet{And:72}.
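+
+For the dichotomous case the $\gamma_r$ are simply the coefficients of the polynomial $\prod_{i=1}^k (1+\epsilon_i x)$, which the summation algorithm builds up one item at a time. A self-contained sketch (for illustration only, independent of the actual \eRm\ internals):
+<<eval=FALSE>>=
+esf <- function(eps) {                # eps: item easiness parameters epsilon_i
+  gam <- c(1, numeric(length(eps)))   # gamma_0 = 1 for the empty product
+  for (e in eps)                      # include one further item per pass
+    gam[-1] <- gam[-1] + e * gam[-length(gam)]
+  gam                                 # gamma_0, ..., gamma_k
+}
+esf(c(1, 0.5, 2))                     # returns 1, 3.5, 3.5, 1
+@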
+
+Finally, by collecting the different raw scores into the vector $\bm{r}$ the conditional probability of observing response pattern $\bm{x}_v$ with given raw score $r_v$ is
+\begin{equation}\label{eq:xraw}
+  \P(\bm{x}_v|r_v,\bm{\epsilon})=\frac{\P(\bm{x}_v|\xi_v,\bm{\epsilon})}{\P(r_v|\xi_v,\bm{\epsilon})} \,.
+\end{equation}
+By taking the product over the persons (independence  assumption), the (conditional) likelihood expression for the whole sample becomes
+\begin{equation}\label{eq:likall}
+  L(\bm{\epsilon}|\bm{r})=\P(\bm{x}|\bm{r},\bm{\epsilon})=\prod_{v=1}^n \frac{\prod_{i=1}^k {\epsilon_i}^{x_{vi}}}{\gamma_{r_v}}.
+\end{equation}
+With respect to raw score frequencies $n_r$ and by reintroducing the $\beta$-parameters, (\ref{eq:likall}) can be reformulated as
+\begin{equation}\label{eq12a}
+  L(\bm{\beta}|\bm{r})= \frac{\exp \left(\sum_{i=1}^k x_{+i}\beta_i \right)}{\prod_{r=0}^k\gamma_r^{n_r}}\,,
+\end{equation}
+where $x_{+i}$ are the item raw scores.
+It is obvious that by conditioning the likelihood on the raw scores $\bm{r}$, the person parameters vanish completely from the expression.
+As a consequence, the parameters $\bm{\hat{\beta}}$ can be estimated without knowledge of the subject's abilities.
+This issue is referred to as \emph{person-free item assessment} and we will discuss this topic within the context of specific objectivity in the next section.
+
+Pertaining to asymptotical issues, it can be shown that  under mild regularity conditions \citep{Pf:94} the \acronym{CML} estimates are consistent for $n\rightarrow \infty$ and $k$ fixed, unbiased, asymptotically efficient, and normally distributed \citep{Andersen:1970}.
+For the computation of a Rasch model, comparatively small samples are sufficient to get reliable estimates \citep{Fischer:1988}.
+Whether the \acronym{MML} estimates are unbiased depends on the correct specification of the ability distribution $g(\theta)$.
+In case of an incorrect assumption, the estimates are biased, which is surely a drawback of this method.
+If $g(\theta)$ is specified appropriately, the \acronym{CML} and \acronym{MML} estimates are asymptotically equivalent \citep{Pf:94}.
+
+\citet{Fischer:1981} elaborates on the conditions for the existence and the uniqueness of the \acronym{CML} estimates.
+The crucial condition for the data matrix is that $\bm{X}$ has to be \emph{well-conditioned}.
+To introduce this issue it is convenient to look at a matrix which is \emph{ill-conditioned}: A matrix is ill-conditioned if there exists a partition of the items into two nonempty subsets such that all subjects of one group responded correctly to items $i+1,\ldots,k$ ($\bm{X}_2$) and all other subjects failed on items $1,\ldots,i$ ($\bm{X}_3$), i.e.,
+\begin{table}[h]\centering%
+\[
+\bm{X}=
+\left(
+\begin{array}{c|c}
+\bm{X}_1 & \bm{X}_2\\
+\hline
+\bm{X}_3 & \bm{X}_4\\
+\end{array}
+\right)
+=
+\left(
+\begin{array}{ccc|ccc}
+& & & 1 & \ldots & 1 \\
+& \bm{X}_1 & & \vdots & \ddots & \vdots \\
+& & & 1 & \ldots & 1 \\
+\hline
+0 & \ldots & 0 & & & \\
+\vdots & \ddots & \vdots & & \bm{X}_4 & \\
+0 & \ldots & 0 & & & \\
+\end{array}
+\right)
+\]
+\end{table}
+
+Thus, following the definition in \citet{Fischer:1981}: $\bm{X}$ will be called \emph{well-conditioned} iff in every possible partition of the items into two nonempty subsets some subject has given response 1 on some item in the first set and response 0 on some item in the second set.
+In this case a unique solution for the \acronym{CML} estimates $\hat{\bm{\beta}}$  exists.
+
+This issue is important for structurally incomplete designs, which often occur in practice: different subsets of items are presented to different groups of persons $g=1,\ldots,G$ where $G\leq n$.
+As a consequence, the likelihood values have to be computed for each group separately and the joint likelihood is the product over the single group likelihoods.
+Hence, the likelihood in Equation \ref{eq12a} becomes
+\begin{equation}\label{eq:glik}
+  L(\bm{\beta}|\bm{r})=\prod_{g=1}^G \frac{\exp \left(\sum_{i=1}^k x_{+i}\beta_i \right)}{\prod_{r=0}^k {\gamma_{g,r}}^{n_{g,r}}}
+\end{equation}
+This also implies the necessity to compute the elementary symmetric functions separately for each group.
+The \pkg{eRm} package can handle such structurally incomplete designs.
+
+From the elaborations above it is obvious that from an asymptotic point of view the \acronym{CML} estimates are at least as good as the \acronym{MML} estimates.
+In the past, computational problems (speed, numerical accuracy) involved in calculating the elementary symmetric functions limited the practical usage of the \acronym{CML} approach \citep[see e.g.][]{Gustafsson:1980}.
+Nowadays, these issues are less crucial due to increased computer power.
+
+In some cases \acronym{MML} estimation has advantages not shared by \acronym{CML}: \acronym{MML} leads to finite person parameters even for persons with zero or perfect raw scores, and such persons are not removed from the estimation process \citep{Molenaar:1995}.
+On the other hand, the consideration of such persons does not seem meaningful from a substantive point of view, since the person parameters are not reliable anymore -- for such subjects the test is too difficult or too easy, respectively.
+Thus, due to these ceiling and floor effects, a corresponding ability estimation is not feasible.
+However, if the research goal is to study ability distributions, such persons should be included, and \acronym{MML} can handle this.
+
+When estimates for the person parameters are of interest, some care has to be taken if the \acronym{CML} method is used, since the person parameters cancel from the estimation equations.
+Usually, they are estimated (once values for the item parameters have been obtained) by inserting $\hat{\bm{\beta}}$ (or equivalently $\hat{\bm{\epsilon}}$) into Equation \ref{eq8} and solving with respect to $\bm{\theta}$.
+Alternatively, Bayesian procedures are applicable \citep{Hoijtink+Boomsma:1995}.
+It is again pointed out that each person in the sample gets his or her own parameter value, even though the number of distinct estimates is limited by the number of different raw scores.
+%
+%
+%
+\subsection[CML and specific objectivity]{\acronym{CML} and specific objectivity}
+In general, the Rasch model can be regarded as a measurement model: Starting from the (nominally scaled) 0/1-data matrix $\bm{X}$, the person raw scores $r_v$ are on an ordinal level.
+They, in turn, are used to estimate the item parameters $\bm{\beta}$ which are on an interval scale provided that the Rasch model holds.
+
+Thus, Rasch models allow for comparisons between objects on an interval level.
+Rasch reasoned on requirements to be fulfilled such that a specific proposition within this context can be regarded as ``scientific''.
+His conclusions were that a basic requirement is the ``objectivity'' of comparisons \citep{Ra:61}.
+This claim contrasts with the assumptions made in \emph{classical test theory} \acronym{(CTT)}.
+A major advantage of the Rasch model over \acronym{CTT} models is the \emph{sample independence} of the results.
+The relevant concepts in \acronym{CTT} are based on a linear model for the ``true score'' leading to some indices, often correlation coefficients, which in turn depend on the observed data.
+This is a major drawback in \acronym{CTT}.
+According to \citet{Fisch:74}, sample independence in \acronym{IRT} models has the following implications:
+\begin{itemize}
+  \item The person-specific results (i.e., essentially $\bm{\theta}$) depend neither on the assignment of a person to a certain subject group nor on the selected test items from an item pool $\Psi$.
+  \item Changes in the skills of a person on the latent trait can be determined independently of the person's base level and independently of the selected item subset $\psi \subset \Psi$.
+  \item From both a theoretical and a practical perspective, the requirement of a representative sample (in terms of a true random selection process) is obsolete.
+\end{itemize}
+Based on these requirements for parameter comparisons, \citet{Ra:77} introduced the term \emph{specific objectivity}: \emph{objective} because any comparison of a pair of parameters is independent of any other parameters or comparisons; \emph{specifically objective} because the comparison made was relative to some specified frame of reference \citep{Andrich:88}.
+In other words, if specific objectivity holds, two persons $v$ and $w$ with corresponding parameters $\theta_v$ and $\theta_w$ are comparable independently of the remaining persons in the sample and independently of the presented item subset $\psi$.
+In turn, for two items $i$ and $j$ with parameters $\beta_i$ and $\beta_j$, the comparison of these items can be accomplished independently of the remaining items in $\Psi$ and independently of the persons in the sample.
+
+The latter is crucial since it reflects completely what is called sample independence.
+If we think not only of comparing $\beta_i$ and $\beta_j$ but rather of estimating these parameters, we reach a point where specific objectivity requires a procedure which is able to provide estimates $\hat{\bm{\beta}}$ that do not depend on the sample.
+This implies that $\hat{\bm{\beta}}$ should be computable without the involvement of $\bm{\theta}$.
+\acronym{CML} estimation fulfills this requirement: By conditioning on the sufficient raw score vector $\bm{r}$, $\bm{\theta}$ disappears from the likelihood equation and $L(\bm{\beta}|\bm{r})$ can be solved without knowledge of $\bm{\theta}$.
+This issue is referred to as \emph{separability of item and person parameters} \citep[see e.g.][]{Wright+Masters:1982}.
+Furthermore, separability implies that no specific distribution has to be assumed for either the person or the item parameters \citep{Rost:2001}; \acronym{MML} estimation, in contrast, requires such assumptions.
+At this point it is clear that \acronym{CML} estimation is the only estimation method within the Rasch measurement context fulfilling the requirement of \emph{person-free item calibration} and, thus, it maps the epistemological theory of specific objectivity to a statistical maximum likelihood framework.
+Note that strictly speaking any statistical result based on sample observations is sample-dependent because any result depends at least on the sample size \citep{Fischer:1987}.
+The estimation of the item parameters is ``sample-independent'', a term indicating the fact that the actually obtained sample of a certain population is not of relevance for the statistical inference on these parameters \citep[][p.\ 23]{Kubinger:1989}.
+%
+%
+%
+\subsection{Estimation of person parameters}
+\acronym{CML} estimation for person parameters is not recommended due to computational issues.
+The \pkg{eRm} package provides two methods for this estimation.
+The first is ordinary \acronym{ML} where the \acronym{CML}-based item parameters are plugged into the joint \acronym{ML} equation.
+The likelihood is optimized with respect to $\bm{\theta}$.
+\citet{And:95} gives a general formulation of this \acronym{ML} estimate with $r_v=r$ and $\theta_v=\theta$:
+\begin{equation}\label{eq17}
+  r - \sum_{i=1}^k \sum_{h=1}^{m_i} \frac{h \exp(h\theta+\hat{\beta}_{ih})}{\sum_{l=0}^{m_i}\exp(l\theta+\hat{\beta}_{il})}=0
+\end{equation}
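+For the dichotomous case, Equation \ref{eq17} reduces to $r=\sum_{i=1}^k \P(X_{vi}=1)$, which can be solved numerically for each raw score $r=1,\ldots,k-1$. A minimal sketch (using the difficulty parametrization of Equation \ref{eq:rasch}; not \pkg{eRm} code) is:
+<<ml-person-sketch, eval=FALSE>>=
+## ML person parameter for raw score r, given item difficulties beta
+theta_ml <- function(r, beta) {
+  uniroot(function(theta) r - sum(plogis(theta - beta)),
+          interval = c(-10, 10))$root
+}
+@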
+\citet{Warm:1989} proposed a weighted likelihood estimation \acronym{(WLE)} which is more accurate than \acronym{ML}.
+For the dichotomous Rasch model the expression to be solved with respect to $\bm{\theta}$ is
+\begin{equation}
+  \P(\theta_v|\bm{x}_v, \hat{\bm{\beta}}) \propto \frac{\exp(r_v\theta_v)}{\prod_{i=1}^k \left(1+\exp(\theta_v-\hat{\beta}_i)\right)}\sqrt{\sum_{i=1}^k p_{vi}(1-p_{vi})}
+\end{equation}
+Again, the item parameter vector $\hat{\bm{\beta}}$ is used from \acronym{CML}.
+This approach will be implemented in a subsequent \pkg{eRm} version.
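+In the meantime, Warm's estimator can be sketched in a few standalone lines for the dichotomous case: the score equation is augmented by the bias-correction term $J/(2I)$, where $I=\sum_i p_{vi}(1-p_{vi})$ is the test information and $J=\sum_i p_{vi}(1-p_{vi})(1-2p_{vi})$ its derivative (our reformulation of \citet{Warm:1989}, not \pkg{eRm} code).
+<<wle-person-sketch, eval=FALSE>>=
+theta_wle <- function(r, beta) {
+  f <- function(theta) {
+    p <- plogis(theta - beta)
+    I <- sum(p * (1 - p))                  # test information
+    J <- sum(p * (1 - p) * (1 - 2 * p))    # derivative of I
+    r - sum(p) + J / (2 * I)               # corrected score equation
+  }
+  uniroot(f, interval = c(-10, 10))$root
+}
+@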
+Additional explanations and simulation studies regarding person parameter estimation can be found in \citet{Hoijtink+Boomsma:1995}.
+%
+%
+%
+%
+%----------------- end parameter estimation -----------------
+\section{Testing extended Rasch models}
+\label{Gof}
+Testing \acronym{IRT} models involves two parts: First, item-wise and person-wise statistics can be examined, in particular item-fit and person-fit statistics.
+Second, based on \acronym{CML} properties, various model tests can be derived \citep[see][]{Glas+Verhelst:1995a, Glas+Verhelst:1995b}.
+%
+%
+%
+\subsection{Item-fit and person-fit statistics}
+Commonly in \acronym{IRT}, items and persons are excluded based on item-fit and person-fit statistics.
+Both are residual based measures: The observed data matrix $\bm{X}$ is compared with the model probability matrix $\bm{P}$.
+Computing standardized residuals for all observations gives the $n \times k$ residual matrix $\bm{R}$.
+The column sums of the squared residuals correspond to item-fit statistics and the row sums to person-fit statistics, both of which are approximately $\chi^2$-distributed with the corresponding degrees of freedom.
+Based on these quantities unweighted (\textsl{outfit}) and weighted (\textsl{infit}) mean-square statistics can also be used to evaluate item and person fit \citep[see e.g.][]{Wright+Masters:1982}.
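+To fix ideas, the following sketch computes these quantities for the dichotomous case directly from $\bm{X}$ and $\bm{P}$ (a didactic reformulation; in \pkg{eRm} itself, \code{itemfit()} and \code{personfit()} are applied to a person parameter object):
+<<fit-sketch, eval=FALSE>>=
+V <- P * (1 - P)                         # model variances
+R <- (X - P) / sqrt(V)                   # standardized residuals
+outfit.item   <- colMeans(R^2)           # unweighted mean-squares
+infit.item    <- colSums((X - P)^2) / colSums(V)   # weighted
+outfit.person <- rowMeans(R^2)
+infit.person  <- rowSums((X - P)^2) / rowSums(V)
+@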
+%
+%
+%
+\subsection{A Wald test for item elimination}
+A helpful implication of \acronym{CML} estimates is that subsequent test statistics are readily obtained and model tests are easy to carry out.
+Basically, we have to distinguish between tests on the item level and global model tests.
+
+On item level, sample independence reflects the property that by splitting up the sample in, e.g., two parts, the corresponding parameter vectors $\hat{\bm{\beta}}^{(1)}$ and $\hat{\bm{\beta}}^{(2)}$ should be the same.
+Thus, to achieve Rasch model fit, those items whose parameters differ between the subsamples have to be eliminated from the test.
+This important issue in test calibration can be examined, e.g., by using a graphical model test.
+\citet{FiSch:70} propose a $\mathcal{N}(0,\,1)$-distributed test statistic which compares the item parameters for two subgroups:
+\begin{equation}\label{eq:wald}
+  z=\frac{\beta_i^{(1)}-\beta_i^{(2)}}{\sqrt{Var_i^{(1)}+Var_i^{(2)}}}
+\end{equation}
+The variance term in the denominator is based on Fisher's function of ``information in the sample''.
+However, as \citet{Glas+Verhelst:1995a} point out in the discussion of their Wald-type test, this term can be extracted directly from the variance-covariance matrix of the \acronym{CML} estimates.
+This Wald approach is provided in \pkg{eRm} by means of the function \code{Waldtest()}.
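+Assuming the default median raw score split, a typical call on a fitted model object (here the Rasch model fit \code{res.rasch} from Example 1 below) might look as follows:
+<<waldtest-sketch, eval=FALSE>>=
+Waldtest(res.rasch, splitcr = "median")   # one z-value per item
+@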
+%
+%
+%
+\subsection{Andersen's likelihood-ratio test}
+In the \pkg{eRm} package the likelihood ratio test statistic $LR$, initially proposed by \citet{And:73}, is computed for the \acronym{RM}, the \acronym{RSM}, and the \acronym{PCM}.
+For the models with linear extensions, $LR$ has to be computed separately for each measurement point and subgroup.
+\begin{equation}
+\label{eq15}
+LR = 2\left(\sum_{g=1}^G \log L_c(\hat{\bm{\eta}}_g;\bm{X}_g)-\log L_c(\hat{\bm{\eta}};\bm{X})\right)
+\end{equation}
+The underlying principle of this test statistic is that of \textit{subgroup homogeneity} in Rasch models: for arbitrary disjoint subgroups $g=1,\,\ldots,\,G$ the parameter estimates $\hat{\bm{\eta}}_g$ have to be the same.
+$LR$ is asymptotically $\chi^2$-distributed with $df$ equal to the number of parameters estimated in the subgroups minus the number of parameters in the total data set.
+For the sake of computational efficiency, the \pkg{eRm} package performs a person raw score median split into two subgroups.
+In addition, a graphical model test \citep{Ra:60} based on these estimates is produced by plotting $\hat{\bm{\beta}}^{(1)}$ against $\hat{\bm{\beta}}^{(2)}$.
+Thus, critical items (i.e., those falling far from the diagonal) can be identified and eliminated.
+Further elaborations and additional test statistics for polytomous Rasch models can be found, e.g., in \citet{Glas+Verhelst:1995a}.
+
+\subsection{Non-parametric (``quasi-exact'') tests}
+Based on the \pkg{RaschSampler} package by \citet{Verhelst+Hatzinger+Mair:2007}, several Rasch model tests as proposed by \citet{Ponocny:2001} are provided.
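+These tests operate on the raw 0/1 data matrix; assuming the call form \code{NPtest(X, n, method)}, where \code{X} is the data matrix, \code{n} the number of sampled matrices, and \code{method} the test statistic, a usage sketch for Ponocny's $T_1$ statistic is:
+<<nptest-sketch, eval=FALSE>>=
+NPtest(as.matrix(raschdat1), n = 500, method = "T1")
+@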
+
+\subsection{Martin-Löf test}
+Applying the LR-principle to subsets of items, Martin-Löf \citep[1973, see][]{Glas+Verhelst:1995a} suggested a statistic to evaluate whether two groups of items are homogeneous, i.e., to test the unidimensionality axiom.
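+In \pkg{eRm} this test is provided by the function \code{MLoef()}; assuming a median split of the items, a usage sketch is:
+<<mloef-sketch, eval=FALSE>>=
+MLoef(res.rasch, splitcr = "median")
+@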
+%-------------------------- end goodness-of-fit ------------------
+
+%---------------------------- APPLIED SECTION ----------------------------
+\section{The eRm package and application examples}
+\label{sec:pack}
+The underlying idea of the \pkg{eRm} package is to provide a user-friendly, flexible tool to compute extended Rasch models.
+This implies, amongst others, an automatic generation of the design matrix $\bm{W}$.
+However, in order to test specific hypotheses, the user may specify $\bm{W}$ directly, which makes the package flexible enough to compute \acronym{IRT} models beyond their regular applications.
+In the following subsections, various examples are provided pertaining to different model and design matrix scenarios.
+For the sake of intelligibility, the artificial data sets are kept rather small.
+A detailed description (in German) of applications of various extended Rasch models using the \pkg{eRm} package can be found in \citet{Poinstingl+Mair+Hatzinger:07}.
+
+\subsection{Structure of the eRm package}
+Embedding \pkg{eRm} into the flexible framework of \proglang{R} is a crucial benefit over existing stand-alone programs like WINMIRA \citep{Davier:1998}, LPCM-WIN \citep{FiPS:98}, and others.
+
+Another important issue in the development phase was that the package should be flexible enough to allow for \acronym{CML} compatible polytomous generalizations of the basic Rasch model such as the \acronym{RSM} and the \acronym{PCM}.
+In addition, by introducing a design matrix concept linear extensions of these basic models should be applicable.
+This approach resulted in the inclusion of the \acronym{LLTM}, the \acronym{LRSM}, and the \acronym{LPCM} (the latter being the most general model) in the \pkg{eRm} package.
+For this most general model, \acronym{CML} estimation was implemented, which can be used for the remaining models as well.
+A corresponding graphical representation is given in Figure \ref{fig:body}.
+\begin{figure}[hbt]\centering%
+  \includegraphics[width=157mm, keepaspectratio=true]{UCML}%
+  \caption{Bodywork of the \pkg{eRm} routine}%
+  \label{fig:body}%
+\end{figure}
+
+An important benefit of the package with respect to linearly extended models is that for certain models the design matrix $\bm{W}$ can be generated automatically (LPCM-WIN \citep{FiPS:98} also allows for specifying design matrices, but in the case of more complex models this can become a tedious task and the user must have a thorough understanding of establishing proper design structures).
+For repeated measurement models, time contrasts in \pkg{eRm} can simply be specified by defining the number of measurement points via the argument {\tt mpoints}.
+To account for group contrasts (e.g., treatment and control groups), a corresponding vector ({\tt groupvec}) can be specified that denotes which person belongs to which group.
+However, $\bm{W}$ can also be defined by the user.
+
+A recently added feature of the routine is the option to allow for structurally missing values.
+This is required, e.g., in situations when different subsets of items are presented to different groups of subjects as described in Section \ref{sec:mpcml}.
+These person groups are identified automatically: In the data matrix $\bm{X}$, those items which are not presented to a certain subject are declared as \code{NA}s, as usual in \proglang{R}.
+
+After solving the \acronym{CML} equations by the Newton-Raphson method, the output of the routine consists of the ``basic'' parameter estimates $\hat{\bm{\eta}}$, the corresponding variance-covariance matrix, and consequently the vector with the standard errors.
+Furthermore, the ordinary item parameter estimates $\hat{\bm{\beta}}$ are computed by using the linear transformation $\hat{\bm{\beta}}=\bm{W}\hat{\bm{\eta}}$.
+For ordinary Rasch models these basic parameters correspond to the item easiness.
+For the \acronym{RM}, the \acronym{RSM}, and the \acronym{PCM}, however, we display $\hat{\bm{\eta}}$ as $-\hat{\bm{\eta}}$, i.e., as difficulty.
+It has to be mentioned that the \acronym{CML} equation is solved with the restriction that one item parameter has to be fixed to zero (we use $\beta_1=0$).
+For the sake of interpretability, the resulting estimates $\hat{\bm{\beta}}$ can easily be transformed into ``sum-zero'' restricted estimates $\hat{\bm{\beta}}^*$ by applying
+$\hat{\beta}_i^*=\hat{\beta}_i-\frac{1}{k}\sum_{j=1}^k \hat{\beta}_j$.
+This transformation is also used for the graphical model test.
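+In \proglang{R} this centering is a one-liner (sketch; \code{betahat} stands for the vector of estimated item parameters):
+<<sumzero-sketch, eval=FALSE>>=
+betastar <- betahat - mean(betahat)   # sum-zero restricted estimates
+@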
+%
+%
+%
+\subsection{Example 1: Rasch model}
+We start the example section with a  simple Rasch model based on a $100 \times 30$ data matrix.
+First, we estimate the item parameters using the function \code{RM()} and then the person parameters with \code{person.parameter()}.
+<<>>=
+library("eRm")
+res.rasch <- RM(raschdat1)
+pres.rasch <- person.parameter(res.rasch)
+@
+Then we use Andersen's LR-test for goodness-of-fit with mean split criterion:
+<<>>=
+lrres.rasch <- LRtest(res.rasch, splitcr = "mean")
+lrres.rasch
+@
+We see that the model fits; a graphical representation of this result (for a subset of items only) is given in Figure \ref{fig:GOF} by means of a goodness-of-fit plot with confidence ellipses.
+<<plotGOF-lrres-rasch, eval=FALSE, fig=FALSE, results=hide>>=
+plotGOF(lrres.rasch, beta.subset=c(14,5,18,7,1), tlab="item", conf=list(ia=FALSE,col="blue",lty="dotted"))
+@
+\begin{figure}[hbt]\centering%
+<<plotGOF-lrres-rasch-plot, echo=FALSE, fig=TRUE>>=
+<<plotGOF-lrres-rasch>>
+@
+\caption{Goodness-of-fit plot for some items with confidence ellipses.}
+\label{fig:GOF}
+\end{figure}
+
+To be able to draw confidence ellipses, \code{se = TRUE} has to be set when computing the LR-test.
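+That is, the call from above would become the following (sketch):
+<<lrtest-se-sketch, eval=FALSE>>=
+lrres.rasch <- LRtest(res.rasch, splitcr = "mean", se = TRUE)
+@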
+%
+%
+%
+\subsection[Example 2: LLTM as a restricted Rasch model]{Example 2: \acronym{LLTM} as a restricted Rasch model}
+As mentioned in Section \ref{Rep}, the models with linear extensions on the item parameters can also be seen as special cases of their underlying basic model.
+In fact, the \acronym{LLTM} as presented below, following the original idea by \citet{Scheib:72}, is a restricted \acronym{RM}, i.e., the number of estimated parameters is smaller compared to a Rasch model.
+The data matrix $\bm{X}$ consists of $n=15$ persons and $k=5$ items.
+Furthermore, we specify a design matrix $\bm{W}$ (following Equation \ref{eq4}) with specific weight elements $w_{ij}$.
+<<>>=
+W <- matrix(c(1,2,1,3,2,2,2,1,1,1),ncol=2)
+res.lltm <- LLTM(lltmdat2, W)
+summary(res.lltm)
+@
+The \code{summary()} method provides point estimates and standard errors for the basic parameters and for the resulting item parameters.
+Note that item parameters in \pkg{eRm} are always estimated as easiness parameters according to Equations \ref{eq1} and \ref{eq2}, but not Equation \ref{eq:rasch}.
+If the sign is switched, the user gets difficulty parameters (the standard errors remain the same, of course).
+However, all plotting functions \code{plotGOF}, \code{plotICC}, \code{plotjointICC}, and \code{plotPImap}, as well as the function \code{thresholds} display the difficulty parameters.
+The same applies for the basic parameters $\eta$ in the output of the \acronym{RM}, \acronym{RSM}, and \acronym{PCM}.
+%
+%
+%
+\subsection[Example 3: RSM and PCM]{Example 3: \protect\acronym{RSM} and \protect\acronym{PCM}}
+Again, we provide an artificial data set now with $n=300$ persons and $k=4$ items; each of them with $m+1=3$ categories.
+We start with the estimation of an \acronym{RSM} and, subsequently, we calculate the corresponding category-intersection parameters using the function \code{thresholds()}.
+<<>>=
+data(pcmdat2)
+res.rsm <- RSM(pcmdat2)
+thresholds(res.rsm)
+@
+The location parameter is basically the item difficulty and the thresholds are the points in the \acronym{ICC} plot given in Figure \ref{fig:ICC} where the category curves intersect:
+<<plotICC-res-rsm, eval=FALSE, fig=FALSE, results=hide>>=
+plotICC(res.rsm, mplot=TRUE, legpos=FALSE,ask=FALSE)
+@
+\begin{figure}[hbt]\centering%
+<<plotICC-res-rsm-plot, echo=FALSE, fig=TRUE>>=
+<<plotICC-res-rsm>>
+@
+\caption{\acronym{ICC} plot for an \acronym{RSM}.}
+\label{fig:ICC}
+\end{figure}
+
+The \acronym{RSM} restricts the threshold distances to be the same across all items.
+This strong assumption can be relaxed using a \acronym{PCM}.
+The results are represented in a person-item map (see Figure \ref{fig:PImap}).
+<<plotPImap-res-pcm, eval=FALSE, fig=FALSE, results=hide>>=
+res.pcm <- PCM(pcmdat2)
+plotPImap(res.pcm, sorted = TRUE)
+@
+\begin{figure}[hbt]\centering%
+<<plotPImap-res-pcm-plot, echo=FALSE, fig=TRUE>>=
+<<plotPImap-res-pcm>>
+@
+\caption{Person-Item map for a \acronym{PCM}.}
+\label{fig:PImap}
+\end{figure}
+
+After estimating the person parameters we can check the item-fit statistics.
+<<>>=
+pres.pcm <- person.parameter(res.pcm)
+itemfit(pres.pcm)
+@
+A likelihood ratio test comparing the \acronym{RSM} and the \acronym{PCM} indicates that the \acronym{PCM} provides a better fit.
+%Since none of the items is significant we can conclude that the data fit the \acronym{PCM}.
+<<>>=
+lr<- 2*(res.pcm$loglik-res.rsm$loglik)
+df<- res.pcm$npar-res.rsm$npar
+pvalue<-1-pchisq(lr,df)
+cat("LR statistic: ", lr, "  df =",df, "  p =",pvalue, "\n")
+@
+%
+%
+%
+\subsection[An LPCM for repeated measurements in different groups]{An \protect\acronym{LPCM} for repeated measurements in different groups}
+The most complex example refers to an \acronym{LPCM} with two measurement points.
+In addition, the hypothesis of interest is whether the treatment has an effect.
+The corresponding contrast is the last column in $\bm{W}$ below.
+
+First, the data matrix $\bm{X}$ is specified.
+We assume an artificial test consisting of $k=3$ items which was presented twice to the subjects.
+The first 3 columns in $\bm{X}$ correspond to the first test occasion, whereas the last 3 to the second occasion.
+Generally, the first $k$ columns correspond to the first test occasion, the next $k$ columns to the second, etc.
+In total, there are $n=20$ subjects.
+Among these, the first 10 persons belong to the first group (e.g., control), and the next 10 persons to the second group (e.g., treatment).
+This is specified by a group vector:
+<<>>=
+grouplpcm <- rep(1:2, each = 10)
+@
+Again, $\bm{W}$ is generated automatically.
+In general, for such designs the generation of $\bm{W}$ consists first of the item contrasts, followed by the time contrasts and finally by the group main effects except for the first measurement point (due to identifiability issues, as already described).
+<<>>=
+reslpcm <- LPCM(lpcmdat, mpoints = 2, groupvec = grouplpcm, sum0 = FALSE)
+model.matrix(reslpcm)
+@
+The parameter estimates are the following:
+<<>>=
+coef(reslpcm, parm="eta")
+@
+Testing whether the $\eta$-parameters equal 0 is usually not of relevance for those parameters referring to the items (in this example $\eta_1,\,\ldots,\,\eta_8$).
+But for the remaining contrasts, $H_0: \eta_9=0$ (implying no general time effect) cannot be rejected ($p=.44$), whereas the hypothesis $H_0: \eta_{10}=0$ has to be rejected ($p=.004$) when applying a $z$-test.
+This suggests that there is a significant treatment effect over the measurement points.
+If a user wants to perform additional tests such as a Wald test for the equivalence of two $\eta$-parameters, the \code{vcov} method can be applied to get the variance-covariance matrix.
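+For instance, the $z$-values and $p$-values reported above can be reproduced from the estimates and the diagonal of the variance-covariance matrix (sketch, using the \code{coef} and \code{vcov} methods mentioned in the text):
+<<eta-ztest-sketch, eval=FALSE>>=
+eta <- coef(reslpcm, parm = "eta")
+z   <- eta / sqrt(diag(vcov(reslpcm)))
+2 * pnorm(-abs(z))                    # two-sided p-values
+@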
+%
+%
+%
+%
+%
+\section{Additional topics}
+This section will be extended successively with new developments and components which do not directly relate to the modeling core of \pkg{eRm} but may prove to be useful add-ons.
+%
+%
+%
+\subsection{The eRm simulation module}
+A recent \pkg{eRm} development is the implementation of a simulation module to generate 0-1 matrices for different Rasch scenarios.
+In this article we give a brief overview of the functionality; for more detailed descriptions (within the context of model testing) the reader is referred to \citet{Mair:2006} and \citet{Suarez+Glas:2003}.
+
+For each scenario the user has the option either to assign $\bm{\theta}$ and $\bm{\beta}$ as vectors to the simulation function (e.g., by drawing parameters from a uniform distribution) or to let the function draw the parameters from a $\mathcal{N}(0,1)$ distribution.
+The first scenario is the simulation of Rasch homogeneous data by means of the function \code{sim.rasch()}.
+The parameter values are plugged into Equation \ref{eq:rasch}, which results in the $n \times k$ matrix $\bm{P}$ of model probabilities.
+An element $p_{vi}$ indicates the probability that subject $v$ solves item $i$.
+In a second step the matrix $\bm{P}$ has to be transformed into the 0-1 data matrix $\bm{X}$.
+The recommended way to achieve this is to draw another random number $p^{\star}_{vi}$ from a uniform distribution in $[0;1]$ and perform the transformation according to the following rule:
+\begin{equation*}
+x_{vi} = \left\{
+ \begin{array}{rl}
+  1 & \text{if } p^{\star}_{vi} \leq p_{vi}\\
+  0 & \text{if } p^{\star}_{vi} > p_{vi}\\
+ \end{array} \right.
+\end{equation*}
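+In \proglang{R}, this randomized rule amounts to two lines (sketch, with $\bm{P}$ given):
+<<pmatrix-sketch, eval=FALSE>>=
+Pstar <- matrix(runif(length(P)), nrow(P))   # random cutpoints
+X <- (Pstar <= P) * 1                        # 0/1 data matrix
+@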
+Alternatively, the user can specify a fixed cutpoint $p^{\star}:=p^{\star}_{vi}$ (e.g., $p^{\star} = 0.5$) and make the decision according to the same rule.
+This option is provided by means of the \code{cutpoint} argument.
+Caution is advised when using this deterministic option since it is likely to produce ill-conditioned data matrices.
+
+The second scenario in this module regards the violation of the parallel \acronym{ICC} assumption which leads to the two-parameter logistic model (2-\acronym{PLM}) proposed by \citet{Birnbaum:1968}:
+\begin{equation}\label{eq:2pl}
+  \P(X_{vi}=1)=\frac{\exp(\alpha_i(\theta_v - \beta_i))}{1+\exp(\alpha_i(\theta_v-\beta_i))}.
+\end{equation}
+The parameter $\alpha_i$ denotes the item discrimination which for the Rasch model is 1 across all items.
+Thus, each item score gets a weight and the raw scores are not sufficient anymore.
+The function for simulating 2-\acronym{PL} data is \code{sim.2pl()} and if $\bm{\alpha}$ is not specified by the user by means of the argument \code{discrim}, the discrimination parameters are drawn from a log-normal distribution.
+The reasons for using this particular kind of distribution are the following: In the case of $\alpha_i = 1$ the \acronym{ICC}s are Rasch consistent.
+Concerning the violations, it should be possible to achieve deviations in both directions (for $\alpha_i > 0$).
+If $\alpha_i > 1$ the \acronym{ICC} is steeper than in the Rasch case and, conversely, if $\alpha_i < 1$ the \acronym{ICC} is flatter.
+This bidirectional deviation around 1 is warranted by the lognormal distribution $LN(\mu,\sigma^2)$ with $\mu = 0$.
+Since the support of this distribution is positive, $\alpha_i$ cannot be negative.
+The degrees of model violation can be steered by means of the dispersion parameter $\sigma^2$.
+A value of $\sigma^2 = .50$ already denotes a strong violation.
+The lower $\sigma^2$, the closer the $\alpha_i$ lie around 1, i.e., the closer they are to the Rasch slopes.
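+A base \proglang{R} sketch of this scenario (independent of \code{sim.2pl()}, whose exact argument list we do not reproduce here) is:
+<<sim2pl-sketch, eval=FALSE>>=
+n <- 500; k <- 20
+theta <- rnorm(n); beta <- rnorm(k)
+alpha <- rlnorm(k, meanlog = 0, sdlog = sqrt(0.3))  # sigma^2 = .30
+P <- plogis(sweep(outer(theta, beta, "-"), 2, alpha, "*"))
+@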
+
+Using the function \code{sim.xdim()}, the unidimensionality assumption is violated.
+This function allows for the simulation of multidimensional Rasch models as given, for instance, in \citet{Glas:1992} and \citet{Adams+Wilson+Wang:1997}.
+Multidimensionality implies that one single item measures more than one latent construct.
+Let us denote the number of these latent traits by $D$.
+Consequently, each person has a vector of ability parameters $\bm{\theta}_v$ of length $D$.
+These vectors are drawn from a multivariate normal distribution with mean $\bm{\mu} = \bm{0}$ and VC-matrix $\bm{\Sigma}$ of dimension $D \times D$.
+This matrix has to be specified by the user with the argument \code{Sigma}.
+In order to achieve strong model violations, very low correlations such as .01 should be provided.
+To specify to what extent item $i$ measures each of the $D$ dimensions, a corresponding vector of weights $\bm{z}_i$ of length $D$ is defined.
+If the resulting $k \times D$ matrix $\bm{Z}$ is not provided by the user, \code{sim.xdim()} generates $\bm{Z}$ such that each $\bm{z}_i$ contains only one nonzero element, which indicates the assigned dimension.
+This corresponds to the \emph{between-item multidimensional model} \citep{Adams+Wilson+Wang:1997}.
+However, in any case the person part of the model is $\bm{z}_i^T \bm{\theta}_v$, which replaces $\theta_v$ in Equation \ref{eq:rasch}.
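+The person part can be sketched in a few lines (assuming the \pkg{MASS} package for the multivariate normal draws and a between-item design with one nonzero weight per item):
+<<simxdim-sketch, eval=FALSE>>=
+library("MASS")
+D <- 2; n <- 500; k <- 10
+Sigma <- matrix(c(1, .01, .01, 1), 2)              # nearly uncorrelated
+Theta <- mvrnorm(n, mu = rep(0, D), Sigma = Sigma) # n x D abilities
+Z <- diag(D)[sample(1:D, k, replace = TRUE), ]     # k x D weights
+person.part <- Theta %*% t(Z)   # entry (v, i) is z_i' theta_v
+@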
+
+Finally, locally dependent item responses can be produced by means of the function \code{sim.locdep()}.
+Local dependence implies the introduction of pair-wise item correlations $\delta_{ij}$.
+If these correlations are constant across items, the argument \code{it.cor} can be a single value $\delta$.
+A value $\delta = 0$ corresponds to the Rasch model whereas $\delta = 1$ leads to the strongest violation.
+Alternatively, for different pair-wise item correlations, the user can specify a VC-matrix $\Delta$ of dimension $k \times k$.
+The formal representation of the corresponding \acronym{IRT} model is
+\begin{equation}
+  \P(X_{vi}=1|X_{vj}=x_{vj})=\frac{\exp(\theta_v - \beta_i + x_{vj}\delta_{ij})}{1+\exp(\theta_v-\beta_i + x_{vj}\delta_{ij})}.
+\end{equation}
+This model was proposed by \citet{Jannarone:1986} and is suited to model locally dependent item responses.
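+For a single dependent item pair, this model can be simulated sequentially: draw $x_{vj}$ from the ordinary Rasch probability first and then $x_{vi}$ conditional on it (sketch, reusing \code{theta} and \code{beta} from the 2-\acronym{PL} sketch above):
+<<locdep-sketch, eval=FALSE>>=
+delta <- 0.5                      # pairwise dependence parameter
+i <- 1; j <- 2                    # a locally dependent item pair
+p_j <- plogis(theta - beta[j])    # item j follows the ordinary RM
+x_j <- rbinom(length(theta), 1, p_j)
+p_i <- plogis(theta - beta[i] + x_j * delta)   # item i shifted by x_j
+x_i <- rbinom(length(theta), 1, p_i)
+@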
+%
+%
+%
+%
+%
+\section{Discussion and outlook}
+\label{sec:disc}
+Here we give a brief outline of future \pkg{eRm} developments.
+The \acronym{CML} estimation  approach, in combination with the \acronym{EM}-algorithm, can also be used to estimate \textit{mixed Rasch models} (MIRA).
+The basic idea behind such models is that the extended Rasch model holds within subpopulations of individuals, but with different parameter values for each subgroup.
+Corresponding elaborations are given in \citet{RoDa:95}.
+
+In Rasch models the item discrimination parameter $\alpha_i$ is always fixed  to 1 and thus it does not appear in the basic equation.
+Allowing for different discrimination parameters across items leads to the two-parameter logistic model as given in Equation \ref{eq:2pl}.
+In this model the raw scores are not sufficient statistics anymore and hence \acronym{CML} cannot be applied.
+2-\acronym{PL} models can be estimated by means of the \pkg{ltm} package \citep{Riz:06}.
+However, \citet{Verhelst+Glas:1995} formulated the one parameter logistic model \acronym{(OPLM)}, in which the $\alpha_i$ may differ across items but are imputed as fixed constants rather than estimated.
+The basic strategy to estimate \acronym{OPLM} is a three-step approach: First, the item parameters of the Rasch model are computed.
+Then, discrimination parameters are computed under certain restrictions.
+Finally, using these discrimination weights, the item parameters for the \acronym{OPLM} are estimated using \acronym{CML}.
+This yields a more flexible version of the Rasch model in terms of different slopes, while \acronym{CML} estimation remains applicable.
+
+To conclude, the \pkg{eRm} package is a tool to estimate extended Rasch models for unidimensional traits.
+The generalizations towards different numbers of item categories, as well as the linear extensions allowing for item covariates, trend effects, and, optionally, group contrasts, are important issues when examining item behavior and person performance in tests.
+This improves the applicability of \acronym{IRT} models in a wide variety of research areas.
+%
+%
+%
+%
+%
+\bibliography{eRmvig}%
+\newpage%
+\rotatebox[origin=c]{90}{\includegraphics[width=1.1\textheight]{eRm_object_tree.pdf}}%
+%
+%
+%
+\end{document}
diff --git a/inst/doc/eRm_object_tree.pdf b/vignettes/eRm_object_tree.pdf
old mode 100755
new mode 100644
similarity index 100%
rename from inst/doc/eRm_object_tree.pdf
rename to vignettes/eRm_object_tree.pdf
diff --git a/vignettes/eRmvig.bib b/vignettes/eRmvig.bib
new file mode 100644
index 0000000..3b28767
--- /dev/null
+++ b/vignettes/eRmvig.bib
@@ -0,0 +1,853 @@
+% This file was created with JabRef 2.10.
+% Encoding: UTF8
+
+
+@Manual{gnuR,
+  Title = {{{\gnuR}: A Language and Environment for Statistical Computing}},
+
+  Address = {{Vienna, Austria}},
+  Author = {{{\gnuR} Core Team}},
+  Organization = {{{\gnuR} Foundation for Statistical Computing}},
+  Year = {2014},
+
+  Url = {http://www.R-project.org/}
+}
+
+@Article{Adams+Wilson+Wang:1997,
+  Title = {{The Multidimensional Random Coefficients Multinomial Logit Model}},
+  Author = {Adams, R. J. and Wilson, M. and Wang, Wen-chung},
+  Journal = {{Applied Psychological Measurement}},
+  Year = {1997},
+  Number = {1},
+  Pages = {1--23},
+  Volume = {21},
+
+  Doi = {10.1177/0146621697211001}
+}
+
+@InCollection{And:95,
+  Title = {{Polytomous Rasch Models and their Estimation}},
+  Author = {Andersen, Erling Bernhard},
+  Booktitle = {{Rasch Models: Foundations, Recent Developments, and Applications}},
+  Publisher = {Springer},
+  Year = {1995},
+
+  Address = {New York},
+  Editor = {Fischer, Gerhard H. and Molenaar, Ivo W.},
+  Pages = {271--292},
+
+  Doi = {10.1007/978-1-4612-4230-7_15}
+}
+
+@InCollection{And:83,
+  Title = {A General Latent Structure Model for Contingency Table Data},
+  Author = {Andersen, Erling Bernhard},
+  Booktitle = {Principals of Modern Psychological Measurement},
+  Publisher = {Erlbaum},
+  Year = {1983},
+
+  Address = {Hillsdale, NJ},
+  Editor = {H. Wainer and S. Messick},
+  Pages = {117--138}
+}
+
+@Article{And:73,
+  Title = {{A goodness of fit test for the Rasch model}},
+  Author = {Andersen, Erling Bernhard},
+  Journal = {{Psychometrika}},
+  Year = {1973},
+  Number = {1},
+  Pages = {123--140},
+  Volume = {38},
+
+  Doi = {10.1007/BF02291180}
+}
+
+@Article{And:72,
+  Title = {{The Numerical Solution of a Set of Conditional Estimation Equations}},
+  Author = {Andersen, Erling Bernhard},
+  Journal = {{Journal of the Royal Statistical Society. Series B (Methodological)}},
+  Year = {1972},
+  Number = {1},
+  Pages = {42--54},
+  Volume = {34},
+
+  Url = {http://www.jstor.org/stable/2985049}
+}
+
+@Article{Andersen:1970,
+  Title = {{Asymptotic Properties of Conditional Maximum-Likelihood Estimators}},
+  Author = {Andersen, Erling Bernhard},
+  Journal = {{Journal of the Royal Statistical Society. Series B (Methodological)}},
+  Year = {1970},
+  Number = {2},
+  Pages = {283--301},
+  Volume = {32},
+
+  Url = {http://www.jstor.org/stable/2984535}
+}
+
+@Article{CAnd:07,
+  Title = {{Estimation of Models in a Rasch Family for Polytomous Items and Multiple Latent Variables}},
+  Author = {Anderson, Carolyn J. and Li, Zhushan and Vermunt, Jeroen K.},
+  Journal = {{Journal of Statistical Software}},
+  Year = {2007},
+  Number = {6},
+  Pages = {1--36},
+  Volume = {20},
+
+  Url = {http://www.jstatsoft.org/v20/i06/}
+}
+
+@Book{Andrich:88,
+  Title = {{Rasch Models for Measurement}},
+  Author = {Andrich, David},
+  Publisher = {Sage Publications},
+  Year = {1988},
+
+  Address = {Newbury Park, CA},
+  Series = {{Quantitative Applications in the Social Sciences}},
+  Volume = {68}
+}
+
+@Article{And:78,
+  Title = {{A rating formulation for ordered response categories}},
+  Author = {Andrich, David},
+  Journal = {{Psychometrika}},
+  Year = {1978},
+  Number = {4},
+  Pages = {561--573},
+  Volume = {43},
+
+  Doi = {10.1007/BF02293814}
+}
+
+@Book{BaKi:04,
+  Title = {Item Response Theory: Parameter Estimation Techniques},
+  Author = {Baker, F. B. and Kim, S.},
+  Publisher = {Dekker},
+  Year = {2004},
+
+  Address = {New York},
+  Edition = {2nd}
+}
+
+@InCollection{Birnbaum:1968,
+  Title = {Some latent trait models and their use in inferring an examinee's ability},
+  Author = {Birnbaum, A.},
+  Booktitle = {Statistical theories of mental test scores},
+  Publisher = {Addison-Wesley},
+  Year = {1968},
+
+  Address = {Reading, MA},
+  Editor = {F. M. Lord and M. R. Novick},
+  Pages = {395--479}
+}
+
+@Article{Bock+Aitkin:1981,
+  Title = {{Marginal maximum likelihood estimation of item parameters: Application of an EM algorithm}},
+  Author = {Bock, R. Darrell and Aitkin, Murray},
+  Journal = {{Psychometrika}},
+  Year = {1981},
+  Number = {4},
+  Pages = {443--459},
+  Volume = {46},
+
+  Doi = {10.1007/BF02293801}
+}
+
+@Article{Bor:06,
+  Title = {{The attack of the psychometricians}},
+  Author = {Borsboom, Denny},
+  Journal = {{Psychometrika}},
+  Year = {2006},
+  Number = {3},
+  Pages = {425--440},
+  Volume = {71},
+
+  Doi = {10.1007/s11336-006-1447-6}
+}
+
+@Book{deBoeck+Wilson:2004,
+  Title = {{Explanatory item response models: A generalized linear and nonlinear approach}},
+  Author = {{de Boeck}, P. and Wilson, M.},
+  Publisher = {Springer},
+  Year = {2004},
+
+  Address = {New York}
+}
+
+@Article{LeVe:86,
+  Title = {{Maximum Likelihood Estimation in Generalized Rasch Models}},
+  Author = {{de Leeuw}, J. and Verhelst, N.},
+  Journal = {{Journal of Educational Statistics}},
+  Year = {1986},
+  Number = {3},
+  Pages = {183--196},
+  Volume = {11},
+
+  Doi = {10.3102/10769986011003183}
+}
+
+@InCollection{Fisch:95a,
+  Title = {{Derivations of the Rasch Model}},
+  Author = {Fischer, Gerhard H.},
+  Booktitle = {{Rasch Models: Foundations, Recent Developments, and Applications}},
+  Publisher = {Springer},
+  Year = {1995},
+
+  Address = {New York},
+  Editor = {Fischer, Gerhard H. and Molenaar, Ivo W.},
+  Pages = {15--38},
+
+  Doi = {10.1007/978-1-4612-4230-7_2}
+}
+
+@InCollection{Fisch:95b,
+  Title = {{Linear Logistic Models for Change}},
+  Author = {Fischer, Gerhard H.},
+  Booktitle = {{Rasch Models: Foundations, Recent Developments, and Applications}},
+  Publisher = {Springer},
+  Year = {1995},
+
+  Address = {New York},
+  Editor = {Fischer, Gerhard H. and Molenaar, Ivo W.},
+  Pages = {157--180},
+
+  Doi = {10.1007/978-1-4612-4230-7_9}
+}
+
+@InCollection{Fischer:1988,
+  Title = {Spezifische \uppercase{O}bjektivität: \uppercase{E}ine wissenschaftstheoretische \uppercase{G}rundlage des \uppercase{R}asch-\uppercase{M}odells [\uppercase{S}pecific objectivity: \uppercase{A}n epistemological foundation of the \uppercase{R}asch model]},
+  Author = {Fischer, Gerhard H.},
+  Booktitle = {{Moderne Testtheorie: Ein Abriss samt neuesten Beiträgen}},
+  Publisher = {Beltz},
+  Year = {1988},
+
+  Address = {Weinheim},
+  Editor = {Kubinger, Klaus D.},
+  Pages = {87--111}
+}
+
+@InCollection{Fisch:77,
+  Title = {Linear Logistic Trait Models: Theory and Application},
+  Author = {Fischer, Gerhard H.},
+  Booktitle = {Structural Models of Thinking and Learning},
+  Publisher = {Huber},
+  Year = {1977},
+
+  Address = {Bern},
+  Editor = {H. Spada and W. F. Kempf},
+  Pages = {203--225}
+}
+
+@Article{Fischer:1987,
+  Title = {{Applying the principles of specific objectivity and of generalizability to the measurement of change}},
+  Author = {Fischer, Gerhard H.},
+  Journal = {{Psychometrika}},
+  Year = {1987},
+  Pages = {565-587},
+  Volume = {52},
+
+  Doi = {10.1007/BF02294820}
+}
+
+@Article{Fischer:1981,
+  Title = {{On the existence and uniqueness of maximum-likelihood estimates in the Rasch model}},
+  Author = {Fischer, Gerhard H.},
+  Journal = {{Psychometrika}},
+  Year = {1981},
+  Number = {1},
+  Pages = {59--77},
+  Volume = {46},
+
+  Doi = {10.1007/BF02293919}
+}
+
+@Book{Fisch:74,
+  Title = {{Einführung in die Theorie psychologischer Tests [Introduction to Psychological Test Theory]}},
+  Author = {Fischer, Gerhard H.},
+  Publisher = {Huber},
+  Year = {1974},
+
+  Address = {Bern}
+}
+
+@Article{Fisch:73,
+  Title = {{The linear logistic test model as an instrument in educational research}},
+  Author = {Fischer, Gerhard H.},
+  Journal = {{Acta Psychologica}},
+  Year = {1973},
+  Number = {6},
+  Pages = {359--374},
+  Volume = {37},
+
+  Doi = {10.1016/0001-6918(73)90003-6}
+}
+
+@Article{FiPa:91,
+  Title = {{An extension of the rating scale model with an application to the measurement of change}},
+  Author = {Fischer, Gerhard H. and Parzer, P.},
+  Journal = {{Psychometrika}},
+  Year = {1991},
+  Number = {4},
+  Pages = {637--651},
+  Volume = {56},
+
+  Doi = {10.1007/BF02294496}
+}
+
+@InCollection{FischPonocny:95,
+  Title = {{Extended Rating Scale and Partial Credit Models for Assessing Change}},
+  Author = {Fischer, Gerhard H. and Ponocny, Ivo},
+  Booktitle = {{Rasch Models: Foundations, Recent Developments, and Applications}},
+  Publisher = {Springer},
+  Year = {1995},
+
+  Address = {New York},
+  Editor = {Fischer, Gerhard H. and Molenaar, Ivo W.},
+  Pages = {353--370},
+
+  Doi = {10.1007/978-1-4612-4230-7_19}
+}
+
+@Article{FiPo:94,
+  Title = {{An extension of the partial credit model with an application to the measurement of change}},
+  Author = {Fischer, Gerhard H. and Ponocny, Ivo},
+  Journal = {{Psychometrika}},
+  Year = {1994},
+  Number = {2},
+  Pages = {177--192},
+  Volume = {59},
+
+  Doi = {10.1007/BF02295182}
+}
+
+@Book{FiPS:98,
+  Title = {Structural Rasch Modeling: Handbook of the Usage of LPCM-WIN 1.0},
+  Author = {Fischer, Gerhard H. and Ponocny-Seliger, E.},
+  Publisher = {ProGAMMA},
+  Year = {1998},
+
+  Address = {Groningen}
+}
+
+@Article{FiSch:70,
+  Title = {{Algorithmen und Programme für das probabilistische Testmodell von Rasch [Algorithms and programs for Rasch's probabilistic test model]}},
+  Author = {Fischer, Gerhard H. and Scheiblechner, Hartmann},
+  Journal = {{Psychologische Beiträge}},
+  Year = {1970},
+  Pages = {23--51},
+  Volume = {12}
+}
+
+@InCollection{FiJr:92,
+  Title = {Objectivity in Measurement: A Philosophical History of \uppercase{R}asch's Separability Theorem},
+  Author = {Fisher Jr., W. P.},
+  Booktitle = {Objective Measurement: Theory into Practice, Volume 1},
+  Publisher = {Ablex},
+  Year = {1992},
+
+  Address = {Norwood, NJ},
+  Editor = {M. Wilson},
+  Pages = {29--60}
+}
+
+@InCollection{Glas:1992,
+  Title = {A Rasch Model with a Multivariate Distribution of Ability},
+  Author = {Glas, C. A. W.},
+  Booktitle = {Objective Measurement: Theory into Practice, Volume 1},
+  Publisher = {Ablex},
+  Year = {1992},
+
+  Address = {Norwood, NJ},
+  Editor = {M. Wilson},
+  Pages = {236--258}
+}
+
+@InCollection{Glas+Verhelst:1995a,
+  Title = {{Testing the Rasch Model}},
+  Author = {Glas, Cees A. W. and Verhelst, Norman D.},
+  Booktitle = {{Rasch Models: Foundations, Recent Developments, and Applications}},
+  Publisher = {Springer},
+  Year = {1995},
+
+  Address = {New York},
+  Editor = {Fischer, Gerhard H. and Molenaar, Ivo W.},
+  Pages = {69--96},
+
+  Doi = {10.1007/978-1-4612-4230-7_5}
+}
+
+@InCollection{Glas+Verhelst:1995b,
+  Title = {{Tests of Fit for Polytomous Rasch Models}},
+  Author = {Glas, Cees A. W. and Verhelst, Norman D.},
+  Booktitle = {{Rasch Models: Foundations, Recent Developments, and Applications}},
+  Publisher = {Springer},
+  Year = {1995},
+
+  Address = {New York},
+  Editor = {Fischer, Gerhard H. and Molenaar, Ivo W.},
+  Pages = {325--352},
+
+  Doi = {10.1007/978-1-4612-4230-7_18}
+}
+
+@Article{GlVe:89,
+  Title = {{Extensions of the partial credit model}},
+  Author = {Glas, C. A. W. and Verhelst, Norman D.},
+  Journal = {{Psychometrika}},
+  Year = {1989},
+  Number = {4},
+  Pages = {635--659},
+  Volume = {54},
+
+  Doi = {10.1007/BF02296401}
+}
+
+@Article{Gustafsson:1980,
+  Title = {{Testing and obtaining fit of data to the Rasch model}},
+  Author = {Gustafsson, J.-E.},
+  Journal = {{British Journal of Mathematical and Statistical Psychology}},
+  Year = {1980},
+  Number = {2},
+  Pages = {205--233},
+  Volume = {33},
+
+  Doi = {10.1111/j.2044-8317.1980.tb00609.x}
+}
+
+@Article{Haberman:77,
+  Title = {{Maximum Likelihood Estimates in Exponential Response Models}},
+  Author = {Haberman, Shelby J.},
+  Journal = {{The Annals of Statistics}},
+  Year = {1977},
+  Number = {5},
+  Pages = {815--841},
+  Volume = {5},
+
+  Url = {http://www.jstor.org/stable/2958512}
+}
+
+@Article{HatzingerRusch:2009:IRTwLLRA,
+  Title = {{IRT Models with Relaxed Assumptions in {\pkg{eRm}}: A manual-like instruction}},
+  Author = {Hatzinger, Reinhold and Rusch, Thomas},
+  Journal = {{Psychological Science Quarterly}},
+  Year = {2009},
+  Number = {1},
+  Pages = {87--120},
+  Volume = {51}
+}
+
+@InCollection{Ho:95,
+  Title = {{Linear and Repeated Measures Models for the Person Parameters}},
+  Author = {Hoijtink, Herbert},
+  Booktitle = {{Rasch Models: Foundations, Recent Developments, and Applications}},
+  Publisher = {Springer},
+  Year = {1995},
+
+  Address = {New York},
+  Editor = {Fischer, Gerhard H. and Molenaar, Ivo W.},
+  Pages = {203--214},
+
+  Doi = {10.1007/978-1-4612-4230-7_11}
+}
+
+@InCollection{Hoijtink+Boomsma:1995,
+  Title = {{On Person Parameter Estimation in the Dichotomous Rasch Model}},
+  Author = {Hoijtink, Herbert and Boomsma, Anne},
+  Booktitle = {{Rasch Models: Foundations, Recent Developments, and Applications}},
+  Publisher = {Springer},
+  Year = {1995},
+
+  Address = {New York},
+  Editor = {Fischer, Gerhard H. and Molenaar, Ivo W.},
+  Pages = {53--68},
+
+  Doi = {10.1007/978-1-4612-4230-7_4}
+}
+
+@Article{Jannarone:1986,
+  Title = {{Conjunctive item response theory kernels}},
+  Author = {Jannarone, Robert J.},
+  Journal = {{Psychometrika}},
+  Year = {1986},
+  Number = {3},
+  Pages = {357--373},
+  Volume = {51},
+
+  Doi = {10.1007/BF02294060}
+}
+
+@InCollection{Kubinger:1989,
+  Title = {Aktueller \uppercase{S}tand und kritische \uppercase{W}ürdigung der \uppercase{P}robabilistischen \uppercase{T}esttheorie [\uppercase{C}urrent status and critical appreciation of probabilistic test theory]},
+  Author = {Kubinger, Klaus D.},
+  Booktitle = {{Moderne Testtheorie: Ein Abriss samt neuesten Beiträgen}},
+  Publisher = {Beltz},
+  Year = {1989},
+
+  Address = {Weinheim},
+  Editor = {Kubinger, Klaus D.},
+  Pages = {19--83}
+}
+
+@Article{Kub:05,
+  Title = {{Psychological Test Calibration Using the Rasch Model---Some Critical Suggestions on Traditional Approaches}},
+  Author = {Kubinger, Klaus D.},
+  Journal = {{International Journal of Testing}},
+  Year = {2005},
+  Number = {4},
+  Pages = {377--394},
+  Volume = {5},
+
+  Doi = {10.1207/s15327574ijt0504_3}
+}
+
+@InCollection{Linacre:2004,
+  Title = {{Estimation Methods for Rasch Measures}},
+  Author = {Linacre, J. M.},
+  Booktitle = {{Introduction to Rasch Measurement}},
+  Publisher = {{JAM Press}},
+  Year = {2004},
+
+  Address = {Maple Grove, MN},
+  Editor = {Smith, Everett V. and Smith, Richard M.},
+  Pages = {25--48}
+}
+
+@Article{Li:94,
+  Title = {{More on the Computation of Higher-Order Derivatives of the Elementary Symmetric Functions in the Rasch Model}},
+  Author = {Liou, M.},
+  Journal = {{Applied Psychological Measurement}},
+  Year = {1994},
+  Number = {1},
+  Pages = {53--62},
+  Volume = {18},
+
+  Doi = {10.1177/014662169401800105}
+}
+
+@MastersThesis{Mair:2006,
+  Title = {Simulation Studies for Goodness-of-Fit Statistics in Item Response Theory},
+  Author = {P. Mair},
+  School = {Department of Psychology, University of Vienna},
+  Year = {2006}
+}
+
+@Article{Mair+Hatzinger:2007,
+  Title = {{Extended Rasch Modeling: The {\pkg{eRm}} Package for the Application of IRT Models in {\gnuR}}},
+  Author = {P. Mair and R. Hatzinger},
+  Journal = {{Journal of Statistical Software}},
+  Year = {2007},
+  Number = {9},
+  Pages = {1--20},
+  Volume = {20},
+
+  Url = {http://www.jstatsoft.org/v20/i09}
+}
+
+@Article{Mair+Hatzinger:2007b,
+  Title = {{{\acronym{CML}} based estimation of extended Rasch models with the {\pkg{eRm}} package in {\gnuR}}},
+  Author = {P. Mair and R. Hatzinger},
+  Journal = {{Psychology Science}},
+  Year = {2007},
+  Number = {1},
+  Pages = {26--43},
+  Volume = {49}
+}
+
+@Article{Mast:82,
+  Title = {{A Rasch model for partial credit scoring}},
+  Author = {Masters, Geoff N.},
+  Journal = {{Psychometrika}},
+  Year = {1982},
+  Number = {2},
+  Pages = {149--174},
+  Volume = {47},
+
+  Doi = {10.1007/BF02296272}
+}
+
+@Article{Mi:85,
+  Title = {{Estimation of Latent Group Effects}},
+  Author = {Mislevy, Robert J.},
+  Journal = {{Journal of the American Statistical Association}},
+  Year = {1985},
+  Number = {392},
+  Pages = {993--997},
+  Volume = {80},
+
+  Doi = {10.1080/01621459.1985.10478215}
+}
+
+@InCollection{Molenaar:1995,
+  Title = {{Estimation of Item Parameters}},
+  Author = {Molenaar, Ivo W.},
+  Booktitle = {{Rasch Models: Foundations, Recent Developments, and Applications}},
+  Publisher = {Springer},
+  Year = {1995},
+
+  Address = {New York},
+  Editor = {Fischer, Gerhard H. and Molenaar, Ivo W.},
+  Pages = {39--51},
+
+  Doi = {10.1007/978-1-4612-4230-7_3}
+}
+
+@InCollection{Pf:94,
+  Title = {{On Item Parameter Estimation in Certain Latent Trait Models}},
+  Author = {Pfanzagl, J.},
+  Booktitle = {{Contributions to Mathematical Psychology, Psychometrics, and Methodology}},
+  Publisher = {Springer},
+  Year = {1994},
+
+  Address = {New York},
+  Editor = {Fischer, Gerhard H. and Laming, Donald},
+  Pages = {249--263},
+
+  Doi = {10.1007/978-1-4612-4308-3_19}
+}
+
+@Book{Poinstingl+Mair+Hatzinger:07,
+  Title = {{Manual zum Softwarepackage {\eRm}: Anwendung des Rasch-Modells}},
+  Author = {Poinstingl, H. and Mair, P. and Hatzinger, R.},
+  Publisher = {{Pabst Science Publishers}},
+  Year = {2007},
+
+  Address = {Lengerich}
+}
+
+@Article{Ponocny:2002:ApplicabilitysomeIRT,
+  Title = {{On the Applicability of some IRT Models for Repeated Measurement Designs: Conditions, Consequences, and Goodness-of-Fit Tests}},
+  Author = {Ponocny, Ivo},
+  Journal = {{Methods of Psychological Research Online}},
+  Year = {2002},
+  Number = {1},
+  Pages = {21--40},
+  Volume = {7},
+
+  Url = {http://www.dgps.de/fachgruppen/methoden/mpr-online/issue16/art2/article.html}
+}
+
+@Article{Ponocny:2001,
+  Title = {{Nonparametric goodness-of-fit tests for the Rasch model}},
+  Author = {Ponocny, Ivo},
+  Journal = {{Psychometrika}},
+  Year = {2001},
+  Number = {3},
+  Pages = {437--460},
+  Volume = {66},
+
+  Doi = {10.1007/BF02294444}
+}
+
+@Article{Ra:77,
+  Title = {On specific objectivity: An attempt at formalising the request for generality and validity of scientific statements},
+  Author = {G. Rasch},
+  Journal = {Danish Yearbook of Philosophy},
+  Year = {1977},
+  Pages = {58--94},
+  Volume = {14}
+}
+
+@InCollection{Ra:61,
+  Title = {On General Laws and the Meaning of Measurement in Psychology},
+  Author = {Rasch, G.},
+  Booktitle = {Proceedings of the IV. Berkeley Symposium on Mathematical Statistics and Probability, Vol. IV},
+  Publisher = {University of California Press},
+  Year = {1961},
+
+  Address = {Berkeley},
+  Pages = {321--333}
+}
+
+@Book{Ra:60,
+  Title = {Probabilistic Models for some Intelligence and Attainment Tests},
+  Author = {Rasch, G.},
+  Publisher = {Danish Institute for Educational Research},
+  Year = {1960},
+
+  Address = {Copenhagen}
+}
+
+@Article{Riz:06,
+  Title = {{{\pkg{ltm}}: An {\gnuR} Package for Latent Variable Modeling and Item Response Analysis}},
+  Author = {Rizopoulos, Dimitris},
+  Journal = {{Journal of Statistical Software}},
+  Year = {2006},
+  Number = {5},
+  Pages = {1--25},
+  Volume = {17},
+
+  Url = {http://www.jstatsoft.org/v17/i05}
+}
+
+@InCollection{Rost:2001,
+  Title = {{The Growing Family of Rasch Models}},
+  Author = {Rost, Jürgen},
+  Booktitle = {{Essays on Item Response Theory}},
+  Publisher = {Springer},
+  Year = {2001},
+
+  Address = {New York},
+  Editor = {Boomsma, Anne and {van Duijn}, Marijtje A. J. and Snijders, Tom A. B.},
+  Pages = {25--42},
+  Series = {{Lecture Notes in Statistics}},
+  Volume = {157},
+
+  Doi = {10.1007/978-1-4613-0169-1_2}
+}
+
+@Article{Ro:99,
+  Title = {{Was ist aus dem Rasch-Modell geworden? [What has become of the Rasch Model?]}},
+  Author = {Rost, Jürgen},
+  Journal = {{Psychologische Rundschau}},
+  Year = {1999},
+  Number = {3},
+  Pages = {140--156},
+  Volume = {50},
+
+  Doi = {10.1026//0033-3042.50.3.140}
+}
+
+@InProceedings{RuschMaierHatzinger:2013:LLRA,
+  Title = {{Linear Logistic Models with Relaxed Assumptions in {\gnuR}}},
+  Author = {Rusch, Thomas and Maier, Marco Johannes and Hatzinger, Reinhold},
+  Booktitle = {{Algorithms from and for Nature and Life: Classification and Data Analysis}},
+  Year = {2013},
+
+  Address = {New York},
+  Editor = {Lausen, Berthold and {van den Poel}, Dirk and Ultsch, Alfred},
+  Pages = {337--344},
+  Publisher = {Springer},
+
+  Doi = {10.1007/978-3-319-00035-0_34}
+}
+
+@Article{Scheib:72,
+  Title = {{Das Lernen und Lösen komplexer Denkaufgaben [The learning and solving of complex reasoning items]}},
+  Author = {Scheiblechner, Hartmann},
+  Journal = {{Zeitschrift für experimentelle und angewandte Psychologie}},
+  Year = {1972},
+  Pages = {456--506},
+  Volume = {3}
+}
+
+@InCollection{Sm:04,
+  Title = {{Fit Analysis in Latent Trait Measurement Models}},
+  Author = {Smith, Richard M.},
+  Booktitle = {{Introduction to Rasch Measurement}},
+  Publisher = {{JAM Press}},
+  Year = {2004},
+
+  Address = {Maple Grove, MN},
+  Editor = {Smith, Everett V. and Smith, Richard M.},
+  Pages = {73--92}
+}
+
+@Article{Suarez+Glas:2003,
+  Title = {{Evaluation of global testing procedures for item fit to the Rasch model}},
+  Author = {Suárez-Falcón, J. C. and Glas, C. A. W.},
+  Journal = {{British Journal of Mathematical and Statistical Psychology}},
+  Year = {2003},
+  Number = {1},
+  Pages = {127--143},
+  Volume = {56},
+
+  Doi = {10.1348/000711003321645395}
+}
+
+@Article{VedB:01,
+  Title = {{Some Mantel-Haenszel tests of Rasch model assumptions}},
+  Author = {Verguts, T. and {de Boeck}, P.},
+  Journal = {{British Journal of Mathematical and Statistical Psychology}},
+  Year = {2001},
+  Number = {1},
+  Pages = {21--37},
+  Volume = {54},
+
+  Doi = {10.1348/000711001159401}
+}
+
+@Article{Verhelst+Hatzinger+Mair:2007,
+  Title = {{The Rasch sampler}},
+  Author = {N. Verhelst and R. Hatzinger and P. Mair},
+  Journal = {{Journal of Statistical Software}},
+  Year = {2007},
+  Number = {4},
+  Pages = {1--14},
+  Volume = {20},
+
+  Url = {http://www.jstatsoft.org/v20/i04}
+}
+
+@InCollection{Verhelst+Glas:1995,
+  Title = {{The One Parameter Logistic Model}},
+  Author = {Verhelst, Norman D. and Glas, Cees A. W.},
+  Booktitle = {{Rasch Models: Foundations, Recent Developments, and Applications}},
+  Publisher = {Springer},
+  Year = {1995},
+
+  Address = {New York},
+  Editor = {Fischer, Gerhard H. and Molenaar, Ivo W.},
+  Pages = {215--238},
+
+  Doi = {10.1007/978-1-4612-4230-7_12}
+}
+
+@Book{Davier:1998,
+  Title = {{WINMIRA: A Windows program for mixed Rasch models}},
+  Author = {{von Davier}, M.},
+  Publisher = {IPN},
+  Year = {1998},
+
+  Address = {Kiel}
+}
+
+@InCollection{RoDa:95,
+  Title = {{Polytomous Mixed Rasch Models}},
+  Author = {{von Davier}, Matthias and Rost, Jürgen},
+  Booktitle = {{Rasch Models: Foundations, Recent Developments, and Applications}},
+  Publisher = {Springer},
+  Year = {1995},
+
+  Address = {New York},
+  Editor = {Fischer, Gerhard H. and Molenaar, Ivo W.},
+  Pages = {371--382},
+
+  Doi = {10.1007/978-1-4612-4230-7_20}
+}
+
+@Article{Warm:1989,
+  Title = {{Weighted likelihood estimation of ability in item response theory}},
+  Author = {Warm, Thomas A.},
+  Journal = {{Psychometrika}},
+  Year = {1989},
+  Number = {3},
+  Pages = {427--450},
+  Volume = {54},
+
+  Doi = {10.1007/BF02294627}
+}
+
+@Article{Wright+Panchapakesan:1969,
+  Title = {{A Procedure for Sample-Free Item Analysis}},
+  Author = {B. Wright and N. Panchapakesan},
+  Journal = {{Educational and Psychological Measurement}},
+  Year = {1969},
+  Number = {1},
+  Pages = {23--48},
+  Volume = {29},
+
+  Doi = {10.1177/001316446902900102}
+}
+
+@Book{Wright+Masters:1982,
+  Title = {{Rating Scale Analysis: Rasch Measurement}},
+  Author = {Wright, B. D. and Masters, G. N.},
+  Publisher = {Mesa Press},
+  Year = {1982},
+
+  Address = {Chicago}
+}
+
diff --git a/inst/doc/modelhierarchy.pdf b/vignettes/modelhierarchy.pdf
old mode 100755
new mode 100644
similarity index 100%
rename from inst/doc/modelhierarchy.pdf
rename to vignettes/modelhierarchy.pdf

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-erm.git