[r-cran-erm] 05/33: Import Upstream version 0.12-0

Andreas Tille tille at debian.org
Mon Dec 12 11:19:32 UTC 2016


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-erm.

commit 699359a76c28c4ab3fa3d6527bb6066646516f2d
Author: Andreas Tille <tille at debian.org>
Date:   Mon Dec 12 11:20:00 2016 +0100

    Import Upstream version 0.12-0
---
 COPYING                     |  340 ++++
 COPYRIGHTS                  |   10 +
 DESCRIPTION                 |   28 +
 NAMESPACE                   |   80 +
 NEWS                        |   52 +
 R/IC.default.R              |    4 +
 R/IC.ppar.r                 |   60 +
 R/IC.r                      |    2 +
 R/LLTM.R                    |   49 +
 R/LPCM.R                    |   52 +
 R/LRSM.R                    |   48 +
 R/LRtest.R                  |    3 +
 R/LRtest.Rm.R               |  190 ++
 R/MLoef.R                   |  102 +
 R/NPtest.R                  |  354 ++++
 R/PCM.R                     |   54 +
 R/RM.R                      |   54 +
 R/ROCR_aux.R                |   88 +
 R/RSM.R                     |   55 +
 R/Rsquared.R                |   23 +
 R/Waldtest.R                |    3 +
 R/Waldtest.Rm.R             |  169 ++
 R/checkdata.R               |  159 ++
 R/cldeviance.R              |   58 +
 R/cmlprep.R                 |   74 +
 R/coef.eRm.R                |    9 +
 R/coef.ppar.R               |    6 +
 R/confint.eRm.r             |   27 +
 R/confint.ppar.r            |   22 +
 R/confint.threshold.r       |   18 +
 R/cwdeviance.r              |   16 +
 R/datcheck.LRtest.r         |   44 +
 R/datcheck.R                |  119 ++
 R/datprep_LLTM.R            |   56 +
 R/datprep_LPCM.R            |   87 +
 R/datprep_LRSM.R            |   94 +
 R/datprep_PCM.R             |   57 +
 R/datprep_RM.R              |   26 +
 R/datprep_RSM.R             |   65 +
 R/fitcml.R                  |   91 +
 R/gofIRT.R                  |    1 +
 R/gofIRT.ppar.R             |   58 +
 R/hoslem.R                  |   31 +
 R/itemfit.R                 |    3 +
 R/itemfit.ppar.r            |   31 +
 R/labeling.internal.r       |   96 +
 R/likLR.R                   |   40 +
 R/logLik.eRm.r              |   11 +
 R/logLik.ppar.r             |    9 +
 R/model.matrix.eRm.R        |    3 +
 R/performance.R             |  305 +++
 R/performance_measures.R    |  482 +++++
 R/performance_plots.R       |  533 +++++
 R/person.parameter.R        |    1 +
 R/person.parameter.eRm.R    |  244 +++
 R/personfit.R               |    3 +
 R/personfit.ppar.R          |   42 +
 R/pifit.internal.r          |   40 +
 R/plist.internal.R          |   25 +
 R/plot.ppar.r               |   41 +
 R/plotCI.R                  |  163 ++
 R/plotGOF.LR.R              |  200 ++
 R/plotGOF.R                 |    3 +
 R/plotICC.R                 |    3 +
 R/plotICC.Rm.R              |  178 ++
 R/plotPImap.R               |  109 ++
 R/plotjointICC.R            |    3 +
 R/plotjointICC.dRm.R        |   54 +
 R/pmat.R                    |    3 +
 R/pmat.default.R            |    4 +
 R/pmat.ppar.R               |   64 +
 R/predict.ppar.R            |   26 +
 R/prediction.R              |  179 ++
 R/print.ICr.r               |    8 +
 R/print.LR.R                |   12 +
 R/print.MLoef.r             |   22 +
 R/print.eRm.R               |   27 +
 R/print.gof.R               |   10 +
 R/print.ifit.R              |   17 +
 R/print.logLik.eRm.r        |    8 +
 R/print.logLik.ppar.r       |    7 +
 R/print.pfit.R              |   17 +
 R/print.ppar.R              |   72 +
 R/print.resid.R             |   18 +
 R/print.step.r              |   24 +
 R/print.threshold.r         |   10 +
 R/print.wald.R              |   13 +
 R/residuals.ppar.R          |    9 +
 R/rostdeviance.r            |   26 +
 R/sim.2pl.R                 |   54 +
 R/sim.locdep.R              |   55 +
 R/sim.rasch.R               |   38 +
 R/sim.xdim.R                |   87 +
 R/stepwiseIt.R              |    3 +
 R/stepwiseIt.eRm.R          |  148 ++
 R/summary.LR.r              |   32 +
 R/summary.MLoef.r           |   37 +
 R/summary.eRm.R             |   47 +
 R/summary.gof.R             |   20 +
 R/summary.ppar.R            |   40 +
 R/summary.threshold.r       |   10 +
 R/thresholds.eRm.r          |   61 +
 R/thresholds.r              |    1 +
 R/vcov.eRm.R                |   11 +
 R/zzz.R                     |   38 +
 data/lltmdat1.rda           |  Bin 0 -> 955 bytes
 data/lltmdat2.rda           |  Bin 0 -> 125 bytes
 data/lpcmdat.rda            |  Bin 0 -> 213 bytes
 data/lrsmdat.rda            |  Bin 0 -> 317 bytes
 data/pcmdat.rda             |  Bin 0 -> 210 bytes
 data/pcmdat2.rda            |  Bin 0 -> 710 bytes
 data/raschdat1.rda          |  Bin 0 -> 957 bytes
 data/raschdat2.rda          |  Bin 0 -> 156 bytes
 data/rsmdat.rda             |  Bin 0 -> 199 bytes
 inst/doc/Rplots.pdf         | 4506 +++++++++++++++++++++++++++++++++++++++++++
 inst/doc/UCML.jpg           |  Bin 0 -> 42742 bytes
 inst/doc/Z.cls              |  239 +++
 inst/doc/eRmvig.R           |  101 +
 inst/doc/eRmvig.Rnw         | 1006 ++++++++++
 inst/doc/eRmvig.bib         |  695 +++++++
 inst/doc/eRmvig.pdf         |  Bin 0 -> 433168 bytes
 inst/doc/index.html         |   10 +
 inst/doc/jss.bst            | 1647 ++++++++++++++++
 inst/doc/modelhierarchy.pdf |  Bin 0 -> 7638 bytes
 man/IC.Rd                   |   45 +
 man/LLTM.Rd                 |   90 +
 man/LPCM.Rd                 |   81 +
 man/LRSM.Rd                 |   80 +
 man/LRtest.Rd               |  136 ++
 man/MLoef.Rd                |   71 +
 man/NPtest.Rd               |  167 ++
 man/PCM.Rd                  |   67 +
 man/RM.Rd                   |   79 +
 man/RSM.Rd                  |   67 +
 man/Waldtest.Rd             |   64 +
 man/eRm-package.Rd          |   66 +
 man/gofIRT.Rd               |   60 +
 man/itemfit.ppar.Rd         |   84 +
 man/person.parameter.Rd     |  102 +
 man/plotICC.Rd              |  124 ++
 man/plotPImap.Rd            |   53 +
 man/predict.ppar.Rd         |   38 +
 man/print.eRm.Rd            |   64 +
 man/raschdat.Rd             |   30 +
 man/sim.2pl.Rd              |   61 +
 man/sim.locdep.Rd           |   55 +
 man/sim.rasch.Rd            |   40 +
 man/sim.xdim.Rd             |   64 +
 man/stepwiseIt.Rd           |   68 +
 man/thresholds.Rd           |   75 +
 src/components.c            |   68 +
 src/components.h            |   34 +
 src/geodist.c               |   74 +
 src/geodist.h               |   34 +
 154 files changed, 17163 insertions(+)

diff --git a/COPYING b/COPYING
new file mode 100755
index 0000000..d60c31a
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,340 @@
+		    GNU GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+

+		    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+

+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+

+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+

+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+			    NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+

+	    How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year  name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/COPYRIGHTS b/COPYRIGHTS
new file mode 100755
index 0000000..bfcbe3f
--- /dev/null
+++ b/COPYRIGHTS
@@ -0,0 +1,10 @@
+COPYRIGHT STATUS
+----------------
+
+This code is
+
+  Copyright (C) 2009 Patrick Mair and Reinhold Hatzinger
+
+All code is subject to the GNU General Public License, Version 2. See
+the file COPYING for the exact conditions under which you may
+redistribute it.
diff --git a/DESCRIPTION b/DESCRIPTION
new file mode 100755
index 0000000..ff263e2
--- /dev/null
+++ b/DESCRIPTION
@@ -0,0 +1,28 @@
+Package: eRm
+Type: Package
+Title: Extended Rasch Modeling.
+Version: 0.12-0
+Date: 2010-04-07
+Author: Patrick Mair, Reinhold Hatzinger, Marco Maier
+Maintainer: Patrick Mair <patrick.mair at wu.ac.at>
+Description: eRm fits Rasch models (RM), linear logistic test models
+        (LLTM), rating scale model (RSM), linear rating scale models
+        (LRSM), partial credit models (PCM), and linear partial credit
+        models (LPCM). Missing values are allowed in the data matrix.
+        Additional features are the ML estimation of the person
+        parameters, Andersen's LR-test, item-specific Wald test,
+        Martin-Löf-Test, nonparametric Monte-Carlo Tests, itemfit and
+        personfit statistics including infit and outfit measures,
+        various ICC and related plots, automated stepwise item
+        elimination, simulation module for various binary data
+        matrices. An eRm platform is provided at R-forge (see URL).
+License: GPL
+Encoding: latin1
+URL: http://r-forge.r-project.org/projects/erm/
+Imports: graphics, stats, MASS, methods
+Depends: R (>= 2.9.0), gtools, splines, methods, RaschSampler
+LazyData: yes
+LazyLoad: yes
+Packaged: 2010-04-08 14:17:05 UTC; hatz
+Repository: CRAN
+Date/Publication: 2010-04-08 14:57:49
diff --git a/NAMESPACE b/NAMESPACE
new file mode 100755
index 0000000..473d9ac
--- /dev/null
+++ b/NAMESPACE
@@ -0,0 +1,80 @@
+useDynLib(eRm)
+import("stats", "graphics")
+export(RM)
+export(LLTM)
+export(RSM)
+export(LRSM)
+export(PCM)
+export(LPCM)
+export(LRtest)
+export(MLoef)
+export(itemfit)
+export(person.parameter)
+export(personfit)
+export(plotGOF)
+export(plotICC)
+export(plotjointICC)
+export(plotPImap)
+export(pmat)
+export(Waldtest)
+export(IC)
+export(thresholds)
+export(sim.2pl)
+export(sim.rasch)
+export(sim.xdim)
+export(sim.locdep)
+export(stepwiseIt)
+export(gofIRT)
+export(NPtest)
+
+S3method(print, eRm)
+S3method(summary, eRm)
+S3method(summary, ppar)
+S3method(summary, LR)
+S3method(summary, MLoef)
+S3method(model.matrix, eRm)
+S3method(coef, eRm)
+S3method(coef, ppar)
+S3method(vcov, eRm)
+S3method(print, LR)
+S3method(print, MLoef)
+S3method(print, ifit)
+S3method(print, wald)
+S3method(print, pfit)
+S3method(print, ppar)
+S3method(print, step)
+S3method(itemfit, ppar)
+S3method(LRtest, Rm)
+S3method(person.parameter, eRm)
+S3method(personfit, ppar)
+S3method(plotGOF, LR)
+S3method(plotICC, Rm)
+S3method(plotjointICC, dRm)
+S3method(plot, ppar)
+S3method(pmat, ppar)
+S3method(residuals, ppar)
+S3method(Waldtest, Rm)
+S3method(logLik, eRm)
+S3method(logLik, ppar)
+S3method(print, logLik.eRm)
+S3method(print, logLik.ppar)
+S3method(print, threshold)
+S3method(summary, threshold)
+S3method(thresholds, eRm)
+S3method(print, ICr)
+S3method(confint, eRm)
+S3method(confint, ppar)
+S3method(confint, threshold)
+S3method(IC, ppar)
+S3method(stepwiseIt, eRm)
+S3method(gofIRT, ppar)
+S3method(predict, ppar)
+S3method(summary, gof)
+S3method(print, gof)
+S3method(print, T1obj)
+S3method(print, T2obj)
+S3method(print, T4obj)
+S3method(print, T7obj)
+S3method(print, T7aobj)
+S3method(print, T10obj)
+S3method(print, T11obj)
diff --git a/NEWS b/NEWS
new file mode 100755
index 0000000..7ca4d94
--- /dev/null
+++ b/NEWS
@@ -0,0 +1,52 @@
+Changes in Version 0.12-0
+
+  o for RM, RSM, and PCM: eta parameters are now
+    displayed as difficulty parameters
+    print and summary methods changed accordingly
+
+  o new labeling of eta parameters in RM, RSM, and PCM.
+    they now are labeled according to the estimated
+    parameters for items (RM), items + categories (RSM),
+    items x categories (PCM)
+
+  o function MLoef for Martin-Loef-Test added
+
+  o df in personfit and itemfit corrected
+
+  o the logLik functions now extract the log-likelihood
+    and df into objects of class logLik.eRm and loglik.ppar
+    with elements loglik and df. the corresponding
+    print methods have been modified accordingly.
+
+  o function coef.ppar to extract person parameter estimates added
+
+  o option for beta parameters added to coef.eRm
+
+  o in confint.eRm: default parm = "beta"
+
+  o minor modifications in the help file for IC()
+
+  o plotPImap: revised rug added, bug concerning item.subset fixed,
+    minor modifications to enhance readability
+
+  o minor modifications in plotjointICC: allows for main title and colors,
+    option legpos = FALSE suppresses legends, dev.new removed,
+    legend = FALSE produced incorrect labeling
+
+  o minor modifications in plotICC: allows for main title and colors,
+    default coloring with col = NULL instead of NA for compatibility,
+    option legpos = FALSE suppresses legends, mplot is now FALSE if
+    only one item is specified
+
+  o plot.ppar: dev.new removed
+
+  o option 'visible' in print.ifit and print.pfit to allow for avoiding
+    overly long output and for extraction of infit and outfit values
+    (maybe changed to a coef method later)
+
+  o strwrap() for NPtest print methods to break long lines
+
+  o new methods IC.default and pmat.default for enhanced error messages
+
+  o lazy loading package and datafiles
+
diff --git a/R/IC.default.R b/R/IC.default.R
new file mode 100755
index 0000000..51bf8ad
--- /dev/null
+++ b/R/IC.default.R
@@ -0,0 +1,4 @@
+`IC.default` <-
+function(object)
+# error message for using incorrect object
+{ stop("IC() requires object of class 'ppar', obtained from person.parameter()") }
diff --git a/R/IC.ppar.r b/R/IC.ppar.r
new file mode 100755
index 0000000..0c865bf
--- /dev/null
+++ b/R/IC.ppar.r
@@ -0,0 +1,60 @@
IC.ppar <- function(object)
{
#computes loglik, AIC, BIC, and cAIC  for JML, MML, CML
#object of class ppar (from person.parameter())
#returns an object of class "ICr" holding ICtable with rows
#joint/marginal/conditional log-lik and columns value, npar, AIC, BIC, cAIC


  #---------- full likelihood ----------
  X <- object$X
  # drop excluded persons from X01 so its rows line up with pmat(object)
  if (length(object$pers.ex) > 0) X01 <- object$X01[-object$pers.ex,] else X01 <- object$X01
  mt_vek <- apply(X,2,max,na.rm=TRUE)             #number of categories - 1 for each item
  mt_ind <- rep(1:length(mt_vek),mt_vek)          #item index for each category column
  mt_seq <- sequence(mt_vek)
  gmemb <- object$gmemb

  pmat <- pmat(object)
  pmat.l0 <- tapply(1:length(mt_ind),mt_ind, function(ind) { #expand pmat for 0-th category
                             vec0 <- 1-rowSums(as.matrix(pmat[,ind]))     #prob for 0th category
                             cbind(vec0,pmat[,ind])
                            })
  pmat0 <- matrix(unlist(pmat.l0),nrow=length(gmemb))        #probability matrix, 0th category included
  X01.l0 <- tapply(1:length(mt_ind), mt_ind, function(ind) { #expand X01 for 0-th category
                            vec0 <- 1-rowSums(as.matrix(X01[,ind]))     #indicator for 0th category
                            cbind(vec0,X01[,ind])
                            })
  X010 <- matrix(unlist(X01.l0),nrow=length(gmemb))          #X01 matrix 0th category included
  # joint log-likelihood: log-probabilities of the responses actually observed
  loglik.full <- sum(log(na.exclude(pmat0[X010 == 1])))      #vector of "observed" solving probabilities

  N.ex <- dim(object$X.ex)[1]                                #number of persons (incl. excluded ones)
  npar.full <- (dim(object$W)[2])+sum(object$npar)           #number of item + person parameters
  AIC.full <- -2*loglik.full + 2*npar.full
  BIC.full <- -2*loglik.full + log(N.ex)*npar.full
  cAIC.full <- -2*loglik.full + log(N.ex)*npar.full + npar.full
  fullvec <- c(loglik.full, npar.full, AIC.full, BIC.full, cAIC.full)

  #------------ MML -----------
  N <- dim(object$X)[1]
  rv <- rowSums(object$X, na.rm = TRUE)                       #person raw scores
  # NOTE(review): the raw-score distribution parameters are commented out
  # of npar.mml below -- confirm this is intentional
  npar.mml <- (dim(object$W)[2])#+(length(table(rv)))
  lmml <- sum(table(rv)*log(table(rv)/N))+object$loglik.cml   #MML likelihood
  AIC.mml <- -2*lmml + 2*npar.mml
  BIC.mml <- -2*lmml + log(N)*npar.mml
  cAIC.mml <- -2*lmml + log(N)*npar.mml + npar.mml
  mmlvec <- c(lmml, npar.mml, AIC.mml, BIC.mml, cAIC.mml)

  #------------- CML ---------------
  npar.cml <- dim(object$W)[2]
  lcml <- object$loglik.cml
  AIC.cml <- -2*lcml + 2*npar.cml
  BIC.cml <- -2*lcml + log(N)*npar.cml
  cAIC.cml <- -2*lcml + log(N)*npar.cml + npar.cml
  cmlvec <- c(lcml, npar.cml, AIC.cml, BIC.cml, cAIC.cml)

  ICtable <- rbind(fullvec, mmlvec, cmlvec)
  rownames(ICtable) <- c("joint log-lik", "marginal log-lik", "conditional log-lik")
  colnames(ICtable) <- c("value", "npar", "AIC", "BIC", "cAIC")

  result <- list(ICtable = ICtable)
  class(result) <- "ICr"
  result
}
diff --git a/R/IC.r b/R/IC.r
new file mode 100755
index 0000000..40cd9d7
--- /dev/null
+++ b/R/IC.r
@@ -0,0 +1,2 @@
# generic for information criteria (AIC, BIC, cAIC); dispatches on the
# class of 'object' -- see IC.ppar for the main method, IC.default otherwise
`IC` <-
function(object)UseMethod("IC")
diff --git a/R/LLTM.R b/R/LLTM.R
new file mode 100755
index 0000000..b28a0e8
--- /dev/null
+++ b/R/LLTM.R
@@ -0,0 +1,49 @@
`LLTM` <-
function(X, W, mpoints = 1, groupvec = 1, se = TRUE, sum0 = TRUE, etaStart)
{
#...X: person*(item*times) matrix (T1|T2|...)
#...W: design matrix; if missing, NA is passed on and a default design is
#      presumably built downstream (in likLR) -- TODO confirm
#...mpoints: number of measurement points
#...groupvec: group membership vector (1 = one group)
#...se: whether standard errors are computed
#...sum0: whether parameters are normalized to sum-0
#...etaStart: optional starting values for the eta parameters

model <- "LLTM"
call<-match.call()

if (missing(W)) W <- NA
else W <- as.matrix(W)

if (missing(etaStart)) etaStart <- NA
else etaStart <- as.vector(etaStart)

# NOTE(review): unlike LPCM, the groupvec returned by datcheck is not
# re-assigned here -- confirm this is intentional
XWcheck <- datcheck(X,W,mpoints,groupvec,model)                              #initial check of X and W
X <- XWcheck$X

lres <- likLR(X,W,mpoints,groupvec,model,st.err=se,sum0,etaStart)
parest <- lres$parest                             #full groups for parameter estimation

loglik <- -parest$minimum                         #log-likelihood value
iter <- parest$iterations                         #number of iterations
convergence <- parest$code
etapar <- parest$estimate                         #eta estimates
betapar <- as.vector(lres$W%*% etapar)            #beta estimates
if (se) {
  se.eta <- sqrt(diag(solve(parest$hessian)))         #standard errors
  se.beta <- sqrt(diag(lres$W%*%solve(parest$hessian)%*%t(lres$W)))   #se beta (delta method via W)
} else {
  se.eta <- rep(NA,length(etapar))
  se.beta <- rep(NA,length(betapar))
}

X01 <- lres$X01
labs <- labeling.internal(model,X,X01,lres$W,etapar,betapar,mpoints,max(groupvec))    #labeling for L-models
W <- labs$W
etapar <- labs$etapar
betapar <- labs$betapar

npar <- dim(lres$W)[2]                            #number of parameters

result <- list(X=X,X01=X01,model=model,loglik=loglik,npar=npar,iter=iter,convergence=convergence,
               etapar=etapar,se.eta=se.eta,hessian=parest$hessian,betapar=betapar,
               se.beta=se.beta,W=W,mpoints=mpoints,ngroups=max(groupvec),groupvec=groupvec,call=call)

class(result) <- "eRm"                         #classes: simple RM and extended RM
result
}
+
diff --git a/R/LPCM.R b/R/LPCM.R
new file mode 100755
index 0000000..c9e930f
--- /dev/null
+++ b/R/LPCM.R
@@ -0,0 +1,52 @@
`LPCM` <-
function(X, W, mpoints = 1, groupvec = 1, se = TRUE, sum0 = TRUE, etaStart)
{
# linear partial credit model
#...X: person*(item*times) data matrix
#...W: design matrix (missing -> NA, default built downstream)
#...mpoints: number of measurement points
#...groupvec: group membership vector (1 = one group)
#...se/sum0/etaStart: see LLTM

#-------------------main programm-------------------

model <- "LPCM"
call<-match.call()

if (missing(W)) W <- NA
else W <- as.matrix(W)

if (missing(etaStart)) etaStart <- NA
else etaStart <- as.vector(etaStart)

XWcheck <- datcheck(X,W,mpoints,groupvec,model)                              #initial check of X and W
groupvec <- XWcheck$groupvec
X <- XWcheck$X

lres <- likLR(X,W,mpoints,groupvec,model,st.err=se,sum0,etaStart)
parest <- lres$parest                             #full groups for parameter estimation

loglik <- -parest$minimum                         #log-likelihood value
iter <- parest$iterations                         #number of iterations
convergence <- parest$code
etapar <- parest$estimate                         #eta estimates
betapar <- as.vector(lres$W%*% etapar)            #beta estimates
if (se) {
  se.eta <- sqrt(diag(solve(parest$hessian)))         #standard errors
  se.beta <- sqrt(diag(lres$W%*%solve(parest$hessian)%*%t(lres$W)))   #se beta (delta method via W)
} else {
  se.eta <- rep(NA,length(etapar))
  se.beta <- rep(NA,length(betapar))
}


X01 <- lres$X01
labs <- labeling.internal(model,X,X01,lres$W,etapar,betapar,mpoints,max(groupvec))    #labeling for L-models
W <- labs$W
etapar <- labs$etapar
betapar <- labs$betapar

npar <- dim(lres$W)[2]                            #number of parameters

result <- list(X=X,X01=X01,model=model,loglik=loglik,npar=npar,iter=iter,convergence=convergence,
               etapar=etapar,se.eta=se.eta,hessian=parest$hessian,betapar=betapar,
               se.beta=se.beta,W=W,mpoints=mpoints,ngroups=max(groupvec),groupvec=groupvec,call=call)

class(result) <- "eRm"                                 #classes: simple RM and extended RM
result
}
+
diff --git a/R/LRSM.R b/R/LRSM.R
new file mode 100755
index 0000000..6e601ff
--- /dev/null
+++ b/R/LRSM.R
@@ -0,0 +1,48 @@
`LRSM` <-
function(X, W, mpoints = 1, groupvec = 1, se = TRUE, sum0 = TRUE, etaStart)
{
# linear rating scale model
#...X: person*(item*times) data matrix
#...W: design matrix (missing -> NA, default built downstream)
#...mpoints: number of measurement points
#...groupvec: group membership vector (1 = one group)
#...se/sum0/etaStart: see LLTM

model <- "LRSM"
call<-match.call()

if (missing(W)) W <- NA
else W <- as.matrix(W)

if (missing(etaStart)) etaStart <- NA
else etaStart <- as.vector(etaStart)

# NOTE(review): unlike LPCM, the groupvec returned by datcheck is not
# re-assigned here -- confirm this is intentional
XWcheck <- datcheck(X,W,mpoints,groupvec,model)                              #initial check of X and W
X <- XWcheck$X

lres <- likLR(X,W,mpoints,groupvec,model,st.err=se,sum0,etaStart)
parest <- lres$parest                             #full groups for parameter estimation

loglik <- -parest$minimum                         #log-likelihood value
iter <- parest$iterations                         #number of iterations
convergence <- parest$code
etapar <- parest$estimate                         #eta estimates
betapar <- as.vector(lres$W%*% etapar)            #beta estimates
if (se) {
  se.eta <- sqrt(diag(solve(parest$hessian)))         #standard errors
  se.beta <- sqrt(diag(lres$W%*%solve(parest$hessian)%*%t(lres$W)))   #se beta (delta method via W)
} else {
  se.eta <- rep(NA,length(etapar))
  se.beta <- rep(NA,length(betapar))
}

X01 <- lres$X01
labs <- labeling.internal(model,X,X01,lres$W,etapar,betapar,mpoints,max(groupvec))    #labeling for L-models
W <- labs$W
etapar <- labs$etapar
betapar <- labs$betapar

npar <- dim(lres$W)[2]                            #number of parameters

result <- list(X=X,X01=X01,model=model,loglik=loglik,npar=npar,iter=iter,convergence=convergence,
               etapar=etapar,se.eta=se.eta,hessian=parest$hessian,betapar=betapar,
               se.beta=se.beta,W=W,mpoints=mpoints,ngroups=max(groupvec),groupvec=groupvec,call=call)

class(result) <- "eRm"                                 #classes: simple RM and extended RM
result
}
+
diff --git a/R/LRtest.R b/R/LRtest.R
new file mode 100755
index 0000000..cc07687
--- /dev/null
+++ b/R/LRtest.R
@@ -0,0 +1,3 @@
# generic for the Andersen likelihood-ratio test; dispatches on the class
# of 'object' (see LRtest.Rm for fitted Rasch-type models)
`LRtest` <-
function(object,splitcr="median",se=FALSE)UseMethod("LRtest")
+
diff --git a/R/LRtest.Rm.R b/R/LRtest.Rm.R
new file mode 100755
index 0000000..84033d3
--- /dev/null
+++ b/R/LRtest.Rm.R
@@ -0,0 +1,190 @@
`LRtest.Rm` <-
function(object, splitcr="median", se=FALSE)
{
# performs Andersen LR-test
# object... object of class RM
# splitcr... splitting criterion for LR-groups. "all.r" corresponds to a complete
#            raw score split (r=1,...,k-1), "median" to a median raw score split,
#            "mean" corresponds to the mean raw score split.
#            optionally also a vector of length n for group split can be submitted.
# se...whether standard errors should be computed
# returns an object of class "LR" with LR value, df, p-value and the
# per-subgroup parameter estimates


call<-match.call()

spl.gr<-NULL

X.original<-object$X
if (length(splitcr)>1 && is.character(splitcr)){    # if splitcr is character vector, treated as factor
   splitcr<-as.factor(splitcr)
}
if (is.factor(splitcr)){
   spl.nam<-deparse(substitute(splitcr))
   spl.lev<-levels(splitcr)
   spl.gr<-paste(spl.nam,spl.lev,sep=" ")
   splitcr<-unclass(splitcr)
}

numsplit<-is.numeric(splitcr)
if (any(is.na(object$X))) {
  # with missing values, persons are first grouped by their NA pattern and
  # the mean/median split is carried out within each NA group
  if (!numsplit && splitcr=="mean") {                                   #mean split
    spl.gr<-c("Raw Scores < Mean", "Raw Scores >= Mean")
    X<-object$X
    # calculates index for NA groups
    # from person.parameter.eRm
      dichX <- ifelse(is.na(X),1,0)
      strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
      gmemb <- as.vector(data.matrix(data.frame(strdata)))
    gindx<-unique(gmemb)
    rsum.all<-rowSums(X,na.rm=T)
    grmeans<-tapply(rsum.all,gmemb,mean)      #sorted
    ngr<-table(gmemb)                         #sorted
    m.all<-rep(grmeans,ngr)                   #sorted,expanded
    rsum.all<-rsum.all[order(gmemb)]
    spl<-ifelse(rsum.all<m.all,1,2)
    splitcr<-spl
    object$X<-X[order(gmemb),]
  }
  if (!numsplit && splitcr=="median") {                                   #median split
    spl.gr<-c("Raw Scores <= Median", "Raw Scores > Median")
    # cat("Warning message: Persons with median raw scores are assigned to the lower raw score group!\n")
    X<-object$X
    # calculates index for NA groups
    # from person.parameter.eRm
      dichX <- ifelse(is.na(X),1,0)
      strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
      gmemb <- as.vector(data.matrix(data.frame(strdata)))
    gindx<-unique(gmemb)
    rsum.all<-rowSums(X,na.rm=T)
    grmed<-tapply(rsum.all,gmemb,median)      #sorted
    ngr<-table(gmemb)                         #sorted
    m.all<-rep(grmed,ngr)                     #sorted,expanded
    rsum.all<-rsum.all[order(gmemb)]
    spl<-ifelse(rsum.all<=m.all,1,2)
    splitcr<-spl
    object$X<-X[order(gmemb),]
  }
}

if (!is.numeric(splitcr)) {
  if (splitcr=="all.r") {                                    #full raw score split
    rvind <- apply(object$X,1,sum,na.rm=TRUE)                      #person raw scores
    Xlist <- by(object$X,rvind,function(x) x)
    names(Xlist) <- as.list(sort(unique(rvind)))   #FIX: was unique(rv); 'rv' is undefined in this branch
    }

  if (splitcr=="median") {                                   #median split
    spl.gr<-c("Raw Scores <= Median", "Raw Scores > Median")
    cat("Warning message: Persons with median raw scores are assigned to the lower raw score group!\n")
    rv <- apply(object$X,1,sum,na.rm=TRUE)
    rvsplit <- median(rv)
    rvind <- rep(0,length(rv))
    rvind[rv > rvsplit] <- 1                                 #group with high raw scores
    Xlist <- by(object$X,rvind,function(x) x)
    names(Xlist) <- list("low","high")
    }

  if (splitcr=="mean") {                                     #mean split
    spl.gr<-c("Raw Scores < Mean", "Raw Scores >= Mean")
    rv <- apply(object$X,1,sum,na.rm=TRUE)
    rvsplit <- mean(rv)
    rvind <- rep(0,length(rv))
    rvind[rv > rvsplit] <- 1                                 #group with high raw scores
    Xlist <- by(object$X,rvind,function(x) x)
    names(Xlist) <- list("low","high")
    }
}

if (is.numeric(splitcr)) {                                 #manual raw score split
  spl.nam<-deparse(substitute(splitcr))
  if (length(splitcr)!=dim(object$X)[1]){
    stop("Mismatch between length of split vector and number of persons!")
  } else {
    rvind <- splitcr
    Xlist <- by(object$X,rvind, function(x) x)
    names(Xlist) <- as.list(sort(unique(splitcr)))
    if(is.null(spl.gr)){
      spl.lev<-names(Xlist)
      spl.gr<-paste(spl.nam,spl.lev,sep=" ")
    }
  }
}

#----------items to be deleted---------------
del.pos.l <- lapply(Xlist, function(x) {
                    it.sub <- datcheck.LRtest(x,object$X,object$model)  #items to be removed within subgroup
                    })

del.pos <- unique(unlist(del.pos.l))
if ((length(del.pos)) >= (dim(object$X)[2]-1)) {
  stop("\nNo items with appropriate response patterns left to perform LR-test!\n")
}

if (length(del.pos) > 0) {
  warning("\nThe following items were excluded due to inappropriate response patterns within subgroups: ",immediate.=TRUE)
    cat(colnames(object$X)[del.pos], sep=" ","\n")
    cat("Full and subgroup models are estimated without these items!\n")
}


if (length(del.pos) > 0) {
  X.el <- object$X[,-(del.pos)]
} else {
  X.el <- object$X
}
Xlist.n <- by(X.el,rvind,function(y) y)
names(Xlist.n) <- names(Xlist)
# if items were removed, the full model has to be re-fitted on the reduced
# item set: append it as an extra element so it is estimated below
if (length(del.pos) > 0) Xlist.n <- c(Xlist.n,list(X.el))

# fit the appropriate model in each subgroup; likpar becomes a matrix of
# mode "list" with rows: loglik, npar, betapar, etapar, se.beta
if (object$model=="RM") {
       likpar <- sapply(Xlist.n,function(x) {                       #matrix with loglik and npar for each subgroup
                               objectg <- RM(x,se=se)
                               likg <- objectg$loglik
                               nparg <- length(objectg$etapar)
                              # betalab <- colnames(objectg$X)
                               list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta)
                               })
       }
if (object$model=="PCM") {
       likpar <- sapply(Xlist.n,function(x) {                       #matrix with loglik and npar for each subgroup
                               objectg <- PCM(x,se=se)
                               likg <- objectg$loglik
                               nparg <- length(objectg$etapar)
                               list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta)
                               })
       }
if (object$model=="RSM") {
       likpar <- sapply(Xlist.n,function(x) {                       #matrix with loglik and npar for each subgroup
                               objectg <- RSM(x,se=se)
                               likg <- objectg$loglik
                               nparg <- length(objectg$etapar)
                               list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta)
                               })
       }

if (length(del.pos) > 0) {                  #re-estimated full model
  pos <- length(Xlist.n)                    #position of the full model
  loglik.all <- likpar[1,pos][[1]]          #loglik full model
  etapar.all <- rep(0,likpar[2,pos][[1]])   #etapar full model (filled with 0 for df computation)
                                            #FIX: [[1]] extracts the count from the list-matrix cell
  likpar <- likpar[,-pos]
  Xlist.n <- Xlist.n[-pos]
} else {
  loglik.all <- object$loglik
  etapar.all <- object$etapar
}

loglikg <- sum(unlist(likpar[1,]))                 #sum of likelihood values for subgroups
LR <- 2*(abs(loglikg-loglik.all))                  #LR value
df = sum(unlist(likpar[2,]))-(length(etapar.all))  #final degrees of freedom
pvalue <- 1-pchisq(LR,df)                          #pvalue

betalist <- likpar[3,]                             #organizing betalist


result <- list(X=X.original, X.list=Xlist.n, model=object$model,LR=LR,
               df=df, pvalue=pvalue, likgroup=unlist(likpar[1,],use.names=FALSE),
               betalist=betalist, etalist=likpar[4,],selist=likpar[5,], spl.gr=spl.gr, call=call)
class(result) <- "LR"
result
}
+
diff --git a/R/MLoef.R b/R/MLoef.R
new file mode 100755
index 0000000..81c8aae
--- /dev/null
+++ b/R/MLoef.R
@@ -0,0 +1,102 @@
MLoef <- function(robj, splitcr="median")
{
# performs the Martin-Loef LR-test
# robj... object of class RM
# splitcr... splitting criterion for two groups. "median" (default) and "mean"
#            split items in two groups according to the median/mean of item raw
#            scores.
#            a vector of length k (number of items) containing two different
#            elements signifying group membership of items can be supplied.
# returns an object of class "MLoef"

  # NOTE(review): the guard below requires class "Rm", while the model
  # functions in this file assign class "eRm" -- confirm that RM() returns
  # an object carrying class "Rm", otherwise valid input is rejected
  if(all(class(robj) != "Rm")) stop("robj must be of class \"Rm\".")

  wrning <- NULL   # initialize an object for warnings

  if(length(splitcr) == 1){   # generate split-vector if "mean" or "median"
    if(splitcr=="median"){
      raw.scores <- colSums(robj$X01,na.rm=T)
      numsplit <- as.numeric(raw.scores > median(raw.scores,na.rm=T))
      if( any(raw.scores == median(raw.scores,na.rm=T)) ){   # Only if one item's raw score == the median, a warning is issued
        wrning <- which(raw.scores == median(raw.scores,na.rm=T))   # append a warning-slot to the object for print and summary methods
        cat("Item(s)",paste(names(wrning),collapse=", "),"with raw score equal to the median assigned to the lower raw score group!\n")
      }
    }
    if(splitcr=="mean"){
      raw.scores <- colSums(robj$X01,na.rm=T)
      numsplit <- as.numeric(raw.scores > mean(raw.scores,na.rm=T))
      if( any(raw.scores == mean(raw.scores,na.rm=T)) ){   # Only if one item's raw score == the mean, a warning is issued
        wrning <- which(raw.scores == mean(raw.scores,na.rm=T))   # append a warning-slot to the object for print and summary methods
        cat("Item(s)",paste(names(wrning),collapse=", "),"with raw score equal to the mean assigned to the lower raw score group!\n")
      }
    }
  }
  else{   # check if the submitted split-vector is appropriate
    if(length(splitcr) != ncol(robj$X01)) stop("Split vector too long/short.")
    if(length(unique(splitcr)) > 2) stop("Only two groups allowed.")
    if(length(unique(splitcr)) < 2) stop("Split vector must contain two groups.")
    numsplit <- splitcr
  }
  sp.groups <- unique(numsplit)
  i.groups <- list(which(numsplit == sp.groups[1]), which(numsplit == sp.groups[2]))

  # check if one group contains less than 2 items
  if( (length(i.groups[[1]]) < 2) | (length(i.groups[[2]]) < 2) ){
    stop("Each group of items must contain at least 2 items.")
  }

  # check if one group contains subject with <=1 valid responses
  if(any(rowSums(is.na(robj$X01[,i.groups[[1]]])) >= (length(i.groups[[1]]) - 1)))stop("Group 1 contains subjects with less than two valid responses.")
  if(any(rowSums(is.na(robj$X01[,i.groups[[2]]])) >= (length(i.groups[[2]]) - 1)))stop("Group 2 contains subjects with less than two valid responses.")

  ### possible missing patterns and classification of persons into groups
  MV.X <- apply(matrix(as.numeric(is.na(robj$X01)),ncol=ncol(robj$X01)),1,paste,collapse="")
  MV.p <- sort(unique(MV.X))   # the distinct NA patterns
  MV.g <- numeric(length=length(MV.X))
  g <- 1
  for(i in MV.p){
    MV.g[MV.X == i] <- g;     # group number per person, by NA pattern
    g <- g + 1
  }
  na.X01 <- list()            # data split into one submatrix per NA pattern
  for(i in 1:length(MV.p)){
    na.X01[[i]] <- matrix(robj$X01[which(MV.g == i),], ncol=ncol(robj$X01))
  }

  # separate Rasch models for the two item groups
  res1 <- RM(robj$X01[,i.groups[[1]]])
  res2 <- RM(robj$X01[,i.groups[[2]]])

  ### calculating the numerator and denominator
  ml.num <- ml.den <- numeric()

  for(i in 1:length(MV.p)){
    # numerator: multinomial likelihood of the total raw score distribution
    .temp.num <- table(rowSums(na.X01[[i]],na.rm=T))
    ml.num[i] <- sum(log((.temp.num/sum(.temp.num))^.temp.num))

    # denominator: joint distribution of the two subscale raw scores
    if(nrow(na.X01[[i]]) > 1){
      .temp.den <- table(rowSums(na.X01[[i]][,i.groups[[1]]],na.rm=T),
                         rowSums(na.X01[[i]][,i.groups[[2]]],na.rm=T))
    }
    else{
      .temp.den <- table(sum(na.X01[[i]][,i.groups[[1]]],na.rm=T),
                         sum(na.X01[[i]][,i.groups[[2]]],na.rm=T))
    }
    ml.den[i] <- sum(log((.temp.den/sum(.temp.den))^.temp.den))
  }

  a <- sum(ml.num)
  b <- sum(ml.den)
  k <- c(length(i.groups[[1]]),length(i.groups[[2]]))   # group sizes (items)

  ML.LR <- -2*( (a + robj$loglik) - (b + res1$loglik + res2$loglik) )
  DF <- prod(k) - 1
  p.value <- 1 - pchisq(ML.LR, DF)

  result <- list(X01=robj$X01, model=robj$model, LR=ML.LR,
                 df=DF, p.value=p.value, L0=robj$loglik,  L1=res1$loglik,  L2=res2$loglik,
                 theta.table.RM=table(rowSums(robj$X01)),                        # both used for the plotting
                 theta.table.MLoef=table(rowSums(res1$X01),rowSums(res2$X01)),   # routine plot.MLoef
                 items1=i.groups[[1]], items2=i.groups[[2]], k=k,
                 splitcr=splitcr, split.vector=numsplit, warning=wrning, call=match.call())
  class(result) <- "MLoef"
  return(result)
}
diff --git a/R/NPtest.R b/R/NPtest.R
new file mode 100755
index 0000000..0a39d5e
--- /dev/null
+++ b/R/NPtest.R
@@ -0,0 +1,354 @@
NPtest <- function(obj, n = NULL, method = "T1", ...){
# dispatcher for the nonparametric Rasch model tests T1-T11.
# a raw binary data matrix / data frame is first turned into a
# RaschSampler object; an existing RaschSampler object is used as is.

  needs.sampling <- is.matrix(obj) || is.data.frame(obj)
  if (needs.sampling){
    if (!all(obj %in% 0:1)) stop("Data matrix must be binary, NAs not allowed")
    if (is.null(n)) n <- 500   # default number of sampled matrices
    obj <- rsampler(obj, rsctrl(burn_in = 256, n_eff = n, step = 32))
  }

  switch(method,
    "T1"  = T1(obj),
    "T2"  = T2(obj, ...),
    "T4"  = T4(obj, ...),
    "T7"  = T7(obj, ...),
    "T7a" = T7a(obj, ...),
    "T10" = T10(obj, ...),
    "T11" = T11(obj)
  )
}
+
T1<-function(rsobj){
# T1: local dependence via increased inter-item correlations; for each
# item pair the statistic counts the persons with equal responses on
# both items.  rsobj is a RaschSampler object whose first matrix is the
# observed data.
     T1stat<-function(x){      # calculates statistic T1 (k via lexical scoping)
        unlist(lapply(1:(k-1),function(i) lapply((i+1):k, function(j) sum(x[,i]==x[,j]))))
     }
     n_eff<-rsobj$n_eff                         # number of simulated matrices
     n_tot<-rsobj$n_tot                         # total number of matrices (observed + simulated)
     k<-rsobj$k                                 # number of columns of matrices
+
T2 <- function(rsobj, idx = NULL, stat = "var"){
# T2: local dependence via model-deviating subscales; measures the
# dispersion of the person raw scores on the subscale 'idx' with one of
# four spread statistics and compares the observed value against the
# sampled matrices.

  if(is.null(idx))
      stop("No item(s) for subscale specified (use idx!)")

  subscores <- function(x) rowSums(x[, idx, drop = FALSE])   # person raw scores on subscale

  statfun <- switch(stat,
      "var"   = function(x) var(subscores(x)),
      "mad1"  = function(x){ y <- subscores(x); mean(abs(y - mean(y))) },   # mean absolute deviation
      "mad2"  = function(x) mad(subscores(x), constant = 1),                # unscaled median absolute deviation
      "range" = function(x) diff(range(subscores(x))),
      stop("stat must be one of \"var\", \"mad1\", \"mad2\", \"range\"")
  )

  n <- rsobj$n
  n_eff <- rsobj$n_eff                # number of simulated matrices
  k <- rsobj$k                        # number of columns of matrices

  res <- unlist(rstats(rsobj, statfun))
  prop <- sum(res[2:(n_eff + 1)] >= res[1]) / n_eff   # one-sided p: simulated >= observed

  result <- list(n_eff = n_eff, prop = prop, idx = idx, stat = stat, T2vec = res) # T2obj
  class(result) <- "T2obj"
  result
}
+
+
T4<-function(rsobj,idx=NULL,group=NULL,alternative="high"){
# T4: group anomalies; tests whether the persons selected by 'group'
# score higher (alternative="high") or lower ("low") than expected on
# the subscale 'idx'.  The statistic is the (signed) sum of the group's
# subscale scores.

     T4.stat<-function(x){      # calculates statistic T4 (gr, idx, sign via lexical scoping)
        sign*sum(rowSums(x[gr,idx,drop=FALSE]))
     }
     n_eff<-rsobj$n_eff                         # number of simulated matrices
     n_tot<-rsobj$n_tot                         # number of all matrices
     k<-rsobj$k                                 # number of items
     if(is.null(idx))
         stop("No item(s) for subscale specified (use idx!)")
     if(is.null(group))
         stop("No group specified (use group!)")
     if(alternative=="high")
        sign <- 1
     else if(alternative=="low")
        sign <- -1                              # negate so large values are always "extreme"
     else
        stop("alternative incorrectly specified! (use either \"high\" or \"low\")")

     gr<-as.logical(group)                      # group definition (logical)
     res<-rstats(rsobj,T4.stat)
     res<-unlist(res)
     # res[1] is the observed value; 2:n_tot index the sampled matrices
     # (n_tot = n_eff + 1 for a single input matrix -- TODO confirm)
     prop<-sum(res[2:(n_tot)]>=res[1])/n_eff
     gr.nam <- deparse(substitute(group))
     gr.n <- sum(group)
     result<-list(n_eff=n_eff, prop=prop, idx=idx, gr.nam=gr.nam, gr.n=gr.n, T4vec=res, alternative=alternative)   # T4obj
     class(result)<-"T4obj"
     result
}
+
T7<-function(rsobj,idx=NULL){
# T7: discrimination check for the subscale 'idx'; for every item pair
# where item i has the higher item score, counts persons solving the
# weaker item j but not i, and sums over all pairs of the subscale.
     T7.stat<-function(x){      # calculates statistic T7
        calcT7<-function(i,j){  # contribution of one ordered item pair
          if(sitscor[i]>sitscor[j]){
              sum(submat[,j]>submat[,i])   #
              # t<-table(submat[,i],submat[,j])    # odds ratio gives the same result
              # OR<-t[1]*t[4]/(t[2]*t[3])
              # 1/OR
          } else
              NA
        }
        submat<-x[,idx]
        submat<-submat[,order(itscor,decreasing=TRUE)]   # same column order as observed data
        RET<-unlist(lapply(1:(m-1), function(i) lapply((i+1):m, function(j) calcT7(i,j))))
        RET
     }

     n_eff<-rsobj$n_eff                         # number of simulated matrices
     n_tot<-rsobj$n_tot                         # number of all matrices
     k<-rsobj$k                                 # number of items
     if(is.null(idx))
         stop("No items for subscale specified (use idx!)")
     else if (length(idx)<2)
         stop("At least 2 items have to be specified with idx!")
     submat<-rsextrmat(rsobj,1)[,idx]           # observed data matrix, subscale only
     itscor<-colSums(submat)                    # observed item scores (fixed across samples)
     names(itscor)<-colnames(submat)<-idx

     submat<-submat[,order(itscor,decreasing=TRUE)]
     sitscor<-sort(itscor,decreasing=TRUE)      # sorted itemscore (used inside T7.stat via lexical scoping)
     m<-length(itscor)

     resList<-rstats(rsobj,T7.stat)
     res<-sapply(resList,sum,na.rm=TRUE)        # sum pair contributions per matrix
     prop<-sum(res[2:(n_eff+1)]>=res[1])/n_eff  # one-sided p: simulated >= observed
     result<-list(n_eff=n_eff, prop=prop, itscor=itscor, T7vec=res)   # T7obj
     class(result)<-"T7obj"
     result
}
T7a<-function(rsobj,idx=NULL){
# T7a: like T7, but reports a separate p-value for every single item
# pair of the subscale instead of one global sum.
     T7a.stat<-function(x){      # calculates statistic T7a
        calcT7a<-function(i,j){  # calculates sum for single Itempair
          if(sitscor[i]>sitscor[j]){
              sum(submat[,j]>submat[,i])   #
              # t<-table(submat[,i],submat[,j])    # odds ratio gives the same result
              # OR<-t[1]*t[4]/(t[2]*t[3])
              # 1/OR
          } else
              NA
        }
        submat<-x[,idx]
        submat<-submat[,order(itscor,decreasing=TRUE)]   # same column order as observed data
        RET<-unlist(lapply(1:(m-1), function(i) lapply((i+1):m, function(j) calcT7a(i,j))))
        RET
     }

     n_eff<-rsobj$n_eff                         # number of simulated matrices
     n_tot<-rsobj$n_tot                         # number of all matrices
     k<-rsobj$k                                 # number of items
     if(is.null(idx))
         stop("No items for subscale specified (use idx!)")
     else if (length(idx)<2)
         stop("At least 2 items have to be specified with idx!")
     submat<-rsextrmat(rsobj,1)[,idx]           # observed data matrix, subscale only
     itscor<-colSums(submat)                    # observed item scores (fixed across samples)
     names(itscor)<-colnames(submat)<-idx
     submat<-submat[,order(itscor,decreasing=TRUE)]
     sitscor<-sort(itscor,decreasing=TRUE)      # sorted itemscore (used inside T7a.stat via lexical scoping)
     m<-length(itscor)

     res<-rstats(rsobj,T7a.stat)
     res<-do.call(cbind, lapply(res,as.vector)) # converts result list to matrix (pairs x matrices)
     # per-pair one-sided p-values; column 1 is the observed matrix
     T7avec<-apply(res, 1, function(x) sum(x[2:(n_tot)]>=x[1])/n_eff)
     T7anam<-NULL
     for (i in 1:(m-1)) for(j in (i+1):m )
          T7anam<-c(T7anam, paste("(",names(sitscor[i]),">",names(sitscor[j]),")",sep="",collapse=""))
     names(T7avec)<-T7anam
     result<-list(n_eff=n_eff, prop=T7avec,itscor=itscor)    # T7aobj
     class(result)<-"T7aobj"
     result
}
+
T10<-function(rsobj, splitcr="median"){
# T10: global subgroup-invariance test; splits persons into a high and a
# low group (via splitcr) and compares the item-pair dominance patterns
# between the two groups.
      calc.groups<-function(x,splitcr){
        # builds the logical high-group indicator 'hi' from splitcr
        if (length(splitcr) > 1)  {        # numeric vectors converted to factors
            if (length(splitcr) != nrow(x)) {
                stop("Mismatch between length of split vector and number of persons!")
            }
            splitcr <- as.factor(splitcr)
            if (length(levels(splitcr))>2) {
                stop("Split vector defines more than 2 groups (only two allowed)!")
            }
            spl.lev <- levels(splitcr)
            #spl.gr <- paste(spl.nam, spl.lev, sep = " ")  # not necessary for the time being
            hi <- splitcr==spl.lev[1] # first level is high group
        } else if (!is.numeric(splitcr)) {
            spl.nam <- splitcr
            # NOTE(review): a single numeric splitcr value is not handled in
            # either branch ('hi' stays undefined) -- confirm callers only
            # pass "median"/"mean" or a full-length vector
            if (splitcr == "median") {
                spl.gr <- c("Raw Scores <= Median", "Raw Scores > Median")
                rv <- rowSums(x)
                rvsplit <- median(rv)
                hi <- rv > rvsplit
            }
            if (splitcr == "mean") {
                spl.gr <- c("Raw Scores < Mean", "Raw Scores >= Mean")
                rv <- rowSums(x)
                rvsplit <- mean(rv)
                hi <- rv > rvsplit
            }
        }
        list(hi=hi,spl.nam=spl.nam) # spl.nam is returned due to lex scoping even if not defined here
      }
      T10.stat<-function(x){      # calculates statistic T10 for one matrix
        # n_ij: persons with item i solved but not j, within each group
        nij.hi<-unlist(lapply(1:k,function(i) lapply(1:k, function(j) sum(x[hi,i]>x[hi,j]))))
        nij.low<-unlist(lapply(1:k,function(i) lapply(1:k, function(j) sum(x[!hi,i]>x[!hi,j]))))
        nji.hi<- unlist(lapply(1:k,function(i) lapply(1:k, function(j) sum(x[hi,i]<x[hi,j]))))
        nji.low<- unlist(lapply(1:k,function(i) lapply(1:k, function(j) sum(x[!hi,i]<x[!hi,j]))))
        RET<-sum(abs(nij.hi*nji.low-nij.low*nji.hi))
        RET
      }
      spl.nam <- deparse(substitute(splitcr))
      n_eff<-rsobj$n_eff                         # number of simulated matrices
      n_tot<-rsobj$n_tot                         # number of all matrices
      k<-rsobj$k                                 # number of columns of matrices
      obj<-rsextrobj(rsobj,1,1)                  # extract first matrix
      x<-matrix(obj$inpmat,obj$n,obj$k)
      ans <- calc.groups(x,splitcr)      # calculate grouping vector (logical)
      hi<-ans$hi
      hi.n<-sum(hi)
      low.n<-sum(!hi)

      res<-rstats(rsobj,T10.stat)                # for each matrix calculate T10

      res<-unlist(res)
      prop<-sum(res[2:(n_eff+1)]>=res[1])/n_eff  # one-sided p: simulated >= observed
      result<-list(n_eff=n_eff, prop=prop,spl.nam=ans$spl.nam,hi.n=hi.n,low.n=low.n,T10vec=res)  # T10obj
      class(result)<-"T10obj"
      result
}
+
+
+T11<-function(rsobj){
+# Nonparametric RM model test T11 (global test - local dependence):
+# sum of absolute deviations between the observed inter-item correlations and
+# the mean inter-item correlations over the sampled matrices.
+# rsobj... RaschSampler result object (first matrix = observed, rest = sampled)
+      T11.stat<-function(x){
+         as.vector(cor(x))        # all pairwise inter-item correlations of one matrix
+      }
+      calc.T11<-function(x){      # calculates statistic T11 for one matrix
+         sum(abs(x-rho))          # 'rho' is picked up by lexical scoping (defined below)
+      }
+      n_eff<-rsobj$n_eff                         # number of simulated matrices
+      n_tot<-rsobj$n_tot                         # number of all matrices
+      k<-rsobj$k                                 # number of columns of matrices
+      res<-rstats(rsobj,T11.stat)                # for each matrix calculate all r_ij's
+
+      cormats <- matrix(unlist(res),nrow=k*k)    # k*k x n_tot matrix, each column contains one corr matrix
+      rho<-apply(cormats[,2:n_tot],1,mean)       # vector of estimated "real" rho_ij's
+      T11obs<-calc.T11(cormats[,1])              # vector of observed r_ij's
+      prop<-sum(apply(cormats[, 2:n_tot],2,calc.T11)>=T11obs)/n_eff   # NOTE(review): assumes n_tot == n_eff + 1 -- confirm
+      result<-list(n_eff=n_eff, prop=prop, T11r=cormats[,1], T11rho=rho)   # T11obj
+      class(result)<-"T11obj"
+      result
+}
+
+print.T1obj<-function(x,alpha=0.05,...){
+# print method for T1 results: lists item pairs whose one-sided p-value < alpha
+# x... object of class "T1obj", alpha... significance threshold for reporting
+  txt1<-"\nNonparametric RM model test: T1 (local dependence - increased inter-item correlations)\n"
+  writeLines(strwrap(txt1, exdent=5))
+  cat("    (counting cases with equal responses on both items)\n\n")
+  cat("Number of sampled matrices:", x$n_eff,"\n")
+  cat("\nNumber of Item-Pairs tested:", length(x$prop),"\n")
+  cat("\nItem-Pairs with one-sided p <", alpha,"\n\n")
+  T1mat<-x$T1mat
+  idx<-which(T1mat<alpha,arr.ind=TRUE)        # (row, col) positions of significant pairs
+  val<-T1mat[which(T1mat<alpha)]
+  names(val)<-apply(idx,1,function(x) paste("(",x[2],",",x[1],")",sep="",collapse=""))  # NB: this 'x' shadows the print argument
+  if (length(val)>0)
+     print(round(val,digits=3))
+  else
+     cat("none\n\n")
+}
+print.T2obj<-function(x,...){
+# print method for T2 results (dispersion statistic of subscale raw scores)
+  prop<-x$prop
+  idx<-x$idx
+  stat<-x$stat
+  statnam<-switch(stat,                       # map statistic code to readable label
+     "var"="variance",
+     "mad1"="mean absolute deviation",
+     "mad2"="median absolute deviation",
+     "range"="range"
+  )
+  txt<-"\nNonparametric RM model test: T2 (local dependence - model deviating subscales)\n"
+  writeLines(strwrap(txt, exdent=5))
+  cat("    (dispersion of subscale person rawscores)\n\n")
+  cat("Number of sampled matrices:", x$n_eff,"\n")
+  cat("Items in subscale:", idx,"\n")
+  cat("Statistic:", statnam,"\n")
+  cat("one-sided p-value:",prop,"\n")
+#  cat("    (proportion of sampled",statnam," GE observed)\n\n")
+}
+print.T4obj<-function(x,...){
+# print method for T4 results (group anomalies / DIF on a subscale)
+  prop<-x$prop
+  idx<-x$idx
+  gr.nam<-x$gr.nam
+  gr.n<-x$gr.n
+  alternative<-x$alternative
+  cat("\nNonparametric RM model test: T4 (Group anomalies - DIF)\n")
+  cat("    (counting", alternative, "raw scores on item(s) for specified group)\n\n")
+  cat("Number of sampled matrices:", x$n_eff,"\n")
+  cat("Items in Subscale:", idx,"\n")
+  cat("Group:",gr.nam,"  n =",gr.n,"\n")
+  cat("one-sided p-value:",prop,"\n\n")
+#  cat("    (proportion of sampled raw scores GE observed)\n\n")
+}
+
+print.T7obj<-function(x,...){
+# print method for T7 results (test for equal item discrimination)
+  prop<-x$prop
+  cat("\nNonparametric RM model test: T7 (different discrimination - 2PL)\n")
+  cat("    (counting cases with response 1 on more difficult and 0 on easier item)\n\n")
+  cat("Number of sampled matrices:", x$n_eff,"\n")
+  cat("\nItem Scores:\n")
+  print(x$itscor)
+  cat("one-sided p-value:",prop,"\n\n")
+}
+print.T7aobj<-function(x,...){
+# print method for T7a results (pairwise version of the T7 discrimination test)
+  prop<-x$prop
+  cat("\nNonparametric RM model test: T7a (different discrimination - 2PL)\n")
+  cat("    (counting cases with response 1 on more difficult and 0 on easier item)\n\n")
+  cat("Number of sampled matrices:", x$n_eff,"\n")
+  cat("Item Scores:\n")
+  print(x$itscor)
+  cat("\nItem-Pairs: (i>j ... i easier than j)\n\n")
+  print(round(prop,digits=3))
+}
+print.T10obj<-function(x,...){
+# print method for T10 results (global test of subgroup invariance)
+  spl.nam<-x$spl.nam
+  prop<-x$prop
+  hi.n<-x$hi.n
+  low.n<-x$low.n
+  cat("\nNonparametric RM model test: T10 (global test - subgroup-invariance)\n\n")
+  cat("Number of sampled matrices:", x$n_eff,"\n")
+  cat("Split:",spl.nam,"\n")
+  cat("Group 1: n = ",hi.n,"  Group 2: n =",low.n,"\n")
+  cat("one-sided p-value:",prop,"\n\n")
+#  cat("    (proportion of sampled statistics GE observed)\n\n")
+}
+print.T11obj<-function(x,...){
+# print method for T11 results (global test of local dependence)
+  prop<-x$prop
+  cat("\nNonparametric RM model test: T11 (global test - local dependence)\n")
+  cat("    (sum of deviations between observed and expected inter-item correlations)\n\n")
+  cat("Number of sampled matrices:", x$n_eff,"\n")
+  cat("one-sided p-value:",prop,"\n\n")
+#  cat("    (proportion of sampled sums GE observed)\n\n")
+}
diff --git a/R/PCM.R b/R/PCM.R
new file mode 100755
index 0000000..b3a6f2e
--- /dev/null
+++ b/R/PCM.R
@@ -0,0 +1,54 @@
+`PCM` <-
+function(X, W, se = TRUE, sum0 = TRUE, etaStart)
+{
+#...X: person*item scores matrix (starting from 0)
+#...W: design matrix (optional; generated internally when missing)
+#...se: if TRUE, compute standard errors from the Hessian
+#...sum0: if TRUE, parameters are normalized to sum-0
+#...etaStart: optional starting values for the eta parameters
+# Fits a Partial Credit Model via CML; returns an object of class c("Rm","eRm").
+
+#-------------------main programm-------------------
+call<-match.call()
+model <- "PCM"
+groupvec <- 1
+mpoints <- 1
+
+if (missing(W)) W <- NA
+else W <- as.matrix(W)
+
+if (missing(etaStart)) etaStart <- NA
+else etaStart <- as.vector(etaStart)
+
+XWcheck <- datcheck(X,W,mpoints,groupvec,model)                              #initial check of X and W
+X <- XWcheck$X
+
+lres <- likLR(X,W,mpoints,groupvec,model,st.err=se,sum0,etaStart)
+parest <- lres$parest                             #full groups for parameter estimation
+
+loglik <- -parest$minimum                         #log-likelihood value
+iter <- parest$iterations                         #number of iterations
+convergence <- parest$code
+etapar <- parest$estimate                         #eta estimates
+betapar <- as.vector(lres$W%*% etapar)            #beta estimates
+if (se) {
+  se.eta <- sqrt(diag(solve(parest$hessian)))         #standard errors
+  se.beta <- sqrt(diag(lres$W%*%solve(parest$hessian)%*%t(lres$W)))   #se beta
+} else {
+  se.eta <- rep(NA,length(etapar))
+  se.beta <- rep(NA,length(betapar))
+}
+
+X01 <- lres$X01
+labs <- labeling.internal(model,X,X01,lres$W,etapar,betapar,mpoints,max(groupvec))    #labeling for L-models
+W <- labs$W
+etapar <- labs$etapar
+betapar <- labs$betapar
+
+etapar <- -etapar          # output difficulty  rh 25-03-2010
+
+npar <- dim(lres$W)[2]                            #number of parameters
+
+result <- list(X=X,X01=X01,model=model,loglik=loglik,npar=npar,iter=iter,convergence=convergence,
+               etapar=etapar,se.eta=se.eta,hessian=parest$hessian,betapar=betapar,
+               se.beta=se.beta,W=W,call=call)
+
+class(result) <- c("Rm","eRm")                         #classes: simple RM and extended RM
+result
+}
+
+
diff --git a/R/RM.R b/R/RM.R
new file mode 100755
index 0000000..aa5799e
--- /dev/null
+++ b/R/RM.R
@@ -0,0 +1,54 @@
+`RM` <-
+function(X, W, se = TRUE, sum0 = TRUE, etaStart)
+{
+#...X: 0/1 person*item matrix
+#...W: design matrix (optional; generated internally when missing)
+#...se: if TRUE, compute standard errors from the Hessian
+#...sum0: if TRUE, parameters are normalized to sum-0
+#...etaStart: optional starting values for the eta parameters
+# Fits a dichotomous Rasch Model via CML; returns class c("dRm","Rm","eRm").
+
+#-------------------main programm-------------------
+
+call<-match.call()
+groupvec <- 1
+mpoints <- 1
+model <- "RM"
+
+if (missing(W)) W <- NA
+else W <- as.matrix(W)
+
+if (missing(etaStart)) etaStart <- NA
+else etaStart <- as.vector(etaStart)
+
+XWcheck <- datcheck(X,W,mpoints,groupvec,model)                              #initial check of X and W
+X <- XWcheck$X
+
+lres <- likLR(X,W,mpoints,groupvec,model,st.err=se,sum0,etaStart)
+parest <- lres$parest                             #full groups for parameter estimation
+
+loglik <- -parest$minimum                         #log-likelihood value
+iter <- parest$iterations                         #number of iterations
+convergence <- parest$code
+etapar <- parest$estimate                         #eta estimates
+betapar <- as.vector(lres$W%*% etapar)            #beta estimates
+if (se) {
+  se.eta <- sqrt(diag(solve(parest$hessian)))         #standard errors
+  se.beta <- sqrt(diag(lres$W%*%solve(parest$hessian)%*%t(lres$W)))   #se beta
+} else {
+  se.eta <- rep(NA,length(etapar))
+  se.beta <- rep(NA,length(betapar))
+}
+
+X01 <- lres$X01
+labs <- labeling.internal(model,X,X01,lres$W,etapar,betapar,mpoints,max(groupvec))    #labeling for L-models
+W <- labs$W
+etapar <- labs$etapar
+betapar <- labs$betapar
+
+etapar <- -etapar          # output difficulty  rh 25-03-2010
+
+npar <- dim(lres$W)[2]                            #number of parameters
+
+result <- list(X=X,X01=X01,model=model,loglik=loglik,npar=npar,iter=iter,convergence=convergence,
+               etapar=etapar,se.eta=se.eta,hessian=parest$hessian,betapar=betapar,
+               se.beta=se.beta,W=W,call=call)
+class(result) <- c("dRm","Rm","eRm")                    #classes: dichotomous RM, RM (RM, PCM, RSM), and extended RM (all)
+result
+}
+
+
diff --git a/R/ROCR_aux.R b/R/ROCR_aux.R
new file mode 100755
index 0000000..c73b13b
--- /dev/null
+++ b/R/ROCR_aux.R
@@ -0,0 +1,88 @@
+## ---------------------------------------------------------------------------
+## Dealing with argument lists, especially '...'
+## ---------------------------------------------------------------------------
+
+## return list of selected arguments, skipping those that
+## are not present in arglist
+.select.args <- function( arglist, args.to.select, complement=FALSE) {
+    ## keep only the entries of 'arglist' whose names occur in 'args.to.select'
+    ## (with complement=TRUE: keep those that do NOT occur)
+    match.bool <- names(arglist) %in% args.to.select
+    if (complement==TRUE) match.bool <- !match.bool
+    return( arglist[ match.bool] )
+}
+
+## return arguments in arglist which match prefix, with prefix removed
+## ASSUMPTION: prefix is separated from rest by a '.'; this is removed along
+## with the prefix
+.select.prefix <- function( arglist, prefixes, complement=FALSE ) {
+    ## keep entries whose names start with one of 'prefixes' followed by '.',
+    ## stripping that prefix from the returned names
+    match.expr <- paste(paste('(^',prefixes,'\\.)',sep=""),collapse='|')
+    match.bool <- (1:length(arglist)) %in% grep( match.expr, names(arglist) )
+    if (complement==TRUE) match.bool <- !match.bool
+    arglist <- arglist[ match.bool]
+    names(arglist) <- sub( match.expr, '', names(arglist))
+    
+    return( arglist )
+}
+
+.garg <- function( arglist, arg, i=1) {
+    ## get argument: i-th element if the entry is itself a list, else the entry
+    if (is.list(arglist[[arg]])) arglist[[ arg ]][[i]]
+    else arglist[[ arg ]]
+}
+
+.sarg <- function( arglist, ...) {
+    ## set arguments: insert/overwrite the named values given in '...'
+    ll <- list(...)
+    for (argname in names(ll) ) {
+        arglist[[ argname ]] <- ll[[ argname ]]
+    }
+    return(arglist)
+}
+
+.farg <- function( arglist, ...) {
+    ## fill arguments: set named defaults from '...' only where 'arglist'
+    ## has no entry yet (length 0 covers both NULL and missing)
+    ll <- list(...)
+    for (argname in names(ll) ) {
+        if (length(arglist[[argname]])==0)
+          arglist[[ argname ]] <- ll[[ argname ]]
+    }
+    return(arglist)
+}
+
+.slice.run <- function( arglist, runi=1) {
+    ## extract run 'runi' from every (possibly list-valued) entry of 'arglist'
+    r <- lapply( names(arglist), function(name) .garg( arglist, name, runi))
+    names(r) <- names(arglist)
+    r
+}
+
+## ---------------------------------------------------------------------------
+## Line segments
+## ---------------------------------------------------------------------------
+
+.construct.linefunct <- function( x1, y1, x2, y2) {
+    ## build the linear function through (x1,y1) and (x2,y2);
+    ## vertical lines (x1 == x2) cannot be represented as y = f(x)
+    if (x1==x2) {
+        stop("Cannot construct a function from data.")
+    }
+
+    ## the numeric endpoints are baked into the source text via parse/eval,
+    ## so the returned closure is self-contained ('c' is a local scalar here)
+    lf <- eval(parse(text=paste("function(x) {",
+        "m <- (",y2,"-",y1,") / (",x2,"-",x1,");",
+        "c <- ",y1," - m * ",x1,";",
+        "return( m * x + c)}",sep=" ")))
+    lf
+}
+
+.intersection.point <- function( f, g ) {
+    ## intersection of two linear functions f and g (as built above)
+    ## if lines are parallel, no intersection point
+    if (f(1)-f(0) == g(1)-g(0)) {
+        return( c(Inf,Inf) )
+    }
+
+    ## otherwise, choose search interval: double it until f-g changes sign
+    ## (non-parallel lines cross exactly once, so this terminates)
+    imin <- -1
+    imax <- 1
+    while (sign(f(imin)-g(imin)) == sign(f(imax)-g(imax))) {
+        imin <- 2*imin
+        imax <- 2*imax
+    }
+    h <- function(x) { f(x) - g(x) }
+
+    intersect.x <- uniroot( h, interval=c(imin-1,imax+1) )$root
+    intersect.y <- f( intersect.x )
+    return( c(intersect.x, intersect.y ))
+}
diff --git a/R/RSM.R b/R/RSM.R
new file mode 100755
index 0000000..b3a66b2
--- /dev/null
+++ b/R/RSM.R
@@ -0,0 +1,55 @@
+`RSM` <-
+function(X, W, se = TRUE, sum0 = TRUE, etaStart)
+{
+#...X: person*item scores matrix (starting from 0)
+#...W: design matrix (optional; generated internally when missing)
+#...se: if TRUE, compute standard errors from the Hessian
+#...sum0: if TRUE, parameters are normalized to sum-0
+#...etaStart: optional starting values for the eta parameters
+# Fits a Rating Scale Model via CML; returns an object of class c("Rm","eRm").
+
+#-------------------main programm-------------------
+
+call<-match.call()
+groupvec <- 1
+mpoints <- 1
+model <- "RSM"
+
+if (missing(W)) W <- NA
+else W <- as.matrix(W)
+
+if (missing(etaStart)) etaStart <- NA
+else etaStart <- as.vector(etaStart)
+
+XWcheck <- datcheck(X,W,mpoints,groupvec,model)                              #initial check of X and W
+X <- XWcheck$X
+
+lres <- likLR(X,W,mpoints,groupvec,model,st.err=se,sum0,etaStart)
+parest <- lres$parest                             #full groups for parameter estimation
+
+loglik <- -parest$minimum                         #log-likelihood value
+iter <- parest$iterations                         #number of iterations
+convergence <- parest$code
+etapar <- parest$estimate                         #eta estimates
+betapar <- as.vector(lres$W%*% etapar)            #beta estimates
+if (se) {
+  se.eta <- sqrt(diag(solve(parest$hessian)))         #standard errors
+  se.beta <- sqrt(diag(lres$W%*%solve(parest$hessian)%*%t(lres$W)))   #se beta
+} else {
+  se.eta <- rep(NA,length(etapar))
+  se.beta <- rep(NA,length(betapar))
+}
+
+X01 <- lres$X01
+labs <- labeling.internal(model,X,X01,lres$W,etapar,betapar,mpoints,max(groupvec))    #labeling for L-models
+W <- labs$W
+etapar <- labs$etapar
+betapar <- labs$betapar
+
+etapar <- -etapar          # output difficulty  rh 25-03-2010
+
+npar <- dim(lres$W)[2]                            #number of parameters
+
+result <- list(X=X,X01=X01,model=model,loglik=loglik,npar=npar,iter=iter,convergence=convergence,
+               etapar=etapar,se.eta=se.eta,hessian=parest$hessian,betapar=betapar,
+               se.beta=se.beta,W=W,call=call)
+
+class(result) <- c("Rm","eRm")                         #classes: simple RM and extended RM
+result
+}
+
+
diff --git a/R/Rsquared.R b/R/Rsquared.R
new file mode 100755
index 0000000..afec64a
--- /dev/null
+++ b/R/Rsquared.R
@@ -0,0 +1,23 @@
+Rsquared <- function(object, pi.hat)
+{
+# objects of class ppar
+# computes Pearson R^2, sum-of-squares R^2, and McFadden's R^2 for objects of class ppar
+# object... object of class ppar (uses components $X and, if present, $pers.ex)
+# pi.hat... matrix of fitted response probabilities, same layout as object$X
+
+  #Pi <- pmat(object)                              #expected values
+  if (length(object$pers.ex) > 0){
+    y <- as.vector(t(object$X[-object$pers.ex,])) #observed values
+  } else {
+    y <- as.vector(t(object$X))
+  }
+  pi.hat <- as.vector(t(pi.hat))
+
+  R.P <- cor(y, pi.hat)^2                                 #Squared Pearson correlation
+  R.SS <- 1-(sum((y - pi.hat)^2)/sum((y - mean(y))^2))    #SS-R^2
+  
+  loglik.full <- sum(y*log(pi.hat) + (1-y)*log(1-pi.hat), na.rm = TRUE)  #full likelihood
+  loglik.0 <- sum(y*log(mean(y)) + (1-y)*log(1-mean(y)))    #L0 (Agresti, Sec. 6.2.5)
+  R.MF <- (loglik.0 - loglik.full)/loglik.0   #McFadden's R^2 = 1 - LF/L0 (FIX: denominator was loglik.full)
+
+  result <- list(R2.P = R.P, R2.SS = R.SS, R2.MF = R.MF)
+  result
+}
\ No newline at end of file
diff --git a/R/Waldtest.R b/R/Waldtest.R
new file mode 100755
index 0000000..9757b27
--- /dev/null
+++ b/R/Waldtest.R
@@ -0,0 +1,3 @@
+`Waldtest` <-
+function(object,splitcr="median")UseMethod("Waldtest")   # S3 generic; dispatches on class of 'object'
+
diff --git a/R/Waldtest.Rm.R b/R/Waldtest.Rm.R
new file mode 100755
index 0000000..73b7257
--- /dev/null
+++ b/R/Waldtest.Rm.R
@@ -0,0 +1,169 @@
+`Waldtest.Rm` <-
+function(object, splitcr="median")
+{
+# performs item-based Wald test (Fischer & Molenaar, p.90)
+# object... object of class RM
+# splitcr... splitting criterion for LR-groups. "median" to a median raw score split,
+#            "mean" corresponds to the mean raw score split.
+#            optionally also a vector of length n for group split can be submitted.
+# returns an object of class "wald" with a per-item z-statistic/p-value table
+
+call<-match.call()
+
+spl.gr<-NULL
+
+X.original<-object$X
+if (length(splitcr)>1 && is.character(splitcr)){    # if splitcr is character vector, treated as factor
+   splitcr<-as.factor(splitcr)
+}
+if (is.factor(splitcr)){
+   spl.nam<-deparse(substitute(splitcr))
+   spl.lev<-levels(splitcr)
+   spl.gr<-paste(spl.nam,spl.lev,sep=" ")
+   splitcr<-unclass(splitcr)
+}
+
+numsplit<-is.numeric(splitcr)
+if (any(is.na(object$X))) {                   # with missings, split within each NA pattern group
+  if (!numsplit && splitcr=="mean") {                                   #mean split
+    spl.gr<-c("Raw Scores < Mean", "Raw Scores >= Mean")
+    X<-object$X
+    # calculates index for NA groups
+    # from person.parameter.eRm
+      dichX <- ifelse(is.na(X),1,0)
+      strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
+      gmemb <- as.vector(data.matrix(data.frame(strdata)))
+    gindx<-unique(gmemb)
+    rsum.all<-rowSums(X,na.rm=TRUE)
+    grmeans<-tapply(rsum.all,gmemb,mean)      #sorted
+    ngr<-table(gmemb)                         #sorted
+    m.all<-rep(grmeans,ngr)                   #sorted,expanded
+    rsum.all<-rsum.all[order(gmemb)]
+    spl<-ifelse(rsum.all<m.all,1,2)
+    splitcr<-spl
+    object$X<-X[order(gmemb),]                # NOTE: persons are reordered by NA group
+  }
+  if (!numsplit && splitcr=="median") {                                   #median split
+    spl.gr<-c("Raw Scores <= Median", "Raw Scores > Median")
+    cat("Warning message: Persons with median raw scores are assigned to the lower raw score group!\n")
+    X<-object$X
+    # calculates index for NA groups
+    # from person.parameter.eRm
+      dichX <- ifelse(is.na(X),1,0)
+      strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
+      gmemb <- as.vector(data.matrix(data.frame(strdata)))
+    gindx<-unique(gmemb)
+    rsum.all<-rowSums(X,na.rm=TRUE)
+    grmed<-tapply(rsum.all,gmemb,median)      #sorted
+    ngr<-table(gmemb)                         #sorted
+    m.all<-rep(grmed,ngr)                     #sorted,expanded
+    rsum.all<-rsum.all[order(gmemb)]
+    spl<-ifelse(rsum.all<=m.all,1,2)
+    splitcr<-spl
+    object$X<-X[order(gmemb),]
+  }
+}
+
+
+if (is.numeric(splitcr)){                     # user-supplied (or NA-derived) split vector
+  spl.nam<-deparse(substitute(splitcr))
+  if (length(table(splitcr)) > 2) stop("Dichotomous person split required!")
+  if (length(splitcr) != dim(object$X)[1]) {
+    stop("Mismatch between length of split vector and number of persons!")
+  } else {
+    rvind <- splitcr
+    Xlist <- by(object$X,rvind, function(x) x)
+    names(Xlist) <- as.list(sort(unique(splitcr)))
+    if(is.null(spl.gr)){
+      spl.lev<-names(Xlist)
+      spl.gr<-paste(spl.nam,spl.lev,sep=" ")
+    }
+  }}
+
+if (!is.numeric(splitcr)) {                   # complete-data "median"/"mean" splits
+  if (splitcr=="median") {                                   #median split
+    rv <- apply(object$X,1,sum,na.rm=TRUE)
+    rvsplit <- median(rv)
+    rvind <- rep(0,length(rv))
+    rvind[rv > rvsplit] <- 1                                 #group with high raw scores (ties go to "low")
+    Xlist <- by(object$X,rvind,function(x) x)
+    names(Xlist) <- list("low","high")
+    }
+
+  if (splitcr=="mean") {                                     #mean split
+    rv <- apply(object$X,1,sum,na.rm=TRUE)
+    rvsplit <- mean(rv)
+    rvind <- rep(0,length(rv))
+    rvind[rv > rvsplit] <- 1                                 #group with high raw scores
+    Xlist <- by(object$X,rvind,function(x) x)
+    names(Xlist) <- list("low","high")
+    }
+
+}
+
+del.pos.l <- lapply(Xlist, function(x) {
+                    it.sub <- datcheck.LRtest(x,object$X,object$model)  #items to be removed within subgroup
+                    })
+
+del.pos <- unique(unlist(del.pos.l))
+if ((length(del.pos)) >= (dim(object$X)[2]-1)) {
+  stop("\nNo items with appropriate response patterns left to perform Wald-test!\n")
+}
+
+if (length(del.pos) > 0) {
+    warning("\nThe following items were excluded due to inappropriate response patterns within subgroups: ",immediate.=TRUE)
+    cat(colnames(object$X)[del.pos], sep=" ","\n")
+    cat("Subgroup models are estimated without these items!\n")
+}
+
+if (length(del.pos) > 0) {
+  X.el <- object$X[,-(del.pos)]
+} else {
+  X.el <- object$X
+}
+Xlist.n <- by(X.el,rvind,function(y) y)
+names(Xlist.n) <- names(Xlist)
+
+# refit the appropriate model separately in each subgroup
+if (object$model=="RM") {
+       likpar <- sapply(Xlist.n,function(x) {                       #matrix with loglik and npar for each subgroup
+                               objectg <- RM(x)
+                               parg <- objectg$etapar
+                               seg <- objectg$se.eta
+                               list(parg,seg,objectg$betapar,objectg$se.beta)
+                               })
+       }
+if (object$model=="PCM") {
+       likpar <- sapply(Xlist.n,function(x) {                       #matrix with loglik and npar for each subgroup
+                               objectg <- PCM(x)
+                               parg <- objectg$etapar
+                               seg <- objectg$se.eta
+                               list(parg,seg,objectg$betapar,objectg$se.beta)
+                               })
+       }
+if (object$model=="RSM") {
+       likpar <- sapply(Xlist.n,function(x) {                       #matrix with loglik and npar for each subgroup
+                               objectg <- RSM(x)
+                               parg <- objectg$etapar
+                               seg <- objectg$se.eta
+                               list(parg,seg,objectg$betapar,objectg$se.beta)
+                               })
+       }
+
+
+# Wald statistic per item: difference of subgroup betas over pooled SE
+betapar1 <- likpar[3,][[1]]
+beta1.se <- likpar[4,][[1]]
+betapar2 <- likpar[3,][[2]]
+beta2.se <- likpar[4,][[2]]
+num <- (betapar1-betapar2)
+denom <- sqrt(beta1.se^2 + beta2.se^2)
+W.i <- num/denom
+pvalues <- (1-pnorm(abs(W.i)))*2              #two-sided normal p-values
+
+coef.table <- cbind(W.i,pvalues)
+dimnames(coef.table) <- list(names(betapar1),c("z-statistic","p-value"))
+
+result <- list(coef.table=coef.table,betapar1=betapar1,se.beta1=beta1.se,betapar2=betapar2,
+se.beta2=beta2.se, spl.gr=spl.gr, call=call, it.ex = del.pos)
+class(result) <- "wald"
+result
+}
+
+
diff --git a/R/checkdata.R b/R/checkdata.R
new file mode 100755
index 0000000..7b460b3
--- /dev/null
+++ b/R/checkdata.R
@@ -0,0 +1,159 @@
+# uses
+#   component.dist
+#   reachability
+#   geodist
+#   symmetrize
+#   components.c
+#   geodist.c
+#
+# from R package sna
+
+
+# function to check for ill-conditioned data in the RM
+#   requires package sna
+
+##checkdata<-function(x)
+##{
+##    k<-ncol(x)
+##    adj<-matrix(0,nc=k,nr=k)
+##    for (i in 1:k) for(j in 1:k) {
+##        adj[i,j]<- 1*any(x[,i]>x[,j],na.rm=TRUE)
+##    }
+##
+##    #library(sna)
+##    #adj <- diag.remove(adj)
+##    # %print(adj)  # adjacency marix
+##    cd <- component.dist(adj, connected = "strong")
+##    cm <- cd$membership
+##    cmp <- max(cm)
+##
+##
+##    if(cmp>1) {
+##         cat("Data:",deparse(substitute(x)),"are ill-conditioned\n")
+##         cat("Number of strong components",cmp,"\n")
+##         cat("Component membership of items: ",cm,"\n")
+##    } else
+##         cat("Data:",deparse(substitute(x)),"are well-conditioned\n")
+##}
+##
+######################################################
+component.dist<-
+function (dat, connected = c("strong", "weak", "unilateral",
+    "recursive"))
+{
+# find the connected components of the adjacency matrix 'dat'
+# (trimmed copy from package sna; calls the compiled routine bundled with eRm)
+# returns a list: $membership (component id per node), $csize, $cdist
+#   dat <- as.sociomatrix.sna(dat)
+#   if (is.list(dat))
+#       return(lapply(dat, component.dist, connected = connected))
+#   else if (length(dim(dat)) > 2)
+#       return(apply(dat, 1, component.dist, connected = connected))
+    n <- dim(dat)[2]
+    if (any(dat != t(dat)))                   # asymmetric input: symmetrize per connectedness rule
+        dat <- switch(match.arg(connected), weak = symmetrize(dat,
+            rule = "weak"), unilateral = reachability(dat), strong = symmetrize(reachability(dat),
+            rule = "strong"), recursive = symmetrize(dat, rule = "strong"))
+#   if (match.arg(connected) == "unilateral")
+#       if (any(dat != t(dat)))
+#           warning("Nonunique unilateral component partition detected in component.dist.  Problem vertices will be arbitrarily assigned to one of their components.\n")
+    membership <- rep(0, n)
+    membership <- .C("component_dist_R", as.double(dat), as.double(n),
+        membership = as.double(membership), PACKAGE="eRm")$membership
+    o <- list()
+    o$membership <- membership
+    o$csize <- vector()
+    for (i in 1:max(membership)) o$csize[i] <- length(membership[membership ==
+        i])
+    o$cdist <- vector()
+    for (i in 1:n) o$cdist[i] <- length(o$csize[o$csize == i])
+    o
+}
+
+#reachability - Find the reachability matrix of a graph.
+#reachability - Find the reachability matrix of a graph.
+reachability<-function(dat,geodist.precomp=NULL){
+   # (trimmed copy from package sna) entry (i,j) is 1 iff j is reachable from i
+   #Pre-process the raw input
+#   dat<-as.sociomatrix.sna(dat)
+#   if(is.list(dat))
+#     return(lapply(dat,reachability,geodist.precomp=geodist.precomp))
+#   else if(length(dim(dat))>2)
+#     return(apply(dat,1,reachability,geodist.precomp=geodist.precomp))
+#     return(unlist(apply(dat,1,function(x,geodist.precomp){list(reachability(x, geodist.precomp=geodist.precomp))},geodist.precomp=geodist.precomp),recursive=FALSE))
+   #End pre-processing
+   #Get the counts matrix (number of geodesics between each pair)
+   if(is.null(geodist.precomp))
+      cnt<-geodist(dat)$counts
+   else
+      cnt<-geodist.precomp$counts
+   #Dichotomize and return
+   apply(cnt>0,c(1,2),as.numeric)
+}
+
+#geodist - Find the numbers and lengths of geodesics among nodes in a graph
+#using a BFS, a la Brandes (2000).  (Thanks, Ulrik!)
+geodist<-function(dat,inf.replace=Inf){
+   # (trimmed copy from package sna) BFS geodesic distances a la Brandes (2000);
+   # returns $counts (number of geodesics) and $gdist (geodesic lengths)
+   #Pre-process the raw input
+#   dat<-as.sociomatrix.sna(dat)
+#   if(is.list(dat))
+#     return(lapply(dat,geodist,inf.replace=inf.replace))
+#   else if(length(dim(dat))>2)
+#     return(apply(dat,1,geodist,inf.replace=inf.replace))
+   #End pre-processing
+   n<-dim(dat)[2]
+   #Initialize the matrices
+   sigma<-matrix(0,nrow=n,ncol=n)
+   gd<-matrix(Inf,nrow=n,ncol=n)
+   #Perform the calculation in the compiled routine bundled with eRm
+   geo<-.C("geodist_R",as.double(dat),as.double(n),gd=as.double(gd), sigma=as.double(sigma),NAOK=TRUE,PACKAGE="eRm")
+   #Return the results
+   o<-list()
+   o$counts<-matrix(geo$sigma,n,n)
+   o$gdist<-matrix(geo$gd,n,n)
+   o$gdist[o$gdist==Inf]<-inf.replace  #Patch Infs, if desired
+   o
+}
+
+#symmetrize - Convert a graph or graph stack to a symmetric form.  Current rules
+#for symmetrizing include "upper" and "lower" diagonals, "weak" connectedness
+#rule, and a "strong" connectedness rule.
+symmetrize<-function(mats,rule="weak"){
+   # (trimmed copy from package sna) symmetrize an adjacency matrix
+   # NOTE(review): in this trimmed version only rule=="strong" (logical AND with
+   # the transpose) actually modifies the matrix; the "upper"/"lower"/"weak"
+   # branches are commented out, so those rules return the input unchanged --
+   # confirm this is intended (eRm appears to call it with "strong" only).
+   #Pre-process the raw input
+#   mats<-as.sociomatrix.sna(mats)
+#   if(is.list(mats))
+#     return(lapply(mats,symmetrize,rule=rule))
+   #End pre-processing
+   #Build the input data structures
+#   if(length(dim(mats))>2){
+#      m<-dim(mats)[1]
+#      n<-dim(mats)[2]
+#      o<-dim(mats)[3]
+#      d<-mats
+#   }else{
+      m<-1
+      n<-dim(mats)[1]
+      o<-dim(mats)[2]
+      d<-array(dim=c(1,n,o))
+      d[1,,]<-mats
+#   }
+   #Apply the symmetry rule
+   for(i in 1:m){
+      if(rule=="upper"){
+#         temp<-d[i,,]
+#         for(j in 1:n)
+#            temp[j:n,j]<-temp[j,j:n]
+#         d[i,,]<-temp
+#      }else if(rule=="lower"){
+#         temp<-d[i,,]
+#         for(j in 1:n)
+#            temp[j,j:n]<-temp[j:n,j]
+#         d[i,,]<-temp
+#      }else if(rule=="weak"){
+#         d[i,,]<-matrix(as.numeric(d[i,,]|t(d[i,,])),nrow=n,ncol=o)
+      }else if(rule=="strong"){
+         d[i,,]<-matrix(as.numeric(d[i,,]&t(d[i,,])),nrow=n,ncol=o)
+      }
+   }
+   #Return the symmetrized matrix
+   if(m==1)
+      out<-d[1,,]
+   else
+      out<-d
+   out
+}
diff --git a/R/cldeviance.R b/R/cldeviance.R
new file mode 100755
index 0000000..6dd437c
--- /dev/null
+++ b/R/cldeviance.R
@@ -0,0 +1,58 @@
+cldeviance <- function(object, groups.gr = "rawscore", pi.hat)
+{
+# computes the collapsed deviance of
+# object of class ppar
+# groups.gr... "rawscore" (group persons by raw score) or a numeric group vector of length N
+# pi.hat... matrix of fitted probabilities, same layout as object$X
+# returns list(value, df, p.value) for the chi-square test of the collapsed deviance
+
+  k <- dim(object$X)[2]                             #number of items
+  N <- dim(object$X)[1]                             #number of persons (full)
+
+  #----------- define group vector ---------------
+  if (groups.gr == "rawscore") indvec.full <- rowSums(object$X, na.rm = TRUE)    #person raw scores
+  #if (groups.gr == "pattern") {                                                  #pattern-wise
+  #  X.string <- apply(object$X, 1, paste, collapse = "")
+  #  indvec.full <- rank(X.string, ties.method = "min")
+  #} 
+  if (is.numeric(groups.gr)) {
+    if (length(groups.gr) != dim(object$X)[1]) stop("Group vector must be of length N (number of subjects in object$X)!")
+    indvec.full <- groups.gr
+  }
+  #---------- end define group vector -----------
+  
+  #---- reduce group vector (pers.ex)------
+  if (length(object$pers.ex) > 0) {                 #persons eliminated
+    y <- object$X[-object$pers.ex,]                #observed values
+    indvec.red <- indvec.full[-object$pers.ex]
+  } else {
+    y <- (object$X)
+    indvec.red <- indvec.full
+  }
+
+  #pi.hat <- pmat(object)
+  #gmemb.ext <- rep(object$gmemb, each = k)          #gmemb extended to response vector
+  #pi.hat <- as.vector(t(pmat(object)))              #fitted values
+  
+  dev.g <- tapply(1:length(indvec.red), indvec.red, function(ii) {     #D component for each group
+                  n.g <- length(ii)                                    #number of group subjects
+                  y.g <- colSums(rbind(y[ii,]))                        #group responses
+                  pi.g <- rbind(pi.hat[ii,])[1,]                   #vector with fitted values; NOTE(review): uses first group member's row -- assumes equal pi within group, confirm
+                  devvec <- mapply(function(yy, pp) {                  #compute deviance for each item
+                             if ((yy > 0) && (yy < n.g)) {
+                               term1 <- yy*log(yy/(n.g*pp))
+                               term2 <- (n.g-yy)*log((n.g-yy)/(n.g*(1-pp)))
+                               dev <- sign(yy-n.g*pp)*sqrt(2*(term1+term2))
+                             }
+                             if (yy == 0) dev <- -sqrt(2*n.g*abs(log(1-pp)))   #boundary cases: all 0s
+                             if (yy == n.g) dev <- sqrt(2*n.g*abs(log(pp)))    #... or all 1s in group
+                             return(dev)
+                            },y.g, pi.g)
+                  return(sum(devvec^2))                                #item-wise sum of squared devres
+                })
+
+  value <- sum(dev.g)
+  df <- (length(unique(indvec.red)))*k
+  p.value <- 1-pchisq(value, df = df)
+
+  result <- list(value = value, df = df, p.value = p.value)
+  return(result)
+}
diff --git a/R/cmlprep.R b/R/cmlprep.R
new file mode 100755
index 0000000..70ef165
--- /dev/null
+++ b/R/cmlprep.R
@@ -0,0 +1,74 @@
+`cmlprep` <-
+function(X01,mt_vek,mpoints,Groups,W,gmemb)
+{
+# prepares the data structures for CML estimation (called by likLR):
+# X01... dichotomized data matrix, mt_vek... vector of item category counts,
+# mpoints... number of measurement points, Groups... group contrast vector,
+# W... design matrix, gmemb... NA-pattern group membership vector
+
+  levs <- (gmemb-1)*max(Groups)+Groups              #merge Groups and gmemb vector into level vector
+  
+  if (length(Groups)==1) {                          #if no group contrast
+     x_mt <- colSums(X01,na.rm=TRUE)                #item category raw scores as vector
+     #eventuell x_mtlist auf NA gruppen aufbrechen  (possibly split x_mtlist by NA groups)
+     x_mtlist <- list(x_mt) 
+     ngroups <- 1
+    
+  } else {                                            #if groups defined
+    ngroups <- max(Groups)                            #number of groups
+    x_mtlist <- by(X01,levs,colSums,na.rm=TRUE)       #item-category raw scores for each group (as list)
+    x_mtlist.G <- by(X01,Groups,colSums,na.rm=TRUE)   #item-category raw scores for each group (as list)
+    x_mt <- as.vector(unlist(x_mtlist.G))             #as vector: g1|g2|...
+  }
+
+  end1 <- length(mt_vek)*mpoints*ngroups
+  mt_ind <- rep(1:end1,rep(mt_vek,mpoints*ngroups)) #category index vector (for converting x_mt into list)
+  x_tmt <- split(x_mt,mt_ind)                       #list for likelihood: item-wise * ngroups
+  rtot <- sum(mt_vek)*mpoints                       #maximum attainable raw score
+
+  ics <-  rep(sequence(mt_vek),mpoints)                 #item category scores for each item as vector
+  rv <- apply(X01,1,function(x) {                       #person raw scores
+                      ics[!is.na(x)]%*%na.exclude(x)}) 
+
+  if (ngroups > 1) {                                    #groups
+    seglen <- sum(mt_vek)                               #length of beta vector (called segment)
+    gind <- rep(rep(1:ngroups,rep(seglen,ngroups)),mpoints)                 #index vector for group extraction
+  } else {
+    gind <- rep(1,dim(W)[1])
+  }
+  
+  
+  rvlist <- split(rv,levs)                              #split person raw scores due to levels
+  nrlist <- lapply(rvlist,function(rvel) {                                    #list with item raw score frequencies for each group (transposed)
+                            rvtab <- table(rvel)                              #raw score frequencies
+                            dnamevek <- as.numeric(unlist(dimnames(rvtab)))   #different raw scores for 0 fill up
+                            nr <- rep (0,rtot+1)                              #setting 0 raw score frequencies
+                            nr[dnamevek+1] <- rvtab                           #vector with person raw scores from 1:rtot (with 0 fill up)
+                            nr <- nr[-1]                                      #drop raw score 0
+                            return(nr)
+                          })
+                 
+  
+  if ((ngroups > 1) && (length(unique(gmemb)))) {              #NA groups AND Groups; NOTE(review): length(...) is always >= 1 (truthy) -- probably meant '> 1', confirm
+    gg <- table(Groups,gmemb)
+    gg[gg > 0] <- 1
+       
+    g_NA <- as.vector(rowSums(gg))                             #How many NA-sub groups in each Group
+    
+    grgm <- cbind(Groups, gmemb)
+    grgmst <- apply(grgm,1,function(x) {                       #merge indexes to characters
+                paste(x[1],x[2]) })
+    GGind <- rank(unique(grgmst))    
+    levtab <- table(levs)                                      #frequencies of levels
+    gby <- rep(GGind,levtab)                                   #ordering by NAgroups nested in Group
+  } else {
+    g_NA <- 1
+    gby <- gmemb
+  }
+  
+  NAstruc <- by(!is.na(X01),gby,function(x) {                  #list of unique NA structures for each Group
+                                    x.u <- unique(x)
+                                    as.numeric(as.matrix(x.u))}) #NA's are coded with 0
+                                    
+  NAcheck <- sapply(NAstruc,sum)                               #if for certain NAgroups only 1 item was presented
+                                    
+list(x_mt=x_mt,mt_ind=mt_ind,x_tmt=x_tmt,rtot=rtot,nrlist=nrlist,gind=gind,x_mtlist=x_mtlist,
+     NAstruc=NAstruc,g_NA=g_NA)
+}
+
+
diff --git a/R/coef.eRm.R b/R/coef.eRm.R
new file mode 100755
index 0000000..9b05835
--- /dev/null
+++ b/R/coef.eRm.R
@@ -0,0 +1,9 @@
+`coef.eRm` <-
+function(object, parm = "beta", ...) {         # option "beta" added rh 2010-03-07; extracts estimated parameters from an eRm fit
+   if(parm == "beta")
+       object$betapar                          # item(-category) easiness parameters
+   else if(parm == "eta")
+       object$etapar                           # basic parameters (design-matrix weights)
+   else
+       stop("'parm' incorrectly specified")
+}
diff --git a/R/coef.ppar.R b/R/coef.ppar.R
new file mode 100755
index 0000000..150977f
--- /dev/null
+++ b/R/coef.ppar.R
@@ -0,0 +1,6 @@
+`coef.ppar` <-
+function(object, ...) {                        # extract person parameter (theta) estimates from a ppar object
+   x <- object$theta.table[,1]                 # first column of theta.table holds the point estimates
+   names(x) <- rownames(object$theta.table)    # label estimates with person identifiers
+   x
+}
diff --git a/R/confint.eRm.r b/R/confint.eRm.r
new file mode 100755
index 0000000..d9702c2
--- /dev/null
+++ b/R/confint.eRm.r
@@ -0,0 +1,27 @@
+confint.eRm <- function(object, parm="beta", level = 0.95, ...)
+{
+#parm...either "beta" or "eta"
+#object of class "eRm"
+#Wald-type (normal approximation) confidence intervals: estimate +/- z*SE
+a <- (1 - level)/2                             #tail probability
+a <- c(a, 1 - a)                               #lower and upper tail
+pct <- paste(a*100,"%")                        #column labels, e.g. "2.5 %" "97.5 %"
+fac <- qnorm(a)                                #standard normal quantiles
+
+if (parm=="eta") {
+  cf <- object$etapar
+  ses <- object$se.eta
+  dn <- names(object$etapar) }
+if (parm=="beta") {
+  cf <- object$betapar
+  ses <- object$se.beta
+  dn <- names(object$betapar) }
+
+ci <- array(NA, dim = c(length(cf), 2), dimnames = list(dn,pct))
+ci[] <- cf + ses %o% fac                       #outer product: each estimate +/- z*SE
+ci
+
+}
+
+
+
diff --git a/R/confint.ppar.r b/R/confint.ppar.r
new file mode 100755
index 0000000..2cec67a
--- /dev/null
+++ b/R/confint.ppar.r
@@ -0,0 +1,22 @@
+confint.ppar <- function(object, parm, level = 0.95, ...)
+{
+#parm...either "beta" or "eta"
+#object of class "ppar"
+#returns a list of Wald-type CIs for person parameters, one element per NA group
+a <- (1 - level)/2                             #tail probability
+a <- c(a, 1 - a)                               #lower and upper tail
+pct <- paste(a*100,"%")                        #column labels
+fac <- qnorm(a)                                #standard normal quantiles
+
+cf <- object$thetapar                          #list of person parameters (one element per NA group)
+ses <- object$se.theta                         #list of corresponding standard errors
+
+ci <- list(NULL)
+for (i in 1:length(cf)) {
+  ci[[i]] <- array(NA, dim = c(length(cf[[i]]), 2), dimnames = list(names(object$thetapar[[i]]),pct))
+  ci[[i]][] <- cf[[i]] + ses[[i]] %o% fac      #each estimate +/- z*SE
+}
+names(ci) <- paste("NAgroup",1:length(ci),sep="")
+
+ci
+}
\ No newline at end of file
diff --git a/R/confint.threshold.r b/R/confint.threshold.r
new file mode 100755
index 0000000..f67af39
--- /dev/null
+++ b/R/confint.threshold.r
@@ -0,0 +1,18 @@
+confint.threshold <- function(object, parm, level = 0.95, ...)
+{
+#object of class "threshold"
+#Wald-type (normal approximation) confidence intervals for threshold parameters
+a <- (1 - level)/2                             #tail probability
+a <- c(a, 1 - a)                               #lower and upper tail
+pct <- paste(a*100,"%")                        #column labels
+fac <- qnorm(a)                                #standard normal quantiles
+
+cf <- object$threshpar                         #threshold parameter estimates
+ses <- object$se.thresh                        #corresponding standard errors
+dn <- names(object$threshpar)
+
+ci <- array(NA, dim = c(length(cf), 2), dimnames = list(dn,pct))
+ci[] <- cf + ses %o% fac                       #each estimate +/- z*SE
+ci
+
+}
\ No newline at end of file
diff --git a/R/cwdeviance.r b/R/cwdeviance.r
new file mode 100755
index 0000000..4faf561
--- /dev/null
+++ b/R/cwdeviance.r
@@ -0,0 +1,16 @@
+cwdeviance <- function(object, pi.hat)
+{
+# computes casewise deviance for objects of class ppar
+# pi.hat ... matrix of fitted response probabilities (same layout as object$X.ex)
+  X <- object$X.ex                                           #observed 0/1 responses
+  loglik.full <- sum(X*log(pi.hat)+(1-X)*log(1-pi.hat), na.rm = TRUE)  #for ordinary logistic regression
+  npar.full <- (dim(object$W)[2])+sum(object$npar)           #number of estimated item + person parameters
+  npar.sat <- sum(nrow(pi.hat)*ncol(pi.hat))                 #saturated model: one parameter per cell
+
+  value <- -2*loglik.full                                    #deviance statistic
+  df <- npar.sat-npar.full
+  p.value <- 1-pchisq(value, df = df)
+  
+  result <- list(value = value, df = df, p.value = p.value)
+  result
+}
\ No newline at end of file
diff --git a/R/datcheck.LRtest.r b/R/datcheck.LRtest.r
new file mode 100755
index 0000000..c4c4fe2
--- /dev/null
+++ b/R/datcheck.LRtest.r
@@ -0,0 +1,44 @@
+datcheck.LRtest <- function(x, X, model)
+{
+#sanity checks for LRtest (internal function of LRtest.R)
+#x...submatrix (splitted with "splitcr" and called within Xlist) 
+#X...original data matrix (from model fit)
+
+exclude <- NULL                                             #vector with items to be excluded
+
+#----check full/0 responses------
+n.NA <- colSums(apply(X,2,is.na))                                   #number of NA's per column
+maxri <- (dim(X)[1]*(apply(X,2,max,na.rm=TRUE)))-n.NA               #maximum item raw scores with NA
+ri <- apply(x,2,sum,na.rm=TRUE)                              #item raw scores
+exclude <- c(exclude,which((ri==maxri) | (ri==0)))  
+
+#----check full(-1) NA's---------
+allna.vec <- apply(x,2,function(y) {
+                         naTF <- is.na(y)
+                         (sum(naTF) >= (length(y)-1))        #at most 1 valid response; was length(y-1), which equals length(y) and missed the (-1) case
+                         }) 
+exclude <- c(exclude,which(allna.vec))
+
+#----minimum category = 0--------
+ri.min <- apply(x,2,min,na.rm=TRUE)                                 #if no 0 responses
+exclude <- c(exclude,which(ri.min!=0))
+
+#----RSM-checks for same number of categories--------
+if ((model == "RSM") || (model == "LRSM")) {
+   highcat <- max(X, na.rm=TRUE)                    #highest category in original data
+   highcat.sub <- apply(x,2,max,na.rm=TRUE)             #RSM check for equal number of categories
+   exclude <- c(exclude,which(highcat.sub != highcat))
+}
+
+#---PCM checks for all categories responses---------
+if ((model=="PCM") || (model=="LPCM")) {                         #check if there are missing categories for PCM (for RSM doesn't matter)
+  cat.data <- apply(X,2,function(y) list(unique(na.exclude(y)))) #categories of original data
+  cat.sub <- apply(x,2,function(y) list(unique(na.exclude(y))))  #categories of subgroup data
+  catcomp <- mapply(function(y.s,y.d) {
+                      (length(y.s[[1]]) == (length(y.d[[1]])))
+                    },cat.sub,cat.data)
+  exclude <- c(exclude,which(!catcomp))
+}
+
+return(unique(exclude))             #return vector with items to be eliminated
+}
\ No newline at end of file
diff --git a/R/datcheck.R b/R/datcheck.R
new file mode 100755
index 0000000..9ab27b3
--- /dev/null
+++ b/R/datcheck.R
@@ -0,0 +1,119 @@
+`datcheck` <-
+function(X,W,mpoints,groupvec,model)           #sanity checks and preprocessing of data matrix X (internal)
+{
+  if (is.data.frame(X))  {X <- as.matrix(X)}                  #X as data frame allowed
+    
+  if (is.null(colnames(X))) {                                 #determine item names
+    if (mpoints > 1) {
+      mpind <- paste("t",rep(1:mpoints,each=(dim(X)[2]/mpoints),1),sep="") #time points
+      itemind <- paste("I",1:(dim(X)[2]/mpoints),sep="")  
+      colnames(X) <- paste(itemind,mpind)
+    } else {  
+      colnames(X) <- paste("I",1:dim(X)[2],sep="")                         #item labels
+  }}
+  if (is.null(rownames(X))) rownames(X) <- paste("P",1:dim(X)[1],sep="")   #person labels
+   
+#----------------------- check groupvec --------------------------
+  
+  if ((length(groupvec) > 1) && (length(groupvec) != dim(X)[1])) {
+    stop("Wrong specification of groupvec!")}
+    
+  if (min(groupvec)!=1) {
+    stop("Group specification must start with 1!")}
+    
+  if (length(unique(groupvec))!=(max(groupvec))) {
+    stop("Group vector is specified wrongly!")}
+  
+  if ((max(groupvec) > 1) && (mpoints==1)) {
+    stop("Model not identifiable! Group contrasts can only be imposed for repeated measurement designs.") }
+  
+  if ((length(groupvec) > 1) && any(is.na(X))) {
+    stop("Model with repeated measures, group specification and NAs cannot be computed!") }
+  
+#----------------------- check X --------------------------------
+allna.vec <- apply(X,2,function(y) {all(is.na(y))})                 #eliminate items with all NA's
+if (any(allna.vec)) {stop("There are items with full NA responses which must be deleted!")}
+
+allna.vec <- apply(X,1,function(y) {all(is.na(y))})                 #eliminate persons with all NA's
+if (any(allna.vec)) {stop("There are persons with full NA responses which must be deleted!")}
+
+allna.vec <- apply(X,1,function(y) {sum(is.na(y))})                 #number of NA's per person
+if (any(allna.vec == (dim(X)[2]-1))) {stop("Subjects with only 1 valid response must be removed!")}
+
+ri.min <- apply(X,2,min,na.rm=TRUE)                                 #if no 0 responses
+if (any(ri.min > 0)) {
+  cat("Warning message: The following items have no 0-responses: \n")
+  cat(colnames(X)[ri.min>0],sep=", ")
+  cat("\n")
+  cat("Responses are shifted such that lowest category is 0. \n")
+  cat("\n") 
+} 
+X <- t(apply(X,1,function(y) {y-ri.min}))                           #shift down to 0
+
+ri <- apply(X,2,sum,na.rm=TRUE)                                     #item raw scores
+n.NA <- colSums(apply(X,2,is.na))                                   #number of NA's per column
+maxri <- (dim(X)[1]*(apply(X,2,max,na.rm=TRUE)))-n.NA               #maximum item raw scores with NA
+TFcol <- ((ri==maxri) | (ri==0))                                    #TRUE for items with full or zero raw score
+X.n <- X[,!TFcol]                                                   #new matrix with excluded items
+item.ex <- (1:dim(X)[2])[TFcol]                                     #excluded items
+if (length(item.ex) > 0) {
+  if (mpoints == 1) {
+    cat("Warning message: The following items were excluded due to complete 0/full responses: \n")
+    cat(colnames(X)[item.ex],sep=", ")
+    cat("\n") 
+  } else {
+    cat("The following items show complete 0/full responses: \n")
+    cat(colnames(X)[item.ex],sep=", ")
+    cat("\n") 
+    stop("Estimation cannot be performed! Delete the correponding items for the other measurement points as well! \n")
+}}  
+
+if ((model=="PCM") || (model=="LPCM")) {                         #check if there are missing categories for PCM (for RSM doesn't matter)
+  tablist <- apply(X,2,function(x) list(as.vector(table(x))))
+  tablen <- sapply(tablist,function(x) length(x[[1]]))          #number of observed categories per item
+  xmax <- apply(X,2,max)+1                                      #expected number of categories per item
+  indwrong <- which(tablen != xmax)
+  if (length(indwrong) > 0) {
+    cat("The following items do not have responses on each category: \n")
+    cat(colnames(X)[indwrong],sep=", ")
+    cat("\n")
+    cat("Warning message: Estimation may not be feasible. Please check data matrix! \n")
+    cat("\n")
+  }
+}  
+
+
+#-------------------------- ill conditioned for RM and LLTM --------------
+if ((model=="RM") || (model=="LLTM")) {
+  if (length(table(X.n)) != 2) stop("Dichotomous data matrix required!")
+  k.t <- dim(X.n)[2]/mpoints                                    #check for each mpoint separately
+  t.ind <- rep(1:mpoints,1,each=k.t)                            
+  X.nlv <- split(t(X.n),t.ind)                                  #split X due to mpoints
+  cn.lv <- split(colnames(X.n),t.ind)
+  X.nl <- lapply(X.nlv,matrix,ncol=k.t,byrow=TRUE)
+  for (i in 1:length(X.nl)) colnames(X.nl[[i]]) <- cn.lv[[i]]
+  
+  for (l in 1:mpoints) {                                       #check within mpoint
+    X.nll <- X.nl[[l]]
+    k <- ncol(X.nll)
+    adj <- matrix(0,nc=k,nr=k)                                 #item "dominance" adjacency matrix (partial arg matching for nrow/ncol)
+    for (i in 1:k) for(j in 1:k) {
+        adj[i,j]<- 1*any(X.nll[,i]> X.nll[,j],na.rm=TRUE)
+    }
+    cd <- component.dist(adj, connected = "strong")            #strongly connected components (external helper; presumably from package sna)
+    cm <- cd$membership
+    cmp <- max(cm)
+    if(cmp>1) {                                                #more than one component -> data ill-conditioned
+         cmtab <- table(cm)
+         maxcm.n <- as.numeric(names(cmtab)[cmtab!=max(cmtab)])
+         suspcol <- (1:length(cm))[tapply(cm,1:length(cm),function(x) any(maxcm.n==x))]
+         n.suspcol <- colnames(X.nll)[suspcol]
+         cat("Suspicious items:",n.suspcol,"\n")
+         stop("Estimation stopped due to ill-conditioned data matrix X!")
+    } 
+}}
+#----------------------- end ill-conditioned check -------------------------------   
+ 
+list(X=X.n,groupvec=groupvec)
+}
+
diff --git a/R/datprep_LLTM.R b/R/datprep_LLTM.R
new file mode 100755
index 0000000..72cc839
--- /dev/null
+++ b/R/datprep_LLTM.R
@@ -0,0 +1,56 @@
+`datprep_LLTM` <-
+function(X,W,mpoints,Groups,sum0)              #data/design preparation for the LLTM
+{
+# Design matrix see Fischer & Molenaar, p. 159  
+  
+  #TFrow <- (rowSums(X)==0 | rowSums(X)==(dim(X)[2]))  #el. persons with 0/K rawscore
+  #X <- X[!TFrow,]
+  
+  ngroups <- max(Groups)                          #number of groups
+  X01 <- X                                        #dichotomous data: X is already 0/1
+  N <- dim(X)[1]                                  #number of persons
+  K <- dim(X)[2]/mpoints                            #number of items
+  mt_vek <- rep(1,K)                              #dichotomous: 1 category threshold per item
+  
+  #automatized generation of the design matrix W
+  if (length(W)==1) {
+    W11diag <- diag(1,(sum(mt_vek)-1))                #build up design matrix
+    if (sum0) {
+      w110 <- rep(-1,(sum(mt_vek)-1))                 #sum0 restriction
+    } else {
+      w110 <- rep(0,(sum(mt_vek)-1))                  #first item category parameter set to 0
+    }
+    W11 <- rbind(w110,W11diag)                        #RM design matrix 
+    ZW <- dim(W11)[1]
+    
+    W1 <- NULL
+    for (i in 1:(mpoints*ngroups)) W1 <- rbind(W1,W11)    #first part with virtual items
+    
+    if (mpoints > 1) {                                    #more than 1 measurement points
+      if (ngroups > 1) {                                  #more than 1 group/more mpoints
+        t_mp1 <- rep(1:mpoints,rep(ZW*ngroups,mpoints))
+        t_mp <- factor(t_mp1)
+        g_ng1 <- rep(rep(1:ngroups,rep(ZW,ngroups)),mpoints)
+        g_ng <- factor(g_ng1)
+        W2 <- model.matrix(~t_mp+g_ng)[,-1]               #main effects g and mp
+        W2[1:(ZW*ngroups),] <- 0                          #remove main effects for the first test occasion 
+      } else {                                            #1 group/more mpoints
+        t_mp <- gl(mpoints,ZW)                            #factor for measurement points
+        W2 <- model.matrix(~t_mp)[,-1] }
+    } else if (ngroups > 1) {                             #1 mpoint/more groups
+        g_ng <- gl(ngroups,ZW)
+        W2 <- model.matrix(~g_ng)[,-1] 
+        warning("Group contrasts without repeated measures can not be estimated!")
+    } else if (ngroups == 1) W2 <- NULL                   #1 mpoint/1 group
+        
+  W <- cbind(W1,W2)                                       #design matrix completed
+  colnames(W) <- NULL
+  rownames(W) <- NULL 
+  }
+  
+  list(X=X,X01=X01,mt_vek=mt_vek,W=W)
+#Output: X01      ... 0/1 response matrix of dimension N*rtot
+#        mt_vek   ... vector of length K with number of categories - 1 (for each item)
+#        W        ... design matrix of dimension (K*T)*((K-1)*(T-1)+1)
+}
+
diff --git a/R/datprep_LPCM.R b/R/datprep_LPCM.R
new file mode 100755
index 0000000..59f1ddc
--- /dev/null
+++ b/R/datprep_LPCM.R
@@ -0,0 +1,87 @@
+`datprep_LPCM` <-
+function(X,W,mpoints,Groups,sum0)              #data/design preparation for the LPCM
+{
+  #TFrow <- (rowSums(X)==0)                       #el. persons with 0 rawscore
+  #X <- X[!TFrow,]
+
+  ngroups <- max(Groups)                          #number of groups
+  N <- dim(X)[1]                                  #number of persons
+  K <- dim(X)[2]/mpoints                          #number of items
+  mt_vek <- apply(X,2,max,na.rm=TRUE)[1:K]                   #number of categories - 1 for each item
+  mt_vek_0 <- mt_vek+1                            #number of categories for each item
+  
+  X01_0 <- matrix(rep(0,(N*sum(mt_vek_0)*mpoints)),nrow=N) #empty 0/1 matrix  
+  K1 <- dim(X)[2]                                 #total number of (virtual) items over mpoints
+  cummt0 <- c(0,cumsum(rep(mt_vek_0,mpoints))[1:(K1-1)])+1     #index vector for 0th category
+  indmatp <- apply(X,1,function(xi) {xi+cummt0})  #preparing index matrix for 1 responses
+  imp1 <- as.vector(indmatp)                      #column indices of 1-entries
+  imp2 <- rep(1:N,rep(K1,N))                      #corresponding row (person) indices
+  indmat <- cbind(imp2,imp1)                      #final index matrix for 1 responses
+  X01_0[indmat] <- 1                              #0/1 matrix with 0th category
+  
+  
+  d1 <- 1:N
+  d2 <- 1:K1
+  coor <- expand.grid(d2,d1)[,c(2:1)]               #X coordinates
+  resvec <- as.vector(t(X))                         #X as vector (rowwise)
+  NAind <- as.matrix(coor[is.na(resvec),])          #index matrix for NA's in X
+  mt_vek.t <- rep(mt_vek,mpoints)
+    
+  if (length(NAind) > 0) {
+    NAindlist <- apply(NAind,1,function(x){
+                    #x <- unlist(x)
+                    co <- seq(cummt0[x[2]],cummt0[x[2]]+mt_vek.t[x[2]])
+                    NAind01 <- cbind(rep(x[1],length(co)),co)
+                    rownames(NAind01) <- NULL
+                    data.frame(NAind01,row.names=NULL)                                               #list with NA indices
+                    })
+    indmatNA <- matrix(unlist(lapply(NAindlist, function(x) {t(as.matrix(x))})),ncol=2,byrow=TRUE)   #matrix with NA indices 
+    X01_0[indmatNA] <- NA
+  }
+  
+  X01 <- X01_0[,-cummt0]                          #delete 0th-category columns --> final 0/1 pattern matrix
+    
+  #automatized generation of the design matrix W
+  if (length(W)==1) {
+    W11diag <- diag(1,(sum(mt_vek)-1))                   #build up design matrix
+    
+    if (sum0) {
+      w110 <- rep(-1,(sum(mt_vek)-1))                 #sum0 restriction
+    } else {
+      w110 <- rep(0,(sum(mt_vek)-1))                  #first item category parameter set to 0
+    }
+    
+    W11 <- rbind(w110,W11diag)                               #PCM design matrix 
+    ZW <- dim(W11)[1]
+    
+    W1 <- NULL
+    for (i in 1:(mpoints*ngroups)) W1 <- rbind(W1,W11)  #first part with virtual items
+    
+    if (mpoints > 1) {                            #more than 1 measurement points
+      if (ngroups > 1) {                          #more than 1 group/more mpoints
+        t_mp1 <- rep(1:mpoints,rep(ZW*ngroups,mpoints))
+        t_mp <- factor(t_mp1)
+        g_ng1 <- rep(rep(1:ngroups,rep(ZW,ngroups)),mpoints)
+        g_ng <- factor(g_ng1)
+        W2 <- model.matrix(~t_mp+g_ng)[,-1]               #main effects g and mp
+        W2[1:(ZW*ngroups),] <- 0                          #remove main effects for the first test occasion 
+      } else {                                    #1 group/more mpoints
+        t_mp <- gl(mpoints,ZW)             #factor for measurement points
+        W2 <- model.matrix(~t_mp)[,-1] }
+    } else if (ngroups > 1) {                     #1 mpoint/more groups
+        g_ng <- gl(ngroups,ZW)
+        W2 <- model.matrix(~g_ng)[,-1] 
+        warning("Group contrasts without repeated measures can not be estimated!")
+    } else if (ngroups == 1) W2 <- NULL           #1 mpoint/1 group
+        
+  catvek <- sequence(mt_vek)                      #category numbers 1..m_i for each item
+  W2_cat <- W2*catvek                             #imposing item categories
+  W <- cbind(W1,W2_cat)                           #design matrix completed
+  colnames(W) <- NULL
+  rownames(W) <- NULL 
+  }
+  
+   
+  list(X=X,X01=X01,mt_vek=mt_vek,W=W)
+}
+
diff --git a/R/datprep_LRSM.R b/R/datprep_LRSM.R
new file mode 100755
index 0000000..014862b
--- /dev/null
+++ b/R/datprep_LRSM.R
@@ -0,0 +1,94 @@
+`datprep_LRSM` <-
+function(X,W,mpoints,Groups,sum0)              #data/design preparation for the LRSM
+{
+  #TFrow <- (rowSums(X)==0)                       #el. persons with 0 rawscore
+  #X <- X[!TFrow,]
+
+  ngroups <- max(Groups)                          #number of groups
+  N <- dim(X)[1]                                  #number of persons
+  K <- dim(X)[2]/mpoints                          #number of items
+  hmax <- max(X,na.rm=TRUE)                       #highest category
+  mt_vek <- rep(hmax,K)                           #number of categories - 1 for each item                  
+  mt_vek_0 <- mt_vek+1                            #number of categories for each item
+  
+  X01_0 <- matrix(rep(0,(N*sum(mt_vek_0)*mpoints)),nrow=N) #empty 0/1 matrix  
+  K1 <- dim(X)[2]                                 #total number of (virtual) items over mpoints
+  cummt0 <- c(0,cumsum(rep(mt_vek_0,mpoints))[1:(K1-1)])+1     #index vector for 0th category
+  indmatp <- apply(X,1,function(xi) {xi+cummt0})  #preparing index matrix for 1 responses
+  imp1 <- as.vector(indmatp)                      #column indices of 1-entries
+  imp2 <- rep(1:N,rep(K1,N))                      #corresponding row (person) indices
+  indmat <- cbind(imp2,imp1)                      #final index matrix for 1 responses
+  X01_0[indmat] <- 1                              #0/1 matrix with 0th category
+  
+  d1 <- 1:N
+  d2 <- 1:K1
+  coor <- expand.grid(d2,d1)[,c(2:1)]               #X coordinates
+  resvec <- as.vector(t(X))                         #X as vector (rowwise)
+  NAind <- as.matrix(coor[is.na(resvec),])          #index matrix for NA's in X
+  mt_vek.t <- rep(mt_vek,mpoints)
+   
+  if (length(NAind) > 0) {
+    NAindlist <- apply(NAind,1,function(x){
+                    co <- seq(cummt0[x[2]],cummt0[x[2]]+mt_vek.t[x[2]])
+                    NAind01 <- cbind(rep(x[1],length(co)),co)
+                    rownames(NAind01) <- NULL
+                    data.frame(NAind01,row.names=NULL)                                               #list with NA indices
+                    })
+    indmatNA <- matrix(unlist(lapply(NAindlist, function(x) {t(as.matrix(x))})),ncol=2,byrow=TRUE)   #matrix with NA indices 
+    X01_0[indmatNA] <- NA
+  }
+  
+  X01 <- X01_0[,-cummt0]                          #delete 0th-category columns --> final 0/1 pattern matrix
+  
+  #automatized generation of the design matrix W
+  if (length(W)==1) {                             #generating design matrix
+    e_it <- gl(K,hmax)                            #factor for item parameters
+    e_cat <- gl(hmax,1,K*hmax)                    #factor for category par
+    
+    if (sum0) {
+      Xm <- model.matrix(~e_it+e_cat)[,-1]          #dummy coding
+      Xm[1:hmax,1:(K-1)] <- -1                      #first item to be sum0 normalized
+    } else {
+      Xm <- model.matrix(~e_it+e_cat)[,-1]          #design matrix with 0/1 contrasts (without intercept)
+    }
+    
+    catvek <- 1:hmax                              #preparing the item design vectors
+    e_itnew <- catvek*Xm[,1:(K-1)]                  
+    Xm[,1:(K-1)] <- e_itnew
+    W11 <- Xm                                     #first part (same as RSM) without virtual items
+    ZW <- dim(W11)[1]
+    
+    W1 <- NULL
+    for (i in 1:(mpoints*ngroups)) W1 <- rbind(W1,W11)  #first part with virtual items
+    
+    if (mpoints > 1) {                            #more than 1 measurement points
+      if (ngroups > 1) {                          #more than 1 group/more mpoints
+        t_mp1 <- rep(1:mpoints,rep(ZW*ngroups,mpoints))
+        t_mp <- factor(t_mp1)
+        g_ng1 <- rep(rep(1:ngroups,rep(ZW,ngroups)),mpoints)
+        g_ng <- factor(g_ng1)
+        W2 <- model.matrix(~t_mp+g_ng)[,-1]               #main effects g and mp
+        W2[1:(ZW*ngroups),] <- 0                          #remove main effects for the first test occasion 
+      } else {                                    #1 group/more mpoints
+        mp <- gl(mpoints,ZW)             #factor for measurement points
+        W2 <- model.matrix(~mp)[,-1] }
+    } else if (ngroups > 1) {                     #1 mpoint/more groups
+        g <- gl(ngroups,ZW)
+        W2 <- model.matrix(~g)[,-1] 
+        warning("Group contrasts without repeated measures can not be estimated!")
+    } else if (ngroups == 1) W2 <- NULL           #1 mpoint/1 group
+        
+  
+  contr <- W2*catvek                             #imposing item categories
+  if (is.matrix(contr)==TRUE) {
+     contrrow <- apply(contr,1,function(x) {x*1:dim(contr)[2]})            #imposing multiplicative factor over time & group contrasts
+     W <- cbind(W1,t(contrrow))                     #design matrix completed
+  } else {W <- cbind(W1,contr)}
+  
+  colnames(W) <- NULL
+  rownames(W) <- NULL 
+  }
+  
+  list(X=X,X01=X01,mt_vek=mt_vek,W=W)
+}
+
diff --git a/R/datprep_PCM.R b/R/datprep_PCM.R
new file mode 100755
index 0000000..de8e774
--- /dev/null
+++ b/R/datprep_PCM.R
@@ -0,0 +1,57 @@
+`datprep_PCM` <-
+function(X,W,sum0)                             #data/design preparation for the PCM
+{
+#... X: data matrix with response categories to be converted into 0/1 matrix
+
+  #TFrow <- (rowSums(X)==0)  #el. persons with 0/K rawscore
+  #X <- X[!TFrow,]
+
+  #converting into 0/1 matrix
+  N <- dim(X)[1]                                  #number of persons
+  mt_vek <- apply(X,2,max,na.rm=TRUE)             #number of categories - 1 for each item
+  mt_vek_0 <- mt_vek+1                            #number of categories for each item
+  X01_0 <- matrix(rep(0,(N*sum(mt_vek_0))),nrow=N)#empty 0/1 matrix
+  K <- length(mt_vek)                             #number of items
+  cummt0 <- c(0,cumsum(mt_vek_0)[1:(K-1)])+1      #index vector for 0th category
+  indmatp <- apply(X,1,function(xi) {xi+cummt0})  #preparing index matrix for 1 responses
+  imp1 <- as.vector(indmatp)                      #column indices of 1-entries
+  imp2 <- rep(1:N,rep(K,N))                       #corresponding row (person) indices
+  indmat <- cbind(imp2,imp1)                      #final index matrix for 1 responses
+  X01_0[indmat] <- 1                              #0/1 matrix with 0th category
+  
+  NAindmat <- rbind(imp2,rep(1:K,N),c(t(X)))         #impose NA structure
+  rownames(NAindmat) <- NULL
+  NAind <- t(NAindmat[1:2,is.na(NAindmat[3,])])      #index matrix for NA's in X
+   
+  if (length(NAind) > 0) {
+    NAindlist <- apply(NAind,1,function(x){
+                    co <- seq(cummt0[x[2]],cummt0[x[2]]+mt_vek[x[2]])
+                    NAind01 <- cbind(rep(x[1],length(co)),co)
+                    data.frame(NAind01,row.names=NULL)                                               #list with NA indices
+                    })
+    indmatNA <- matrix(unlist(lapply(NAindlist, function(x) {t(as.matrix(x))})),ncol=2,byrow=TRUE)   #matrix with NA indices 
+    X01_0[indmatNA] <- NA
+  }
+  
+  X01 <- X01_0[,-cummt0]                          #delete 0-category answers --> final 0/1 pattern matrix (dim N*sum(mt_vek))
+    
+       
+  #automatized generation of the design matrix W
+  if (length(W)==1) {
+    W1 <- diag(1,(sum(mt_vek)-1))                   #build up design matrix
+    if (sum0) {
+      w1 <- rep(-1,(sum(mt_vek)-1))                         #sum0 restriction
+    } else {
+      w1 <- rep(0,(sum(mt_vek)-1))                          #first item parameter set to 0
+    }
+    W <- rbind(w1,W1)                               #PCM design matrix 
+    colnames(W) <- NULL
+    rownames(W) <- NULL 
+  }
+
+  list(X=X,X01=X01,mt_vek=mt_vek,W=W)
+#Output: X01      ... 0/1 response matrix of dimension N*rtot
+#        mt_vek   ... vector of length K with number of categories - 1 (for each item)
+#        W        ... design matrix of dimension sum(mt_vek)*sum(mt_vek)
+}
+
diff --git a/R/datprep_RM.R b/R/datprep_RM.R
new file mode 100755
index 0000000..c6617f8
--- /dev/null
+++ b/R/datprep_RM.R
@@ -0,0 +1,26 @@
+`datprep_RM` <-
+function(X,W,sum0)                       #prepares data matrix for Rasch model
+{ 
+  X01 <- X                                        #X is already X(0,1)
+  
+  mt_vek <- rep(1,dim(X01)[2])                    #number of categories for each item
+  K <- length(mt_vek)                             #number of items
+  
+  #automatized generation of the design matrix W
+  if (length(W)==1) {
+    W1 <- diag(1,(K-1))                           #build up design matrix
+    if (sum0) {
+      w1 <- rep(-1,(K-1))                         #sum0 restriction
+    } else {
+      w1 <- rep(0,(K-1))                          #first item parameter set to 0
+    }
+    W <- rbind(w1,W1)                             #RM design matrix  
+    colnames(W) <- NULL
+    rownames(W) <- NULL  
+  }                                                     
+  list(X=X,X01=X01,mt_vek=mt_vek,W=W)
+#Output: X01      ... 0/1 response matrix of dimension N*rtot
+#        mt_vek   ... 1-vector of length K 
+#        W        ... design matrix of dimension K*K 
+}
+
diff --git a/R/datprep_RSM.R b/R/datprep_RSM.R
new file mode 100755
index 0000000..ff9a688
--- /dev/null
+++ b/R/datprep_RSM.R
@@ -0,0 +1,65 @@
+`datprep_RSM` <-
+function(X,W,sum0)                             #data/design preparation for the RSM
+{
+#... X: data matrix with response categories to be converted into 0/1 matrix  
+  
+  max.it <- apply(X,2,max,na.rm=TRUE)             #RSM check for equal number of categories
+  if (length(table(max.it)) > 1) stop("RSM can not be computed since number of categories are not the same for each item!\n")
+  
+  N <- dim(X)[1]                                  #number of persons
+  K <- dim(X)[2]                                  #number of items
+  hmax <- max(X,na.rm=TRUE)                       #highest category
+  mt_vek <- rep(hmax,K)                           #vector with number of categories - 1 for each item
+
+  mt_vek_0 <- mt_vek+1                            #number of categories for each item
+  X01_0 <- matrix(rep(0,(N*sum(mt_vek_0))),nrow=N) #empty 0/1 matrix
+  K <- length(mt_vek)     
+  cummt0 <- c(0,cumsum(mt_vek_0)[1:(K-1)])+1      #index vector for 0th category
+  indmatp <- apply(X,1,function(xi) {xi+cummt0})  #preparing index matrix for 1 responses
+  imp1 <- as.vector(indmatp)                      #column indices of 1-entries
+  imp2 <- rep(1:N,rep(K,N))                       #corresponding row (person) indices
+  indmat <- cbind(imp2,imp1)                      #final index matrix for 1 responses
+  X01_0[indmat] <- 1                              #0/1 matrix with 0th category
+  
+  NAindmat <- rbind(imp2,rep(1:K,N),c(t(X)))         #impose NA structure
+  rownames(NAindmat) <- NULL
+  NAind <- t(NAindmat[1:2,is.na(NAindmat[3,])])      #index matrix for NA's in X
+   
+  if (length(NAind) > 0) {
+    NAindlist <- apply(NAind,1,function(x){
+                    co <- seq(cummt0[x[2]],cummt0[x[2]]+mt_vek[x[2]])
+                    NAind01 <- cbind(rep(x[1],length(co)),co)
+                    data.frame(NAind01,row.names=NULL)                                               #list with NA indices
+                    })
+    indmatNA <- matrix(unlist(lapply(NAindlist, function(x) {t(as.matrix(x))})),ncol=2,byrow=TRUE)   #matrix with NA indices 
+    X01_0[indmatNA] <- NA
+  }
+  
+  X01 <- X01_0[,-cummt0]                          #delete 0-category answers --> final 0/1 pattern matrix (dim N*sum(mt_vek))
+  
+  #automatized generation of the design matrix W
+  if (length(W)==1) {
+    e_it <- gl(K,hmax)                              #factor for item parameters
+    e_cat <- gl(hmax,1,K*hmax)                      #factor for category par
+    
+    if (sum0) {
+      Xm <- model.matrix(~e_it+e_cat)[,-1]          #dummy coding
+      Xm[1:hmax,1:(K-1)] <- -1                      #first item to be sum0 normalized
+    } else {
+      Xm <- model.matrix(~e_it+e_cat)[,-1]          #design matrix with 0/1 contrasts (without intercept)
+    }
+  
+    catvek <- 1:hmax                                #preparing the item design vectors
+    e_itnew <- catvek*Xm[,1:(K-1)]                  
+    Xm[,1:(K-1)] <- e_itnew
+    W <- Xm                                         #final design matrix    
+    colnames(W) <- NULL
+    rownames(W) <- NULL 
+  }
+                                                                    
+  list(X=X,X01=X01,mt_vek=mt_vek,W=W)
+#Output: X01      ... 0/1 response matrix of dimension N*rtot
+#        mt_vek   ... vector of length K with number of categories - 1 (for each item)
+#        W        ... design matrix of dimension sum(mt_vek)*((K-1)+(hmax-1))
+}
+
diff --git a/R/fitcml.R b/R/fitcml.R
new file mode 100755
index 0000000..69e2c25
--- /dev/null
+++ b/R/fitcml.R
@@ -0,0 +1,91 @@
+# Conditional maximum likelihood (CML) estimation of the basic parameters eta.
+# The inner function cml(eta) maps eta onto beta via the design matrix W,
+# builds the gamma (elementary symmetric) functions per group x NA-group
+# combination, and returns the value L1-L2, which nlm() then minimizes.
+# Arguments are prepared by cmlprep()/likLR(); note that x_mt, ngroups and
+# g_NA are not referenced inside cml() (kept for the caller's interface).
+# Returns: the nlm() fit object (with hessian when st.err is TRUE).
+`fitcml` <-
+function (mt_ind,nrlist,x_mt,rtot,W,ngroups,gind,x_mtlist,NAstruc,g_NA,st.err,etaStart)
+{
+
+#cml function for call in nlm
+cml <- function(eta)
+{
+
+beta <- as.vector(W%*%eta)
+beta.list <- split(beta,gind)
+beta.list1 <- beta.list
+
+betaNA <- mapply(function(x,y) {rbind(x,y)},beta.list1,NAstruc,SIMPLIFY=FALSE)         #beta and NAstructure as list (over Groups)
+
+
+Lg <- lapply(betaNA, function(betaNAmat) {        #gamma functions for each Group x NAgroup combination 
+
+         #print(betaNAmat)
+         beta.vec <- betaNAmat[1,]                #get parameter vector beta
+         
+         Lg.NA <- apply(matrix(betaNAmat[-1,],ncol=length(beta.vec)),1, function(NAvec) {                 #likelihood for each NAgroup within Groups                                          
+            
+            beta_list <- as.list(split(beta.vec[NAvec==1],mt_ind[1:(length(beta.vec[NAvec==1]))]))        #list of virtual item-category parameters per item
+            parlist <- lapply(beta_list,exp)                                #initial epsilon as list
+      
+            #------------------gamma functions----------------------
+            # recursive summation: items are merged one at a time, each step
+            # convolving the current gamma vector with the next item's epsilon
+            # vector (0th category included in both).
+            g_iter <- NULL                                                  #computation of the gamma functions
+            K <- length(parlist)
+            for (t in 1:(K-1)) {                                            #building up J1,...,Jt,...,Js
+      
+              if (t==1) {                                                   #first iteration step
+                gterm <- c(1,parlist[[t]])                                  #0th element included
+              }else
+              {
+               gterm <- g_iter                                              #gamma previous iteration with 0th el
+               g_iter <- NULL
+              }
+      
+              parvek <- c(1,parlist[[t+1]])                                 #eps vector in current iteration with 0th el
+              h <- length(parvek)                                           #dimensions for matrix
+              mt <- length(gterm)
+              rtot1 <- h+mt-1                                               #number of possible raw scores (0 included)
+      
+              gtermvek <- rep(c(gterm,rep(0,h)),h)                          #building up matrix for gamma term
+              gtermvek <- gtermvek[-((length(gtermvek)-h+1):length(gtermvek))]      #eliminating last h 0's
+              gmat <- matrix(gtermvek,nrow=rtot1,ncol=h)
+              emat <- matrix(rep(parvek,rep(rtot1,h)),ncol=h,nrow=rtot1)            #building up matrix for eps term
+              gmat_new <- gmat*emat                                                 #merge matrices
+              g_iter <- rowSums(gmat_new)                                           #gamma functions in current iteration are rowsums
+            }
+           #----------------- end gamma functions ------------------
+      
+           Lg.NA <- as.vector(g_iter[2:(rtot+1)])                                                 #final gamma vector stored in gamma (without gamma0)
+           return(Lg.NA)
+           }) 
+})          
+
+
+#----------------- log-likelihood -----------------------
+                               
+#=========to be deleted
+#L1t <- (mapply(function(x,z) {
+#                   x[!is.na(z)]%*%na.exclude(z)
+#                   },nrlist,lapply(Lg,log)))          #sum up L1-terms (group-wise)
+#L2t <- (mapply("%*%",x_mtlist,beta.list1))            #sum up L2-terms (group-wise)
+#print(L1t-L2t)
+#==========end delete
+
+
+L1 <- sum(mapply(function(x,z) {
+                   x[!is.na(z)]%*%na.exclude(z)
+                   },nrlist,lapply(Lg,log)))        #sum up L1-terms (group-wise)
+
+L2 <- sum(mapply("%*%",x_mtlist,beta.list1))        #sum up L2-terms (group-wise)
+
+L1-L2                                               #actual likelihood value (objective handed to nlm)
+#print(L1-L2)                                              
+#----------------- end likelihood -----------------------
+}
+
+
+eta <- etaStart                                     #starting values for eta parameters
+
+# NOTE: warnings stay suppressed on exit from this function; the caller
+# likLR() restores them with options(warn=0) after fitcml() returns.
+options(warn=-1)                                    #turn off warnings for NA/Inf
+fit <- nlm(cml,eta,hessian=st.err,iterlim=5000)     #NLM optimizer
+
+#options(warn=0)
+
+#fit <- optim(eta,cml,method="BFGS",hessian=TRUE) 
+}
+
diff --git a/R/gofIRT.R b/R/gofIRT.R
new file mode 100755
index 0000000..9095a92
--- /dev/null
+++ b/R/gofIRT.R
@@ -0,0 +1 @@
+# S3 generic: goodness-of-fit assessment for IRT models; dispatches on class(object).
+gofIRT <- function(object, groups.hl = 10, cutpoint = 0.5)UseMethod("gofIRT")
diff --git a/R/gofIRT.ppar.R b/R/gofIRT.ppar.R
new file mode 100755
index 0000000..69d8165
--- /dev/null
+++ b/R/gofIRT.ppar.R
@@ -0,0 +1,58 @@
+gofIRT.ppar <- function(object, groups.hl = 10, cutpoint = 0.5)
+{
+#S3 method: goodness-of-fit for dichotomous Rasch models. Computes three
+#deviances, the Hosmer-Lemeshow test, R-squared measures, and classifier/
+#ROC summaries based on the expected probabilities pmat(object).
+#object    ... object of class ppar (from person.parameter)
+#groups.hl ... number of percentile groups for Hosmer-Lemeshow Test
+#cutpoint  ... probability threshold for dichotomizing pi.hat into 0/1 predictions
+#Returns an object of class "gof".
+#(Note: S4 slot access '@' is rendered as ' at ' in this mail-archived diff.)
+
+  if (max(object$X, na.rm = TRUE) > 1) stop("Tests for polytomous models not implemented yet!")
+  if (any(is.na(object$X))) stop("Test for data with missings not implemented yet!")
+   
+  pi.hat <- pmat(object)                       #expected solving probabilities
+  groups.cldev <- "rawscore"                   #collapse by raw score for cldeviance
+
+  #---------------- compute test statistics ----------------------------
+  res.cl <- unlist(cldeviance(object, groups.gr = groups.cldev, pi.hat = pi.hat))
+  res.hl <- unlist(hoslem(object, groups.hl = groups.hl, pi.hat = pi.hat))
+  res.rost <- unlist(rostdeviance(object))
+  res.cw <- unlist(cwdeviance(object, pi.hat))
+  
+  res.table <- rbind(res.cl, res.hl, res.rost, res.cw)
+  colnames(res.table) <- c("value","df","p-value")
+  rownames(res.table) <- c("Collapsed Deviance", "Hosmer-Lemeshow", "Rost Deviance", "Casewise Deviance")
+  #------------------- end test statistics ----------------------------
+  
+  #---------------------- R-squared -----------------------------------
+  res.r2 <- Rsquared(object, pi.hat = pi.hat)
+  #---------------------- end R-squared -------------------------------
+  
+  #--------------------------- classifier stuff -----------------------
+  pred.X <- predict(object, cutpoint = cutpoint)        #predicted data matrix
+  observed <- as.vector(object$X.ex)
+  predicted <- as.vector(pred.X)
+  confmat <- table(predicted, observed)                 #2x2 confusion matrix
+  accuracy <- sum(diag(confmat))/sum(confmat)
+  sens <- as.vector((confmat[2,2])/(colSums(confmat)[2]))
+  spez <- as.vector((confmat[1,1])/(colSums(confmat)[1]))
+  cl.list <- list(confmat = confmat, accuracy = accuracy, sensitivity = sens, specificity = spez)
+  
+  probvec <- as.vector(pi.hat)
+  rocpr.res <- prediction(probvec[!is.na(probvec)], observed[!is.na(observed)])
+  roc.res <- performance(rocpr.res, "tpr","fpr")                   #produce ROC output
+  
+  spezvec <- 1-(roc.res at x.values[[1]])         #vector of specificities (different cuts)
+  sensvec <- roc.res at y.values[[1]]             #vector of sensitivities (different cuts)
+  cutvec <- roc.res at alpha.values[[1]]          #vector with thresholds
+  sscmat <- cbind(cutvec, sensvec - spezvec)[order(abs(sensvec-spezvec), decreasing = FALSE),]
+  thresh.opt <- mean(sscmat[1:2,1])            #optimal cut: average of the two thresholds with smallest |sens-spec|
+   
+  auc.all <- performance(rocpr.res, "auc")                      #area under ROC
+  auc.res <- auc.all at y.values[[1]]
+  gini <- (2*auc.res)-1                        #Gini coefficient = 2*AUC - 1
+  
+  #----------------------- end classifier ----------------------------
+ 
+  result <- list(test.table = res.table, R2 = res.r2, classifier = cl.list, AUC = auc.res, 
+                 Gini = gini, ROC = roc.res, opt.cut = thresh.opt, predobj = rocpr.res)
+  class(result) <- "gof"
+  result
+}
diff --git a/R/hoslem.R b/R/hoslem.R
new file mode 100755
index 0000000..82f3144
--- /dev/null
+++ b/R/hoslem.R
@@ -0,0 +1,31 @@
+hoslem <- function(object, groups.hl = 10, pi.hat)
+{
+# computes the Hosmer-Lemeshow test for objects of class "ppar"
+# groups.hl ... number of groups for percentile splitting
+# pi.hat    ... matrix of expected probabilities (as returned by pmat())
+# Returns list(value, df, p.value) with df = groups.hl - 2.
+
+  K <- dim(object$X)[2]                         #NOTE(review): K and N are computed
+  N <- dim(object$X.ex)[1]                      #but not used below -- verify intended
+  
+  #Pi <- pmat(object)                            #expected values
+  if (length(object$pers.ex) > 0) {
+    y <- as.vector(t(object$X[-object$pers.ex,]))   #observed values
+  } else {
+    y <- as.vector(t(object$X))
+  }
+  pi.hat <- as.vector(t(pi.hat))
+
+  # NOTE(review): with heavily tied pi.hat values the percentile cutpoints may
+  # not be unique and cut() would fail -- confirm inputs make them distinct.
+  cutpoints <- quantile(pi.hat, probs = seq(0, 1, 1/groups.hl))                     #perzentiles
+  groupvec <- cut(pi.hat, cutpoints, include.lowest = TRUE, labels = 1:groups.hl)   #recode ph.hat
+
+  
+  o.g <- tapply(y, groupvec, sum)               #number of 1-responses in group
+  n.g <- table(groupvec)                        #number of responses in group
+  pi.mean <- tapply(pi.hat, groupvec, mean)     #average response probabilites
+
+  value <- sum((o.g - n.g*pi.mean)^2/(n.g *pi.mean*(1-pi.mean)))    #HM-test statistic
+  df <- groups.hl - 2
+  p.value <- 1 - pchisq(value, df)
+
+  result <- list(value = value, df = df, p.value = p.value)
+  result
+}
diff --git a/R/itemfit.R b/R/itemfit.R
new file mode 100755
index 0000000..47bea51
--- /dev/null
+++ b/R/itemfit.R
@@ -0,0 +1,3 @@
+# S3 generic: item fit statistics; dispatches on class(object).
+`itemfit` <-
+function(object)UseMethod("itemfit")
+
diff --git a/R/itemfit.ppar.r b/R/itemfit.ppar.r
new file mode 100755
index 0000000..755fc47
--- /dev/null
+++ b/R/itemfit.ppar.r
@@ -0,0 +1,31 @@
+`itemfit.ppar` <-
+function(object)
+# computes Chi-square based itemfit statistics
+# for object of class "ppar" (from person.parameter)
+# Returns a list of class "ifit": chi-square values (i.fit), degrees of
+# freedom (i.df), standardized residuals, and outfit/infit mean squares.
+{
+  if (length(object$pers.ex)==0) {
+    X <- object$X
+  } else {
+    X <- object$X[-object$pers.ex,]             #drop excluded persons
+  }
+
+  VE <- pifit.internal(object)                  #compute expectation and variance term
+  Emat <- VE$Emat
+  Vmat <- VE$Vmat
+
+  st.res <- (X-Emat)/sqrt(Vmat)
+  sq.res <- st.res^2                            #squared standardized residuals
+  ifit <- colSums(sq.res,na.rm=TRUE)
+
+  idf <- apply(X,2,function(x) {length(na.exclude(x))})   #df: observed responses per item
+
+  i.outfitMSQ <- ifit/idf
+
+  # NOTE(review): no na.rm=TRUE here although the matching numerator below
+  # uses it; with missing responses this sum could become NA -- verify intended.
+  isumVmat<-colSums(Vmat)
+  i.infitMSQ <- colSums(sq.res*Vmat, na.rm = TRUE)/isumVmat
+
+  result <- list(i.fit=ifit,i.df=idf,st.res=st.res,i.outfitMSQ=i.outfitMSQ,i.infitMSQ=i.infitMSQ)
+  class(result) <- "ifit"
+  result
+}
+
diff --git a/R/labeling.internal.r b/R/labeling.internal.r
new file mode 100755
index 0000000..6720d7c
--- /dev/null
+++ b/R/labeling.internal.r
@@ -0,0 +1,96 @@
+labeling.internal <- function(model,X,X01,W,etapar,betapar,mpoints,ngroups)
+{
+#labeling for W, eta, beta.
+#Assigns informative names to the design matrix W, the basic parameters
+#(etapar) and the item-category parameters (betapar), depending on the model
+#type (RM/RSM/PCM/LLTM/...), the number of time points, and the number of
+#groups. Returns list(W, etapar, betapar).
+
+if (is.null(colnames(W))) {                             #eta labels
+    names(etapar) <- paste("eta",1:dim(W)[2])
+    colnames(W) <- names(etapar)
+  } else {
+    names(etapar) <- colnames(W)
+  }
+
+if(model=="RM"){                                                              #  new labelling of
+    if (!is.null(colnames(X)))                                                #  eta parameters for
+       names(etapar) <- colnames(X)[2:ncol(X)]                                #  RM, RSM, PCM
+    else                                                                      #  rh, 25-03-2010
+       names(etapar) <- paste("I",2:ncol(X),sep="")                           #
+}                                                                             #  gives estimated
+                                                                              #  item (RM)
+if(model=="RSM"){                                                             #  item + category (RSM)
+    if (!is.null(colnames(X))) {                                              #  item x category (PCM)
+       names(etapar)[1:(ncol(X)-1)] <- colnames(X)[2:ncol(X)]                 #  parameters
+    } else {                                                                  #
+       # BUGFIX: was names(etapar[1:(ncol(X)-1)]) <- ..., which names a       #
+       # temporary copy only; subset-assignment does not propagate names,     #
+       # so the labels were silently dropped.                                 #
+       names(etapar)[1:(ncol(X)-1)] <- paste("I",2:ncol(X),sep="")            #
+    }                                                                         #
+    maxcat <- max(X,na.rm=TRUE)                                               #
+    if (maxcat>1)                                                             #
+       names(etapar)[ncol(X):length(etapar)] <- paste("Cat ",2:maxcat,sep="") #
+}                                                                             #
+                                                                              #
+                                                                              #
+if(model=="PCM"){                                                             #
+    indmt <- apply(X,2,max,na.rm=TRUE)   # number of categories               #
+    catnames <- sequence(indmt)                                               #
+                                                                              #
+    if (!is.null(colnames(X))) {                                              #
+       itnames <- colnames(X)                                                 #
+    } else {                                                                  #
+       itnames <- paste("I",1:ncol(X),sep="")                                 #
+    }                                                                         #
+    etanames <- rep(itnames, indmt)                                           #
+    etanames <- paste(etanames[-1],catnames[-1],sep=".c")                     #
+    names(etapar) <- etanames                                                 #
+}                                                                             #
+
+if (mpoints == 1) {                                     #no mpoints labels
+  if ((model=="RM") || (model=="LLTM")) {               #no category labels
+    betanames <- paste("beta",colnames(X))
+  } else {
+    indmt <- apply(X,2,max,na.rm=TRUE)
+    catnames <- sequence(indmt)
+    itnames <- rep(colnames(X),indmt)
+    betanames <- paste("beta",paste(itnames,catnames,sep=".c"))
+  }
+
+} else {                                                                 #repeated measurement models
+  indmt0 <- apply(X,2,max,na.rm=TRUE)
+  indmt <- rep(apply(X,2,max,na.rm=TRUE),ngroups)
+  catnames <- sequence(indmt)                                            #category names
+  if (substr(colnames(X)[1],1,2)=="I1") {                                #if item names specified by user
+    itemind <- rep(paste("I",1:(dim(X)[2]/mpoints),sep=""),mpoints)      #item labels
+  } else {
+    itemind <- colnames(X)
+  }
+
+  itnames <- rep(itemind,indmt0)
+
+  if (ngroups > 1) {
+    ind.it <- rep(1:mpoints,each = length(itnames)/mpoints)           #item label index
+    itnames <- as.vector(unlist(tapply(itnames, ind.it, function(x) rep(x, ngroups))))
+  }
+
+
+  if (model == "LLTM") {
+    icnames <- rep(itnames,(dim(W)[1]/length(itnames)))
+  } else {
+    icnames <- paste(itnames,catnames,sep=".c")
+  }
+  t.lab <- paste("t",rep(1:mpoints,each=length(icnames)/mpoints),sep="") #time labels
+  if (ngroups > 1) {
+    g.lab <- rep(paste("g",rep(1:ngroups,each=length(icnames)/mpoints/ngroups),sep=""),mpoints)
+    betanames <- paste(icnames,t.lab,g.lab)
+  } else {
+    betanames <- paste(icnames,t.lab)
+  }
+}
+
+
+if (is.null(rownames(W))) {                      #no labels provided
+    rownames(W) <- betanames
+    names(betapar) <- betanames
+  } else {
+    names(betapar) <- rownames(W)
+ }
+
+list(W=W,etapar=etapar,betapar=betapar)
+}
diff --git a/R/likLR.R b/R/likLR.R
new file mode 100755
index 0000000..bf6cc7a
--- /dev/null
+++ b/R/likLR.R
@@ -0,0 +1,40 @@
+# Dispatches the data preparation for the requested model, checks the starting
+# values and design-matrix dimensions, and runs the CML fit.
+# Returns list(W = final design matrix, parest = nlm fit, X01 = 0/1 pattern matrix).
+`likLR` <-
+function (X,W,mpoints,Groups,model,st.err,sum0,etaStart)
+{
+
+# group persons by identical missing-value pattern (gmemb): each distinct
+# 0/1 NA-pattern string becomes one group; no NAs -> everyone in group 1
+if (any(is.na(X))) {
+  dichX <- ifelse(is.na(X),1,0)
+  strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
+  gmemb <- as.vector(data.matrix(data.frame(strdata)))
+} else {
+  gmemb <- rep(1,dim(X)[1])
+}
+
+#data preparation, design matrix generation for various models
+# NOTE(review): there is no terminating else -- an unrecognized model string
+# leaves Xprep undefined and fails later; verify callers validate 'model'.
+if (model=="RM") { Xprep <- datprep_RM(X,W,sum0)
+} else if (model=="LLTM") { Xprep <- datprep_LLTM(X,W,mpoints,Groups,sum0)
+} else if (model=="RSM") { Xprep <- datprep_RSM(X,W,sum0)
+} else if (model=="PCM") { Xprep <- datprep_PCM(X,W,sum0)
+} else if (model=="LRSM") { Xprep <- datprep_LRSM(X,W,mpoints,Groups,sum0)
+} else if (model=="LPCM")  {Xprep <- datprep_LPCM(X,W,mpoints,Groups,sum0)
+}
+
+if (any(is.na(etaStart))) etaStart <- rep(0,dim(Xprep$W)[2])       #check starting vector
+if (length(etaStart) != dim(Xprep$W)[2]) stop("Vector with starting values does not match number of parameters!") 
+ng <- max(Groups)
+if ((dim(Xprep$W)[1]) != ((dim(Xprep$X01)[2])*ng)) stop("Mismatch between number of rows (beta's) in W and number of items (categories) in X!")
+
+
+Lprep <- cmlprep(Xprep$X01,Xprep$mt_vek,mpoints,Groups,Xprep$W,gmemb)                   
+parest <- fitcml(Lprep$mt_ind,Lprep$nrlist,Lprep$x_mt,Lprep$rtot,Xprep$W,
+                 max(Groups),gind=Lprep$gind,x_mtlist=Lprep$x_mtlist,
+                 Lprep$NAstruc,g_NA=Lprep$g_NA,st.err,etaStart)      
+
+W1 <- Xprep$W
+#rownames(W1) <- NULL
+#colnames(W1) <- paste("eta",1:dim(W1)[2],sep="")
+options(warn=0)                                 #re-enable warnings suppressed in fitcml()
+                         
+list(W=W1,parest=parest,X01=Xprep$X01)                          #returns design matrix and results
+}
+
diff --git a/R/logLik.eRm.r b/R/logLik.eRm.r
new file mode 100755
index 0000000..7abef85
--- /dev/null
+++ b/R/logLik.eRm.r
@@ -0,0 +1,11 @@
+logLik.eRm <- function(object,...)
+{
+# S3 logLik method for fitted eRm models: wraps the conditional
+# log-likelihood and the number of estimated parameters (df) into a
+# list of class "logLik.eRm"  (rh 26-03-2010)
+  structure(list(loglik = object$loglik, df = object$npar),
+            class = "logLik.eRm")
+}
+
+
diff --git a/R/logLik.ppar.r b/R/logLik.ppar.r
new file mode 100755
index 0000000..861b470
--- /dev/null
+++ b/R/logLik.ppar.r
@@ -0,0 +1,9 @@
+logLik.ppar <- function(object,...)
+{
+# S3 logLik method for person-parameter objects (class ppar): wraps the
+# log-likelihood and the number of estimated parameters (df) into a
+# list of class "logLik.ppar"  (rh 26-03-2010)
+  structure(list(loglik = object$loglik, df = object$npar),
+            class = "logLik.ppar")
+}
diff --git a/R/model.matrix.eRm.R b/R/model.matrix.eRm.R
new file mode 100755
index 0000000..bd65217
--- /dev/null
+++ b/R/model.matrix.eRm.R
@@ -0,0 +1,3 @@
+# S3 method: return the design matrix W of a fitted eRm model.
+`model.matrix.eRm` <-
+function(object,...) object$W                 #design matrix
+
diff --git a/R/performance.R b/R/performance.R
new file mode 100755
index 0000000..b85f531
--- /dev/null
+++ b/R/performance.R
@@ -0,0 +1,305 @@
+## Compute a performance measure (optionally against a second measure on the
+## x axis) from a 'prediction' object; ROCR-style API.
+## prediction.obj ... S4 object of class "prediction"
+## measure/x.measure ... measure abbreviations, looked up in the environments
+##   built by .define.environments()
+## Returns a new "performance" S4 object (or the combination of two such
+## objects when x.measure is itself a real measure).
+## (Note: the S4 slot accessor '@' is rendered as ' at ' in this archived diff.)
+performance <- function(prediction.obj, measure,
+                        x.measure="cutoff", ...) {
+
+    ## define the needed environments
+    envir.list <- .define.environments()
+    long.unit.names <- envir.list$long.unit.names
+    function.names <- envir.list$function.names
+    obligatory.x.axis <- envir.list$obligatory.x.axis
+    optional.arguments <- envir.list$optional.arguments
+    default.values <- envir.list$default.values
+    
+    ## abort in case of misuse
+    ## FIX: use inherits() instead of comparing class() with a string --
+    ## class() may return a vector, and length-!=1 conditions are an error
+    ## in modern R
+    if (!inherits(prediction.obj, 'prediction') ||
+        !exists(measure, where=long.unit.names, inherits=FALSE) ||
+        !exists(x.measure, where=long.unit.names, inherits=FALSE)) {
+      stop(paste("Wrong argument types: First argument must be of type",
+                 "'prediction'; second and optional third argument must",
+                 "be available performance measures!"))
+    }
+    
+    ## abort, if attempt is made to use a measure that has an obligatory
+    ## x.axis as the x.measure (cannot be combined)
+    if (exists( x.measure, where=obligatory.x.axis, inherits=FALSE )) {
+        message <- paste("The performance measure",
+                         x.measure,
+                         "can only be used as 'measure', because it has",
+                         "the following obligatory 'x.measure':\n",
+                         get( x.measure, envir=obligatory.x.axis))
+        stop(message)
+    }
+
+    ## if measure is a performance measure with obligatory x.axis, then
+    ## enforce this axis:
+    if (exists( measure, where=obligatory.x.axis, inherits=FALSE )) {
+        x.measure <- get( measure, envir=obligatory.x.axis )
+    }
+
+    if (x.measure == "cutoff" ||
+        exists( measure, where=obligatory.x.axis, inherits=FALSE )) {
+
+        ## fetch from '...' any optional arguments for the performance
+        ## measure at hand that are given, otherwise fill up the default values
+        optional.args <- list(...)
+        argnames <- c()
+        if ( exists( measure, where=optional.arguments, inherits=FALSE )) {
+            argnames <- get( measure, envir=optional.arguments )
+            default.arglist <- list()
+            for (i in 1:length(argnames)) {
+                default.arglist <- c(default.arglist,
+                                     get(paste(measure,":",argnames[i],sep=""),
+                                         envir=default.values, inherits=FALSE))
+            }
+            names(default.arglist) <- argnames
+
+            for (i in 1:length(argnames)) {
+                templist <- list(optional.args,
+                                 default.arglist[[i]])
+                names(templist) <- c('arglist', argnames[i])
+                
+                optional.args <- do.call('.farg', templist)
+            }
+        }
+        optional.args <- .select.args( optional.args, argnames )
+        
+        ## determine function name
+        function.name <- get( measure, envir=function.names )
+
+        ## for each x-validation run, compute the requested performance measure
+        x.values <- list()
+        y.values <- list()
+        for (i in 1:length( prediction.obj at predictions )) {
+            argumentlist <- .sarg(optional.args,
+                                  predictions= prediction.obj at predictions[[i]],
+                                  labels= prediction.obj at labels[[i]],
+                                  cutoffs= prediction.obj at cutoffs[[i]],
+                                  fp= prediction.obj at fp[[i]],
+                                  tp= prediction.obj at tp[[i]],
+                                  fn= prediction.obj at fn[[i]],
+                                  tn= prediction.obj at tn[[i]],
+                                  n.pos= prediction.obj at n.pos[[i]],
+                                  n.neg= prediction.obj at n.neg[[i]],
+                                  n.pos.pred= prediction.obj at n.pos.pred[[i]],
+                                  n.neg.pred= prediction.obj at n.neg.pred[[i]])
+
+            ans <- do.call( function.name, argumentlist )
+
+            if (!is.null(ans[[1]])) x.values <- c( x.values, list( ans[[1]] ))
+            y.values <- c( y.values, list( ans[[2]] ))
+        }
+
+        if (! (length(x.values)==0 || length(x.values)==length(y.values)) ) {
+            stop("Consistency error.")
+        }
+        
+        ## create a new performance object
+        return( new("performance",
+                    x.name       = get( x.measure, envir=long.unit.names ),
+                    y.name       = get( measure, envir=long.unit.names ),
+                    alpha.name   = "none",
+                    x.values     = x.values,
+                    y.values     = y.values,
+                    alpha.values = list() ))
+    } else {
+        perf.obj.1 <- performance( prediction.obj, measure=x.measure, ... )
+        perf.obj.2 <- performance( prediction.obj, measure=measure, ... )
+        return( .combine.performance.objects( perf.obj.1, perf.obj.2 ) )
+    }
+}
+
+## Merge two single-measure performance objects that share the same x axis
+## (typically "cutoff") into one object: the merged cutoffs become the alpha
+## values, and the two measures become the x and y values.
+.combine.performance.objects <- function( p.obj.1, p.obj.2 ) {
+    ## some checks for misusage (in any way, this function is
+    ## only for internal use)
+    if ( p.obj.1 at x.name != p.obj.2 at x.name ) {
+        stop("Error: Objects need to have identical x axis.")
+    }
+    if ( p.obj.1 at alpha.name != "none" || p.obj.2 at alpha.name != "none") {
+        stop("Error: At least one of the two objects has already been merged.")
+    }
+    if (length(p.obj.1 at x.values) != length(p.obj.2 at x.values)) {
+        stop(paste("Only performance objects with identical number of",
+                   "cross-validation runs can be combined."))
+    }
+
+    x.values <- list()
+    x.name <- p.obj.1 at y.name
+    y.values <- list()
+    y.name <- p.obj.2 at y.name
+    alpha.values <- list()
+    alpha.name <- p.obj.1 at x.name
+
+    for (i in 1:length( p.obj.1 at x.values )) {
+        x.values.1 <- p.obj.1 at x.values[[i]]
+        y.values.1 <- p.obj.1 at y.values[[i]]
+        x.values.2 <- p.obj.2 at x.values[[i]]
+        y.values.2 <- p.obj.2 at y.values[[i]]
+
+        ## cutoffs of combined object = merged cutoffs of simple objects
+        cutoffs <- sort( unique( c(x.values.1, x.values.2)), decreasing=TRUE )
+
+        ## calculate y.values at cutoffs using step function
+        y.values.int.1 <- approxfun(x.values.1, y.values.1,
+                                    method="constant",f=1,rule=2)(cutoffs)
+        y.values.int.2 <- approxfun(x.values.2, y.values.2,
+                                    method="constant",f=1,rule=2)(cutoffs)
+
+        ## 'approxfun' ignores NA and NaN, so NA/NaN stretches of the original
+        ## curves are re-inserted over the matching cutoff intervals below
+        objs <- list( y.values.int.1, y.values.int.2)
+        objs.x <- list( x.values.1, x.values.2 )
+        na.cutoffs.1.bool <- is.na( y.values.1) & !is.nan( y.values.1 )
+        nan.cutoffs.1.bool <- is.nan( y.values.1)
+        na.cutoffs.2.bool <- is.na( y.values.2) & !is.nan( y.values.2 )
+        nan.cutoffs.2.bool <- is.nan( y.values.2)
+        bools <- list(na.cutoffs.1.bool, nan.cutoffs.1.bool,
+                      na.cutoffs.2.bool, nan.cutoffs.2.bool)
+        values <- c(NA,NaN,NA,NaN)
+        
+        for (j in 1:4) {
+            for (k in which(bools[[j]])) {
+                interval.max <- objs.x[[ ceiling(j/2) ]][k]
+                interval.min <- -Inf
+                if (k < length(objs.x[[ ceiling(j/2) ]])) {
+                    interval.min <- objs.x[[ ceiling(j/2) ]][k+1]
+                }
+                objs[[ ceiling(j/2) ]][cutoffs <= interval.max &
+                                       cutoffs > interval.min ] <- values[j]
+            }
+        }
+
+        alpha.values <- c(alpha.values, list(cutoffs))
+        x.values <- c(x.values, list(objs[[1]]))
+        y.values <- c(y.values, list(objs[[2]]))
+    }
+    
+    return( new("performance",
+                x.name=x.name, y.name=y.name,
+                alpha.name=alpha.name, x.values=x.values,
+                y.values=y.values, alpha.values=alpha.values))
+}
+
+## Build the five lookup environments used by performance():
+##   long.unit.names    ... human-readable axis label per measure abbreviation
+##   function.names     ... name of the internal .performance.* implementation
+##   obligatory.x.axis  ... measures that fix their own x axis
+##   optional.arguments ... names of optional arguments per measure
+##   default.values     ... defaults for those arguments, keyed "measure:arg"
+.define.environments <- function() {
+    ## There are five environments: long.unit.names, function.names,
+    ## obligatory.x.axis, optional.arguments, default.values
+    
+    ## Define long names corresponding to the measure abbreviations.
+    long.unit.names <- new.env()
+    assign("none","None", envir=long.unit.names)
+    assign("cutoff", "Cutoff", envir=long.unit.names)
+    assign("acc", "Accuracy", envir=long.unit.names)
+    assign("err", "Error Rate", envir=long.unit.names)
+    assign("fpr", "False positive rate", envir=long.unit.names)
+    assign("tpr", "True positive rate", envir=long.unit.names)
+    assign("rec", "Recall", envir=long.unit.names)
+    assign("sens", "Sensitivity", envir=long.unit.names)
+    assign("fnr", "False negative rate", envir=long.unit.names)
+    assign("tnr", "True negative rate", envir=long.unit.names)
+    assign("spec", "Specificity", envir=long.unit.names)
+    assign("ppv", "Positive predictive value", envir=long.unit.names)
+    assign("prec", "Precision", envir=long.unit.names)
+    assign("npv", "Negative predictive value", envir=long.unit.names)
+    assign("fall", "Fallout", envir=long.unit.names)
+    assign("miss", "Miss", envir=long.unit.names)
+    assign("pcfall", "Prediction-conditioned fallout", envir=long.unit.names)
+    assign("pcmiss", "Prediction-conditioned miss", envir=long.unit.names)
+    assign("rpp", "Rate of positive predictions", envir=long.unit.names)
+    assign("rnp", "Rate of negative predictions", envir=long.unit.names)
+    assign("auc","Area under the ROC curve", envir=long.unit.names)
+    assign("cal", "Calibration error", envir=long.unit.names)
+    assign("mwp", "Median window position", envir=long.unit.names)
+    assign("prbe","Precision/recall break-even point", envir=long.unit.names)
+    assign("rch", "ROC convex hull", envir=long.unit.names)
+    assign("mxe", "Mean cross-entropy", envir=long.unit.names)
+    assign("rmse","Root-mean-square error", envir=long.unit.names)
+    assign("phi", "Phi correlation coefficient", envir=long.unit.names)
+    assign("mat","Matthews correlation coefficient", envir=long.unit.names)
+    assign("mi", "Mutual information", envir=long.unit.names)
+    assign("chisq", "Chi-square test statistic", envir=long.unit.names)
+    assign("odds","Odds ratio", envir=long.unit.names)
+    assign("lift", "Lift value", envir=long.unit.names)
+    assign("f","Precision-Recall F measure", envir=long.unit.names)
+    assign("sar", "SAR", envir=long.unit.names)
+    assign("ecost", "Expected cost", envir=long.unit.names)
+    assign("cost", "Explicit cost", envir=long.unit.names)
+
+    ## Define function names corresponding to the measure abbreviations.
+    ## (Several abbreviations are synonyms and map to the same function,
+    ## e.g. tpr/rec/sens, tnr/spec, fpr/fall, fnr/miss, ppv/prec, phi/mat.)
+    function.names <- new.env()
+    assign("acc", ".performance.accuracy", envir=function.names)
+    assign("err", ".performance.error.rate", envir=function.names)
+    assign("fpr", ".performance.false.positive.rate", envir=function.names)
+    assign("tpr", ".performance.true.positive.rate", envir=function.names)
+    assign("rec", ".performance.true.positive.rate", envir=function.names)
+    assign("sens", ".performance.true.positive.rate", envir=function.names)
+    assign("fnr", ".performance.false.negative.rate", envir=function.names)
+    assign("tnr", ".performance.true.negative.rate", envir=function.names)
+    assign("spec", ".performance.true.negative.rate", envir=function.names)
+    assign("ppv", ".performance.positive.predictive.value",
+           envir=function.names)
+    assign("prec", ".performance.positive.predictive.value",
+           envir=function.names)
+    assign("npv", ".performance.negative.predictive.value",
+           envir=function.names)
+    assign("fall", ".performance.false.positive.rate", envir=function.names)
+    assign("miss", ".performance.false.negative.rate", envir=function.names)
+    assign("pcfall", ".performance.prediction.conditioned.fallout",
+           envir=function.names)
+    assign("pcmiss", ".performance.prediction.conditioned.miss",
+           envir=function.names)
+    assign("rpp", ".performance.rate.of.positive.predictions",
+           envir=function.names)
+    assign("rnp", ".performance.rate.of.negative.predictions",
+           envir=function.names)
+    assign("auc", ".performance.auc", envir=function.names)
+    assign("cal", ".performance.calibration.error", envir=function.names)
+    assign("prbe", ".performance.precision.recall.break.even.point",
+           envir=function.names)
+    assign("rch", ".performance.rocconvexhull", envir=function.names)
+    assign("mxe", ".performance.mean.cross.entropy", envir=function.names)
+    assign("rmse", ".performance.root.mean.squared.error",
+           envir=function.names)
+    assign("phi", ".performance.phi", envir=function.names)
+    assign("mat", ".performance.phi", envir=function.names)
+    assign("mi", ".performance.mutual.information", envir=function.names)
+    assign("chisq", ".performance.chisq", envir=function.names)
+    assign("odds", ".performance.odds.ratio", envir=function.names)
+    assign("lift", ".performance.lift", envir=function.names)
+    assign("f", ".performance.f", envir=function.names)
+    assign("sar", ".performance.sar", envir=function.names)
+    assign("ecost", ".performance.expected.cost", envir=function.names)
+    assign("cost", ".performance.cost", envir=function.names)
+
+    ## If a measure comes along with an obligatory x axis (including "none"),
+    ## list it here.
+    obligatory.x.axis <- new.env()
+    assign("mxe", "none", envir=obligatory.x.axis)
+    assign("rmse", "none", envir=obligatory.x.axis)
+    assign("prbe", "none", envir=obligatory.x.axis)
+    assign("auc", "none", envir=obligatory.x.axis)
+    assign("rch","none", envir=obligatory.x.axis)
+    ## ecost requires probability cost function as x axis, which is handled
+    ## implicitly, not as an explicit performance measure.
+    assign("ecost","none", envir=obligatory.x.axis)  
+    
+    ## If a measure has optional arguments, list the names of the
+    ## arguments here.
+    optional.arguments <- new.env()
+    assign("cal", "window.size", envir=optional.arguments)
+    assign("f", "alpha", envir=optional.arguments)
+    assign("cost", c("cost.fp", "cost.fn"), envir=optional.arguments)
+    assign("auc", "fpr.stop", envir=optional.arguments)
+        
+    ## If a measure has additional arguments, list the default values
+    ## for them here. Naming convention: e.g. "cal" has an optional
+    ## argument "window.size" the key to use here is "cal:window.size"
+    ## (colon as separator)
+    default.values <- new.env()
+    assign("cal:window.size", 100, envir=default.values)
+    assign("f:alpha", 0.5, envir=default.values)
+    assign("cost:cost.fp", 1, envir=default.values)
+    assign("cost:cost.fn", 1, envir=default.values)
+    assign("auc:fpr.stop", 1, envir=default.values) 
+    
+    list(long.unit.names=long.unit.names, function.names=function.names,
+         obligatory.x.axis=obligatory.x.axis,
+         optional.arguments=optional.arguments,
+         default.values=default.values)
+}
diff --git a/R/performance_measures.R b/R/performance_measures.R
new file mode 100755
index 0000000..44dd6d3
--- /dev/null
+++ b/R/performance_measures.R
@@ -0,0 +1,482 @@
+## ------------------------------------------------------------------------
+## classical machine learning contingency table measures
+## ------------------------------------------------------------------------
+
.performance.accuracy <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## Accuracy at each cutoff: fraction of all samples that are
      ## classified correctly (true positives plus true negatives).
      n.samples <- length(predictions)
      list( cutoffs, (tp+tn) / n.samples )
  }
+
.performance.error.rate <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## Error rate at each cutoff: fraction of all samples that are
      ## misclassified (false positives plus false negatives).
      n.samples <- length(predictions)
      list( cutoffs, (fp+fn) / n.samples )
  }
+
.performance.false.positive.rate <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## False positive rate (fallout): fraction of the negative samples
      ## that are predicted positive at each cutoff.
      fallout <- fp / n.neg
      list( cutoffs, fallout )
  }
+
.performance.true.positive.rate <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## True positive rate (recall / sensitivity): fraction of the
      ## positive samples that are predicted positive at each cutoff.
      recall <- tp / n.pos
      list( cutoffs, recall )
  }
+
.performance.false.negative.rate <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## False negative rate (miss rate): fraction of the positive samples
      ## that are predicted negative at each cutoff.
      miss <- fn / n.pos
      list( cutoffs, miss )
  }
+
.performance.true.negative.rate <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## True negative rate (specificity): fraction of the negative
      ## samples that are predicted negative at each cutoff.
      specificity <- tn / n.neg
      list( cutoffs, specificity )
  }
+
.performance.positive.predictive.value <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## Precision: among the samples predicted positive at each cutoff,
      ## the fraction that is truly positive. NaN where fp+tp == 0.
      list( cutoffs, tp / (fp + tp) )
  }
+
.performance.negative.predictive.value <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## Among the samples predicted negative at each cutoff, the
      ## fraction that is truly negative. NaN where tn+fn == 0.
      list( cutoffs, tn / (tn + fn) )
  }
+
.performance.prediction.conditioned.fallout <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## Prediction-conditioned fallout: among positive predictions, the
      ## fraction that is actually negative, i.e. 1 - precision.
      ## (Precision is computed inline; same formula as
      ## .performance.positive.predictive.value.)
      ppv <- tp / (fp + tp)
      list( cutoffs, 1 - ppv )
  }
+
.performance.prediction.conditioned.miss <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## Prediction-conditioned miss: among negative predictions, the
      ## fraction that is actually positive, i.e. 1 - NPV.
      ## (NPV is computed inline; same formula as
      ## .performance.negative.predictive.value.)
      npv <- tn / (tn + fn)
      list( cutoffs, 1 - npv )
  }
+
+## ------------------------------------------------------------------------
+## ...not actually performance measures, but very useful as a second axis
+## against which to plot a "real" performance measure
+## (popular example: lift charts)
+## ------------------------------------------------------------------------
+
.performance.rate.of.positive.predictions <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## Fraction of all samples predicted positive at each cutoff;
      ## useful as the x axis of lift charts.
      n.samples <- n.pos + n.neg
      list( cutoffs, n.pos.pred / n.samples )
  }
+
.performance.rate.of.negative.predictions <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## Fraction of all samples predicted negative at each cutoff.
      n.samples <- n.pos + n.neg
      list( cutoffs, n.neg.pred / n.samples )
  }
+
+
+## ------------------------------------------------------------------------
+## Classical statistical contingency table measures
+## ------------------------------------------------------------------------
+
.performance.phi <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## Phi (Matthews) correlation coefficient of the 2x2 contingency
      ## table at each cutoff.
      numerator <- tn*tp - fn*fp
      denominator <- sqrt(n.pos * n.neg * n.pos.pred * n.neg.pred)
      list( cutoffs, numerator / denominator )
  }
+
.performance.mutual.information <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## Mutual information (in bits) between predicted and true class,
      ## computed from the 2x2 contingency table at each cutoff:
      ## MI = log2(n) + (1/n) * sum_ij k_ij * log2( k_ij / (k_i. * k_.j) )

      n.samples <- n.pos + n.neg
      mi <- c()
      for (k in 1:length(cutoffs)) {
          ## observed cell counts (rows: predicted class, cols: true class)
          kij <- rbind( c(tn[k],fn[k]), c(fp[k],tp[k]) )

          ## per-cell products of the marginal totals (row sum * col sum)
          ki.j. <- rbind(c(n.neg * n.neg.pred[k], n.neg.pred[k] * n.pos),
                         c(n.neg * n.pos.pred[k], n.pos * n.pos.pred[k]))

          log.matrix <- log2( kij / ki.j.)
          ## 0 * log(0) is defined as 0 in the entropy sum: patch the -Inf
          ## entries so empty cells contribute nothing.
          log.matrix[kij/ki.j.==0] <- 0
          
          mi <- c(mi,  log2(n.samples) + sum( kij * log.matrix) / n.samples  )
      }

      list( cutoffs, mi )
  }
+
+
.performance.chisq <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## Chi-square test statistic of the 2x2 contingency table at each
      ## cutoff (rows: predicted class, cols: true class).
      chisq <- c()
      for (i in 1:length(cutoffs)) {
          A <- rbind( c( tn[i], fn[i]), c(fp[i], tp[i]) )
          ## Spell out FALSE rather than 'F': F is an ordinary binding
          ## that user code can rebind, which would silently switch on
          ## Yates' continuity correction here.
          chisq <- c(chisq, chisq.test(A, correct=FALSE)$statistic )
      }
      list( cutoffs, chisq )
  }
+
.performance.odds.ratio <- 
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## Odds ratio of the 2x2 contingency table at each cutoff:
      ## (tp*tn)/(fn*fp). Inf where fn*fp == 0.
      odds <- tp * tn / (fn * fp)
      list( cutoffs, odds )
  }
+
+## ------------------------------------------------------------------------
+## Other measures based on contingency tables
+## ------------------------------------------------------------------------
+
.performance.lift <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## Lift: recall divided by the rate of positive predictions, i.e.
      ## how much better than random the positive predictions are.
      n.samples <- n.pos + n.neg
      recall <- tp / n.pos
      rpp <- n.pos.pred / n.samples
      list( cutoffs, recall / rpp )
  }
+
.performance.f <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred, alpha) {
      ## F measure: weighted harmonic mean of precision and recall,
      ##   F = 1 / ( alpha/precision + (1-alpha)/recall ).
      ## Precision is computed inline (same formula as
      ## .performance.positive.predictive.value).
      prec <- tp / (fp + tp)
      recall <- tp / n.pos
      list( cutoffs, 1 / ( alpha*(1/prec) + (1-alpha)*(1/recall) ) )
  }
+
.performance.rocconvexhull <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## Convex hull of the ROC curve: returns the (fpr, tpr) vertices of
      ## the hull segment lying above the main diagonal, sorted by fpr.
      x <- fp / n.neg
      y <- tp / n.pos

      ## drop points with non-finite coordinates (e.g. when n.pos or
      ## n.neg is zero)
      finite.bool <- is.finite(x) & is.finite(y)
      x <- x[ finite.bool ]
      y <- y[ finite.bool ]
      if (length(x) < 2) {
          stop("Not enough distinct predictions to compute ROC convex hull.")
      }

      ## keep only points on the convex hull
      ind <- chull(x, y)
      x.ch <- x[ind]
      y.ch <- y[ind]

      ## keep only convex hull points above the diagonal, except (0,0)
      ## and (1,1), which are re-attached explicitly as the end points
      ind.upper.triangle <- x.ch < y.ch
      x.ch <- c(0, x.ch[ind.upper.triangle], 1)
      y.ch <- c(0, y.ch[ind.upper.triangle], 1)

      ## sort remaining points by ascending x value
      ind <- order(x.ch)
      x.ch <- x.ch[ind]
      y.ch <- y.ch[ind]

      list( x.ch, y.ch )
  }
+
+## ----------------------------------------------------------------------------
+## Cutoff-independent measures
+## ----------------------------------------------------------------------------
+
.performance.auc <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred, fpr.stop) {
      ## Area under the ROC curve by trapezoidal integration, optionally
      ## truncated at false positive rate 'fpr.stop' (partial AUC).
      ## Returns list(x.values=c(), y.values=auc): AUC is a scalar, so
      ## there is no x axis.
      x <- fp / n.neg
      y <- tp / n.pos

      ## drop points with non-finite coordinates (e.g. when n.pos or
      ## n.neg is zero)
      finite.bool <- is.finite(x) & is.finite(y)
      x <- x[ finite.bool ]
      y <- y[ finite.bool ]
      if (length(x) < 2) {
          stop(paste("Not enough distinct predictions to compute area",
                     "under the ROC curve."))
      }

      if (fpr.stop < 1) {
          ## cut the curve at fpr.stop, linearly interpolating the tpr there
          ## NOTE(review): assumes x is sorted ascending and fpr.stop lies
          ## below the largest observed fpr -- otherwise ind+1 indexes past
          ## the end of x; confirm with callers.
          ind <- max(which( x <= fpr.stop ))
          tpr.stop <- approxfun( x[ind:(ind+1)], y[ind:(ind+1)] )(fpr.stop)
          x <- c(x[1:ind], fpr.stop)
          y <- c(y[1:ind], tpr.stop)
      }

      ## vectorized trapezoidal rule; replaces the former element-by-element
      ## interpreted loop and a dead 'ans <- list()' pre-assignment
      auc <- sum(0.5 * diff(x) * (y[-1] + y[-length(y)]))

      ans <- list( c(), auc)
      names(ans) <- c("x.values","y.values")
      return(ans)
  }
+
.performance.precision.recall.break.even.point <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## Precision/recall break-even points: cutoffs at which precision
      ## equals recall, found as intersections of the two curves.
      ## NOTE(review): relies on prediction() and performance() defined
      ## elsewhere in this package.

      ## recompute precision (x) vs. recall (y) over all cutoffs (alpha)
      pred <- prediction( predictions, labels)
      perf <- performance( pred, measure="prec", x.measure="rec")
      x <- rev(perf@x.values[[1]])
      y <- rev(perf@y.values[[1]])
      alpha <- rev(perf@alpha.values[[1]])

      ## drop entries where cutoff, precision, or recall is not finite
      finite.bool <- is.finite(alpha) & is.finite(x) & is.finite(y)
      x <- x[ finite.bool ]
      y <- y[ finite.bool ]
      alpha <- alpha[ finite.bool ]

      if (length(x) < 2) {
          stop(paste("Not enough distinct predictions to compute",
                     "precision/recall intersections."))
      }
      intersection.cutoff <- c()
      intersection.pr <- c()
      
      ## find all intersection points by looking at all intervals (i,i+1):
      ## if the difference function between x and y has different signs at the
      ## interval boundaries, then an intersection point is in the interval;
      ## compute as the root of the difference function
      if ( (x[1]-y[1]) == 0) {
          intersection.cutoff <- c( alpha[1] )
          intersection.pr <- c( x[1] )
      }

      for (i in (1:(length(alpha)-1))) {
          if ((x[i+1]-y[i+1]) == 0) {
              ## exact equality at the right interval boundary
              intersection.cutoff <- c( intersection.cutoff, alpha[i+1] )
              intersection.pr <- c( intersection.pr, x[i+1] )
          } else if ((x[i]-y[i])*(x[i+1]-y[i+1]) < 0 ) {
              ## sign change: root of the piecewise-linear difference
              ans <- uniroot(approxfun(c(alpha[i], alpha[i+1] ),
                                       c(x[i]-y[i], x[i+1]-y[i+1])),
                             c(alpha[i],alpha[i+1]))
              intersection.cutoff <- c(intersection.cutoff, ans$root)
              intersection.pr <- c(intersection.pr, ans$f.root)
          }
      }

      ## return in descending-cutoff order, matching other measures
      list( rev(intersection.cutoff), rev(intersection.pr) )
  }
+
+
.performance.calibration.error <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred, window.size) {
      ## Calibration error: for every sliding window of 'window.size'
      ## consecutive predictions (sorted decreasingly), the absolute
      ## difference between the mean predicted probability and the
      ## observed fraction of positives in that window. The x values
      ## returned are the window medians of the predictions.

      if (window.size > length(predictions)) {
          stop("Window size exceeds number of predictions.")
      }
      if (min(predictions)<0 || max(predictions)>1) {
          stop("Calibration error needs predictions between 0 and 1")
      }
      
      ## second factor level is the positive class by convention
      pos.label <- levels(labels)[2]
      neg.label <- levels(labels)[1]

      ## sort predictions (and labels alongside) in decreasing order
      ordering <- rev(order( predictions ))
      predictions <- predictions[ ordering ]
      labels <- labels[ ordering ]

      median.cutoffs <- c()
      calibration.errors <- c()

      for (left.index in 1 : (length(predictions) - window.size+1) ) {
          right.index <- left.index + window.size - 1
          pos.fraction <-
            sum(labels[left.index : right.index] == pos.label) / window.size
          mean.prediction <- mean( predictions[ left.index : right.index ] )

          calibration.errors <- c(calibration.errors,
                                  abs(pos.fraction - mean.prediction))
          median.cutoffs <- c(median.cutoffs,
                              median(predictions[left.index:right.index]))
      }
      list( median.cutoffs, calibration.errors )
  }
+
+
.performance.mean.cross.entropy <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## Mean cross-entropy of probabilistic predictions against 0/1
      ## labels. Cutoff-independent: x.values is empty.
      if (! all(levels(labels)==c(0,1)) ||
          any(predictions<0) || any(predictions>1) ) {
          stop(paste("Class labels need to be 0 and 1 and predictions between",
                     "0 and 1 for mean cross entropy."))
      }
      
      pos.label <- levels(labels)[2]
      neg.label <- levels(labels)[1]

      ## log-likelihood of the positive and the negative samples
      ll.pos <- sum( log( predictions[which(labels==pos.label)] ))
      ll.neg <- sum( log( 1 - predictions[which(labels==neg.label)] ))

      list( c(), - 1/length(predictions) * (ll.pos + ll.neg) )
  }
+
+
.performance.root.mean.squared.error <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## Root mean squared error between numeric predictions and labels.
      ## Cutoff-independent: x.values is empty.
      ## Labels arrive as a factor; map them back to their numeric values.
      labels <- as.numeric(levels(labels))[labels]
      if (any(is.na(labels))) {
          stop("For rmse predictions have to be numeric.")
      }
      residuals <- predictions - labels
      list( c(),  sqrt( 1/length(predictions) * sum( residuals^2 ))  )
  }
+
+## ----------------------------------------------------------------------------
+## Derived measures:
+## ----------------------------------------------------------------------------
+
.performance.sar <- function( predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
    ## SAR combined measure: 1/3 * (Accuracy + (1 - RMSE) + AUC),
    ## evaluated per cutoff. NOTE(review): relies on prediction() and
    ## performance() defined elsewhere in this package; AUC and RMSE are
    ## scalars that get recycled across the accuracy vector.

    pred <- prediction( predictions, labels)
    perf.acc <- performance( pred, measure="acc")
    perf.rmse <- performance( pred, measure="rmse")
    perf.auc <- performance( pred, measure="auc")

    list(cutoffs,
         1/3 * (perf.acc@y.values[[1]] +
                (1 - perf.rmse@y.values[[1]]) +
                perf.auc@y.values[[1]]))
}
+
+## ----------------------------------------------------------------------------
+## Measures taking into account actual cost considerations
+## ----------------------------------------------------------------------------
+
.performance.expected.cost <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred) {
      ## Expected-cost (cost) curve: each ROC point (fpr, tpr) becomes a
      ## line from (0, fpr) to (1, 1-tpr); the curve is the lower envelope
      ## of all these lines. Returns the envelope vertices sorted by x.
      ## NOTE(review): relies on .construct.linefunct() and
      ## .intersection.point() defined elsewhere in this package.

      ## kick out suboptimal values (i.e. fpr/tpr pairs for which another
      ## one with the same fpr and a higher tpr exists, or one for which
      ## one with the same tpr but a lower fpr exists)

      if (n.neg==0 || n.pos==0) {
          stop(paste("At least one positive and one negative sample are",
                     "needed to compute a cost curve."))
      }
      fpr <- fp / n.neg
      tpr <- tp / n.pos

      ## sort by fpr (ascending), in case of ties by descending tpr
      ind <- order(fpr,-tpr)
      
      fpr <- fpr[ind]
      tpr <- tpr[ind]
      ## for tied fprs, only the one with the highest tpr is kept
      ind <- !duplicated(fpr)
      fpr <- fpr[ind]
      tpr <- tpr[ind]

      ## for tied tprs, only keep the one with the lowest fpr
      ind <- order(-tpr,fpr)
      fpr <- fpr[ind]
      tpr <- tpr[ind]
      ind <- !duplicated(tpr)
      fpr <- fpr[ind]
      tpr <- tpr[ind]

      ## make sure the trivial endpoints (0,0) and (1,1) are present
      if (!any(0==fpr & 0==tpr)) {
          fpr <- c(0,fpr)
          tpr <- c(0,tpr)
      }
      if (!any(1==fpr & 1==tpr)) {
          fpr <- c(fpr,1)
          tpr <- c(tpr,1)
      }
      
      ## one cost line per remaining ROC point
      f <- list()
      for (i in 1:length(fpr)) {
          f <- c(f, .construct.linefunct( 0, fpr[i], 1, 1-tpr[i] ))
      }
      
      ## compute all pairwise intersection points; keep only those lying
      ## on the lower envelope (i.e. minimal y among all lines at that x,
      ## up to floating point tolerance)
      x.values <- c()
      y.values <- c()
      for (i in 1:(length(fpr)-1)) {
          for (j in (i+1):length(fpr)) {
              ans <- .intersection.point( f[[i]], f[[j]] )
              if (all(is.finite(ans))) {
                  y.values.at.current.x <- c()
                  for (k in 1:length(f)) {
                      y.values.at.current.x <- c(y.values.at.current.x,
                                                 f[[k]](ans[1]))
                  }
                  if (abs(ans[2] - min(y.values.at.current.x )) <
                      sqrt(.Machine$double.eps)) {
                      
                      x.values <- c(x.values, ans[1])
                      y.values <- c(y.values, ans[2])
                  }
              }
          }
      }

      ## the envelope always starts at (0,0) and ends at (1,0)
      if (!any(0==x.values & 0==y.values)) {
          x.values <- c(0,x.values)
          y.values <- c(0,y.values)
      }
      if (!any(1==x.values & 0==y.values)) {
          x.values <- c(x.values,1)
          y.values <- c(y.values,0)
      }

      ind <- order( x.values)
      list( x.values[ind], y.values[ind] )
  }
+
+
.performance.cost <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn,
           n.pos, n.neg, n.pos.pred, n.neg.pred, cost.fp, cost.fn) {
      ## Expected misclassification cost at each cutoff:
      ##   P(+) * fnr * cost.fn + P(-) * fpr * cost.fp
      n.samples <- n.pos + n.neg
      prior.pos <- n.pos / n.samples
      prior.neg <- n.neg / n.samples
      fnr <- fn / n.pos
      fpr <- fp / n.neg
      list( cutoffs, prior.pos * fnr * cost.fn + prior.neg * fpr * cost.fp )
  }
+
+
diff --git a/R/performance_plots.R b/R/performance_plots.R
new file mode 100755
index 0000000..f992861
--- /dev/null
+++ b/R/performance_plots.R
@@ -0,0 +1,533 @@
+## ----------------------------------------------------------------------------
+## plot method for objects of class 'performance'
+## ----------------------------------------------------------------------------
+
## Select from 'arglist' the arguments relevant for graphics function
## 'fname': for 'plot'/'plot.xy', those whose names match the function's
## formals or par() settings; for any other fname, those carrying the
## "<fname>." prefix. NOTE(review): relies on .select.args() and
## .select.prefix() defined elsewhere in this package.
.get.arglist <- function( fname, arglist ) {
     if (fname=='plot')
       return(.select.args(arglist,
                           union(names(formals(plot.default)), names(par()))))
     else if (fname=='plot.xy')
       return(.select.args(arglist,
                           union( names(formals(plot.xy)), names(par()))))
     else return( .select.prefix( arglist, fname) )
 }
+
## Thin out the points of each curve in a performance object:
## 'downsampling' in (0,1) keeps that fraction of points, values > 1 keep
## that many points, anything else leaves the curve unchanged. Cutoffs,
## x, and y values are subsampled with the same index so they stay
## aligned.
.downsample <- function( perf, downsampling ) {
    for (i in 1:length(perf@alpha.values)) {
        if (downsampling < 1 && downsampling > 0)
          ind <- round(seq(1, length(perf@alpha.values[[i]]),
                           length=(length(perf@alpha.values[[i]]) *
                                   downsampling)))
        else if (downsampling > 1)
          ind <- round(seq(1, length(perf@alpha.values[[i]]),
                           length=downsampling))
        else ind <- 1:length(perf@alpha.values[[i]])
        perf@alpha.values[[i]] <- perf@alpha.values[[i]][ind]
        perf@x.values[[i]] <- perf@x.values[[i]][ind]
        perf@y.values[[i]] <- perf@y.values[[i]][ind]
    }
    return(perf)
}
+
## Top-level plot routine for 'performance' objects: validates the
## requested options, cleans the curve data (downsampling, infinite
## cutoffs, non-finite points), draws the canvas unless add=TRUE, and
## dispatches to the .performance.plot.* helper for the chosen averaging
## method.
.plot.performance <-
  function(perf, ..., avg="none",
           spread.estimate="none", spread.scale=1, show.spread.at=c(),
           colorize=FALSE, colorize.palette=rev(rainbow(256,start=0, end=4/6)),
           colorkey=colorize, colorkey.relwidth=0.25, colorkey.pos="right",
           print.cutoffs.at=c(),
           cutoff.label.function=function(x) { round(x,2) },
           downsampling=0, add=FALSE) {

      ## merge the named formals and ... into one flat argument list that
      ## the helper functions pick from via .garg/.sarg/.farg
      arglist <- c(lapply( as.list(environment()), eval ), list(...) )

      if (length(perf@y.values) != length(perf@x.values)) {
          stop("Performance object cannot be plotted.")
      }
      if (is.null(perf@alpha.values) && (colorize==TRUE ||
                                         length(print.cutoffs.at)>0)) {
          stop(paste("Threshold coloring or labeling cannot be performed:",
                     "performance object has no threshold information."))
      }
      if ((avg=="vertical" || avg=="horizontal") &&
          (colorize==TRUE || length(print.cutoffs.at)>0)) {
          stop(paste("Threshold coloring or labeling is only well-defined for",
                     "'no' or 'threshold' averaging."))
      }
    
      if (downsampling >0 ) perf <- .downsample( perf, downsampling)

      ## for infinite cutoff, assign maximal finite cutoff + mean difference
      ## between adjacent cutoff pairs
      if (length(perf@alpha.values)!=0) perf@alpha.values <-
        lapply(perf@alpha.values,
               function(x) { isfin <- is.finite(x);
                             x[is.infinite(x)] <-
                               (max(x[isfin]) +
                                mean(abs(x[isfin][-1] -
                                         x[isfin][-length(x[isfin])])));
                             x } )
      ## remove samples with x or y not finite
      for (i in 1:length(perf@x.values)) {
          ind.bool <- (is.finite(perf@x.values[[i]]) &
                       is.finite(perf@y.values[[i]]))
          
          if (length(perf@alpha.values)>0)
            perf@alpha.values[[i]] <- perf@alpha.values[[i]][ind.bool]
          
          perf@x.values[[i]] <- perf@x.values[[i]][ind.bool]
          perf@y.values[[i]] <- perf@y.values[[i]][ind.bool]
      }
      arglist <- .sarg( arglist, perf=perf)
    
      if (add==FALSE) do.call( ".performance.plot.canvas", arglist )

      ## dispatch on the requested curve-averaging method
      if (avg=="none") do.call(".performance.plot.no.avg", arglist)  
      else if (avg=="vertical")
        do.call(".performance.plot.vertical.avg", arglist)
      else if (avg=="horizontal")
        do.call(".performance.plot.horizontal.avg", arglist)
      else if (avg=="threshold")
        do.call(".performance.plot.threshold.avg", arglist)
  }
+
+## ---------------------------------------------------------------------------
+## initializing plots and plotting a canvas
+## (can be skipped using 'plot( ..., add=TRUE)'
+## ---------------------------------------------------------------------------
+
## Set up an empty plotting canvas for a performance plot: axis labels
## (prefixed with "Average" when the corresponding dimension is
## averaged), axis limits from the data range, the axes and box, and --
## if colorkey=TRUE -- a color key mapping cutoff values to colors,
## drawn either to the right of or above the plot region.
.performance.plot.canvas <- function(perf, avg, ...) {

    arglist <- list(...)

    axis.names <- list(x=perf@x.name, y=perf@y.name)
    if (avg=="horizontal" || avg=="threshold")
      axis.names$x <- paste("Average", tolower(axis.names$x))
    if (avg=="vertical" || avg=="threshold")
      axis.names$y <- paste("Average", tolower(axis.names$y))
    arglist <- .farg(arglist, xlab=axis.names$x, ylab=axis.names$y)

    arglist <-
      .farg(arglist,
            xlim=c(min(unlist(perf@x.values)), max(unlist(perf@x.values))),
            ylim=c(min(unlist(perf@y.values)), max(unlist(perf@y.values))))
    
    ## empty plot; axes are drawn separately so user-supplied xaxis./yaxis.
    ## arguments can be honored
    do.call("plot", .sarg(.slice.run(.get.arglist('plot', arglist)),
                          x=0.5, y=0.5, type='n', axes=FALSE))
    do.call( "axis", .sarg(.slice.run(.get.arglist('xaxis', arglist)),
                           side=1))
    do.call( "axis", .sarg(.slice.run(.get.arglist('yaxis', arglist)),
                           side=2))

    if (.garg(arglist,'colorkey')==TRUE) {
        colors <- rev( .garg(arglist,'colorize.palette') )
        max.alpha <- max(unlist(perf@alpha.values))
        min.alpha <- min(unlist(perf@alpha.values))
        col.cutoffs <- rev(seq(min.alpha,max.alpha, length=length( colors )))

        if ( .garg(arglist,'colorkey.pos')=="right") {
            
            ## axis drawing (ticks + labels)
            ## The interval [min.alpha,max.alpha] needs to be mapped onto
            ## the interval [min.y,max.y], rather than onto the interval
            ## [ylim[1],ylim[2]] ! In the latter case, NAs could occur in
            ## approxfun below, because axTicks can be out of the ylim-range
            ## ('yxaxs': 4%region)
            max.y <- max(axTicks(4))
            min.y <- min(axTicks(4))
            alpha.ticks <- .garg( arglist, c("coloraxis.at"))
            if (length(alpha.ticks)==0)
              alpha.ticks <- approxfun(c(min.y, max.y),
                                       c(min.alpha, max.alpha)) ( axTicks(4))
            alpha2y <- approxfun(c(min(alpha.ticks), max(alpha.ticks)),
                                 c(min.y,max.y))
            arglist <-
              .sarg(arglist,
                    coloraxis.labels=.garg(arglist,
                      'cutoff.label.function')(alpha.ticks),
                    coloraxis.at=alpha2y(alpha.ticks))
            
            do.call("axis",
                    .sarg(.slice.run(.get.arglist('coloraxis', arglist)),
                          side=4))

            ## draw colorkey
            ## each entry in display.bool corresponds to one rectangle of
            ## the colorkey.
            ## Only rectangles within the alpha.ticks range are plotted.
            ## y.lower, y.upper, and colors, are the attributes of the visible
            ## rectangles (those for which display.bool=TRUE)
            display.bool <- (col.cutoffs >= min(alpha.ticks) &
                             col.cutoffs < max(alpha.ticks))
            y.lower <- alpha2y( col.cutoffs )[display.bool]
            colors <- colors[display.bool]
            ## NOTE(review): 'length(y.lower>=2)' evaluates the length of a
            ## logical vector, so this condition is true whenever y.lower is
            ## non-empty -- presumably 'length(y.lower)>=2' was intended;
            ## confirm before changing.
            if (length(y.lower>=2)) {
                y.width <- y.lower[2] - y.lower[1]
                y.upper <- y.lower + y.width
                x.left <- .garg(arglist,'xlim')[2] +
                  ((.garg(arglist,'xlim')[2] - .garg(arglist,'xlim')[1]) *
                   (1-.garg(arglist,'colorkey.relwidth'))*0.04)
                x.right <- .garg(arglist,'xlim')[2] +
                  (.garg(arglist,'xlim')[2] -.garg(arglist,'xlim')[1]) * 0.04
                rect(x.left, y.lower, x.right, y.upper,
                     col=colors, border=colors,xpd=NA)
            }
        } else if (.garg(arglist, 'colorkey.pos') == "top") {
            ## axis drawing (ticks + labels)
            max.x <- max(axTicks(3))
            min.x <- min(axTicks(3))
            alpha.ticks <- .garg( arglist, c("coloraxis.at"))
            if (length(alpha.ticks)==0) {
                alpha.ticks <- approxfun(c(min.x, max.x),
                                         c(min.alpha, max.alpha))(axTicks(3))
            }
            alpha2x <- approxfun(c( min(alpha.ticks), max(alpha.ticks)),
                                 c( min.x, max.x))
            arglist <- .sarg(arglist,
                             coloraxis.labels=.garg(arglist,
                               'cutoff.label.function')(alpha.ticks),
                             coloraxis.at= alpha2x(alpha.ticks)) 
            do.call("axis",
                    .sarg(.slice.run( .get.arglist('coloraxis', arglist)),
                          side=3))

            ## draw colorkey
            display.bool <- (col.cutoffs >= min(alpha.ticks) &
                             col.cutoffs < max(alpha.ticks))
            x.left <- alpha2x( col.cutoffs )[display.bool]
            colors <- colors[display.bool]
            if (length(x.left)>=2) {
                x.width <- x.left[2] - x.left[1]
                x.right <- x.left + x.width
                y.lower <- .garg(arglist,'ylim')[2] +
                  (.garg(arglist,'ylim')[2] - .garg(arglist,'ylim')[1]) *
                    (1-.garg(arglist,'colorkey.relwidth'))*0.04
                y.upper <- .garg(arglist,'ylim')[2] +
                  (.garg(arglist,'ylim')[2] - .garg(arglist,'ylim')[1]) * 0.04
                rect(x.left, y.lower, x.right, y.upper,
                     col=colors, border=colors, xpd=NA)
            }
        }
    }
    
    do.call( "box", .slice.run( .get.arglist( 'box', arglist)))
}
+
+## ----------------------------------------------------------------------------
+## plotting performance objects when no curve averaging is wanted
+## ----------------------------------------------------------------------------
+
## Draw each curve of a performance object without averaging. When
## colorize=TRUE, each curve segment is colored according to its cutoff
## value; optionally, cutoff labels are printed at the positions given
## by print.cutoffs.at.
.performance.plot.no.avg <- function( perf, ... ) {

    arglist <- list(...)
    arglist <- .farg(arglist, type= 'l')
    
    if (.garg(arglist, 'colorize') == TRUE) {
        ## map the observed cutoff range onto the color palette; drop the
        ## first boundary so each color covers a half-open interval
        colors <- rev( .garg( arglist, 'colorize.palette') )
        max.alpha <- max(unlist(perf@alpha.values))
        min.alpha <- min(unlist(perf@alpha.values))
        col.cutoffs <- rev(seq(min.alpha,max.alpha, length=length(colors)+1))
        col.cutoffs <- col.cutoffs[2:length(col.cutoffs)]
    }
    
    for (i in 1:length(perf@x.values)) {
        if (.garg(arglist, 'colorize') == FALSE) {
            do.call("plot.xy",
                    .sarg(.slice.run(.get.arglist('plot.xy', arglist), i),
                          xy=(xy.coords(perf@x.values[[i]],
                                        perf@y.values[[i]]))))
        } else {
            ## draw the curve segment by segment, each in the color of its
            ## left endpoint's cutoff
            for (j in 1:(length(perf@x.values[[i]])-1)) {
                segment.coloring <-
                  colors[min(which(col.cutoffs <= perf@alpha.values[[i]][j]))]
                do.call("plot.xy",
                        .sarg(.slice.run(.get.arglist('plot.xy', arglist), i),
                              xy=(xy.coords(perf@x.values[[i]][j:(j+1)],
                                            perf@y.values[[i]][j:(j+1)])),
                              col= segment.coloring))
            }
        }

        print.cutoffs.at <- .garg(arglist, 'print.cutoffs.at',i)
        if (! is.null(print.cutoffs.at)) {
            ## interpolate curve coordinates at the requested cutoffs and
            ## mark them with a point plus a text label
            text.x <- approxfun(perf@alpha.values[[i]], perf@x.values[[i]],
                                rule=2, ties=mean)(print.cutoffs.at)
            text.y <- approxfun(perf@alpha.values[[i]], perf@y.values[[i]],
                                rule=2, ties=mean)(print.cutoffs.at)
            do.call("points",
                    .sarg(.slice.run(.get.arglist('points', arglist),i),
                          x= text.x,
                          y= text.y))
            do.call("text",
                    .farg(.slice.run( .get.arglist('text', arglist),i),
                          x= text.x,
                          y= text.y,
                          labels=(.garg(arglist,
                                        'cutoff.label.function',
                                        i)(print.cutoffs.at))))
        }
    }
}
+
+## ----------------------------------------------------------------------------
+## plotting performance objects when vertical curve averaging is wanted
+## ----------------------------------------------------------------------------
+
## Vertical curve averaging: resample every curve at a common grid of x
## values, average the interpolated y values across curves, and plot the
## averaged curve. Optionally overlays spread estimates (stddev/stderror
## error bars via plotCI, or boxplots) at the x positions given by
## show.spread.at.
.performance.plot.vertical.avg <- function( perf, ...) {
    arglist <- list(...)
    arglist <- .farg(arglist,
                     show.spread.at= (seq(min(unlist(perf@x.values)),
                                          max(unlist(perf@x.values)),
                                          length=11)))
    perf.avg <- perf
    ## common x grid: as many points as the longest individual curve
    x.values <- seq(min(unlist(perf@x.values)), max(unlist(perf@x.values)),
                    length=max( sapply(perf@x.values, length)))
    for (i in 1:length(perf@y.values)) {
        perf.avg@y.values[[i]] <-
          approxfun(perf@x.values[[i]], perf@y.values[[i]],
                    ties=mean, rule=2)(x.values)
    }
    perf.avg@y.values <- list(rowMeans( data.frame( perf.avg@y.values )))
    perf.avg@x.values <- list(x.values)
    ## cutoffs are not meaningful for a vertically averaged curve
    perf.avg@alpha.values <- list()

    ## y.values at show.spread.at (midpoint of error bars )
    show.spread.at.y.values <-
      lapply(as.list(1:length(perf@x.values)),
             function(i) {
                 approxfun(perf@x.values[[i]], perf@y.values[[i]],
                           rule=2,
                           ties=mean)( .garg(arglist, 'show.spread.at'))
             })

    show.spread.at.y.values <- as.matrix(data.frame(show.spread.at.y.values ))
    colnames(show.spread.at.y.values) <- c()
    ## now, show.spread.at.y.values[i,] contains the curve y values at the
    ## sampling x value .garg(arglist,'show.spread.at')[i]
    
    if (.garg(arglist, 'spread.estimate') == "stddev" ||
        .garg(arglist, 'spread.estimate') == "stderror") {
        bar.width <- apply(show.spread.at.y.values, 1, sd)
        if (.garg(arglist, 'spread.estimate') == "stderror") {
            bar.width <- bar.width / sqrt( ncol(show.spread.at.y.values) )
        }
        bar.width <- .garg(arglist, 'spread.scale') * bar.width

        ## NOTE(review): plotCI is expected from the gplots/plotrix family;
        ## confirm which package this build relies on.
        suppressWarnings(do.call("plotCI",
                                 .farg(.sarg(.get.arglist( 'plotCI', arglist),
                                             x=.garg(arglist,
                                               'show.spread.at'),
                                             y=rowMeans(
                                               show.spread.at.y.values),
                                             uiw= bar.width,
                                             liw= bar.width,
                                             err= 'y',
                                             add= TRUE),
                                       gap= 0,
                                       type= 'n')))
    }
    
    if (.garg(arglist, 'spread.estimate') == "boxplot") {
        do.call("boxplot",
                .farg(.sarg(.get.arglist( 'boxplot', arglist),
                            x= data.frame(t(show.spread.at.y.values)),
                            at= .garg(arglist, 'show.spread.at'),
                            add= TRUE,
                            axes= FALSE),
                      boxwex= (1/(2*(length(.garg(arglist,
                                                  'show.spread.at')))))))
        do.call("points",
                .sarg(.get.arglist( 'points', arglist),
                      x= .garg(arglist, 'show.spread.at'),
                      y= rowMeans(show.spread.at.y.values)))
    }
    
    ## plot the averaged curve itself via the no-averaging path
    do.call( ".plot.performance", .sarg(arglist,
                                       perf= perf.avg,
                                       avg= 'none',
                                       add= TRUE))
}
+
+## ----------------------------------------------------------------------------
+## plotting performance objects when horizontal curve averaging is wanted
+## ----------------------------------------------------------------------------
+
+.performance.plot.horizontal.avg <- function( perf, ...) {
+    arglist <- list(...)
+    arglist <- .farg(arglist,
+                     show.spread.at= seq(min(unlist(perf at y.values)),
+                       max(unlist(perf at y.values)),
+                       length=11))
+    perf.avg <- perf
+    y.values <- seq(min(unlist(perf at y.values)), max(unlist(perf at y.values)),
+                    length=max( sapply(perf at y.values, length)))
+    for (i in 1:length(perf at x.values)) {   # resample each curve onto the common y grid
+        perf.avg at x.values[[i]] <- approxfun(perf at y.values[[i]],
+                                            perf at x.values[[i]],
+                                            ties=mean, rule=2)(y.values)
+    }
+    perf.avg at x.values <- list(rowMeans( data.frame( perf.avg at x.values )))
+    perf.avg at y.values <- list(y.values)
+    perf.avg at alpha.values <- list()
+    
+    ## curve x values at the 'show.spread.at' y positions (midpoints of error bars)
+    show.spread.at.x.values <-
+      lapply(as.list(1:length(perf at y.values)),
+             function(i) {
+                 approxfun(perf at y.values[[i]],
+                           perf at x.values[[i]],
+                           rule=2, ties=mean)(.garg(arglist,'show.spread.at'))
+             } )
+    show.spread.at.x.values <- as.matrix(data.frame(show.spread.at.x.values))
+    colnames(show.spread.at.x.values) <- c()
+    ## now, show.spread.at.x.values[i,] contains the curve x values at the
+    ## sampling y value .garg(arglist,'show.spread.at')[i]
+    
+    if (.garg(arglist,'spread.estimate') == 'stddev' ||
+        .garg(arglist,'spread.estimate') == 'stderror') {
+        bar.width <- apply(show.spread.at.x.values, 1, sd)
+        if (.garg(arglist,'spread.estimate')== 'stderror') {
+            bar.width <- bar.width / sqrt( ncol(show.spread.at.x.values) )
+        }
+        bar.width <- .garg(arglist,'spread.scale') * bar.width
+
+        suppressWarnings(do.call("plotCI",
+                                 .farg(.sarg(.get.arglist( 'plotCI', arglist),
+                                             x= rowMeans(
+                                               show.spread.at.x.values),
+                                             y= .garg(arglist,
+                                               'show.spread.at'),
+                                             uiw= bar.width,
+                                             liw= bar.width,
+                                             err= 'x',
+                                             add= TRUE),
+                                       gap= 0,
+                                       type= 'n')))
+    }
+    
+    if (.garg(arglist,'spread.estimate') == "boxplot") {
+        do.call("boxplot",
+                .farg(.sarg(.get.arglist( 'boxplot', arglist),
+                            x= data.frame(t(show.spread.at.x.values)),
+                            at= .garg(arglist,'show.spread.at'),
+                            add= TRUE,
+                            axes= FALSE,
+                            horizontal= TRUE),
+                      boxwex= 1/(2*(length(.garg(arglist,'show.spread.at'))))))
+        do.call("points", .sarg(.get.arglist( 'points', arglist),
+                                x= rowMeans(show.spread.at.x.values),
+                                y= .garg(arglist,'show.spread.at')))
+    }
+    
+    do.call( ".plot.performance", .sarg(arglist,
+                                        perf= perf.avg,
+                                        avg= 'none',
+                                        add= TRUE))
+}
+
+## ----------------------------------------------------------------------------
+## plotting performance objects when threshold curve averaging is wanted
+## ----------------------------------------------------------------------------
+
+.performance.plot.threshold.avg <- function( perf, ...) {
+    arglist <- list(...)
+    arglist <- .farg(arglist,
+                     show.spread.at= seq(min(unlist(perf at x.values)),
+                       max(unlist(perf at x.values)),
+                       length=11))
+
+    perf.sampled <- perf   # curves resampled onto a common alpha (threshold) grid
+    alpha.values <- rev(seq(min(unlist(perf at alpha.values)),
+                            max(unlist(perf at alpha.values)),
+                            length=max( sapply(perf at alpha.values, length))))
+    for (i in 1:length(perf.sampled at y.values)) {
+        perf.sampled at x.values[[i]] <-
+          approxfun(perf at alpha.values[[i]],perf at x.values[[i]],
+                    rule=2, ties=mean)(alpha.values)
+        perf.sampled at y.values[[i]] <-
+          approxfun(perf at alpha.values[[i]], perf at y.values[[i]],
+                    rule=2, ties=mean)(alpha.values)
+    }
+
+    ## compute average curve (pointwise mean of the resampled curves)
+    perf.avg <- perf.sampled
+    perf.avg at x.values <- list( rowMeans( data.frame( perf.avg at x.values)))
+    perf.avg at y.values <- list(rowMeans( data.frame( perf.avg at y.values)))
+    perf.avg at alpha.values <- list( alpha.values )
+    
+    x.values.spread <-
+      lapply(as.list(1:length(perf at x.values)),
+             function(i) {
+                 approxfun(perf at alpha.values[[i]], perf at x.values[[i]],
+                           rule=2, ties=mean)(.garg(arglist,'show.spread.at'))
+             } )
+    x.values.spread <- as.matrix(data.frame( x.values.spread ))
+    y.values.spread <-
+      lapply(as.list(1:length(perf at y.values)),
+             function(i) {
+                 approxfun(perf at alpha.values[[i]], perf at y.values[[i]],
+                           rule=2, ties=mean)(.garg(arglist,'show.spread.at'))
+             } )
+    y.values.spread <- as.matrix(data.frame( y.values.spread ))
+
+    if (.garg(arglist,'spread.estimate')=="stddev" ||
+        .garg(arglist,'spread.estimate')=="stderror") {
+
+        x.bar.width <- apply(x.values.spread, 1, sd)
+        y.bar.width <- apply(y.values.spread, 1, sd)
+        if (.garg(arglist,'spread.estimate')=="stderror") {
+            x.bar.width <- x.bar.width / sqrt( ncol(x.values.spread) )
+            y.bar.width <- y.bar.width / sqrt( ncol(x.values.spread) )   # ncol(x.values.spread) == ncol(y.values.spread)
+        }
+        x.bar.width <- .garg(arglist,'spread.scale') * x.bar.width
+        y.bar.width <- .garg(arglist,'spread.scale') * y.bar.width
+
+        suppressWarnings( do.call("plotCI",
+                                  .farg(.sarg(.get.arglist( 'plotCI', arglist),
+                                              x= rowMeans(x.values.spread),
+                                              y= rowMeans(y.values.spread),
+                                              uiw= x.bar.width,
+                                              liw= x.bar.width,
+                                              err= 'x',
+                                              add= TRUE),
+                                        gap= 0,
+                                        type= 'n')))
+        
+        suppressWarnings( do.call("plotCI",
+                                  .farg(.sarg(.get.arglist( 'plotCI', arglist),
+                                              x= rowMeans(x.values.spread), 
+                                              y= rowMeans(y.values.spread),
+                                              uiw= y.bar.width, 
+                                              liw= y.bar.width, 
+                                              err= 'y', 
+                                              add= TRUE),
+                                        gap= 0,
+                                        type= 'n')))
+    }
+
+    if (.garg(arglist,'spread.estimate')=="boxplot") {
+        do.call("boxplot",
+                .farg(.sarg(.get.arglist('boxplot', arglist),
+                            x= data.frame(t(x.values.spread)),
+                            at= rowMeans(y.values.spread),
+                            add= TRUE,
+                            axes= FALSE,
+                            horizontal= TRUE),
+                      boxwex= 1/(2*(length(.garg(arglist,'show.spread.at'))))))
+        do.call("boxplot",
+                .farg(.sarg(.get.arglist('boxplot', arglist),
+                            x= data.frame(t(y.values.spread)),
+                            at= rowMeans(x.values.spread),
+                            add= TRUE,
+                            axes= FALSE),
+                      boxwex= 1/(2*(length(.garg(arglist,'show.spread.at'))))))
+        do.call("points", .sarg(.get.arglist('points', arglist),
+                                x= rowMeans(x.values.spread),
+                                y= rowMeans(y.values.spread)))
+    }
+    
+    do.call( ".plot.performance", .sarg(arglist,
+                                       perf= perf.avg,
+                                       avg= 'none',
+                                       add= TRUE))
+}
+
diff --git a/R/person.parameter.R b/R/person.parameter.R
new file mode 100755
index 0000000..a2491f7
--- /dev/null
+++ b/R/person.parameter.R
@@ -0,0 +1 @@
+person.parameter <- function(object)UseMethod("person.parameter")
\ No newline at end of file
diff --git a/R/person.parameter.eRm.R b/R/person.parameter.eRm.R
new file mode 100755
index 0000000..0a7b423
--- /dev/null
+++ b/R/person.parameter.eRm.R
@@ -0,0 +1,244 @@
+`person.parameter.eRm` <-
+function(object)
+# estimation of the person parameters with jml
+# object of class eRm
+# se... whether standard errors should be computed
+# splineInt... whether spline interpolation should be carried out
+
+{
+
+se <- TRUE
+splineInt <- TRUE
+options(warn=0)
+
+X <- object$X
+#collapse X
+#X.full <- object$X
+
+max.it <- apply(X,2,max,na.rm=TRUE)                               #maximum item raw score without NA
+rp <- rowSums(X,na.rm=TRUE)                                       #person raw scores
+maxrp <- apply(X,1,function(x.i) {sum(max.it[!is.na(x.i)])})      #maximum item raw score for person i
+TFrow <- ((rp==maxrp) | (rp==0))                                  #flag 0/perfect scorers (no finite JML estimate)
+
+pers.exe <- (1:dim(X)[1])[TFrow]                                  #persons excluded from estimation due to 0/full
+pers.exe.names<-rownames(X)[pers.exe]
+pers.in<-(1:dim(X)[1])[-pers.exe]                                 #persons in estimation
+
+if (length(pers.exe) > 0) {                                 #data matrix persons (full/0) excluded)
+      X.ex <- object$X[-pers.exe,]
+  } else {
+      X.ex <- object$X
+  }
+
+
+if (any(is.na(X))) {
+  dichX <- ifelse(is.na(X),1,0)
+  strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
+  gmemb.X <- as.vector(data.matrix(data.frame(strdata)))
+} else {
+  gmemb.X <- rep(1,dim(X)[1])
+}
+
+if (length(pers.exe) > 0) X <- X[-pers.exe,]
+X.dummy <- X
+
+if (any(is.na(X))) {
+  dichX <- ifelse(is.na(X),1,0)
+  strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
+  gmemb <- as.vector(data.matrix(data.frame(strdata)))
+  gmemb1 <- gmemb
+} else {
+  gmemb <- rep(1,dim(X)[1])
+  gmemb1 <- gmemb
+}
+
+mt_vek <- apply(X,2,max,na.rm=TRUE)             #number of categories - 1 for each item
+mt_ind <- rep(1:length(mt_vek),mt_vek)          #index i for items
+
+indvec <- NULL             #establish matrix with unique raw scores
+for (i in unique(gmemb)) {
+    gmemb.ind <- which(gmemb == i)
+    collapse.vec <- which(!duplicated(rowSums(rbind(X[gmemb==i,]),na.rm = TRUE)))
+    indvec <- c(indvec, gmemb.ind[collapse.vec])
+  }
+#for (i in unique(gmemb)) indvec <- c(indvec,!duplicated(rowSums(rbind(X[gmemb==i,]),na.rm = TRUE)))
+indvec <- sort(indvec)
+X <- X[indvec,]                                 #collapsing X
+
+beta.all <- object$betapar
+
+if (!is.null(object$ngroups))
+  if (object$ngroups > 1) stop("Estimation of person parameters for models with group contrasts not possible!")
+
+if (is.null(object$mpoints))  { mpoints <- 1
+} else {mpoints <- object$mpoints}
+
+r.pall <- rowSums(X,na.rm=TRUE)                 #person raw scores
+
+
+X01 <- object$X01
+if (length(pers.exe) > 0) X01 <- X01[-pers.exe,]   #if persons excluded due to 0/full response
+
+X01 <- X01[indvec,]                                #collapsed version
+gmemb <- gmemb[indvec]                             #collapsed version
+rownames(X01) <- rownames(X)
+
+rowvec <- 1:(dim(X01)[1])
+
+fitlist <- tapply(rowvec,gmemb,function(rind) {         #list with nlm outputs
+
+    if (length(rind) > 1) {
+       ivec <- !is.na(X[rind[1],])                      #non-NA elements
+       r.i <- colSums(X[rind,ivec],na.rm=TRUE)          #item raw scores
+    } else {                                        #if only one person belongs to raw score group
+       ivec <- !is.na(X[rind[1],])
+       r.i <- X[rind,ivec]
+      # r.i <- X[rind,]
+      # r.i[is.na(r.i)] <- 0
+    }
+    #r.i <- colSums(object$X[rind,],na.rm=TRUE)         #item raw scores
+    r.p <- r.pall[rind]                                 #person raw scores for current NA group
+    X01g <- rbind(X01[rind,])
+    beta <- beta.all[!is.na(X01g[1,])]
+    X01beta <- rbind(X01g,beta.all)                     #matrix with unique 0/1 response patterns and beta vector in the last row
+    theta <- rep(0,length(r.p))
+
+    #==================== ML routines ===================================
+    jml.rasch <- function(theta)         #fast ML for RM only
+    {
+      ksi <- exp(theta)
+      denom <- 1/exp(-beta)              #-beta instead of beta since beta are easiness parameter
+      lnL <- sum(r.p*theta)-sum(r.i*(-beta))-sum(log(1+outer(ksi,denom)))
+      -lnL
+    }
+
+    jml <- function(theta)               #ML for all other models
+    {
+        t1t2.list <- tapply(1:(dim(X01beta)[2]),mt_ind, function(xin) {
+                     #xb <- (t(X01beta)[xin,])
+                     xb <- rbind(t(X01beta)[xin,])     #0/1 responses and beta parameters for one item
+                     beta.i <- c(0,xb[,dim(xb)[2]])    #item parameter with 0
+
+                     #person responses (row-wise) on each category for current item
+                     if ((dim(xb)[1] > 1) && (length(xin == 1))) {   # NOTE(review): 'length(xin == 1)' is always >= 1; was 'length(xin) == 1' intended?
+                       x01.i <-  as.matrix(xb[,1:(dim(xb)[2]-1)])
+                     } else {
+                       x01.i <- rbind(xb[,1:(dim(xb)[2]-1)])  #0/1 matrix for item i without beta
+                     }
+
+                     cat0 <- rep(0,dim(x01.i)[2])
+                     cat0[colSums(x01.i)==0] <- 1      #those with 0 on the 1-kth category get a 1
+                     x01.i0 <- rbind(cat0,x01.i)       #appending response vector for 0th category
+
+                     ind.h <- 0:(length(beta.i)-1)
+                     theta.h <- ind.h %*% t(theta)     #n. categories times theta
+                     #!!!FIXME
+                     term1 <- (theta.h+beta.i)*x01.i0  #category-person matrix
+                     t1.i <- sum(colSums(term1))       #sum over categories and persons
+                     #print(t1.i)
+
+                     term2 <- exp(theta.h+beta.i)
+                     t2.i <- sum(log(colSums(term2)))   #sum over categories and persons
+                     #print(t2.i)
+
+                     return(c(t1.i,t2.i))
+                   })
+      termlist <- matrix(unlist(t1t2.list),ncol=2,byrow=TRUE)
+      termlist <- termlist[!is.na(rowSums(termlist)),]
+      st1st2 <- colSums(termlist, na.rm = TRUE) #sum term1, term2
+
+      lnL <- st1st2[1]-st1st2[2]
+      -lnL
+    }
+    #==================== end ML routines ================================
+
+    #==================== call optimizer =================================
+    if (object$model == "RM") {
+      fit <- nlm(jml.rasch,theta,hessian=se,iterlim=1000)
+    } else {
+      fit <- nlm(jml,theta,hessian=se,iterlim=1000)
+    }
+    #fit2 <- optim(theta,jml.rasch,method="BFGS",hessian=TRUE)
+
+    #=================== end call optimizer ==============================
+    loglik <- -fit$minimum
+    niter <- fit$iterations
+    thetapar <- fit$estimate
+    if (se) {
+      se <- sqrt(diag(solve(fit$hessian)))
+    } else {
+      se <- NA
+      fit$hessian <- NA }
+
+list(loglik=loglik,niter=niter,thetapar=thetapar,se=se,hessian=fit$hessian)
+})
+
+
+loglik <- NULL
+niter <- NULL
+npar <- NULL
+thetapar <- list(NULL)
+se.theta <- list(NULL)
+hessian <- list(NULL)
+for (i in 1:length(fitlist)) {
+  loglik <- c(loglik,fitlist[[i]]$loglik)
+  niter <- c(niter,fitlist[[i]]$niter)
+  npar <- c(npar,length(fitlist[[i]]$thetapar))
+  thetapar[[i]] <- fitlist[[i]]$thetapar
+  se.theta[[i]] <- fitlist[[i]]$se
+  hessian[[i]] <- fitlist[[i]]$hessian
+}
+
+if (splineInt) {                                           #cubic spline interpolation for missing, 0, full raw scores
+  x <- rowSums(X,na.rm=TRUE)
+  xlist <- split(x,gmemb)
+  pred.list <- mapply(function(xx,yy) {
+                       y <- tapply(yy,xx, function(xy) {xy[1]})
+                       x <- unique(sort(xx))
+                       if ((length(x) > 3) || (length(y) > 3)) {        #otherwise splinereg is not admissible
+                         fm1 <- interpSpline(x,y)
+                         pred.val <- predict(fm1, 0:sum(max.it))
+                       } else {
+                         warning("Spline interpolation is not performed!\n  Less than 4 different person parameters estimable!\n  Perhaps in (NA) subgroup(s).")
+                         NULL
+                       }},xlist,thetapar,SIMPLIFY=FALSE)
+  X.n <- object$X
+  if (any(sapply(pred.list,is.null)))  pred.list <- NULL                           #no spline interpolation applicable
+
+}
+
+names(thetapar) <- names(se.theta) <- paste("NAgroup",1:length(thetapar),sep="")
+
+#---------expand theta and se.theta, labeling -------------------
+for (i in unique(gmemb)) {
+  o.r <- rowSums(rbind(X.dummy[gmemb1==i,]), na.rm = TRUE)             #original raw scores
+  names(o.r) <- rownames(X.dummy)[gmemb1 == i]
+  c.r <- rowSums(rbind(X[gmemb==i,]), na.rm = TRUE)                     #collapsed raw scores
+  match.ind <- match(o.r, c.r)
+  thetapar[[i]] <- thetapar[[i]][match.ind]           #de-collapse theta's
+  se.theta[[i]] <- se.theta[[i]][match.ind]           #de-collapse se's
+  names(thetapar[[i]]) <- names(se.theta[[i]]) <- names(o.r)
+}
+#--------------- end expand, labeling ---------------------------
+
+
+
+
+#---------------------- theta.table new ----------------------
+thetavec <- unlist(thetapar)
+ind.orig <- as.vector(unlist(tapply(1:length(gmemb1), gmemb1, function(ind) {ind})))
+theta.orig <- tapply(thetavec, ind.orig, function(ii) return(ii))   #original order theta parameter
+theta.table <- data.frame(theta.orig, gmemb1)
+colnames(theta.table) <- c("Person Parameter","NAgroup")
+rownames(theta.table) <- rownames(X.ex)
+
+
+result <- list(X = X.n, X01 = object$X01, X.ex = X.ex, W = object$W, model = object$model,
+               loglik = loglik, loglik.cml = object$loglik, npar = npar, iter = niter, betapar = object$betapar,
+               thetapar = thetapar, se.theta = se.theta, theta.table = theta.table,
+               pred.list = pred.list, hessian = hessian, mpoints = mpoints,
+               pers.ex = pers.exe, gmemb = gmemb1)
+class(result) <- "ppar"
+result
+}
+
diff --git a/R/personfit.R b/R/personfit.R
new file mode 100755
index 0000000..f4b5194
--- /dev/null
+++ b/R/personfit.R
@@ -0,0 +1,3 @@
+`personfit` <-
+function(object)UseMethod("personfit")   # S3 generic: dispatches on class(object), e.g. personfit.ppar
+
diff --git a/R/personfit.ppar.R b/R/personfit.ppar.R
new file mode 100755
index 0000000..ebf0052
--- /dev/null
+++ b/R/personfit.ppar.R
@@ -0,0 +1,42 @@
+`personfit.ppar` <-
+function(object)
+# computes Chi-square based person fit statistics (Smith, p.77ff)
+# for object of class "ppar" (from person.parameter)
+{
+
+  if (length(object$pers.ex)==0) {
+    X <- object$X
+  } else {
+    X <- object$X[-object$pers.ex,]
+  }
+
+  #rp <- rowSums(X,na.rm=TRUE)
+  #mt_vek <- apply(X,2,max,na.rm=TRUE)
+  #maxrp <- sum(mt_vek)
+  #TFrow <- ((rp==maxrp) | (rp==0))              #exclude full and 0 responses
+
+  VE <- pifit.internal(object)                  #compute expectation and variance term
+  Emat <- VE$Emat
+  Vmat <- VE$Vmat
+
+  st.res <- (X-Emat)/sqrt(Vmat)
+  #st.res <- (X[!TFrow,]-Emat)/sqrt(Vmat)
+
+  sq.res <- st.res^2                            #squared standardized residuals
+  pfit <- rowSums(sq.res,na.rm=TRUE)
+
+  pdf <- apply(X,1,function(x) {length(na.exclude(x))})   #df: number of items answered per person
+
+  #pdf <- apply(X[!TFrow,],1,function(x) {length(na.exclude(x))})   #degrees of freedom (number of items per person)
+
+  p.outfitMSQ <- pfit/pdf
+
+  psumVmat<-rowSums(Vmat)
+  p.infitMSQ <- rowSums(sq.res*Vmat, na.rm = TRUE)/psumVmat
+
+  result <- list(p.fit = pfit, p.df = pdf, st.res = st.res, p.outfitMSQ = p.outfitMSQ,
+                 p.infitMSQ = p.infitMSQ)
+  class(result) <- "pfit"
+  result
+}
+
diff --git a/R/pifit.internal.r b/R/pifit.internal.r
new file mode 100755
index 0000000..f35487f
--- /dev/null
+++ b/R/pifit.internal.r
@@ -0,0 +1,40 @@
+pifit.internal <- function(object)
+{
+#object of class ppar
+#function is called in itemfit.ppar and personfit.ppar
+
+
+  X <- object$X
+  mt_vek <- apply(X,2,max,na.rm=TRUE)             #number of categories - 1 for each item
+  mt_ind <- rep(1:length(mt_vek),mt_vek)
+  mt_seq <- sequence(mt_vek)
+  gmemb <- object$gmemb
+
+  pmat <- pmat(object)                          #matrix with model probabilites (local 'pmat' shadows the function after this call)
+
+  #-----------------matrix with expected response patterns--------------
+  Emat.cat <- t(apply(pmat,1,function(x) x*mt_seq))
+  if ((object$model == "RM") || (object$model == "LLTM")) { 
+    Emat <- Emat.cat
+  } else {
+    E.list <- tapply(1:length(mt_ind),mt_ind, function(ind) {rowSums(cbind(Emat.cat[,ind]),na.rm=TRUE)})
+    Emat <- matrix(unlist(E.list),ncol=dim(X)[2],dimnames=list(rownames(pmat),colnames(X)))
+  } 
+  #------------------------variance term for standardized residuals------
+  pmat.l0 <- tapply(1:length(mt_ind),mt_ind, function(ind) {
+                            vec0 <- 1-rowSums(as.matrix(pmat[,ind]))     #prob for 0th category
+                            cbind(vec0,pmat[,ind])
+                            })
+  pmat0 <- matrix(unlist(pmat.l0),nrow=length(gmemb))   #prob matrix 0th category included
+  mt_vek0 <- integer(0)                                 #add 0th category to all indices
+  for (i in mt_vek) mt_vek0 <- c(mt_vek0, 0:i)
+  mt_ind0 <- rep(1:length(mt_vek),mt_vek+1)
+  colnames(Emat) <- NULL
+  Emat0 <- t(apply(Emat[,mt_ind0],1,function(x) {mt_vek0 - x}))
+  Vmat.cat <- (Emat0)^2*pmat0
+  V.list <- tapply(1:length(mt_ind0),mt_ind0, function(ind) {rowSums(Vmat.cat[,ind],na.rm=TRUE)})
+  Vmat <- matrix(unlist(V.list),ncol=dim(X)[2],dimnames=list(rownames(pmat),colnames(X)))
+
+  result <- list(Emat=Emat,Vmat=Vmat)   #value of this assignment is the function's (invisible) return value
+
+}
diff --git a/R/plist.internal.R b/R/plist.internal.R
new file mode 100755
index 0000000..e28fd1b
--- /dev/null
+++ b/R/plist.internal.R
@@ -0,0 +1,25 @@
+`plist.internal` <-
+function(object,theta)
+# computes a list of expected probabilities for objects of class Rm
+# with 0th category included!
+{
+
+X <- object$X
+mt_vek <- apply(X,2,max,na.rm=TRUE)             #number of categories - 1 for each item
+mt_ind <- rep(1:length(mt_vek),mt_vek)
+
+
+#-------- compute list of probability matrices for fixed theta --------
+p.list <- tapply(object$betapar,mt_ind,function(beta.i) {
+                 beta.i <- c(0,beta.i)
+                 ind.h <- 0:(length(beta.i)-1)
+                 theta.h <- ind.h %*% t(theta)          #multiply category index with theta
+                 #tb <- exp(theta.h-beta.i)
+                 tb <- exp(theta.h+beta.i)
+                 denom <- colSums(tb)
+                 pi.mat <- apply(tb,1,function(y) {y/denom})
+                 return(pi.mat)
+               })
+return(p.list)
+}
+
diff --git a/R/plot.ppar.r b/R/plot.ppar.r
new file mode 100755
index 0000000..010c2fd
--- /dev/null
+++ b/R/plot.ppar.r
@@ -0,0 +1,41 @@
+plot.ppar <- function(x,xlab="Person Raw Scores",ylab="Person Parameters (Theta)",main=NULL,...)
+### function (x, y = NULL, type = "p", xlim = NULL, ylim = NULL,
+###     log = "", main = NULL, sub = NULL,
+###     xlab = "Person Raw Scores",
+###     ylab = "Person Parameters (Theta)",
+###     ann = par("ann"), axes = TRUE, frame.plot = axes, panel.first = NULL,
+###     panel.last = NULL, asp = NA, ...)
+# plot of the person raw scores against the person parameters
+# x...object of class "ppar" (resulting from person.parameter.eRm)
+{
+  pl <- x$pred.list                              #list with spline interpolations
+
+  if (is.null(pl)) stop("Spline interpolation required in person.parameter.eRm!")
+
+  X <- x$X
+  if (length(x$pers.ex) > 0) {
+    X <- X[-x$pers.ex,]
+    #gmemb <- x$gmemb[-x$pers.ex]
+  }
+  gmemb <- x$gmemb
+  X.list <- split(as.data.frame(X),as.factor(gmemb))
+
+  if (length(pl) > 1) {
+    for (i in 1:length(pl)) main.text <- paste("Person Parameter Plot of Group",i)   # NOTE(review): only the last i survives, so every panel shares one title — probably meant to set this inside the plotting loop below
+  } else {
+    main.text <- "Plot of the Person Parameters"
+  }
+
+  if (!is.null(main)) main.text <- main
+
+  for (i in 1:length(pl)) {
+
+    #dev.new()
+    plot(rowSums(X.list[[i]],na.rm=TRUE),x$thetapar[[i]],xlim=c(min(pl[[i]]$x),max(pl[[i]]$x)),
+         ylim=c(min(pl[[i]]$y),max(pl[[i]]$y)),xlab=xlab,ylab=ylab,
+         main=main.text,...)
+    lines(pl[[i]]$x,pl[[i]]$y)
+  }
+}
+
+
diff --git a/R/plotCI.R b/R/plotCI.R
new file mode 100755
index 0000000..68a9c65
--- /dev/null
+++ b/R/plotCI.R
@@ -0,0 +1,163 @@
+# $Id: plotCI.R 1318 2009-05-08 21:56:38Z warnes $
+
+
+plotCI <- function (x,
+                    y = NULL,
+                    uiw,
+                    liw = uiw,
+                    ui,
+                    li,
+
+                    err='y',
+                    ylim=NULL,
+                    xlim=NULL,
+                    type="p",
+
+                    col=par("col"),
+                    barcol=col,
+                    pt.bg = par("bg"),
+
+                    sfrac = 0.01,
+                    gap=1,
+
+                    lwd=par("lwd"),
+                    lty=par("lty"),
+
+                    labels=FALSE,
+
+                    add=FALSE,
+                    xlab,
+                    ylab,
+
+                    minbar,
+                    maxbar,
+                    ...
+                    )
+{
+
+  if (is.list(x)) {
+    y <- x$y
+    x <- x$x
+  }
+
+  if(invalid(xlab))
+    xlab <- deparse(substitute(x))
+
+  if(invalid(ylab))
+    {
+      if(is.null(y))
+        {
+          xlab  <- ""
+          ylab <- deparse(substitute(x))
+        }
+      else
+        ylab <- deparse(substitute(y))
+    }
+
+  if (is.null(y)) {
+    if (is.null(x))
+      stop("both x and y NULL")
+    y <- as.numeric(x)
+    x <- seq(along = x)
+  }
+
+
+  if(err=="y")
+    z  <- y
+  else
+    z  <- x
+
+  if(invalid(uiw))
+    uiw <- NA
+  if(invalid(liw))
+    liw <- NA
+
+
+  if(invalid(ui))
+    ui <- z + uiw
+  if(invalid(li))
+    li <- z - liw
+
+  if(!invalid(minbar))
+    li <- ifelse( li < minbar, minbar, li)
+
+  if(!invalid(maxbar))
+    ui <- ifelse( ui > maxbar, maxbar, ui)
+
+   if(err=="y")
+     {
+       if(is.null(ylim))
+         ylim <- range(c(y, ui, li), na.rm=TRUE)
+       if(is.null(xlim) && !is.R() )
+         xlim <- range( x, na.rm=TRUE)
+     }
+   else if(err=="x")
+     {
+       if(is.null(xlim))
+         xlim <- range(c(x, ui, li), na.rm=TRUE)
+       if(is.null(ylim) && !is.R() )
+         ylim <- range( x, na.rm=TRUE)
+     }
+
+  if(!add)
+    {
+      if(invalid(labels) || labels==FALSE )
+        plot(x, y, ylim = ylim, xlim=xlim, col=col,
+             xlab=xlab, ylab=ylab, ...)
+      else
+        {
+          plot(x, y, ylim = ylim, xlim=xlim, col=col, type="n",
+               xlab=xlab, ylab=ylab,  ...)
+          text(x, y, label=labels, col=col, ... )
+        }
+    }
+  if(is.R())
+    myarrows <- function(...) arrows(...)
+# works only using R!!
+# else
+#   myarrows <- function(x1,y1,x2,y2,angle,code,length,...)
+#     {
+#       segments(x1,y1,x2,y2,open=TRUE,...)
+#       if(code==1)
+#         segments(x1-length/2,y1,x1+length/2,y1,...)
+#       else
+#         segments(x2-length/2,y2,x2+length/2,y2,...)
+#     }
+
+  if(err=="y")
+    {
+      if(gap!=FALSE)
+        gap <- strheight("O") * gap
+      smidge <- par("fin")[1] * sfrac
+
+
+      # draw lower bar (from li up to just below the point)
+      if(!is.null(li))
+          myarrows(x , li, x, pmax(y-gap,li), col=barcol, lwd=lwd,
+                 lty=lty, angle=90, length=smidge, code=1)
+      # draw upper bar (from ui down to just above the point)
+      if(!is.null(ui))
+          myarrows(x , ui, x, pmin(y+gap,ui), col=barcol,
+                 lwd=lwd, lty=lty, angle=90, length=smidge, code=1)
+    }
+  else
+    {
+      if(gap!=FALSE)
+        gap <- strwidth("O") * gap
+      smidge <- par("fin")[2] * sfrac
+
+      # draw left bar
+      if(!is.null(li))
+        myarrows(li, y, pmax(x-gap,li), y, col=barcol, lwd=lwd,
+                 lty=lty, angle=90, length=smidge, code=1)
+      if(!is.null(ui))  # draw right bar
+        myarrows(ui, y, pmin(x+gap,ui), y, col=barcol, lwd=lwd,
+                 lty=lty, angle=90, length=smidge, code=1)
+
+    }
+
+  ## _now_ draw the points (to avoid having lines drawn 'through' points)
+  points(x, y, col = col, lwd = lwd, bg = pt.bg, type = type, ...)
+
+  invisible(list(x = x, y = y))
+}
diff --git a/R/plotGOF.LR.R b/R/plotGOF.LR.R
new file mode 100755
index 0000000..93e28ff
--- /dev/null
+++ b/R/plotGOF.LR.R
@@ -0,0 +1,200 @@
`plotGOF.LR` <-
function(x,beta.subset="all", main="Graphical Model Check", xlab=NULL,ylab=NULL,tlab="item",
         ylim=c(-3,3),xlim=c(-3,3),type="p",pos="4", conf=NULL, ctrline=NULL,...)
{
## Graphical model check for an Andersen LR-test object.
## x           ... object of class "LR" (from LRtest)
## beta.subset ... plot only a subset of beta parameters; "all" or an index vector
## tlab        ... labelling: "item" abbreviated beta parameter name, "number" number
##                 from beta par list, "identify" interactive, "none"
## pos         ... where the text label appears relative to the point
## conf        ... confidence ellipses: NULL or list(gamma=0.95, col="red", ia=TRUE, lty="dashed")
## ctrline     ... control lines (confidence bands): NULL or list(gamma=0.95, lty="solid", col="blue")
## ...         ... additional graphic parameters

if (length(x$likgroup) > 2) warning("Only the parameters for the first two subgroups are plotted!")

if (is.null(xlab)) xlab<-paste("Beta for Group: ",x$spl.gr[1],sep="")
if (is.null(ylab)) ylab<-paste("Beta for Group: ",x$spl.gr[2],sep="")

nparg1 <- length(x$betalist[[1]])
nparg2 <- length(x$betalist[[2]])
if (nparg1 != nparg2) stop("Unequal number of parameters in the subgroups! Plot cannot be produced, choose another split in LRtest!")

beta1 <- x$betalist[[1]] * -1  # -1 to obtain difficulty parameters
beta2 <- x$betalist[[2]] * -1

## set up point labels according to 'tlab'; 'textlab' is created for static
## labels, 'labs' for interactive labelling via identify()
if (is.character(beta.subset)) {
  if (beta.subset=="all") {
    beta.subset <- 1:length(beta1)
    switch(EXPR=tlab,
      item=textlab <- substr(names(beta1),6,100),  #remove "beta " from names
      number=textlab <- 1:length(beta1),
      identify=labs <- substr(names(beta1),6,100)
    )
  } else {
    textlab <- beta.subset
  }
} else {
    switch(EXPR=tlab,
      item=textlab <- substr(names(beta1)[beta.subset],6,100),  #remove "beta " from names
      number=textlab <- beta.subset,
      identify=labs <- substr(names(beta1)[beta.subset],6,100)
    )
}

yshift<-0

plot(beta1[beta.subset],beta2[beta.subset],main=main,xlab=xlab,
       ylab=ylab,ylim=ylim,xlim=xlim,type=type,...)
abline(0,1)

## BUG FIX: use inherits=FALSE so a stray 'textlab'/'labs' object in an
## enclosing environment cannot falsely trigger these branches; only the
## locals created by the switch() above should count.
if(exists("textlab", inherits=FALSE)) {
      text(beta1[beta.subset],beta2[beta.subset]+yshift,labels=textlab,pos=pos,...)
}
if(exists("labs", inherits=FALSE)) {
      options(locatorBell = FALSE)
      xycoords <- cbind(beta1[beta.subset], beta2[beta.subset])
      nothing<-identify(xycoords,labels = labs,atpen=TRUE,offset=1)
}

# se's needed for ellipses and control lines

if(is.list(conf) || is.list(ctrline)){

   if(any(is.na(unlist(x$selist)))) {
      warning("Confidence ellipses or control lines cannot be plotted.\n  LR object without standard errors. Use option 'se=TRUE' in LRtest()")
      conf <- ctrline <- NULL
   } else {
      s1 <- x$selist[[1]]
      s2 <- x$selist[[2]]
      v1 <- s1^2
      v2 <- s2^2
      ## BUG FIX: index rows, not elements -- cbind(s1,s2)[beta.subset]
      ## treated the matrix as a vector and only inspected column 1
      suspicious.se<-any(cbind(s1,s2)[beta.subset, ]>10)
      if(suspicious.se){
         warning("Suspicious size of standard error(s).\n  Check model specification, split criterion, data.")
      }
   }

   ## BUG FIX: row-index as above
   if(any(abs(cbind(beta1,beta2)[beta.subset, ])>8)){
      warning("Suspicious size of parameter estimate(s).\n  Check model specification, split criterion, data.")
      ## BUG FIX: the original read if(is.null(conf)) conf$ia <- FALSE,
      ## which *created* a conf list when the user passed conf=NULL and
      ## thereby drew unrequested ellipses; the intent is to disable
      ## interactive mode on suspicious estimates.
      if(!is.null(conf)) conf$ia <- FALSE
   }

}

# confidence ellipses

if(is.list(conf)){

    # (interactive) plot of confidence ellipses

    ## function ellipse() from package car
    ellipse <-
    function (center, shape, radius, center.pch = 19, center.cex = 1.5,
        segments = 51, add = TRUE, xlab = "", ylab = "", las = par("las"),
        col = palette()[2], lwd = 2, lty = 1, ...)
    {
        if (!(is.vector(center) && 2 == length(center)))
            stop("center must be a vector of length 2")
        if (!(is.matrix(shape) && all(2 == dim(shape))))
            stop("shape must be a 2 by 2 matrix")
        angles <- (0:segments) * 2 * pi/segments
        unit.circle <- cbind(cos(angles), sin(angles))
        ellipse <- t(center + radius * t(unit.circle %*% chol(shape)))
        if (add)
            lines(ellipse, col = col, lwd = lwd, lty = lty, ...)
        else plot(ellipse, xlab = xlab, ylab = ylab, type = "l",
            col = col, lwd = lwd, lty = lty, las = las, ...)
        if (center.pch)
            points(center[1], center[2], pch = center.pch, cex = center.cex,
                col = col)
    }

    ## fill in defaults for unspecified ellipse options
    if(is.null(conf$gamma)) conf$gamma <- 0.95
    if(is.null(conf$col)) conf$col <- "red"
    if(is.null(conf$lty)) conf$lty <- "dotted"
    if(is.null(conf$ia)) conf$ia <- FALSE

    z <- qnorm((conf$gamma+1)/2)

    ## pointwise confidence bounds per group
    ci1u <- beta1 + z*s1
    ci1l <- beta1 - z*s1
    ci2u <- beta2 + z*s2
    ci2l <- beta2 - z*s2

    if(conf$ia) {

         identifyEll <- function(x, y, ci1u, ci1l, ci2u,ci2l, v1, v2, conf, n=length(x), ...)
         ## source: example from help("identify")
         ## a function to use identify to select points, and overplot the
         ## points with a confidence ellipse as they are selected
         {
             xy <- xy.coords(x, y); x <- xy$x; y <- xy$y
             sel <- rep(FALSE, length(x)); res <- integer(0)
             while(sum(sel) < n) {
                 ans <- identify(x[!sel], y[!sel], n=1, plot=FALSE, ...)
                 if(!length(ans)) break
                 ans <- which(!sel)[ans]
                 i <- ans
            lines(rep(x[i],2),c(ci2u[i],ci2l[i]),col=conf$col, lty=conf$lty)
            lines(c(ci1u[i],ci1l[i]), rep(y[i],2),col=conf$col,lty=conf$lty)
            ellipse(center=c(x[i],y[i]),matrix(c(v1[i],0,0,v2[i]),2),z,segments=200,center.cex=0.5,lwd=1, col=conf$col)
                 sel[ans] <- TRUE
                 res <- c(res, ans)
             }
         }
         identifyEll(beta1[beta.subset],beta2[beta.subset],
                             ci1u[beta.subset], ci1l[beta.subset], ci2u[beta.subset], ci2l[beta.subset],
                             v1[beta.subset], v2[beta.subset], conf)
    } else {

         # non-interactive: plot of all ellipses at once

         for (i in beta.subset) {
            x<-beta1
            y<-beta2
            lines(rep(x[i],2),c(ci2u[i],ci2l[i]),col=conf$col, lty=conf$lty)
            lines(c(ci1u[i],ci1l[i]), rep(y[i],2),col=conf$col,lty=conf$lty)
            ellipse(center=c(x[i],y[i]),matrix(c(v1[i],0,0,v2[i]),2),z,segments=200,center.cex=0.5,lwd=1, col=conf$col)
         }
    }
}

# 95% control lines (Wright)

if(is.list(ctrline)){

    if(is.null(ctrline$gamma)) ctrline$gamma <- 0.95
    if(is.null(ctrline$col)) ctrline$col <- "blue"
    if(is.null(ctrline$lty)) ctrline$lty <- "solid"

    z <- qnorm((ctrline$gamma+1)/2)

    d<-(beta1+beta2)/2
    se.d<-sqrt(v1+v2)
    ## BUG FIX: the original did d<-sort(d); se.d<-se.d[order(d)] --
    ## order() of the already-sorted d is the identity permutation, so
    ## se.d was never re-paired with its sorted d. Apply one common
    ## permutation to both vectors.
    ord <- order(d)
    d <- d[ord]
    se.d <- se.d[ord]
    upperx<-d-z*se.d/2
    uppery<-d+z*se.d/2
    lines(upperx,uppery, col=ctrline$col, lty=ctrline$lty)
    lines(uppery,upperx, col=ctrline$col, lty=ctrline$lty)

}

}
diff --git a/R/plotGOF.R b/R/plotGOF.R
new file mode 100755
index 0000000..ffb6d66
--- /dev/null
+++ b/R/plotGOF.R
@@ -0,0 +1,3 @@
## S3 generic for graphical goodness-of-fit checks; dispatches on the
## class of 'x' (e.g. plotGOF.LR for LRtest() results).
plotGOF <- function(x, ...) {
  UseMethod("plotGOF")
}
diff --git a/R/plotICC.R b/R/plotICC.R
new file mode 100755
index 0000000..98f39e0
--- /dev/null
+++ b/R/plotICC.R
@@ -0,0 +1,3 @@
## S3 generic for item characteristic curve (ICC) plots; dispatches on the
## class of 'object' (e.g. plotICC.Rm).
plotICC <- function(object, ...) {
  UseMethod("plotICC")
}
diff --git a/R/plotICC.Rm.R b/R/plotICC.Rm.R
new file mode 100755
index 0000000..c1eab1d
--- /dev/null
+++ b/R/plotICC.Rm.R
@@ -0,0 +1,178 @@
`plotICC.Rm` <-
function(object, item.subset = "all", empICC = NULL, empCI = NULL, mplot = NULL,    # ask,mplot added rh 2007-12-01
         xlim = c(-4,4), ylim = c(0,1),
         xlab = "Latent Dimension", ylab = "Probability to Solve", main=NULL,       # main rh 2010-03-06
         col = NULL, lty = 1, legpos = "left", ask = TRUE, ...)

# produces ICC plots
# object of class Rm
#
# item.subset ... "all", item names, or numeric item indices
# empICC ... NULL (no empirical ICC) or a list whose first element is one of
#            "raw","loess","tukey","kernel"; optional elements: smooth
#            (span/bandwidth), type, pch, col, lty for the empirical curve
# empCI  ... NULL or list(clevel, col, lty) -- pointwise CIs (Clopper-Pearson
#            via qbeta, as in prop.test) around the empirical ICC
# mplot  ... put 4 panels on one page (par(mfrow=c(2,2)))
# ask    ... pause between plots (par(ask=TRUE))
# legpos ... keyword position for the category legend, or non-character to
#            suppress the legend
{

  X <- object$X
  # default: one colour per response category (0..max category)
  if (is.null(col)) col <- 1:(max(apply(X,2,max,na.rm=TRUE))+1)

# some sanity checks
# decide whether an empirical ICC can be drawn at all (emp.plot flag);
# only possible for dichotomous RM with a single NA group and at least
# 4 distinct person parameters

if (is.null(empICC)) {
      emp.plot <- FALSE

} else if (!is.element(empICC[[1]], c("raw","loess","tukey","kernel"))) {
      ##empirical[[1]] <- "none"
      emp.plot <- FALSE
      warning('empICC must be one of "raw","loess","tukey","kernel"!\n')

} else  if (object$model != "RM") {
      warning("Empirical ICCs can only be plotted for a dichotomous Rasch model!\n")
      emp.plot <- FALSE

} else {

      th.est <- person.parameter(object)
      thetapar <- th.est$thetapar
      if (length(thetapar)!=1) {      #Too complicated with NA'groups (for each NAgroup separate plots...)
        warning("Empirical ICCs are not produced for different NA groups!\n")
        emp.plot <- FALSE
      } else {
        thetapar.u <- unique(round(unlist(thetapar),5))
        if (length(thetapar.u)<4) {
            warning("No empirical ICCs for less the 4 different person parameters!\n")
        emp.plot <- FALSE
        } else
            emp.plot <- TRUE

      }
}



  theta <- seq(xlim[1],xlim[2],by=0.1)                          #x-axis
  p.list <- plist.internal(object,theta)                        #matrix of probabilities
  th.ord <- order(theta)

  # resolve item.subset into item indices (ivec) and panel titles (textlab)
  if (any(item.subset=="all")) {
    textlab <- colnames(object$X)
    ivec <- 1:length(p.list)
  } else {
      if (is.character(item.subset)) {                         #item names specified
      ivectemp <- t(as.matrix(1:length(p.list)))
      colnames(ivectemp) <- colnames(object$X)
      ivec <- ivectemp[,item.subset]
      textlab <- item.subset
      textlab[ivec] <- textlab                                 # NOTE(review): self-assignment looks odd -- verify intended reordering
      it.legend <- item.subset
    } else {                                                    #numeric vector specified
      textlab <- colnames(object$X)[item.subset]
      textlab[item.subset] <- textlab                           # NOTE(review): same self-assignment pattern as above
      ivec <- item.subset
    }
  }

  if (object$model=="RM") {                                     #Rasch model
    p.list <- lapply(p.list,function(x) {x[,-1]})               #Delete 0-probabilites
    p.mat <- matrix(unlist(p.list),ncol=length(p.list))         #matrix with solving probabilities
    text.ylab <- p.mat[(1:length(theta))[theta==median(theta)],]
  }

  ## plot for non RMs #################
  if (object$model != "RM"){
       if (ask) par("ask"=TRUE)                                 # added rh 2007-12-01
       if (is.null(mplot))  mplot<-FALSE
       if (mplot) par(mfrow=c(2,2))
    for (j in 1:length(ivec)) {                                 # loop for items
         i <- ivec[j]

       yp <- as.matrix(p.list[[i]])
       yy <- yp[th.ord,]                                        # category curves, x-sorted

       if(is.null(main)) main<-paste("ICC plot for item ",textlab[i])    # rh 2010-03-06
       matplot(sort(theta),yy,type="l",lty=lty,col=col,
               #main=paste("ICC plot for item ",textlab[i]),xlim=xlim,  # replaced to allow for user titles rh 2010-03-06
               main=main, xlim=xlim,
               ylim=ylim,xlab=xlab,ylab=ylab,...)
       if (is.character(legpos))
          legend(legpos,legend=paste(c("Category"),0:(dim(yp)[2]-1)), col=col,lty=lty, ...)  # added rh 2007-12-01
    }

  ## plot for  RMs #####################
  } else {

       if (is.null(mplot) && length(ivec)>1)  mplot<-TRUE else mplot<-FALSE  # rh 2010-03-06 no mfrow(2,2) if only 1 item
       if (mplot) par(mfrow=c(2,2))

       if (ask) par("ask"=TRUE)                       # added rh 2007-12-01
    for (j in 1:length(ivec)) {                                 #runs over items
         i <- ivec[j]

       yp <- as.matrix(p.list[[i]])
       yy <- yp[th.ord,]
       if(is.null(main)) main<-paste("ICC plot for item ",textlab[i])    # rh 2010-03-06
       matplot(sort(theta),yy,type="l",lty=lty,col=col,
               #main=paste("ICC plot for item ",textlab[i]),xlim=xlim,  # replaced to allow for user titles rh 2010-03-06
               main=main, xlim=xlim,
               ylim=ylim,xlab=xlab,ylab=ylab,...)
               ##ylim=ylim,xlab=xlab,ylab=ylab,"ask"=TRUE,...)

       ## empirical ICC: relative frequency of a correct response at each
       ## raw score, placed at the estimated person parameter for that score
       if (emp.plot) {
          freq.table <- as.matrix(table(rowSums(X),X[,i]))
          rel.freq <- freq.table[,2]/rowSums(freq.table)
          idx <- as.numeric(rownames(freq.table))
          xy<-cbind(th.est$pred.list[[1]]$y[idx+1],rel.freq)

          # smoothing parameter defaults depend on the smoother chosen
          if(empICC[[1]]=="loess")
               if(!is.null(empICC$smooth)) smooth<-empICC$smooth else smooth<-0.75
          if(empICC[[1]]=="kernel")
               if(!is.null(empICC$smooth)) smooth<-empICC$smooth else smooth<-0.5

          nn <- rowSums(freq.table)
          switch(empICC[[1]],
              "raw"={},
              "loess"={xy[,2]<-loess(xy[,2]~xy[,1],span=smooth)$fitted},#+;cyf<-cbind(xy[,2] * nn, nn)},
              "tukey"={xy[,2]<-smooth(xy[,2])},#;cyf<-cbind(xy[,2] * nn, nn)}
              "kernel"={xy[,2]<-ksmooth(xy[,1],xy[,2],bandwidth=smooth,x.points=xy[,1])[[2]]}
          )
          xy[,2] <- ifelse(xy[,2]>1,1,ifelse(xy[,2]<0,0,xy[,2])) # bounding p in [0,1]

          if(is.null(empICC$type)) empICC$type <- "p"
          if(is.null(empICC$pch)) empICC$pch <- 1
          if(is.null(empICC$col)) empICC$col <- "black"
          if(is.null(empICC$lty)) empICC$lty <- "solid"


          # confidence intervals for empirical ICC
          if(!is.null(empCI)) {
            # functions from prop.test()
            p.L <- function(x, n, alpha) {
                if (x <= 0) 0 else qbeta(alpha, x, n - x + 1)}
            p.U <- function(x, n, alpha) {
                if (x >= n) 1 else qbeta(1 - alpha, x + 1, n - x)}
            CINT <- function(x, n, conf.level){
                alpha <- (1 - conf.level)/2
                c(p.L(x,n, alpha), p.U(x,n, alpha))
            }

            if(is.null(empCI$clevel)) empCI$clevel <- 0.95
            if(is.null(empCI$col)) empCI$col <- "red"
            if(is.null(empCI$lty)) empCI$lty <- "dotted"

            # counts of successes per raw score, then CI per score group
            cyf<-cbind(xy[,2] * nn, nn)
            cy<-apply(cyf,1,function(x) CINT(x[1],x[2],empCI$clevel))

            # vertical CI segments at each point of the empirical ICC
            apply(cbind(xy[,1],t(cy)),1,function(x)segments(x[1],x[2],x[1],x[3],lty=empCI$lty,col=empCI$col))
          }

          # plots the point estimates of the empirical ICC
          lines(xy[,1],xy[,2],type=empICC$type, pch=empICC$pch, col=empICC$col, lty=empICC$lty, ...)


       } # end if(emp.plot)
    }
  }
  ## reset graphics parameters
  par("ask"=FALSE) # added rh 2007-12-01
  par(mfrow=c(1,1))
}
+
diff --git a/R/plotPImap.R b/R/plotPImap.R
new file mode 100755
index 0000000..3684ee6
--- /dev/null
+++ b/R/plotPImap.R
@@ -0,0 +1,109 @@
`plotPImap` <-
function(object, item.subset="all", sorted = FALSE, main="Person-Item Map",
                 latdim="Latent Dimension", pplabel="Person\nParameter\nDistribution",
                 cex.gen=0.7, xrange=NULL, warn.ord=TRUE, irug=TRUE)
#
# Person-item map: item (threshold) locations in the lower panel, the
# distribution of person parameters in the upper panel, on a common axis.
#
# object      ... fitted model of class RM, RSM or PCM
# item.subset ... "all", item names, or numeric item indices
# sorted      ... sort items by location (first threshold column)
# cex.gen     ... character expansion for axis/item labels
# xrange      ... x-axis range; defaults to range of thresholds and thetas
# warn.ord    ... mark items with disordered thresholds by "*" on the right
# irug        ... draw a rug of all threshold locations above the item panel
{
    def.par <- par(no.readonly = TRUE) # save default, for resetting...

# Item-Person map currently only for RM, PCM and RSM

    if ((object$model == "LLTM") || (object$model == "LRSM") || (object$model == "LPCM"))
         stop("Item-Person Map are computed only for RM, RSM, and PCM!")

# compute threshtable (from betapars for dichotomous models) and item names
    if (object$model == "RM" || max(object$X,na.rm=TRUE) < 2){
       dRm <- TRUE
       threshtable<-cbind(object$betapar, object$betapar) * -1 # betapars are easiness parameters
       rownames(threshtable)<-substring(rownames(threshtable), first=6, last=9999)
    } else {
       dRm <- FALSE
       threshtable<-thresholds(object)$threshtable[[1]]
    }
    # restrict to the requested item subset (by name or index)
    tr<-as.matrix(threshtable)
    if (is.character(item.subset)){
       if ( all(item.subset %in% rownames(threshtable)))
          tr<-tr[item.subset,]
       else if(!(item.subset=="all"))
          stop("item.subset misspecified. Use 'all' or vector of valid item indices.")
    } else {
       if ( all(item.subset %in% 1:ncol(tr)) && length(item.subset>1))
          tr<-tr[item.subset,]
       else
          stop("item.subset misspecified. Use 'all' or vector of valid item indices.")
    }

    if (sorted)
      tr<-tr[order(tr[,1],decreasing=FALSE),]

    # first column = item location, remaining columns = thresholds
    loc<-as.matrix(tr[,1])
    tr<-as.matrix(tr[,-1])

    # person parameters unlist in case of several for NA groups
    suppressWarnings(pp<-person.parameter(object))
    theta<-unlist(pp$thetapar)

    # frequency table of distinct person parameter values (upper panel)
    tt<-table(theta)
    ttx<-as.numeric(names(tt))

    yrange <- c(0,nrow(tr)+1)
    if (is.null(xrange))
       xrange<-range(c(tr,theta),na.rm=T)
    # two stacked panels: persons (1/4 height) on top, items below
    nf <- layout(matrix(c(2,1),2,1,byrow=TRUE), heights = c(1,3), T)#, c(0,3), TRUE)
    #layout.show(nf)


    par(mar=c(2.5,4,0,1))
    plot(xrange,yrange, xlim=xrange, ylim=yrange, main="",
        ylab="",type="n", yaxt="n", xaxt="n")#,cex.lab=0.7,lheight=0.1)
    axis(2,at=1:nrow(tr),labels=rev(rownames(tr)),las=2,cex.axis=cex.gen)
    axis(1,at=seq(floor(xrange[1]),ceiling(xrange[2])),cex.axis=cex.gen,padj=-1.5)
    mtext(latdim,1,1.2,cex=cex.gen+.1)
    #mtext("low",1,1,at=ceiling(xrange[2]),cex=.7)
    #mtext("high",1,1,at=floor(xrange[1]),cex=.7)

### BEGIN irug

    if(irug == TRUE){
      y.offset <- nrow(tr)*.0275

      # rug ticks for all (non-missing) thresholds at the top of the panel
      tr.rug <- as.numeric(tr)
      if(any(is.na(tr.rug))) tr.rug <- tr.rug[-which(is.na(tr.rug))]
      segments(tr.rug,rep(yrange[2],length(tr.rug))+y.offset,
               tr.rug,rep(yrange[2],length(tr.rug))+100)
    }

### END irug

    # one row per item (plotted bottom-up); warn[j] flags disordered thresholds
    warn<-rep(" ",nrow(tr))
    for (j in 1:nrow(tr)){
      i<-nrow(tr)+1-j
      assign("trpoints",tr[i,!is.na(tr[i,])])
      npnts<-length(trpoints)
      points(sort(trpoints),rep(j,npnts),type="b")
      if (dRm) {
         lines(xrange*1.5,rep(j,2),lty="dotted")
         text(sort(trpoints),rep(j,npnts),rownames(tr)[i], cex=cex.gen,pos=3) # different labelling for dRm
      } else {
         #lines(xrange*1.5,rep(j,2),lty="dotted")
         text(sort(trpoints),rep(j,npnts),(1:npnts)[order(trpoints)],cex=cex.gen,pos=1)
         if(!all(sort(trpoints)==trpoints)) warn[j]<-"*"

      }
      points(loc[i],j,pch=20,cex=1.5) # plot item locations
      text(loc[i],j,rev(rownames(tr)[i]),cex=cex.gen,pos=3)

    }
    if (warn.ord) axis(4,at=1:nrow(tr),tick=FALSE, labels=warn, padj=-1.5)#,cex.axis=cex.gen)

    # person parameters
    par(mar=c(0,4,3,1))
    #hist(theta,main=title,axes=FALSE, ylab="", xlim=xrange, col="lightgray")
    plot(ttx,tt,type="n", main=main, axes=FALSE, ylab="", xlim=xrange, ylim=c(0,max(tt)))
    points(ttx,tt,type="h", col="gray", lend=2,lwd=5)
    #axis(4)
    mtext(pplabel,2,0.5,las=2,cex=cex.gen)
    box()

    par(def.par)
}
+
diff --git a/R/plotjointICC.R b/R/plotjointICC.R
new file mode 100755
index 0000000..ad9d5fb
--- /dev/null
+++ b/R/plotjointICC.R
@@ -0,0 +1,3 @@
## S3 generic: joint ICC plot of several items in one panel; dispatches on
## the class of 'object' (plotjointICC.dRm for dichotomous Rasch models).
plotjointICC <- function(object, ...) {
  UseMethod("plotjointICC")
}
diff --git a/R/plotjointICC.dRm.R b/R/plotjointICC.dRm.R
new file mode 100755
index 0000000..66c8aac
--- /dev/null
+++ b/R/plotjointICC.dRm.R
@@ -0,0 +1,54 @@
`plotjointICC.dRm` <-
function(object, item.subset = "all", legend=TRUE, xlim=c(-4,4),ylim=c(0,1),
         xlab="Latent Dimension",ylab="Probability to Solve",lty=1,legpos="left",
         main="ICC plot",col=NULL,...)


#produces one common ICC plot for Rasch models only
#object of class "dRm"
#item.subset...specify items that have to be plotted; if NA, all items are used
#legend...if legend should be plotted
# legpos ... keyword position of the legend (see legend()); if not a
#            character, neither legend nor curve labels are drawn
# If legend=FALSE, item labels are written directly onto the curves at the
# median of the theta grid instead of drawing a legend box.

{
  theta <- seq(xlim[1],xlim[2],by=0.1)    # theta grid on which ICCs are evaluated

  # restrict betapar and data to the requested items (by name or index)
  if (any(item.subset=="all")) {
    it.legend <- 1:dim(object$X)[2]
  } else {
    if (is.character(item.subset)) {
      it.legend <- item.subset
      betatemp <- t(as.matrix(object$betapar))
      colnames(betatemp) <- colnames(object$X)
      object$betapar <- betatemp[,item.subset]
    } else {
      it.legend <- colnames(object$X)[item.subset]
      object$betapar <- object$betapar[item.subset]
    }
    object$X <- object$X[,item.subset]                            #pick out items defined in itemvec
  }


  th.ord <- order(theta)

  p.list <- plist.internal(object,theta)
  p.list <- lapply(p.list,function(x) {x[,-1]})               #Delete 0-probabilites
  p.mat <- matrix(unlist(p.list),ncol=length(p.list))         #one column of solving probabilities per item
  text.ylab <- p.mat[(1:length(theta))[theta==median(theta)],]

  #dev.new()

  if(is.null(main)) main=""
  if(is.null(col)) col=1:(dim(p.mat)[2])
  #pmICCs<-cbind(sort(theta),p.mat[th.ord,])
  matplot(sort(theta),p.mat[th.ord,],type="l",lty=lty,col=col,
          main=main,xlim=xlim,ylim=ylim,xlab=xlab,ylab=ylab,...)
  if (is.character(legpos)){
     if (!legend) {
         #text(x=median(theta),y=text.ylab,labels=paste("I",1:(dim(p.mat)[2]),sep=""),col=1:(dim(p.mat)[2]))
         text(x=median(theta),y=text.ylab,labels=it.legend,col=1:(dim(p.mat)[2]))
     } else {
         legend(legpos,legend=paste("Item",it.legend),col=1:(dim(p.mat)[2]),lty=lty,...)
     }
  }
}
+
diff --git a/R/pmat.R b/R/pmat.R
new file mode 100755
index 0000000..c8a1767
--- /dev/null
+++ b/R/pmat.R
@@ -0,0 +1,3 @@
## S3 generic: matrix of expected category probabilities; dispatches on the
## class of 'object' (pmat.ppar for person-parameter objects, otherwise
## pmat.default, which raises an informative error).
pmat <- function(object) {
  UseMethod("pmat")
}
diff --git a/R/pmat.default.R b/R/pmat.default.R
new file mode 100755
index 0000000..f5ce821
--- /dev/null
+++ b/R/pmat.default.R
@@ -0,0 +1,4 @@
`pmat.default` <-
function(object)
{
  ## Fallback method: pmat() is only defined for "ppar" objects, so any
  ## other input is rejected with an informative error.
  stop("pmat() requires object of class 'ppar', obtained from person.parameter()")
}
diff --git a/R/pmat.ppar.R b/R/pmat.ppar.R
new file mode 100755
index 0000000..7a93e84
--- /dev/null
+++ b/R/pmat.ppar.R
@@ -0,0 +1,64 @@
`pmat.ppar` <-
function(object)
# computes a list of expected probabilities for objects of class "ppar" for each NA-subgroup
# without category!
#
# object ... object of class "ppar" (from person.parameter())
# Returns a persons x item-categories matrix of expected probabilities
# (0th category dropped); cells that were missing in the data are NA, and
# rows are re-ordered to correspond to object$X.
{

X <- object$X
mt_vek <- apply(X,2,max,na.rm=TRUE)             #number of categories - 1 for each item
mt_ind <- rep(1:length(mt_vek),mt_vek)          #item index, repeated once per category

rp <- rowSums(X,na.rm=TRUE)
maxrp <- sum(mt_vek)
TFrow <- ((rp==maxrp) | (rp==0))                # NOTE(review): flags perfect/zero raw scores but is not used below -- confirm

# expected probability matrix per NA-subgroup: for each item, the category
# probabilities follow the multinomial logit exp(k*theta + beta_k) / sum
pmat.l <- lapply(object$thetapar, function(theta1) {                       #runs over missing structures
             theta <- theta1
             p.list <- tapply(object$betapar,mt_ind,function(beta.i) {     #matrices of expected prob as list (over items)
                     beta.i <- c(0,beta.i)                                 #0th category fixed at 0
                     ind.h <- 0:(length(beta.i)-1)
                     theta.h <- ind.h %*% t(theta)
                     tb <- exp(theta.h+beta.i)
                     denom <- colSums(tb)                                  #normalizing constant per person
                     pi.mat <- apply(tb,1,function(y) {y/denom})
                     return(pi.mat)
                   })

    p.list0 <- lapply(p.list,function(pl) {rbind(pl)[,-1]})               #delete 0th category
    pmat <- matrix(unlist(p.list0),nrow=length(theta1))      #save as matrix
    return(pmat)
  })

#----------item-category labels----------
cnames <- substr(names(object$betapar),6,40)
for (i in 1:length(pmat.l)) dimnames(pmat.l[[i]]) <- list(names(object$thetapar[[i]]),cnames)
#-----------end labels-------

# drop excluded persons (perfect/zero scores) from the data matrices
if (length(object$pers.ex) > 0) {
      X <- object$X[-object$pers.ex,]                                        #list with raw scores
      X01 <- object$X01[-object$pers.ex,]
  } else {
      X <- object$X
      X01 <- object$X01
  }

NApos <- tapply(1:length(object$gmemb),object$gmemb,function(ind) {   #positions for NA replacement
                       xvec <- X01[ind[1],]
                       which(is.na(xvec))
                       })

# stack the per-subgroup matrices, re-inserting NA where data were missing
pmat <- NULL
for (i in 1:length(pmat.l)) {
       pmat.l[[i]][,NApos[[i]]] <- NA            #insert NA's
       pmat <- rbind(pmat,pmat.l[[i]])
       }

#-------------- reorder the p-matrix ---------------
ind.orig <- as.vector(unlist(tapply(1:length(object$gmemb), object$gmemb, function(ind) {ind})))
pmat.orig.list <- by(pmat, ind.orig, function(ii) return(ii))
pmat.orig <- as.matrix(do.call(rbind, pmat.orig.list))      #final P-matrix (corresponding to X)
rownames(pmat.orig) <- rownames(X)

return(pmat.orig)
}
+
diff --git a/R/predict.ppar.R b/R/predict.ppar.R
new file mode 100755
index 0000000..340442e
--- /dev/null
+++ b/R/predict.ppar.R
@@ -0,0 +1,26 @@
predict.ppar <- function(object, cutpoint = "randomized", ...)
{
## Predict 0/1 response patterns for objects of class "ppar".
## cutpoint ... "randomized" (compare each model probability against an
##              independent uniform draw) or a fixed value in [0,1]
## Returns an N x K 0/1 matrix with the dimnames of the observed data.
## Dichotomous models only.

  expected <- pmat(object)                     # model probabilities
  observed <- object$X.ex                      # observed responses
  if (max(observed, na.rm = TRUE) > 1) stop("Available for dichotomous models only!")

  n.items   <- dim(observed)[2]
  n.persons <- dim(observed)[1]

  obs.vec  <- as.vector(t(observed))           # row-wise unrolled observations
  prob.vec <- as.vector(t(expected))

  ## threshold each probability is compared against
  if (cutpoint == "randomized") {
    thresh <- runif(length(obs.vec))
  } else {
    thresh <- rep(cutpoint, length(obs.vec))
  }

  pred.vec <- as.numeric(thresh < prob.vec)    # expected 0/1 vector
  pred.mat <- matrix(pred.vec, ncol = n.items, nrow = n.persons, byrow = TRUE)
  dimnames(pred.mat) <- list(rownames(observed), colnames(observed))
  pred.mat
}
diff --git a/R/prediction.R b/R/prediction.R
new file mode 100755
index 0000000..faaa3f0
--- /dev/null
+++ b/R/prediction.R
@@ -0,0 +1,179 @@
prediction <- function(predictions, labels, label.ordering=NULL) {
## Build an S4 "prediction" object (ROCR interface) from scores and labels.
##
## predictions ... numeric scores: vector, matrix, data frame or list; each
##                 column / list entry represents one cross-validation run
## labels      ... true class labels in the same layout as 'predictions'
## label.ordering ... optional explicit class ordering c(negative, positive);
##                 must not be supplied when 'labels' is already ordered
##
## Returns new("prediction", ...) holding, per run, the sorted cutoffs and
## the fp/tp/fn/tn counts at each cutoff. Binary classification only.

    ## bring 'predictions' and 'labels' into list format,
    ## each list entry representing one x-validation run

    ## convert predictions into canonical list format
    if (is.data.frame(predictions)) {
        names(predictions) <- c()
        predictions <- as.list(predictions)
    } else if (is.matrix(predictions)) {
        predictions <- as.list(data.frame(predictions))
        names(predictions) <- c()
    } else if (is.vector(predictions) && !is.list(predictions)) {
        predictions <- list(predictions)
    } else if (!is.list(predictions)) {
        stop("Format of predictions is invalid.")
    }
    ## if predictions is a list -> keep unaltered

    ## convert labels into canonical list format
    if (is.data.frame(labels)) {
        names(labels) <- c()
        labels <- as.list( labels)
    } else if (is.matrix(labels)) {
        labels <- as.list( data.frame( labels))
        names(labels) <- c()
    } else if ((is.vector(labels) ||
                is.ordered(labels) ||
                is.factor(labels)) &&
               !is.list(labels)) {
        labels <- list( labels)
    } else if (!is.list(labels)) {
        stop("Format of labels is invalid.")
    }
    ## if labels is a list -> keep unaltered

    ## Length consistency checks
    if (length(predictions) != length(labels))
      stop(paste("Number of cross-validation runs must be equal",
                 "for predictions and labels."))
    if (! all(sapply(predictions, length) == sapply(labels, length)))
      stop(paste("Number of predictions in each run must be equal",
                 "to the number of labels for each run."))

    ## only keep prediction/label pairs that are finite numbers
    for (i in 1:length(predictions)) {
        finite.bool <- is.finite( predictions[[i]] )
        predictions[[i]] <- predictions[[i]][ finite.bool ]
        labels[[i]] <- labels[[i]][ finite.bool ]
    }

    ## abort if 'labels' format is inconsistent across
    ## different cross-validation runs
    label.format=""  ## one of 'normal','factor','ordered'
    if (all(sapply( labels, is.factor)) &&
        !any(sapply(labels, is.ordered))) {
        label.format <- "factor"
    } else if (all(sapply( labels, is.ordered))) {
        label.format <- "ordered"
    } else if (all(sapply( labels, is.character)) ||
               all(sapply( labels, is.numeric)) ||
               all(sapply( labels, is.logical))) {
        label.format <- "normal"
    } else {
        stop(paste("Inconsistent label data type across different",
                   "cross-validation runs."))
    }

    ## abort if levels are not consistent across different
    ## cross-validation runs
    if (! all(sapply(labels, levels)==levels(labels[[1]])) ) {
        stop(paste("Inconsistent factor levels across different",
                   "cross-validation runs."))
    }

    ## convert 'labels' into ordered factors, aborting if the number
    ## of classes is not equal to 2.
    levels <- c()
    if ( label.format == "ordered" ) {
        if (!is.null(label.ordering)) {
            stop(paste("'labels' is already ordered. No additional",
                       "'label.ordering' must be supplied."))
        } else {
            levels <- levels(labels[[1]])
        }
    } else {
        if ( is.null( label.ordering )) {
            if ( label.format == "factor" ) levels <- sort(levels(labels[[1]]))
            else levels <- sort( unique( unlist( labels)))
        } else {
          if (!setequal( unique(unlist(labels)), label.ordering )) {
            stop("Label ordering does not match class labels.")
          }
          levels <- label.ordering
        }
        for (i in 1:length(labels)) {
            ## BUG FIX: the original tested is.factor(labels) -- the whole
            ## list, which is never a factor -- so this branch was dead
            ## code; test the current run's labels instead.
            if (is.factor(labels[[i]]))
              labels[[i]] <- ordered(as.character(labels[[i]]),
                                     levels=levels)
            else labels[[i]] <- ordered( labels[[i]], levels=levels)
        }

    }

    if (length(levels) != 2) {
        message <- paste("Number of classes is not equal to 2.\n",
                         "ROCR currently supports only evaluation of ",
                         "binary classification tasks.",sep="")
        stop(message)
    }

    ## determine whether predictions are continuous or categorical
    ## (in the latter case stop; scheduled for the next ROCR version)
    if (!is.numeric( unlist( predictions ))) {
        stop("Currently, only continuous predictions are supported by ROCR.")
    }

    ## compute cutoff/fp/tp data per run; levels[2] is the positive class,
    ## levels[1] the negative class
    cutoffs <- list()
    fp <- list()
    tp <- list()
    fn <- list()
    tn <- list()
    n.pos <- list()
    n.neg <- list()
    n.pos.pred <- list()
    n.neg.pred <- list()
    for (i in 1:length(predictions)) {
        n.pos <- c( n.pos, sum( labels[[i]] == levels[2] ))
        n.neg <- c( n.neg, sum( labels[[i]] == levels[1] ))
        ans <- .compute.unnormalized.roc.curve( predictions[[i]], labels[[i]] )
        cutoffs <- c( cutoffs, list( ans$cutoffs ))
        fp <- c( fp, list( ans$fp ))
        tp <- c( tp, list( ans$tp ))
        fn <- c( fn, list( n.pos[[i]] - tp[[i]] ))
        tn <- c( tn, list( n.neg[[i]] - fp[[i]] ))
        n.pos.pred <- c(n.pos.pred, list(tp[[i]] + fp[[i]]) )
        n.neg.pred <- c(n.neg.pred, list(tn[[i]] + fn[[i]]) )
    }

    return( new("prediction", predictions=predictions,
                labels=labels,
                cutoffs=cutoffs,
                fp=fp,
                tp=tp,
                fn=fn,
                tn=tn,
                n.pos=n.pos,
                n.neg=n.neg,
                n.pos.pred=n.pos.pred,
                n.neg.pred=n.neg.pred))
}
+
+
## Fast computation of the unnormalized ROC curve (cutoffs, fp, tp) based
## on cumulative sums over the score-sorted labels. The second factor
## level of 'labels' is the positive class, the first the negative class.
.compute.unnormalized.roc.curve <- function( predictions, labels ) {
    lev <- levels(labels)
    positive <- lev[2]
    negative <- lev[1]

    ## sort by decreasing score; counts accumulate as the cutoff is lowered
    score.order <- order(predictions, decreasing=TRUE)
    sorted.scores <- predictions[score.order]
    sorted.labels <- labels[score.order]

    true.pos  <- cumsum(sorted.labels == positive)
    false.pos <- cumsum(sorted.labels == negative)

    ## collapse tied scores: keep only the LAST entry of each run of
    ## duplicates (duplicated() keeps the first occurrence, hence the
    ## double rev); prepend the Inf cutoff where tp = fp = 0.
    keep <- !rev(duplicated(rev(sorted.scores)))

    list(cutoffs = c(Inf, sorted.scores[keep]),
         fp      = c(0, false.pos[keep]),
         tp      = c(0, true.pos[keep]))
}
diff --git a/R/print.ICr.r b/R/print.ICr.r
new file mode 100755
index 0000000..ee53880
--- /dev/null
+++ b/R/print.ICr.r
@@ -0,0 +1,8 @@
+print.ICr <- function(x,...)
+{
+#print method for objects of class "ICr" (from function "IC")
+# x$ICtable ... table of information criteria as assembled by IC()
+
+ cat("\nInformation Criteria: \n")
+ print(x$ICtable)
+ cat("\n")
+}
\ No newline at end of file
diff --git a/R/print.LR.R b/R/print.LR.R
new file mode 100755
index 0000000..423063a
--- /dev/null
+++ b/R/print.LR.R
@@ -0,0 +1,12 @@
+`print.LR` <-
+function(x,...)
+{
+#print method for object of class "LR" (LRtest)
+#prints the LR statistic, its chi-square df, and the p-value
+  cat("\n")
+  cat("Andersen LR-test: \n")
+  cat("LR-value:", round(x$LR,3),"\n")
+  cat("Chi-square df:",x$df,"\n")
+  cat("p-value: ",round(x$pvalue,3),"\n")
+  cat("\n")
+}
+
diff --git a/R/print.MLoef.r b/R/print.MLoef.r
new file mode 100755
index 0000000..5deeabe
--- /dev/null
+++ b/R/print.MLoef.r
@@ -0,0 +1,22 @@
+print.MLoef <- function(x,...)
+{
+#print method for object of class "MLoef" (MLoef)
+
+# prepare message for split criterion; default ensures spl is always defined
+  spl <- "user-defined"
+  if( length(x$splitcr) == 1){
+    if( (x$splitcr == "median") | (x$splitcr == "mean")){ spl <- x$splitcr }
+  }
+#
+#  if(!is.null(x$warning)){
+#    if(x$splitcr == "median") cat("Warning: Item(s)",paste(names(x$warning),collapse=", "),"with raw score equal to the median assigned to the lower raw score group!\n")
+#    if(x$splitcr == "mean") cat("Warning: Item(s)",paste(names(x$warning),collapse=", "),"with raw score equal to the mean assigned to the lower raw score group!\n")
+#  }
+  cat("\n")
+  cat("Martin-Loef-Test (split criterion: ",spl,")\n",sep="")
+
+  cat(paste("LR-value:",round(x$LR,3),"\n"))
+  cat(paste("Chi-square df:",round(x$df,3),"\n"))
+  cat(paste("p-value:",round(x$p.value,3)),"\n")
+  cat("\n")
+}
diff --git a/R/print.eRm.R b/R/print.eRm.R
new file mode 100755
index 0000000..6d8cd88
--- /dev/null
+++ b/R/print.eRm.R
@@ -0,0 +1,27 @@
+`print.eRm` <-
+function(x,...)  {                                         #print method for all models
+# x ... fitted eRm model object; prints call, conditional log-likelihood,
+# and the eta parameters with standard errors; invisibly returns the
+# 2-row matrix (Estimate / Std.Err) that is printed
+  cat("\n")
+  cat("Results of", x$model, "estimation: \n")
+  cat("\n")
+  cat("Call: ", deparse(x$call), "\n")
+  cat("\n")
+  cat("Conditional log-likelihood:", x$loglik, "\n")
+  cat("Number of iterations:", x$iter, "\n")
+  cat("Number of parameters:", x$npar, "\n")
+  cat("\n")
+  if (x$model %in% c("RM","RSM","PCM"))                    #eta parameters
+      cat("Item (Category) Difficulty Parameters (eta):")  # new labelling rh 25-03-2010
+  else                                                     # now difficulty for RM, RSM, PCM
+      cat("Basic Parameters eta:")
+  cat("\n")
+  etapar <- x$etapar
+  #nameeta <- paste("eta",1:dim(x$W)[2])
+  se <- x$se.eta
+  result <- rbind(etapar, se)
+  #colnames(result) <- nameeta
+  rownames(result) <- c("Estimate", "Std.Err")
+  print(result)
+  cat("\n\n")
+  invisible(result)
+}
+
diff --git a/R/print.gof.R b/R/print.gof.R
new file mode 100755
index 0000000..616bdf8
--- /dev/null
+++ b/R/print.gof.R
@@ -0,0 +1,10 @@
+print.gof <- function(x, ...)
+{
+  #print method for objects of class "gof" (from gofIRT.ppar)
+  #prints the collapsed deviance test, Pearson R2, and area under the ROC
+  cdv <- round(x$test.table[1,], 3)   #first row: collapsed deviance (value, df, p)
+  cat("\nGoodness-of-Fit Results:")
+  cat("\nCollapsed Deviance = ", cdv[1], " (df = ", cdv[2], ", p-value = ", cdv[3], ")", sep ="")
+  cat("\nPearson R2:", round(x$R2$R2.P, 3))
+  cat("\nArea Under ROC:", round(x$AUC, 3))
+  cat("\n\n")
+}
\ No newline at end of file
diff --git a/R/print.ifit.R b/R/print.ifit.R
new file mode 100755
index 0000000..827363d
--- /dev/null
+++ b/R/print.ifit.R
@@ -0,0 +1,17 @@
+`print.ifit` <-
+function(x, visible=TRUE, ...)
+# print method for itemfit
+# x...object of class "ifit" from (itemfit)
+# visible...if TRUE the table is printed, otherwise only returned invisibly
+# invisibly returns the table: Chisq, df, p-value, Outfit MSQ, Infit MSQ
+{
+  pvalues <- 1-pchisq(x$i.fit,x$i.df-1)  # df correction rh 10-01-20
+  coef.table <- cbind(round(x$i.fit,3),x$i.df-1,round(pvalues,3),round(x$i.outfitMSQ,3),round(x$i.infitMSQ,3))
+  colnames(coef.table) <- c("Chisq","df","p-value","Outfit MSQ", "Infit MSQ" )
+  rownames(coef.table) <- names(x$i.fit)
+  if (visible){       # added rh 10-01-20
+    cat("\nItemfit Statistics: \n")
+    print(coef.table)
+    cat("\n")
+  }
+  invisible(coef.table)
+}
+
diff --git a/R/print.logLik.eRm.r b/R/print.logLik.eRm.r
new file mode 100755
index 0000000..be80f1a
--- /dev/null
+++ b/R/print.logLik.eRm.r
@@ -0,0 +1,8 @@
+`print.logLik.eRm` <-
+function (x, digits = getOption("digits"),...)
+{
+    #print method for "logLik.eRm": conditional log-likelihood and its df
+    cat("Conditional log Lik.: ", format(x$loglik, digits = digits),
+        " (df=", format(x$df), ")\n", sep = "")
+    invisible(x)
+}
+
diff --git a/R/print.logLik.ppar.r b/R/print.logLik.ppar.r
new file mode 100755
index 0000000..6782ec8
--- /dev/null
+++ b/R/print.logLik.ppar.r
@@ -0,0 +1,7 @@
+`print.logLik.ppar` <-
+function (x, digits = getOption("digits"),...)
+{
+    #print method for "logLik.ppar": unconditional (joint) log-likelihood and df
+    cat("Unconditional (joint) log Lik.: ", format(x$loglik, digits = digits),
+        " (df=", format(x$df), ")\n", sep = "")
+    invisible(x)
+}
diff --git a/R/print.pfit.R b/R/print.pfit.R
new file mode 100755
index 0000000..29cb3b8
--- /dev/null
+++ b/R/print.pfit.R
@@ -0,0 +1,17 @@
+`print.pfit` <-
+function(x, visible=TRUE, ...)
+# print method for personfit
+# x...object of class "pfit" from (personfit)
+# visible...if TRUE the table is printed, otherwise only returned invisibly
+# invisibly returns the table: Chisq, df, p-value, Outfit MSQ, Infit MSQ
+{
+  pvalues <- 1-pchisq(x$p.fit,x$p.df-1)  # df correction rh 10-01-20
+  coef.table <- cbind(round(x$p.fit,3),x$p.df-1,round(pvalues,3),round(x$p.outfitMSQ,3),round(x$p.infitMSQ,3))
+  colnames(coef.table) <- c("Chisq","df","p-value","Outfit MSQ", "Infit MSQ" )
+  rownames(coef.table) <- names(x$p.fit)
+  if (visible){       # added rh 10-01-20
+     cat("\nPersonfit Statistics: \n")
+     print(coef.table)
+     cat("\n")
+  }
+  invisible(coef.table)
+}
+
diff --git a/R/print.ppar.R b/R/print.ppar.R
new file mode 100755
index 0000000..30bd031
--- /dev/null
+++ b/R/print.ppar.R
@@ -0,0 +1,72 @@
+`print.ppar` <-
+function(x,...)
+# print method for person.parameter
+# x...object of class ppar
+# prints, for each NA group, a table of raw score, theta estimate and
+# std. error (spline-interpolated values included when x$pred.list is set);
+# invisibly returns the list of these tables, one element per NA group
+{
+  cat("\n")
+  cat("Person Parameters:")
+  cat("\n")
+  
+  if (length(x$pers.ex) > 0) {    
+      X <- x$X[-x$pers.ex,]                                        #list with raw scores
+      sumlist <- by(x$X[-x$pers.ex,],x$gmemb,rowSums,na.rm=TRUE)
+    } else {
+      X <- x$X
+      sumlist <- by(x$X,x$gmemb,rowSums,na.rm=TRUE)
+    }
+  
+  if (is.null(x$pred.list)) {                                       #no spline Interpolation
+    coef.list <-  mapply(function(sm,th,se) {
+                           th.u <- tapply(th,sm, function(tm) {tm[1]})     #due to rounding errors, pck out first one 
+                           se.u <- tapply(se,sm, function(ss) {ss[1]})
+                           sm.u <- unique(sort(sm))
+                           
+                           smth <- cbind(sm.u,th.u,se.u)
+                           return(smth)
+                         },sumlist,x$thetapar,x$se,SIMPLIFY=FALSE)
+  } else {                                                          #if spline Interpolation
+    #TFvec <- sapply(x$pred.list,is.null)                            #for these NA groups no spline interpolation was computed
+    #predind <- (1:length(x$pred.list))[!TFvec]
+    #x$pred.list <- x$pred.list[predind]
+    
+    coef.list <- mapply(function(sm,pl,se) {
+                            se.u <- tapply(se,sm, function(ss) {ss[1]})
+                            sm.u <- unique(sort(sm))
+                            
+                            TFvec <- pl$x %in% sm.u
+                            se.ind <- 1:length(TFvec)
+                            se.all <- rep(NA,length(se.ind))
+                            se.all[se.ind[TFvec]] <- se.u              
+                            
+                            cbind(pl$x,pl$y,se.all)
+                            },sumlist,x$pred.list,x$se,SIMPLIFY=FALSE)
+  }
+  
+  if (dim(coef.list[[1]])[2] == 2) {                            #if no standard errors were computed
+    coef.list <- lapply(coef.list,function(cl) {cbind(cl,NA)})
+  }
+  
+ # if (any(is.na(x$X))) {                                       #recompute gmemb without persons excluded
+ #   dichX <- ifelse(is.na(x$X),1,0)
+ #   strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
+ #   gmemb <- as.vector(data.matrix(data.frame(strdata)))
+ # } else {
+ #   gmemb <- rep(1,dim(x$X)[1])
+ # }
+   
+  for (i in 1:length(x$thetapar)) {
+    cat("\n")
+    if (length(x$thetapar) > 1) {
+      cat("Person NA Group:",i,"\n")
+      xvec <- rep(NA, (dim(x$X)[2]))
+      notNApos <- which(!is.na(as.vector(rbind(X[x$gmemb == i,])[1,])))
+      xvec[notNApos] <- "x"
+      cat("NA pattern:",xvec,"\n")
+    }
+    colnames(coef.list[[i]]) <- c("Raw Score","Estimate","Std.Error")
+    rownames(coef.list[[i]]) <- rep("",dim(coef.list[[i]])[1])
+    print(coef.list[[i]])  
+  }
+  invisible(coef.list)
+}
+
diff --git a/R/print.resid.R b/R/print.resid.R
new file mode 100755
index 0000000..0f13730
--- /dev/null
+++ b/R/print.resid.R
@@ -0,0 +1,18 @@
+`print.resid` <-
+function(x,...)
+# print method for object of class "resid" (from residuals.ppar)
+# prints standardized and squared standardized residuals, one block per NA group
+{
+    cat("\nStandardized Residuals \n")
+    for (i in 1:length(x$st.res)) {
+      if (length(x$st.res) > 1) {cat("Person NA Group:",i,"\n")}
+      print(x$st.res[[i]])
+      cat("\n")
+    }
+    cat("\nSquared Standardized Residuals \n")
+    for (i in 1:length(x$sq.res)) {
+      if (length(x$sq.res) > 1) {cat("Person NA Group:",i,"\n")}
+      print(x$sq.res[[i]])
+      cat("\n")
+    }
+}
+
diff --git a/R/print.step.r b/R/print.step.r
new file mode 100755
index 0000000..1593120
--- /dev/null
+++ b/R/print.step.r
@@ -0,0 +1,24 @@
+print.step <- function(x, ...)
+{
+  #print method for objects of class "step" (from stepwiseIt)
+  #prints whichever criterion table is present; invisibly returns x
+  cat("\nResults for stepwise item elimination:\n")
+  cat("Number of steps:",x$nsteps,"\n")
+
+  if (!is.null(x$res.wald)) {
+    cat("Criterion: Waldtest\n\n")
+    print(round(x$res.wald, 3))
+    cat("\n")
+  }
+  
+  if (!is.null(x$res.itemfit)) {
+    cat("Criterion: Itemfit\n\n")
+    print(round(x$res.itemfit, 3))
+    cat("\n")
+  }
+  
+  if (!is.null(x$res.LR)) {
+    cat("Criterion: Andersen's LR-test\n\n")
+    print(round(x$res.LR, 3))
+    cat("\n")
+  }
+  invisible(x)
+}
\ No newline at end of file
diff --git a/R/print.threshold.r b/R/print.threshold.r
new file mode 100755
index 0000000..144c9c3
--- /dev/null
+++ b/R/print.threshold.r
@@ -0,0 +1,10 @@
+print.threshold <- function(x,...)
+{
+    #print method for objects of class "threshold" (from thresholds)
+    #prints one rounded table per design-matrix block; invisibly returns x$threshtable
+    cat("\n")
+    for (i in 1:length(x$threshtable)) {
+      cat("Design Matrix Block ",i,":\n",sep="")
+      print(round(x$threshtable[[i]],5))
+      cat("\n")
+    }
+    invisible(x$threshtable)
+}
\ No newline at end of file
diff --git a/R/print.wald.R b/R/print.wald.R
new file mode 100755
index 0000000..14fca7d
--- /dev/null
+++ b/R/print.wald.R
@@ -0,0 +1,13 @@
+`print.wald` <-
+function(x,...)
+#print method for objects of class "wald" (from waldtest)
+#prints the rounded z-value table and invisibly returns it
+{
+   #if (!is.null(x$betalab)) {
+   #  cat("Warning Message: Item",x$betalab[1],"was not tested due to sum-0 restriction.\n")
+   #}
+   cat("\nWald test on item level (z-values):\n\n")
+   print(round(x$coef.table,3))
+   cat("\n")
+   invisible(round(x$coef.table,3))
+}
+
diff --git a/R/residuals.ppar.R b/R/residuals.ppar.R
new file mode 100755
index 0000000..142ea98
--- /dev/null
+++ b/R/residuals.ppar.R
@@ -0,0 +1,9 @@
+`residuals.ppar` <-
+function(object,...)
+# computes standardized residuals
+# for object of class "ppar" (from person.parameter)
+# delegates to itemfit() and returns its st.res component
+{
+  result <- itemfit(object)$st.res
+  result
+}
+
diff --git a/R/rostdeviance.r b/R/rostdeviance.r
new file mode 100755
index 0000000..08008de
--- /dev/null
+++ b/R/rostdeviance.r
@@ -0,0 +1,26 @@
+rostdeviance <- function(object)
+{
+# Analysis of Deviance Table (Test against a saturated model)
+# object... object of class ppar
+# returns list(value, df, p.value): deviance of the MML solution against
+# the saturated model (cf. Rost, p. 334)
+
+#---------------saturated model---------------------
+  X <- object$X
+  N <- dim(X)[1]                     #number of subjects
+  K <- dim(X)[2]                     #number of items
+  x.ch <- apply(X,1,toString)        #response patters as string vectors
+  nx <- as.vector(table(x.ch))       #pattern frequencies
+  lsat <- sum(nx*(log(nx/N)))        #log-likelihood of saturated model (Rost, p.334)
+  #npar.sat <- length(nx)
+  npar.sat <- prod(apply(X, 2, max) + 1) - 1  #number of possible response patterns - 1
+#------------end saturated model--------------------
+
+  rv <- rowSums(X, na.rm = TRUE)                          #person raw scores
+  lmml <- sum(table(rv)*log(table(rv)/N))+object$loglik.cml   #MML likelihood
+  npar.mml <- dim(object$W)[2]        #+ length(table(rv)) ... not sure about that
+  
+  dev <- -2*(lmml - lsat)             #deviance
+  df.chi <- npar.sat - npar.mml
+  p.value <- 1-pchisq(dev,df.chi)
+  result <- list(value = dev, df = df.chi, p.value = p.value)
+  return(result)
+}
diff --git a/R/sim.2pl.R b/R/sim.2pl.R
new file mode 100755
index 0000000..5273eb5
--- /dev/null
+++ b/R/sim.2pl.R
@@ -0,0 +1,54 @@
+sim.2pl <- function(persons, items, discrim = 0.25, seed = NULL, cutpoint = "randomized")
+{
+
+# simulation of Birnbaum's 2-PL (non-parallel ICCs)
+# violation is steered by the standard deviation sdlog.
+# meanlog in rlnorm is 0 which implies that the random numbers lie asymmetrically around 1. If sdlog = 0 the data are Rasch homogeneous.
+# For IRT applications, values up to 0.5 should be considered. 
+# cutpoint ... "randomized" or a probability used as deterministic threshold
+# NOTE(review): set.seed(seed) is re-applied before every draw, so the item,
+# person, and discrimination samples are coupled when seed is given -- verify intended
+
+if (length(items) == 1) {
+  if (!is.null(seed)) set.seed(seed)
+  schwierig <- rnorm(items)      #standard normal distributed
+  n.items <- items
+} else {
+  schwierig <- items
+  n.items <- length(items)
+}
+
+if (length(persons) == 1) {
+  if (!is.null(seed)) set.seed(seed)
+  faehig <- rnorm(persons)
+  n.persons <- persons
+} else {
+  faehig <- persons
+  n.persons <- length(persons)
+}
+
+
+if (length(discrim) > 1) {
+ alpha <- discrim 
+} else {
+ if (!is.null(seed)) set.seed(seed) 
+ alpha <- rlnorm(n.items, 0, sdlog = discrim)         #discrimination parameter
+}
+
+psolve <- matrix(0, n.persons, n.items)
+
+for (i in 1:n.persons)
+	for (j in 1:n.items)
+	psolve[i,j]<-exp(alpha[j]*(faehig[i]-schwierig[j]))/(1+exp(alpha[j]*(faehig[i]-schwierig[j])))
+
+if (cutpoint == "randomized") {
+  if (!is.null(seed)) set.seed(seed)
+    R <-(matrix(runif(n.items*n.persons),n.persons,n.items) < psolve)*1
+} else {
+    R <- (cutpoint < psolve)*1
+}
+
+
+
+
+return(R)
+}
+
+
diff --git a/R/sim.locdep.R b/R/sim.locdep.R
new file mode 100755
index 0000000..2923d5a
--- /dev/null
+++ b/R/sim.locdep.R
@@ -0,0 +1,55 @@
+sim.locdep <- function(persons, items, it.cor = 0.25, seed = NULL, cutpoint = "randomized")
+{
+# simulating data according to the local dependence model by Jannarone (1986)
+# it.cor represents the pairwise item correlation. If it is a single value, it is constant over all items,
+# otherwise a symmetric matrix of dimension n.items x n.items
+# it.cor = 1 reflects strong violation, it.cor = 0 corresponds to the Rasch model.
+# cutpoint ... "randomized" or a probability (fix: argument was previously ignored)
+
+if (length(items) == 1) {
+  if (!is.null(seed)) set.seed(seed)
+  schwierig <- rnorm(items)      #standard normal distributed
+  n.items <- items
+} else {
+  schwierig <- items
+  n.items <- length(items)
+}
+
+if (length(persons) == 1) {
+  if (!is.null(seed)) set.seed(seed)
+  faehig <- rnorm(persons)
+  n.persons <- persons
+} else {
+  faehig <- persons
+  n.persons <- length(persons)
+}
+
+if (is.matrix(it.cor)) {
+  #if (dim(it.cor)!= c(n.items, n.items)) stop("it.cor must be symmetric and of dimension number of items")
+  delta <- it.cor
+} else {
+  delta <- matrix(it.cor, ncol = n.items, nrow = n.items)
+}
+
+Loesprob<-matrix(0,n.persons,n.items)
+
+if (!is.null(seed)) set.seed(seed)
+Random.numbers<-matrix(runif(n.items*n.persons),n.persons,n.items)
+R<-matrix(-5,n.persons,n.items)
+
+#columns are filled sequentially since even items depend on the previous response
+for (j in 1:n.items)
+	{
+	for (i in 1:n.persons)
+		{
+                if ((j %% 2) == 0)
+                {
+                  Loesprob[i,j]<-exp(faehig[i]-schwierig[j]+(R[i,j-1]-0.5)*delta[j,j-1])/(1+exp(faehig[i]-schwierig[j]+(R[i,j-1]-0.5)*delta[j,j-1]))
+		} else {
+                  Loesprob[i,j]<-exp(faehig[i]-schwierig[j])/(1+exp(faehig[i]-schwierig[j]))
+		}}
+	if (identical(cutpoint, "randomized")) R[,j]<-(Random.numbers[,j]<Loesprob[,j])*1 else R[,j]<-(cutpoint<Loesprob[,j])*1
+	}
+
+return(R)
+}
+
diff --git a/R/sim.rasch.R b/R/sim.rasch.R
new file mode 100755
index 0000000..aa038fe
--- /dev/null
+++ b/R/sim.rasch.R
@@ -0,0 +1,38 @@
+sim.rasch <-function(persons, items, seed = NULL, cutpoint = "randomized")
+{
+#produces rasch homogeneous data
+#cutpoint... probability or "randomized"
+#returns an n.persons x n.items 0/1 response matrix
+
+if (length(items) == 1) {
+  if (!is.null(seed)) set.seed(seed)
+  schwierig <- rnorm(items)      #standard normal distributed
+  n.items <- items
+} else {
+  schwierig <- items
+  n.items <- length(items)
+}
+
+if (length(persons) == 1) {
+  if (!is.null(seed)) set.seed(seed)
+  faehig <- rnorm(persons)
+  n.persons <- persons
+} else {
+  faehig <- persons
+  n.persons <- length(persons)
+}
+
+fsmat <- outer(faehig, schwierig, "-")
+psolve <- exp(fsmat)/(1+exp(fsmat))
+
+if (cutpoint == "randomized") {
+  if (!is.null(seed)) set.seed(seed)
+    R <-(matrix(runif(n.items*n.persons),n.persons,n.items) < psolve)*1
+} else {
+   R <- (cutpoint < psolve)*1
+ }
+
+return(R)
+}
+
+
+
diff --git a/R/sim.xdim.R b/R/sim.xdim.R
new file mode 100755
index 0000000..62daaac
--- /dev/null
+++ b/R/sim.xdim.R
@@ -0,0 +1,87 @@
+sim.xdim <- function(persons, items, Sigma, weightmat, seed = NULL, cutpoint = "randomized")
+{
+
+# Sigma ... VC matrix for multinormal distribution
+# weightmat ... matrix of dimension k times D with weights. If omitted, equal weights are used.
+
+## function from MASS
+mvrnorm<-function (n = 1, mu, Sigma, tol = 1e-06, empirical = FALSE)
+{
+    p <- length(mu)
+    if (!all(dim(Sigma) == c(p, p)))
+        stop("incompatible arguments")
+    eS <- eigen(Sigma, symmetric = TRUE)   # EISPACK arg removed (deprecated since R 3.0.0)
+    ev <- eS$values
+    if (!all(ev >= -tol * abs(ev[1])))
+        stop("'Sigma' is not positive definite")
+    X <- matrix(rnorm(p * n), n)
+    if (empirical) {
+        X <- scale(X, TRUE, FALSE)
+        X <- X %*% svd(X, nu = 0)$v
+        X <- scale(X, FALSE, TRUE)
+    }
+    X <- drop(mu) + eS$vectors %*% diag(sqrt(pmax(ev, 0)), p) %*%
+        t(X)
+    nm <- names(mu)
+    if (is.null(nm) && !is.null(dn <- dimnames(Sigma)))
+        nm <- dn[[1]]
+    dimnames(X) <- list(nm, NULL)
+    if (n == 1)
+        drop(X)
+    else t(X)
+}
+
+
+if (missing(Sigma)) {
+  ndim <- ncol(persons)
+} else {
+  ndim <- nrow(Sigma)                      #number of dimensions
+}
+
+if (length(persons) == 1) {                #simulating
+  if (!is.null(seed)) set.seed(seed)
+  faehig <- mvrnorm(persons, mu = rep(0, nrow(Sigma)), Sigma = Sigma)   # Sigma required when persons is scalar
+} else {
+  faehig <- persons
+}
+if (length(items) == 1) {
+  if (!is.null(seed)) set.seed(seed)
+  schwierig <- rnorm(items)
+} else {
+  schwierig <- items
+}
+
+
+n.persons <- nrow(faehig)
+n.items <- length(schwierig)
+
+if (missing(weightmat)) {                      #specifying the weight matrix
+  weightmat <- matrix(0, ncol = ndim, nrow = n.items)
+  if (!is.null(seed)) set.seed(seed)
+  indvec <- sample(1:ndim, n.items, replace = TRUE)
+  for (i in 1:n.items) weightmat[i,indvec[i]] <- 1
+}
+
+Wp <- apply(weightmat, 1, function(wi) {      #n.persons times n.items matrix
+                     Xw <- t(wi) %*% t(faehig)})
+
+psolve <- matrix(0,n.persons,n.items)
+
+#class<-rep(1,n.persons)
+#class[sample(n.persons)[1:round(n.persons/2,0)]]<-2
+
+for (j in 1:n.items)
+  for (i in 1:n.persons)
+    psolve[i,j] <- exp(Wp[i,j]-schwierig[j])/(1+ exp(Wp[i,j]-schwierig[j]))
+
+if (cutpoint == "randomized") {
+  if (!is.null(seed)) set.seed(seed)
+  R <-(matrix(runif(n.items*n.persons),n.persons,n.items) < psolve)*1
+} else {
+  R <- (cutpoint < psolve)*1
+}
+
+return(R)
+}
+
+
diff --git a/R/stepwiseIt.R b/R/stepwiseIt.R
new file mode 100755
index 0000000..acdd0b9
--- /dev/null
+++ b/R/stepwiseIt.R
@@ -0,0 +1,3 @@
+`stepwiseIt` <- function(object, criterion = list("itemfit"), alpha = 0.05,
+                         verbose = TRUE, maxstep = NA)
+    UseMethod("stepwiseIt")
diff --git a/R/stepwiseIt.eRm.R b/R/stepwiseIt.eRm.R
new file mode 100755
index 0000000..43b90fa
--- /dev/null
+++ b/R/stepwiseIt.eRm.R
@@ -0,0 +1,148 @@
+#function for stepwise item elimination
+
+stepwiseIt.eRm <- function(object, criterion = list("itemfit"), alpha = 0.05, verbose = TRUE,
+                       maxstep = NA)
+{
+# object of class dRm
+# criterion: either list("itemfit") or list("LRtest", splitcr) od list("Waldtest", splitcr)
+
+  #-------- sanity checks ---------
+  dummy <- match.arg(criterion[[1]], c("itemfit","LRtest","Waldtest"))
+  if (!is.list(criterion)) stop("Criterion must be provided as list!")
+  if (!any(class(object) == "dRm")) stop("Stepwise elimination implemented for dichotomous Rasch models only!")
+  #------- end sanity checks ------
+
+  X.new <- object$X
+  K <- dim(X.new)[2]
+  if (is.na(maxstep)) maxstep <- K
+
+  if (length(criterion) == 2) {
+      splitcr <- criterion[[2]]
+  } else {
+      splitcr <- "median"
+  }
+
+
+  #---------------- start elimination  ----------------
+  i <- 0
+  it.el <- rep(NA, K)                              #initialize outputs
+  el.names <- rep(NA, K)
+  wald.mat <- matrix(NA, ncol = 2, nrow = K)
+  itemfit.mat <- matrix(NA, ncol = 3, nrow = K)
+  LR.mat <- matrix(NA, ncol = 3, nrow = K)
+    
+  repeat
+  {
+    if((dim(X.new)[2]) == 2) {
+      warning("Only 2 items left: No Rasch homogeneous itemset found!", call. = FALSE)
+      break
+    }
+    if (i == maxstep) {
+      warning("Maximum number of steps reached!", call. = FALSE)
+      break
+    }
+
+    i <- i + 1
+    res <- RM(X.new)                                     #fit Rasch
+
+    #---------------- itemfit criterion ------------
+    if (criterion[[1]] == "itemfit") {
+      pres <- person.parameter(res)                        #person parameters
+      it.res <- itemfit(pres)                              #compute itemfit
+      pvalvec <- 1-pchisq(it.res$i.fit, it.res$i.df)       #vector with pvalues
+      pvalsig <- which(pvalvec < alpha)                    #significant p-values
+
+      if (length(pvalsig) > 0) {
+        it.el[i] <- which(it.res$i.fit == max(it.res$i.fit))[1]
+        ie <- it.el[i]
+        itemfit.mat[i,] <- c(it.res$i.fit[ie], it.res$i.df[ie], pvalvec[ie])
+        if (verbose) cat("Eliminated item - Step ",i,": ",colnames(X.new)[it.el[i]],"\n", sep = "")
+        el.names[i] <- colnames(X.new)[it.el[i]]
+        X.new <- X.new[,-it.el[i]]
+      } else break
+    }
+    #-------------- end itemfit criterion -----------
+      
+    #------------------ Waldtest criterion ----------
+    if (criterion[[1]] == "Waldtest")
+    {
+      wald.res <- Waldtest(res, splitcr = splitcr)        #compute Waldtest
+      zvalvec <- abs(wald.res$coef.table[,1])             #absolute z-values
+      pvalvec <- wald.res$coef.table[,2]                  #vector with pvalues
+      pvalsig <- which(pvalvec < alpha)                   #significant p-values 
+      if (length(pvalsig) > 0) {
+        elpos <- which(zvalvec == max(zvalvec))[1]       #exclude maximum z-value Waldtest
+        wald.mat[i,] <- wald.res$coef.table[elpos,]
+        if (length(wald.res$it.ex) > 0) elpos <- elpos + sum(wald.res$it.ex <= elpos)  #if items couldn't computed in Waldtest
+        it.el[i] <- elpos
+        el.names[i] <- colnames(X.new)[it.el[i]]
+        if (verbose) cat("Eliminated item - Step ",i,": ",el.names[i],"\n", sep = "")
+        X.new <- X.new[,-it.el[i]]
+      } else break
+    }
+      
+    #-------------- LRtest criterion ----------------
+    if (criterion[[1]] == "LRtest")                           #uses Waldtest but stops when LRtest is sig.
+    {
+      lr.res <- LRtest(res, splitcr = splitcr)
+      if(lr.res$pvalue < alpha) {
+        wald.res <- Waldtest(res, splitcr = splitcr)        #compute Waldtest
+        zvalvec <- abs(wald.res$coef.table[,1])             #absolute z-values
+        elpos <- which(zvalvec == max(zvalvec))[1]       #exclude maximum z-value Waldtest
+        if (length(wald.res$it.ex) > 0) elpos <- elpos + sum(wald.res$it.ex <= elpos)  #if items couldn't computed in Waldtest
+        it.el[i] <- elpos
+        LR.mat[i,] <- c(lr.res$LR, lr.res$df, lr.res$pvalue)
+        el.names[i] <- colnames(X.new)[it.el[i]]
+        if (verbose) cat("Eliminated item - Step ",i,": ",el.names[i],"\n", sep = "")
+        X.new <- X.new[,-it.el[i]]
+      } else break
+    }
+    #----------- end LRtest criterion ---------
+  }
+ #--------------------- end stepwise------------------
+ 
+  #labeling
+  el.names <- el.names[!is.na(el.names)]
+  if (all(is.na(el.names))) {
+    warning("No items eliminated! Each of them fits the Rasch model!", call. = FALSE)
+    itemfit.mat <- NULL
+    LR.mat <- NULL
+    wald.mat <- NULL
+    criterion[[1]] <- "none"
+  }
+
+  if (criterion[[1]] == "itemfit")
+  {
+   itemfit.mat <- rbind(itemfit.mat[!is.na(rowSums(itemfit.mat)),])
+   rownames(itemfit.mat) <- paste("Step ",1:length(el.names),": ",el.names,sep = "")
+   colnames(itemfit.mat) <- c("Chisq", "df","p-value")
+  } else {
+    itemfit.mat <- NULL
+  }
+  if (criterion[[1]] == "Waldtest")
+  {
+    wald.mat <- rbind(wald.mat[!is.na(rowSums(wald.mat)),])
+    rownames(wald.mat) <- paste("Step ",1:length(el.names),": ",el.names,sep = "")
+    colnames(wald.mat) <- c("z-statistic", "p-value")
+  } else {
+    wald.mat <- NULL
+  }
+  if (criterion[[1]] == "LRtest")
+  {
+    if (i == maxstep) {
+      LR.mat <- rbind(LR.mat[!is.na(rowSums(LR.mat)),])
+      rownames(LR.mat) <- paste("Step ",1:length(el.names),": ",el.names,sep = "")
+    } else {
+      LR.mat <- rbind(LR.mat[!is.na(rowSums(LR.mat)),], c(lr.res$LR, lr.res$df, lr.res$pvalue))
+      rownames(LR.mat) <- c(paste("Step ",1:length(el.names),": ",el.names,sep = ""), paste("Step ", i,": None", sep = ""))
+    }
+    colnames(LR.mat) <- c("LR-value", "Chisq df", "p-value")
+  } else {
+    LR.mat <- NULL
+  }
+
+  result <- list(X = X.new, fit = res, it.elim = el.names, res.wald = wald.mat, res.itemfit = itemfit.mat,
+                res.LR = LR.mat, nsteps = i-1)
+  class(result) <- "step"
+  result
+}
diff --git a/R/summary.LR.r b/R/summary.LR.r
new file mode 100755
index 0000000..421e82e
--- /dev/null
+++ b/R/summary.LR.r
@@ -0,0 +1,32 @@
+summary.LR <- function(object,...)
+# summary method for objects of class "LR" (from LRtest")
+# prints the overall LR test, then per-subgroup log-likelihood and beta
+# parameters (with standard errors where available)
+{
+  cat("\n")
+  cat("Andersen LR-test: \n")
+  cat("LR-value:", round(object$LR,3),"\n")
+  cat("Chi-square df:",object$df,"\n")
+  cat("p-value: ",round(object$pvalue,3),"\n")
+  cat("\n")
+
+  mt_vek <- apply(object$X,2,max,na.rm=TRUE)
+
+  for (i in 1:length(object$betalist)) {
+    cat("\n")
+    cat("Subject subgroup ",object$spl.gr[i],":",sep="")
+    cat("\n")
+    cat("Log-likelihood: ",object$likgroup[i])
+    cat("\n\n")
+    cat("Beta Parameters: \n")
+    betavec <- object$betalist[[i]]
+    if (!all(is.na(object$selist[[i]]))) {
+      coeftable <- rbind(betavec,object$selist[[i]])
+      rownames(coeftable) <- c("Estimate","Std.Err.")
+      print(coeftable)
+    } else {
+      print(betavec)
+    }
+    cat("\n")
+  }
+}
+
+
diff --git a/R/summary.MLoef.r b/R/summary.MLoef.r
new file mode 100755
index 0000000..52cd288
--- /dev/null
+++ b/R/summary.MLoef.r
@@ -0,0 +1,37 @@
+summary.MLoef <- function(object,...)
+{
+#print method for object of class "MLoef" (MLoef)
+
+# prepare message for split criterion; default ensures spl is always defined
+  spl <- "user-defined"
+  if( length(object$splitcr) == 1){
+    if( (object$splitcr == "median") | (object$splitcr == "mean")){ spl <- object$splitcr }
+  }
+#
+#  if(!is.null(object$warning)){
+#    if(object$splitcr == "median") cat("Warning: Item(s)",paste(names(object$warning),collapse=", "),"with raw score equal to the median assigned to the lower raw score group!\n")
+#    if(object$splitcr == "mean") cat("Warning: Item(s)",paste(names(object$warning),collapse=", "),"with raw score equal to the mean assigned to the lower raw score group!\n")
+#  }
+  cat("\n")
+  cat("Martin-Loef-Test (split criterion: ",spl,")\n",sep="")
+
+  cat("\n")
+  cat("Group 1:\nItems: ")
+  cat(paste(object$items1),sep=", ")
+  cat("\nLog-Likelihood:",round(object$L1,3),"\n")
+  cat(paste("k_1:",object$k[1]),"\n")
+  cat("\n")
+  cat("Group 2:\nItems: ")
+  cat(paste(object$items2),sep=", ")
+  cat("\nLog-Likelihood:",round(object$L2,3),"\n")
+  cat(paste("k_2:",object$k[2]),"\n")
+  cat("\n")
+  cat("Overall Rasch-Model:\n")
+  cat("Log-Likelihood:",round(object$L0,3),"\n")
+  cat("\n")
+
+  cat(paste("LR-value:",round(object$LR,3),"\n"))
+  cat(paste("Chi-square df:",round(object$df,3),"\n"))
+  cat(paste("p-value:",round(object$p.value,3)),"\n")
+  cat("\n")
+}
diff --git a/R/summary.eRm.R b/R/summary.eRm.R
new file mode 100755
index 0000000..1b0299c
--- /dev/null
+++ b/R/summary.eRm.R
@@ -0,0 +1,47 @@
+`summary.eRm` <-
+function(object,...)
+{
+#summary method for fitted eRm models: prints fit information, then eta and
+#beta parameters, each with standard errors and 0.95 confidence intervals
+
+#labels...whether the item parameters should be labelled
+
+cat("\n")
+cat("Results of",object$model,"estimation: \n")
+cat("\n")
+cat("Call: ", deparse(object$call), "\n")
+cat("\n")
+
+cat("Conditional log-likelihood:",object$loglik,"\n")
+cat("Number of iterations:",object$iter,"\n")
+cat("Number of parameters:",object$npar,"\n")
+cat("\n")
+
+X <- object$X
+X01 <- object$X01
+mt_vek <- apply(X,2,max,na.rm=TRUE)
+
+ci <- confint(object,"eta")                                         # eta parameters:
+if (object$model %in% c("RM","RSM","PCM"))                          # now difficulty for RM, RSM, PCM
+    cat("Item (Category) Difficulty Parameters (eta) ")             # new labelling rh 25-03-2010
+else
+    cat("Basic Parameters eta ")
+cat("with 0.95 CI:\n")
+
+coeftable <- as.data.frame(cbind(round(object$etapar,3),
+                           round(object$se.eta,3),round(ci,3)))
+colnames(coeftable) <- c("Estimate","Std. Error","lower CI","upper CI")
+rownames(coeftable) <- names(object$etapar)
+print(coeftable)
+
+
+ci <- confint(object,"beta")
+cat("\nItem Easiness Parameters (beta) with 0.95 CI:\n")
+#coeftable <- as.data.frame(cbind(round(object$betapar),3),
+#                           round(object$se.beta,3),round(ci,3))
+coeftable <- cbind(round(object$betapar,3), round(object$se.beta,3), round(ci,3))
+
+colnames(coeftable) <- c("Estimate","Std. Error","lower CI","upper CI")
+rownames(coeftable) <- names(object$betapar)
+print(coeftable)
+cat("\n")
+}
+
diff --git a/R/summary.gof.R b/R/summary.gof.R
new file mode 100755
index 0000000..4a3def4
--- /dev/null
+++ b/R/summary.gof.R
@@ -0,0 +1,20 @@
+summary.gof <- function(object, ...)
+{
+  #summary method for objects of class "gof" (from gofIRT.ppar)
+  #prints GoF tests, R-squared measures, and classifier results (confusion
+  #matrix, accuracy, sensitivity, specificity, AUC, Gini)
+  cat("\nGoodness-of-Fit Tests\n")
+  print(round(object$test.table, 3))
+
+  cat("\nR-Squared Measures")
+  cat("\nPearson R2:", round(object$R2$R2.P, 3))
+  cat("\nSum-of-Squares R2:", round(object$R2$R2.SS, 3))
+  cat("\nMcFadden R2:", round(object$R2$R2.MF, 3))
+  
+  cat("\n\nClassifier Results - Confusion Matrix (relative frequencies)\n")
+  print(round(object$classifier$confmat/sum(object$classifier$confmat), 3))
+  cat("\nAccuracy:", round(object$classifier$accuracy, 3))
+  cat("\nSensitivity:", round(object$classifier$sensitivity, 3))
+  cat("\nSpecificity:", round(object$classifier$specificity, 3))
+  cat("\nArea under ROC:", round(object$AUC, 3))
+  cat("\nGini coefficient:", round(object$Gini, 3))
+  cat("\n\n")
+}
\ No newline at end of file
diff --git a/R/summary.ppar.R b/R/summary.ppar.R
new file mode 100755
index 0000000..3457e51
--- /dev/null
+++ b/R/summary.ppar.R
@@ -0,0 +1,40 @@
+`summary.ppar` <-
+function(object,...)
+# summary method for object of class "ppar"
+# prints, per NA group, the collapsed log-likelihood, iteration/parameter
+# counts, and ML ability estimates with standard errors and CIs
+{
+  
+  if (length(object$pers.ex) > 0) {
+    thetaind <- rownames(object$X)[-object$pers.ex]
+  } else {
+    thetaind <- rownames(object$X)
+  }
+    
+  if (any(is.na(object$X))) {                                       #recompute gmemb without persons excluded
+    dichX <- ifelse(is.na(object$X),1,0)
+    strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
+    gmemb <- as.vector(data.matrix(data.frame(strdata)))
+  } else {
+    gmemb <- rep(1,dim(object$X)[1])
+  }
+  
+  cat("\n")
+  cat("Estimation of Ability Parameters")
+  for (i in 1:length(object$thetapar)) {
+    cat("\n\n")
+    if (length(object$thetapar) > 1) {
+      cat("Subject NA Group:",i,"\n")
+      xvec <- rbind(object$X[gmemb==i,])[1,]                    #determine NA pattern
+      xvec[!is.na(xvec)] <- "x"
+      cat("NA pattern:",xvec,"\n")
+      }
+    cat("Collapsed log-likelihood:",object$loglik[[i]],"\n")
+    cat("Number of iterations:",object$iter[[i]],"\n")
+    cat("Number of parameters:",object$npar[[i]],"\n")
+    cat("\n")
+    cat("ML estimated ability parameters (without spline interpolated values): \n")
+    # NOTE(review): rows are labelled via object$gmemb while the loop groups use
+    # the locally recomputed gmemb -- presumably identical; verify against person.parameter
+    coef.table <- cbind(object$thetapar[[i]],object$se.theta[[i]],confint(object)[[i]])
+    dimnames(coef.table) <- list(paste("theta",thetaind[object$gmemb==i]),c("Estimate","Std. Err.",colnames(confint(object)[[i]])))
+    print(coef.table)
+  }
+}
+
diff --git a/R/summary.threshold.r b/R/summary.threshold.r
new file mode 100755
index 0000000..47e984c
--- /dev/null
+++ b/R/summary.threshold.r
@@ -0,0 +1,10 @@
+summary.threshold <- function(object,...)
+{
+#object of class "threshold"
+# Prints a table of threshold parameter estimates with standard errors
+# and confidence intervals, all rounded to 5 decimals.
+# ... is ignored (kept for S3 method signature compatibility).
+
+  coef.table <- cbind(round(object$threshpar,5),round(object$se.thresh,5),round(confint(object),5))
+  dimnames(coef.table) <- list(names(object$threshpar),c("Estimate","Std. Err.",colnames(confint(object))))
+  cat("\n")
+  print(coef.table)
+  cat("\n")
+}
\ No newline at end of file
diff --git a/R/thresholds.eRm.r b/R/thresholds.eRm.r
new file mode 100755
index 0000000..f554671
--- /dev/null
+++ b/R/thresholds.eRm.r
@@ -0,0 +1,61 @@
+thresholds.eRm <- function(object)                # uses matrix approach
+{
+#Computation of threshold parameters for polytomous models
+#object of class "eRm" (but not "dRm")
+# Returns a list of class "threshold" with elements:
+#   threshpar   ... vector of threshold parameters
+#   se.thresh   ... their standard errors (delta method on the beta VC matrix)
+#   threshtable ... per-block tables of item location + threshold columns
+
+  if ((object$model == "LLTM") || (object$model == "RM")) stop("Threshold parameters are computed only for polytomous models!")
+  if ((object$model == "LRSM") || (object$model == "LPCM")) {
+    # linear models: one beta block per (measurement point x group) combination
+    mpoints <- object$mpoints
+    ngroups <- object$ngroups
+    vecrep <- mpoints * ngroups                      
+  } else {
+    mpoints <- 1
+    ngroups <- 1
+    vecrep <- 1
+  }
+  
+  betapar <- object$betapar
+  indmt <- apply(object$X,2,max,na.rm=TRUE)         #number of categories per item
+  mt_vek1 <- sequence(indmt[1:(length(indmt)/mpoints)]) #1 block of beta-items
+  mt_vek <- rep(mt_vek1, vecrep) 
+  # build transformation matrix d2: thresholds are (negative) successive
+  # differences of the cumulative beta parameters within each item
+  sq<-ifelse(mt_vek > 1,-1,0)
+  d1<-diag(sq[-1])
+  k<-length(betapar)
+  d2<-diag(k)
+  d2[-k,-1]<-d2[-k,-1]+d1
+  threshpar <-as.vector(crossprod(betapar,d2)*-1)                  #vector with threshold parameters
+  
+  names(threshpar) <- paste("thresh",names(betapar))
+  
+  # delta method: VC of thresholds = d2 %*% VC(beta) %*% t(d2)
+  vc.beta <- (object$W%*%solve(object$hessian)%*%t(object$W)) #VC matrix beta's
+  se.thresh <- sqrt(diag(d2%*%(vc.beta)%*%t(d2)))             #standard errors of thresholds
+  names(se.thresh) <- names(threshpar)
+
+  blocks <- rep(1:vecrep, each = length(mt_vek1))
+  thblock <- split(threshpar,blocks)                          #block of threshholds (as in design matrix)
+  indmt1 <- indmt[1:(length(indmt)/mpoints)]
+  indvec <- rep(1:length(indmt1),indmt1)                      # item index for each threshold within a block
+  
+  threshtab.l <- lapply(thblock, function(x) {                     #list of table-blocks
+                     Location <- tapply(x,indvec,mean)             #location parameters
+                     thresh.l <- split(x, indvec)
+                     threshmat <- t(sapply(thresh.l,"[",1:max(mt_vek)))   # pad shorter items with NA
+                     colnames(threshmat) <- paste("Threshold", 1:dim(threshmat)[2])
+                     parmat <- cbind(Location,threshmat)
+                     }) 
+  
+  #determine item names for block-table
+  cnames <- colnames(object$X)
+  ind.it <- rep(1:mpoints,each = length(cnames)/mpoints)           #item label index
+  itnames1 <- as.vector(unlist(tapply(cnames, ind.it, function(x) rep(x, ngroups)))) 
+  rep.ind <- sapply(threshtab.l, function(x) dim(x)[1])            # rows per block table
+  sp.ind <- rep(1:length(rep.ind), rep.ind)
+
+  names.l <- split(itnames1, sp.ind)                   #names as list
+  for (i in 1:length(threshtab.l)) rownames(threshtab.l[[i]]) <- names.l[[i]]              #name the items
+
+  result <- list(threshpar = threshpar,se.thresh = se.thresh, threshtable = threshtab.l)
+  class(result) <- "threshold"
+  result
+
+}
\ No newline at end of file
diff --git a/R/thresholds.r b/R/thresholds.r
new file mode 100755
index 0000000..b5cc877
--- /dev/null
+++ b/R/thresholds.r
@@ -0,0 +1 @@
+thresholds <- function(object)UseMethod("thresholds")
diff --git a/R/vcov.eRm.R b/R/vcov.eRm.R
new file mode 100755
index 0000000..b227b99
--- /dev/null
+++ b/R/vcov.eRm.R
@@ -0,0 +1,11 @@
+`vcov.eRm` <-
+function(object,...) 
+# variance-covariance matrix of the eta parameter estimates, obtained by
+# inverting the Hessian from the CML fit. Returns NA if any standard
+# error is missing (i.e. the Hessian was not usable).
+{
+  if (any(is.na(object$se.eta))) {
+    vcmat <- NA 
+  } else {
+    vcmat <- (solve(object$hessian))      #VC-matrix of the parameter estimates
+  }
+  return(vcmat)
+}
+
diff --git a/R/zzz.R b/R/zzz.R
new file mode 100755
index 0000000..f6f05e0
--- /dev/null
+++ b/R/zzz.R
@@ -0,0 +1,38 @@
+# S4 class holding classifier predictions and derived confusion-matrix
+# counts across cutoffs (structure borrowed from the ROCR package).
+setClass("prediction",
+         representation(predictions = "list",
+                        labels      = "list",
+                        cutoffs     = "list",
+                        fp          = "list",
+                        tp          = "list",
+                        tn          = "list",
+                        fn          = "list",
+                        n.pos       = "list",
+                        n.neg       = "list",
+                        n.pos.pred  = "list",
+                        n.neg.pred  = "list"))
+
+# S4 class holding a performance curve: paired x/y value lists plus the
+# cutoff (alpha) values they were evaluated at.
+setClass("performance",
+         representation(x.name       = "character",
+                        y.name       = "character",
+                        alpha.name   = "character",
+                        x.values     = "list",
+                        y.values     = "list",
+                        alpha.values = "list" ))
+
+# plot() on a "performance" object delegates to the internal
+# .plot.performance helper (defined elsewhere in the package).
+setMethod("plot",signature(x="performance",y="missing"),
+          function(x,y,...) {
+              .plot.performance(x,...)
+          })
+
+# .First.lib <- function( libname, pkgname, where) {
+#     if (!require(methods)) {
+#         stop("Require Methods package")
+#     }
+#     if (!require(gplots)) {
+#         stop("Require gplots package")
+#     }
+#
+#     where <- match(paste("package:",pkgname, sep=""), search())
+# }
+
+
diff --git a/data/lltmdat1.rda b/data/lltmdat1.rda
new file mode 100755
index 0000000..2d59f46
Binary files /dev/null and b/data/lltmdat1.rda differ
diff --git a/data/lltmdat2.rda b/data/lltmdat2.rda
new file mode 100755
index 0000000..1ae8982
Binary files /dev/null and b/data/lltmdat2.rda differ
diff --git a/data/lpcmdat.rda b/data/lpcmdat.rda
new file mode 100755
index 0000000..d5db045
Binary files /dev/null and b/data/lpcmdat.rda differ
diff --git a/data/lrsmdat.rda b/data/lrsmdat.rda
new file mode 100755
index 0000000..cb70680
Binary files /dev/null and b/data/lrsmdat.rda differ
diff --git a/data/pcmdat.rda b/data/pcmdat.rda
new file mode 100755
index 0000000..4c738a3
Binary files /dev/null and b/data/pcmdat.rda differ
diff --git a/data/pcmdat2.rda b/data/pcmdat2.rda
new file mode 100755
index 0000000..325f301
Binary files /dev/null and b/data/pcmdat2.rda differ
diff --git a/data/raschdat1.rda b/data/raschdat1.rda
new file mode 100755
index 0000000..8875733
Binary files /dev/null and b/data/raschdat1.rda differ
diff --git a/data/raschdat2.rda b/data/raschdat2.rda
new file mode 100755
index 0000000..341be27
Binary files /dev/null and b/data/raschdat2.rda differ
diff --git a/data/rsmdat.rda b/data/rsmdat.rda
new file mode 100755
index 0000000..2812985
Binary files /dev/null and b/data/rsmdat.rda differ
diff --git a/inst/doc/Rplots.pdf b/inst/doc/Rplots.pdf
new file mode 100755
index 0000000..4b0242b
--- /dev/null
+++ b/inst/doc/Rplots.pdf
@@ -0,0 +1,4506 @@
+%PDF-1.4
+%���ρ�\r
+1 0 obj
+<<
+/CreationDate (D:20100408161643)
+/ModDate (D:20100408161643)
+/Title (R Graphics Output)
+/Producer (R 2.10.1)
+/Creator (R)
+>>
+endobj
+2 0 obj
+<<
+/Type /Catalog
+/Pages 3 0 R
+>>
+endobj
+5 0 obj
+<<
+/Type /Page
+/Parent 3 0 R
+/Contents 6 0 R
+/Resources 4 0 R
+>>
+endobj
+6 0 obj
+<<
+/Length 7 0 R
+>>
+stream
+q
+Q q 59.04 73.44 414.72 371.52 re W n
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 395.67 378.45 Tm (l) Tj 0 Tr
+ET
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 318.92 340.90 Tm (l) Tj 0 Tr
+ET
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 242.83 278.41 Tm (l) Tj 0 Tr
+ET
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 230.24 203.09 Tm (l) Tj 0 Tr
+ET
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 165.78 156.44 Tm (l) Tj 0 Tr
+ET
+Q q
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+74.40 73.44 m 458.40 73.44 l S
+74.40 73.44 m 74.40 66.24 l S
+138.40 73.44 m 138.40 66.24 l S
+202.40 73.44 m 202.40 66.24 l S
+266.40 73.44 m 266.40 66.24 l S
+330.40 73.44 m 330.40 66.24 l S
+394.40 73.44 m 394.40 66.24 l S
+458.40 73.44 m 458.40 66.24 l S
+BT
+0.000 0.000 0.000 rg
+/F2 1 Tf 12.00 0.00 0.00 12.00 67.56 47.52 Tm (-3) Tj
+ET
+BT
+/F2 1 Tf 12.00 0.00 0.00 12.00 131.56 47.52 Tm (-2) Tj
+ET
+BT
+/F2 1 Tf 12.00 0.00 0.00 12.00 195.56 47.52 Tm (-1) Tj
+ET
+BT
+/F2 1 Tf 12.00 0.00 0.00 12.00 263.06 47.52 Tm (0) Tj
+ET
+BT
+/F2 1 Tf 12.00 0.00 0.00 12.00 327.06 47.52 Tm (1) Tj
+ET
+BT
+/F2 1 Tf 12.00 0.00 0.00 12.00 391.06 47.52 Tm (2) Tj
+ET
+BT
+/F2 1 Tf 12.00 0.00 0.00 12.00 455.06 47.52 Tm (3) Tj
+ET
+59.04 87.20 m 59.04 431.20 l S
+59.04 87.20 m 51.84 87.20 l S
+59.04 144.53 m 51.84 144.53 l S
+59.04 201.87 m 51.84 201.87 l S
+59.04 259.20 m 51.84 259.20 l S
+59.04 316.53 m 51.84 316.53 l S
+59.04 373.87 m 51.84 373.87 l S
+59.04 431.20 m 51.84 431.20 l S
+BT
+/F2 1 Tf 0.00 12.00 -12.00 0.00 41.76 80.36 Tm (-3) Tj
+ET
+BT
+/F2 1 Tf 0.00 12.00 -12.00 0.00 41.76 137.69 Tm (-2) Tj
+ET
+BT
+/F2 1 Tf 0.00 12.00 -12.00 0.00 41.76 195.03 Tm (-1) Tj
+ET
+BT
+/F2 1 Tf 0.00 12.00 -12.00 0.00 41.76 255.86 Tm (0) Tj
+ET
+BT
+/F2 1 Tf 0.00 12.00 -12.00 0.00 41.76 313.20 Tm (1) Tj
+ET
+BT
+/F2 1 Tf 0.00 12.00 -12.00 0.00 41.76 370.53 Tm (2) Tj
+ET
+BT
+/F2 1 Tf 0.00 12.00 -12.00 0.00 41.76 427.86 Tm (3) Tj
+ET
+59.04 73.44 m
+473.76 73.44 l
+473.76 444.96 l
+59.04 444.96 l
+59.04 73.44 l
+S
+Q q
+BT
+0.000 0.000 0.000 rg
+/F3 1 Tf 14.00 0.00 0.00 14.00 189.13 469.45 Tm [(Graphical Model Chec) 20 (k)] TJ
+ET
+BT
+/F2 1 Tf 12.00 0.00 0.00 12.00 169.82 18.72 Tm [(Beta f) 30 (or Group: Ra) 20 (w Scores < Mean)] TJ
+ET
+BT
+/F2 1 Tf 0.00 12.00 -12.00 0.00 12.96 159.12 Tm [(Beta f) 30 (or Group: Ra) 20 (w Scores >= Mean)] TJ
+ET
+Q q 59.04 73.44 414.72 371.52 re W n
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+59.04 73.44 m 473.76 444.96 l S
+BT
+0.000 0.000 0.000 rg
+/F2 1 Tf 12.00 0.00 0.00 12.00 405.83 378.17 Tm (I14) Tj
+ET
+BT
+/F2 1 Tf 12.00 0.00 0.00 12.00 329.08 340.62 Tm (I5) Tj
+ET
+BT
+/F2 1 Tf 12.00 0.00 0.00 12.00 253.00 278.13 Tm (I18) Tj
+ET
+BT
+/F2 1 Tf 12.00 0.00 0.00 12.00 240.40 202.81 Tm (I7) Tj
+ET
+BT
+/F2 1 Tf 12.00 0.00 0.00 12.00 175.94 156.16 Tm (I1) Tj
+ET
+0.000 0.000 1.000 RG
+0.75 w
+[ 0.00 4.00] 0 d
+398.63 420.80 m
+398.63 341.29 l
+S
+486.83 381.04 m
+310.43 381.04 l
+S
+0.75 w
+[] 0 d
+486.83 381.04 m
+486.78 382.29 l
+486.65 383.54 l
+486.44 384.79 l
+486.13 386.03 l
+485.74 387.26 l
+485.27 388.49 l
+484.70 389.72 l
+484.06 390.93 l
+483.33 392.13 l
+482.51 393.33 l
+481.61 394.51 l
+480.63 395.68 l
+479.57 396.83 l
+478.43 397.97 l
+477.21 399.09 l
+475.92 400.19 l
+474.55 401.28 l
+473.10 402.34 l
+471.58 403.39 l
+469.98 404.41 l
+468.32 405.41 l
+466.59 406.38 l
+464.79 407.33 l
+462.92 408.26 l
+461.00 409.15 l
+459.01 410.02 l
+456.96 410.86 l
+454.85 411.67 l
+452.69 412.45 l
+450.47 413.20 l
+448.20 413.92 l
+445.89 414.61 l
+443.53 415.26 l
+441.12 415.88 l
+438.67 416.46 l
+436.18 417.01 l
+433.66 417.53 l
+431.10 418.00 l
+428.51 418.45 l
+425.89 418.85 l
+423.24 419.22 l
+420.56 419.55 l
+417.87 419.84 l
+415.16 420.09 l
+412.43 420.31 l
+409.68 420.48 l
+406.93 420.62 l
+404.17 420.72 l
+401.40 420.78 l
+398.63 420.80 l
+395.86 420.78 l
+393.09 420.72 l
+390.33 420.62 l
+387.58 420.48 l
+384.83 420.31 l
+382.10 420.09 l
+379.39 419.84 l
+376.70 419.55 l
+374.02 419.22 l
+371.38 418.85 l
+368.76 418.45 l
+366.16 418.00 l
+363.60 417.53 l
+361.08 417.01 l
+358.59 416.46 l
+356.14 415.88 l
+353.74 415.26 l
+351.37 414.61 l
+349.06 413.92 l
+346.79 413.20 l
+344.57 412.45 l
+342.41 411.67 l
+340.31 410.86 l
+338.26 410.02 l
+336.27 409.15 l
+334.34 408.26 l
+332.47 407.33 l
+330.67 406.38 l
+328.94 405.41 l
+327.28 404.41 l
+325.69 403.39 l
+324.16 402.34 l
+322.72 401.28 l
+321.34 400.19 l
+320.05 399.09 l
+318.83 397.97 l
+317.69 396.83 l
+316.63 395.68 l
+315.65 394.51 l
+314.75 393.33 l
+313.94 392.13 l
+313.20 390.93 l
+312.56 389.72 l
+312.00 388.49 l
+311.52 387.26 l
+311.13 386.03 l
+310.83 384.79 l
+310.61 383.54 l
+310.48 382.29 l
+310.43 381.04 l
+310.48 379.80 l
+310.61 378.55 l
+310.83 377.30 l
+311.13 376.06 l
+311.52 374.83 l
+312.00 373.60 l
+312.56 372.37 l
+313.20 371.16 l
+313.94 369.95 l
+314.75 368.76 l
+315.65 367.58 l
+316.63 366.41 l
+317.69 365.26 l
+318.83 364.12 l
+320.05 363.00 l
+321.34 361.89 l
+322.72 360.81 l
+324.16 359.74 l
+325.69 358.70 l
+327.28 357.68 l
+328.94 356.68 l
+330.67 355.71 l
+332.47 354.76 l
+334.34 353.83 l
+336.27 352.94 l
+338.26 352.07 l
+340.31 351.23 l
+342.41 350.42 l
+344.57 349.63 l
+346.79 348.89 l
+349.06 348.17 l
+351.37 347.48 l
+353.74 346.83 l
+356.14 346.21 l
+358.59 345.63 l
+361.08 345.08 l
+363.60 344.56 l
+366.16 344.08 l
+368.76 343.64 l
+371.38 343.24 l
+374.02 342.87 l
+376.70 342.54 l
+379.39 342.25 l
+382.10 342.00 l
+384.83 341.78 l
+387.58 341.61 l
+390.33 341.47 l
+393.09 341.37 l
+395.86 341.31 l
+398.63 341.29 l
+401.40 341.31 l
+404.17 341.37 l
+406.93 341.47 l
+409.68 341.61 l
+412.43 341.78 l
+415.16 342.00 l
+417.87 342.25 l
+420.56 342.54 l
+423.24 342.87 l
+425.89 343.24 l
+428.51 343.64 l
+431.10 344.08 l
+433.66 344.56 l
+436.18 345.08 l
+438.67 345.63 l
+441.12 346.21 l
+443.53 346.83 l
+445.89 347.48 l
+448.20 348.17 l
+450.47 348.89 l
+452.69 349.63 l
+454.85 350.42 l
+456.96 351.23 l
+459.01 352.07 l
+461.00 352.94 l
+462.92 353.83 l
+464.79 354.76 l
+466.59 355.71 l
+468.32 356.68 l
+469.98 357.68 l
+471.58 358.70 l
+473.10 359.74 l
+474.55 360.81 l
+475.92 361.89 l
+477.21 363.00 l
+478.43 364.12 l
+479.57 365.26 l
+480.63 366.41 l
+481.61 367.58 l
+482.51 368.76 l
+483.33 369.95 l
+484.06 371.16 l
+484.70 372.37 l
+485.27 373.60 l
+485.74 374.83 l
+486.13 376.06 l
+486.44 377.30 l
+486.65 378.55 l
+486.78 379.80 l
+486.83 381.04 l
+S
+0.000 0.000 1.000 rg
+BT
+/F1 1 Tf 2 Tr 3.74 0 0 3.74 397.15 379.75 Tm (l) Tj 0 Tr
+ET
+0.75 w
+[ 0.00 4.00] 0 d
+321.88 377.69 m
+321.88 309.29 l
+S
+375.93 343.49 m
+267.83 343.49 l
+S
+0.75 w
+[] 0 d
+375.93 343.49 m
+375.91 344.57 l
+375.83 345.64 l
+375.69 346.71 l
+375.51 347.78 l
+375.27 348.84 l
+374.98 349.90 l
+374.63 350.95 l
+374.24 352.00 l
+373.79 353.03 l
+373.29 354.06 l
+372.74 355.08 l
+372.14 356.08 l
+371.49 357.07 l
+370.79 358.05 l
+370.04 359.02 l
+369.25 359.97 l
+368.41 360.90 l
+367.52 361.82 l
+366.59 362.71 l
+365.61 363.59 l
+364.59 364.45 l
+363.53 365.29 l
+362.43 366.11 l
+361.28 366.90 l
+360.10 367.67 l
+358.88 368.42 l
+357.63 369.14 l
+356.34 369.84 l
+355.01 370.51 l
+353.65 371.16 l
+352.26 371.78 l
+350.84 372.37 l
+349.40 372.93 l
+347.92 373.46 l
+346.42 373.96 l
+344.90 374.44 l
+343.35 374.88 l
+341.78 375.29 l
+340.19 375.67 l
+338.58 376.02 l
+336.96 376.33 l
+335.32 376.62 l
+333.67 376.87 l
+332.01 377.08 l
+330.34 377.27 l
+328.65 377.42 l
+326.97 377.54 l
+325.27 377.62 l
+323.58 377.67 l
+321.88 377.69 l
+320.18 377.67 l
+318.49 377.62 l
+316.79 377.54 l
+315.10 377.42 l
+313.42 377.27 l
+311.75 377.08 l
+310.09 376.87 l
+308.44 376.62 l
+306.80 376.33 l
+305.18 376.02 l
+303.57 375.67 l
+301.98 375.29 l
+300.41 374.88 l
+298.86 374.44 l
+297.34 373.96 l
+295.84 373.46 l
+294.36 372.93 l
+292.92 372.37 l
+291.50 371.78 l
+290.11 371.16 l
+288.75 370.51 l
+287.42 369.84 l
+286.13 369.14 l
+284.88 368.42 l
+283.66 367.67 l
+282.48 366.90 l
+281.33 366.11 l
+280.23 365.29 l
+279.17 364.45 l
+278.15 363.59 l
+277.17 362.71 l
+276.24 361.82 l
+275.35 360.90 l
+274.51 359.97 l
+273.72 359.02 l
+272.97 358.05 l
+272.27 357.07 l
+271.62 356.08 l
+271.02 355.08 l
+270.47 354.06 l
+269.97 353.03 l
+269.52 352.00 l
+269.13 350.95 l
+268.78 349.90 l
+268.49 348.84 l
+268.25 347.78 l
+268.06 346.71 l
+267.93 345.64 l
+267.85 344.57 l
+267.83 343.49 l
+267.85 342.42 l
+267.93 341.34 l
+268.06 340.27 l
+268.25 339.21 l
+268.49 338.14 l
+268.78 337.08 l
+269.13 336.03 l
+269.52 334.99 l
+269.97 333.95 l
+270.47 332.92 l
+271.02 331.91 l
+271.62 330.90 l
+272.27 329.91 l
+272.97 328.93 l
+273.72 327.97 l
+274.51 327.02 l
+275.35 326.08 l
+276.24 325.17 l
+277.17 324.27 l
+278.15 323.39 l
+279.17 322.53 l
+280.23 321.69 l
+281.33 320.88 l
+282.48 320.08 l
+283.66 319.31 l
+284.88 318.56 l
+286.13 317.84 l
+287.42 317.14 l
+288.75 316.47 l
+290.11 315.82 l
+291.50 315.21 l
+292.92 314.62 l
+294.36 314.06 l
+295.84 313.52 l
+297.34 313.02 l
+298.86 312.55 l
+300.41 312.11 l
+301.98 311.69 l
+303.57 311.32 l
+305.18 310.97 l
+306.80 310.65 l
+308.44 310.37 l
+310.09 310.12 l
+311.75 309.90 l
+313.42 309.71 l
+315.10 309.56 l
+316.79 309.45 l
+318.49 309.36 l
+320.18 309.31 l
+321.88 309.29 l
+323.58 309.31 l
+325.27 309.36 l
+326.97 309.45 l
+328.65 309.56 l
+330.34 309.71 l
+332.01 309.90 l
+333.67 310.12 l
+335.32 310.37 l
+336.96 310.65 l
+338.58 310.97 l
+340.19 311.32 l
+341.78 311.69 l
+343.35 312.11 l
+344.90 312.55 l
+346.42 313.02 l
+347.92 313.52 l
+349.40 314.06 l
+350.84 314.62 l
+352.26 315.21 l
+353.65 315.82 l
+355.01 316.47 l
+356.34 317.14 l
+357.63 317.84 l
+358.88 318.56 l
+360.10 319.31 l
+361.28 320.08 l
+362.43 320.88 l
+363.53 321.69 l
+364.59 322.53 l
+365.61 323.39 l
+366.59 324.27 l
+367.52 325.17 l
+368.41 326.08 l
+369.25 327.02 l
+370.04 327.97 l
+370.79 328.93 l
+371.49 329.91 l
+372.14 330.90 l
+372.74 331.91 l
+373.29 332.92 l
+373.79 333.95 l
+374.24 334.99 l
+374.63 336.03 l
+374.98 337.08 l
+375.27 338.14 l
+375.51 339.21 l
+375.69 340.27 l
+375.83 341.34 l
+375.91 342.42 l
+375.93 343.49 l
+S
+BT
+/F1 1 Tf 2 Tr 3.74 0 0 3.74 320.40 342.19 Tm (l) Tj 0 Tr
+ET
+0.75 w
+[ 0.00 4.00] 0 d
+245.80 312.43 m
+245.80 249.57 l
+S
+285.44 281.00 m
+206.15 281.00 l
+S
+0.75 w
+[] 0 d
+285.44 281.00 m
+285.42 281.99 l
+285.36 282.98 l
+285.26 283.96 l
+285.13 284.94 l
+284.95 285.92 l
+284.74 286.89 l
+284.49 287.86 l
+284.19 288.82 l
+283.87 289.77 l
+283.50 290.72 l
+283.10 291.65 l
+282.66 292.57 l
+282.18 293.49 l
+281.67 294.39 l
+281.12 295.27 l
+280.54 296.15 l
+279.92 297.00 l
+279.27 297.85 l
+278.59 298.67 l
+277.87 299.48 l
+277.12 300.27 l
+276.34 301.04 l
+275.53 301.79 l
+274.70 302.52 l
+273.83 303.23 l
+272.93 303.92 l
+272.01 304.58 l
+271.07 305.22 l
+270.09 305.84 l
+269.10 306.43 l
+268.08 307.00 l
+267.04 307.54 l
+265.98 308.06 l
+264.90 308.55 l
+263.79 309.01 l
+262.68 309.44 l
+261.54 309.85 l
+260.39 310.23 l
+259.23 310.58 l
+258.05 310.90 l
+256.86 311.19 l
+255.66 311.45 l
+254.44 311.68 l
+253.23 311.88 l
+252.00 312.05 l
+250.77 312.19 l
+249.53 312.30 l
+248.29 312.37 l
+247.04 312.42 l
+245.80 312.43 l
+244.55 312.42 l
+243.31 312.37 l
+242.07 312.30 l
+240.83 312.19 l
+239.59 312.05 l
+238.37 311.88 l
+237.15 311.68 l
+235.94 311.45 l
+234.74 311.19 l
+233.55 310.90 l
+232.37 310.58 l
+231.20 310.23 l
+230.05 309.85 l
+228.92 309.44 l
+227.80 309.01 l
+226.70 308.55 l
+225.62 308.06 l
+224.55 307.54 l
+223.51 307.00 l
+222.49 306.43 l
+221.50 305.84 l
+220.53 305.22 l
+219.58 304.58 l
+218.66 303.92 l
+217.76 303.23 l
+216.90 302.52 l
+216.06 301.79 l
+215.25 301.04 l
+214.47 300.27 l
+213.72 299.48 l
+213.01 298.67 l
+212.32 297.85 l
+211.67 297.00 l
+211.06 296.15 l
+210.47 295.27 l
+209.93 294.39 l
+209.41 293.49 l
+208.94 292.57 l
+208.50 291.65 l
+208.09 290.72 l
+207.73 289.77 l
+207.40 288.82 l
+207.11 287.86 l
+206.85 286.89 l
+206.64 285.92 l
+206.47 284.94 l
+206.33 283.96 l
+206.23 282.98 l
+206.17 281.99 l
+206.15 281.00 l
+206.17 280.02 l
+206.23 279.03 l
+206.33 278.04 l
+206.47 277.06 l
+206.64 276.09 l
+206.85 275.11 l
+207.11 274.15 l
+207.40 273.19 l
+207.73 272.23 l
+208.09 271.29 l
+208.50 270.36 l
+208.94 269.43 l
+209.41 268.52 l
+209.93 267.62 l
+210.47 266.73 l
+211.06 265.86 l
+211.67 265.00 l
+212.32 264.16 l
+213.01 263.34 l
+213.72 262.53 l
+214.47 261.74 l
+215.25 260.97 l
+216.06 260.22 l
+216.90 259.49 l
+217.76 258.78 l
+218.66 258.09 l
+219.58 257.43 l
+220.53 256.78 l
+221.50 256.17 l
+222.49 255.57 l
+223.51 255.01 l
+224.55 254.46 l
+225.62 253.95 l
+226.70 253.46 l
+227.80 253.00 l
+228.92 252.56 l
+230.05 252.16 l
+231.20 251.78 l
+232.37 251.43 l
+233.55 251.11 l
+234.74 250.82 l
+235.94 250.56 l
+237.15 250.33 l
+238.37 250.13 l
+239.59 249.96 l
+240.83 249.82 l
+242.07 249.71 l
+243.31 249.63 l
+244.55 249.59 l
+245.80 249.57 l
+247.04 249.59 l
+248.29 249.63 l
+249.53 249.71 l
+250.77 249.82 l
+252.00 249.96 l
+253.23 250.13 l
+254.44 250.33 l
+255.66 250.56 l
+256.86 250.82 l
+258.05 251.11 l
+259.23 251.43 l
+260.39 251.78 l
+261.54 252.16 l
+262.68 252.56 l
+263.79 253.00 l
+264.90 253.46 l
+265.98 253.95 l
+267.04 254.46 l
+268.08 255.01 l
+269.10 255.57 l
+270.09 256.17 l
+271.07 256.78 l
+272.01 257.43 l
+272.93 258.09 l
+273.83 258.78 l
+274.70 259.49 l
+275.53 260.22 l
+276.34 260.97 l
+277.12 261.74 l
+277.87 262.53 l
+278.59 263.34 l
+279.27 264.16 l
+279.92 265.00 l
+280.54 265.86 l
+281.12 266.73 l
+281.67 267.62 l
+282.18 268.52 l
+282.66 269.43 l
+283.10 270.36 l
+283.50 271.29 l
+283.87 272.23 l
+284.19 273.19 l
+284.49 274.15 l
+284.74 275.11 l
+284.95 276.09 l
+285.13 277.06 l
+285.26 278.04 l
+285.36 279.03 l
+285.42 280.02 l
+285.44 281.00 l
+S
+BT
+/F1 1 Tf 2 Tr 3.74 0 0 3.74 244.32 279.71 Tm (l) Tj 0 Tr
+ET
+0.75 w
+[ 0.00 4.00] 0 d
+233.20 244.74 m
+233.20 166.63 l
+S
+271.83 205.68 m
+194.58 205.68 l
+S
+0.75 w
+[] 0 d
+271.83 205.68 m
+271.81 206.91 l
+271.75 208.14 l
+271.66 209.36 l
+271.52 210.58 l
+271.35 211.79 l
+271.14 213.00 l
+270.90 214.20 l
+270.62 215.40 l
+270.30 216.58 l
+269.94 217.75 l
+269.55 218.91 l
+269.12 220.06 l
+268.65 221.19 l
+268.15 222.31 l
+267.62 223.41 l
+267.05 224.50 l
+266.45 225.56 l
+265.82 226.61 l
+265.15 227.64 l
+264.45 228.64 l
+263.72 229.62 l
+262.97 230.58 l
+262.18 231.51 l
+261.36 232.42 l
+260.52 233.30 l
+259.64 234.15 l
+258.75 234.98 l
+257.82 235.78 l
+256.88 236.54 l
+255.91 237.28 l
+254.91 237.99 l
+253.90 238.66 l
+252.87 239.30 l
+251.81 239.91 l
+250.74 240.48 l
+249.65 241.02 l
+248.54 241.53 l
+247.42 242.00 l
+246.29 242.43 l
+245.14 242.83 l
+243.98 243.19 l
+242.81 243.51 l
+241.63 243.80 l
+240.44 244.05 l
+239.25 244.26 l
+238.05 244.43 l
+236.84 244.57 l
+235.63 244.66 l
+234.42 244.72 l
+233.20 244.74 l
+231.99 244.72 l
+230.78 244.66 l
+229.57 244.57 l
+228.36 244.43 l
+227.16 244.26 l
+225.97 244.05 l
+224.78 243.80 l
+223.60 243.51 l
+222.43 243.19 l
+221.27 242.83 l
+220.12 242.43 l
+218.99 242.00 l
+217.87 241.53 l
+216.76 241.02 l
+215.67 240.48 l
+214.60 239.91 l
+213.54 239.30 l
+212.51 238.66 l
+211.49 237.99 l
+210.50 237.28 l
+209.53 236.54 l
+208.58 235.78 l
+207.66 234.98 l
+206.76 234.15 l
+205.89 233.30 l
+205.05 232.42 l
+204.23 231.51 l
+203.44 230.58 l
+202.69 229.62 l
+201.96 228.64 l
+201.26 227.64 l
+200.59 226.61 l
+199.96 225.56 l
+199.36 224.50 l
+198.79 223.41 l
+198.26 222.31 l
+197.76 221.19 l
+197.29 220.06 l
+196.86 218.91 l
+196.47 217.75 l
+196.11 216.58 l
+195.79 215.40 l
+195.51 214.20 l
+195.26 213.00 l
+195.06 211.79 l
+194.89 210.58 l
+194.75 209.36 l
+194.66 208.14 l
+194.60 206.91 l
+194.58 205.68 l
+194.60 204.46 l
+194.66 203.23 l
+194.75 202.01 l
+194.89 200.79 l
+195.06 199.57 l
+195.26 198.37 l
+195.51 197.16 l
+195.79 195.97 l
+196.11 194.79 l
+196.47 193.62 l
+196.86 192.45 l
+197.29 191.31 l
+197.76 190.17 l
+198.26 189.06 l
+198.79 187.95 l
+199.36 186.87 l
+199.96 185.80 l
+200.59 184.76 l
+201.26 183.73 l
+201.96 182.73 l
+202.69 181.75 l
+203.44 180.79 l
+204.23 179.86 l
+205.05 178.95 l
+205.89 178.07 l
+206.76 177.21 l
+207.66 176.39 l
+208.58 175.59 l
+209.53 174.82 l
+210.50 174.09 l
+211.49 173.38 l
+212.51 172.71 l
+213.54 172.07 l
+214.60 171.46 l
+215.67 170.89 l
+216.76 170.35 l
+217.87 169.84 l
+218.99 169.37 l
+220.12 168.94 l
+221.27 168.54 l
+222.43 168.18 l
+223.60 167.86 l
+224.78 167.57 l
+225.97 167.32 l
+227.16 167.11 l
+228.36 166.94 l
+229.57 166.80 l
+230.78 166.71 l
+231.99 166.65 l
+233.20 166.63 l
+234.42 166.65 l
+235.63 166.71 l
+236.84 166.80 l
+238.05 166.94 l
+239.25 167.11 l
+240.44 167.32 l
+241.63 167.57 l
+242.81 167.86 l
+243.98 168.18 l
+245.14 168.54 l
+246.29 168.94 l
+247.42 169.37 l
+248.54 169.84 l
+249.65 170.35 l
+250.74 170.89 l
+251.81 171.46 l
+252.87 172.07 l
+253.90 172.71 l
+254.91 173.38 l
+255.91 174.09 l
+256.88 174.82 l
+257.82 175.59 l
+258.75 176.39 l
+259.64 177.21 l
+260.52 178.07 l
+261.36 178.95 l
+262.18 179.86 l
+262.97 180.79 l
+263.72 181.75 l
+264.45 182.73 l
+265.15 183.73 l
+265.82 184.76 l
+266.45 185.80 l
+267.05 186.87 l
+267.62 187.95 l
+268.15 189.06 l
+268.65 190.17 l
+269.12 191.31 l
+269.55 192.45 l
+269.94 193.62 l
+270.30 194.79 l
+270.62 195.97 l
+270.90 197.16 l
+271.14 198.37 l
+271.35 199.57 l
+271.52 200.79 l
+271.66 202.01 l
+271.75 203.23 l
+271.81 204.46 l
+271.83 205.68 l
+S
+BT
+/F1 1 Tf 2 Tr 3.74 0 0 3.74 231.72 204.39 Tm (l) Tj 0 Tr
+ET
+0.75 w
+[ 0.00 4.00] 0 d
+168.74 210.70 m
+168.74 107.36 l
+S
+207.11 159.03 m
+130.38 159.03 l
+S
+0.75 w
+[] 0 d
+207.11 159.03 m
+207.09 160.65 l
+207.04 162.28 l
+206.94 163.89 l
+206.81 165.51 l
+206.64 167.11 l
+206.43 168.71 l
+206.19 170.30 l
+205.91 171.88 l
+205.59 173.45 l
+205.23 175.00 l
+204.84 176.53 l
+204.42 178.05 l
+203.96 179.55 l
+203.46 181.03 l
+202.93 182.49 l
+202.37 183.92 l
+201.77 185.33 l
+201.14 186.72 l
+200.48 188.07 l
+199.78 189.40 l
+199.06 190.70 l
+198.31 191.97 l
+197.52 193.20 l
+196.71 194.40 l
+195.87 195.57 l
+195.01 196.70 l
+194.12 197.79 l
+193.20 198.84 l
+192.26 199.86 l
+191.30 200.83 l
+190.31 201.77 l
+189.30 202.66 l
+188.27 203.51 l
+187.23 204.31 l
+186.16 205.07 l
+185.08 205.78 l
+183.98 206.45 l
+182.87 207.07 l
+181.74 207.65 l
+180.60 208.17 l
+179.45 208.65 l
+178.28 209.08 l
+177.11 209.46 l
+175.93 209.79 l
+174.75 210.07 l
+173.55 210.29 l
+172.35 210.47 l
+171.15 210.60 l
+169.95 210.68 l
+168.74 210.70 l
+167.54 210.68 l
+166.33 210.60 l
+165.13 210.47 l
+163.93 210.29 l
+162.74 210.07 l
+161.55 209.79 l
+160.37 209.46 l
+159.20 209.08 l
+158.04 208.65 l
+156.89 208.17 l
+155.75 207.65 l
+154.62 207.07 l
+153.51 206.45 l
+152.41 205.78 l
+151.32 205.07 l
+150.26 204.31 l
+149.21 203.51 l
+148.18 202.66 l
+147.18 201.77 l
+146.19 200.83 l
+145.23 199.86 l
+144.29 198.84 l
+143.37 197.79 l
+142.48 196.70 l
+141.61 195.57 l
+140.77 194.40 l
+139.96 193.20 l
+139.18 191.97 l
+138.43 190.70 l
+137.70 189.40 l
+137.01 188.07 l
+136.35 186.72 l
+135.72 185.33 l
+135.12 183.92 l
+134.56 182.49 l
+134.03 181.03 l
+133.53 179.55 l
+133.07 178.05 l
+132.64 176.53 l
+132.25 175.00 l
+131.90 173.45 l
+131.58 171.88 l
+131.30 170.30 l
+131.05 168.71 l
+130.85 167.11 l
+130.68 165.51 l
+130.55 163.89 l
+130.45 162.28 l
+130.39 160.65 l
+130.38 159.03 l
+130.39 157.41 l
+130.45 155.79 l
+130.55 154.17 l
+130.68 152.55 l
+130.85 150.95 l
+131.05 149.35 l
+131.30 147.76 l
+131.58 146.18 l
+131.90 144.62 l
+132.25 143.06 l
+132.64 141.53 l
+133.07 140.01 l
+133.53 138.51 l
+134.03 137.03 l
+134.56 135.57 l
+135.12 134.14 l
+135.72 132.73 l
+136.35 131.34 l
+137.01 129.99 l
+137.70 128.66 l
+138.43 127.36 l
+139.18 126.09 l
+139.96 124.86 l
+140.77 123.66 l
+141.61 122.49 l
+142.48 121.36 l
+143.37 120.27 l
+144.29 119.22 l
+145.23 118.20 l
+146.19 117.23 l
+147.18 116.29 l
+148.18 115.40 l
+149.21 114.56 l
+150.26 113.75 l
+151.32 112.99 l
+152.41 112.28 l
+153.51 111.61 l
+154.62 110.99 l
+155.75 110.41 l
+156.89 109.89 l
+158.04 109.41 l
+159.20 108.98 l
+160.37 108.60 l
+161.55 108.28 l
+162.74 108.00 l
+163.93 107.77 l
+165.13 107.59 l
+166.33 107.46 l
+167.54 107.39 l
+168.74 107.36 l
+169.95 107.39 l
+171.15 107.46 l
+172.35 107.59 l
+173.55 107.77 l
+174.75 108.00 l
+175.93 108.28 l
+177.11 108.60 l
+178.28 108.98 l
+179.45 109.41 l
+180.60 109.89 l
+181.74 110.41 l
+182.87 110.99 l
+183.98 111.61 l
+185.08 112.28 l
+186.16 112.99 l
+187.23 113.75 l
+188.27 114.56 l
+189.30 115.40 l
+190.31 116.29 l
+191.30 117.23 l
+192.26 118.20 l
+193.20 119.22 l
+194.12 120.27 l
+195.01 121.36 l
+195.87 122.49 l
+196.71 123.66 l
+197.52 124.86 l
+198.31 126.09 l
+199.06 127.36 l
+199.78 128.66 l
+200.48 129.99 l
+201.14 131.34 l
+201.77 132.73 l
+202.37 134.14 l
+202.93 135.57 l
+203.46 137.03 l
+203.96 138.51 l
+204.42 140.01 l
+204.84 141.53 l
+205.23 143.06 l
+205.59 144.62 l
+205.91 146.18 l
+206.19 147.76 l
+206.43 149.35 l
+206.64 150.95 l
+206.81 152.55 l
+206.94 154.17 l
+207.04 155.79 l
+207.09 157.41 l
+207.11 159.03 l
+S
+BT
+/F1 1 Tf 2 Tr 3.74 0 0 3.74 167.26 157.73 Tm (l) Tj 0 Tr
+ET
+Q
+endstream
+endobj
+7 0 obj
+19674
+endobj
+8 0 obj
+<<
+/Type /Page
+/Parent 3 0 R
+/Contents 9 0 R
+/Resources 4 0 R
+>>
+endobj
+9 0 obj
+<<
+/Length 10 0 R
+>>
+stream
+q
+Q q 49.00 312.96 177.90 142.04 re W n
+Q q 49.00 312.96 177.90 142.04 re W n
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+55.59 448.42 m
+57.65 448.28 l
+59.71 448.13 l
+61.77 447.97 l
+63.83 447.78 l
+65.89 447.58 l
+67.95 447.36 l
+70.00 447.11 l
+72.06 446.84 l
+74.12 446.54 l
+76.18 446.22 l
+78.24 445.85 l
+80.30 445.46 l
+82.36 445.02 l
+84.42 444.54 l
+86.48 444.02 l
+88.54 443.44 l
+90.59 442.81 l
+92.65 442.12 l
+94.71 441.36 l
+96.77 440.53 l
+98.83 439.63 l
+100.89 438.65 l
+102.95 437.57 l
+105.01 436.41 l
+107.07 435.14 l
+109.13 433.77 l
+111.18 432.28 l
+113.24 430.68 l
+115.30 428.95 l
+117.36 427.10 l
+119.42 425.10 l
+121.48 422.98 l
+123.54 420.71 l
+125.60 418.29 l
+127.66 415.74 l
+129.72 413.04 l
+131.77 410.20 l
+133.83 407.23 l
+135.89 404.12 l
+137.95 400.90 l
+140.01 397.57 l
+142.07 394.14 l
+144.13 390.63 l
+146.19 387.05 l
+148.25 383.43 l
+150.31 379.78 l
+152.36 376.12 l
+154.42 372.47 l
+156.48 368.86 l
+158.54 365.30 l
+160.60 361.82 l
+162.66 358.43 l
+164.72 355.15 l
+166.78 352.00 l
+168.84 348.99 l
+170.90 346.13 l
+172.96 343.42 l
+175.01 340.89 l
+177.07 338.52 l
+179.13 336.33 l
+181.19 334.30 l
+183.25 332.45 l
+185.31 330.76 l
+187.37 329.22 l
+189.43 327.84 l
+191.49 326.60 l
+193.55 325.49 l
+195.60 324.50 l
+197.66 323.63 l
+199.72 322.87 l
+201.78 322.20 l
+203.84 321.62 l
+205.90 321.11 l
+207.96 320.67 l
+210.02 320.29 l
+212.08 319.97 l
+214.14 319.69 l
+216.19 319.46 l
+218.25 319.26 l
+220.31 319.09 l
+S
+Q q
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+55.59 312.96 m 220.31 312.96 l S
+55.59 312.96 m 55.59 306.98 l S
+96.77 312.96 m 96.77 306.98 l S
+137.95 312.96 m 137.95 306.98 l S
+179.13 312.96 m 179.13 306.98 l S
+220.31 312.96 m 220.31 306.98 l S
+BT
+0.000 0.000 0.000 rg
+/F2 1 Tf 10.00 0.00 0.00 10.00 49.89 291.44 Tm (-4) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 91.07 291.44 Tm (-2) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 135.17 291.44 Tm (0) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 176.35 291.44 Tm (2) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 217.53 291.44 Tm (4) Tj
+ET
+49.00 318.22 m 49.00 449.74 l S
+49.00 318.22 m 43.03 318.22 l S
+49.00 344.52 m 43.03 344.52 l S
+49.00 370.82 m 43.03 370.82 l S
+49.00 397.13 m 43.03 397.13 l S
+49.00 423.43 m 43.03 423.43 l S
+49.00 449.74 m 43.03 449.74 l S
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 311.27 Tm (0.0) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 337.57 Tm (0.2) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 363.87 Tm (0.4) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 390.18 Tm (0.6) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 416.48 Tm (0.8) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 442.79 Tm (1.0) Tj
+ET
+49.00 312.96 m
+226.90 312.96 l
+226.90 455.00 l
+49.00 455.00 l
+49.00 312.96 l
+S
+Q q 0.00 252.00 252.00 252.00 re W n
+BT
+0.000 0.000 0.000 rg
+/F3 1 Tf 12.00 0.00 0.00 12.00 83.06 475.19 Tm [(ICC plot f) 20 (or item  I1)] TJ
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 99.05 267.54 Tm (Latent Dimension) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 10.76 341.31 Tm [(Probability to Solv) 25 (e)] TJ
+ET
+Q q 49.00 312.96 177.90 142.04 re W n
+1.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+55.59 319.53 m
+57.65 319.67 l
+59.71 319.82 l
+61.77 319.98 l
+63.83 320.16 l
+65.89 320.37 l
+67.95 320.59 l
+70.00 320.83 l
+72.06 321.10 l
+74.12 321.40 l
+76.18 321.72 l
+78.24 322.08 l
+80.30 322.48 l
+82.36 322.91 l
+84.42 323.38 l
+86.48 323.90 l
+88.54 324.47 l
+90.59 325.09 l
+92.65 325.77 l
+94.71 326.52 l
+96.77 327.33 l
+98.83 328.21 l
+100.89 329.17 l
+102.95 330.22 l
+105.01 331.35 l
+107.07 332.58 l
+109.13 333.90 l
+111.18 335.33 l
+113.24 336.86 l
+115.30 338.51 l
+117.36 340.26 l
+119.42 342.14 l
+121.48 344.13 l
+123.54 346.23 l
+125.60 348.45 l
+127.66 350.77 l
+129.72 353.20 l
+131.77 355.72 l
+133.83 358.33 l
+135.89 361.00 l
+137.95 363.73 l
+140.01 366.49 l
+142.07 369.26 l
+144.13 372.02 l
+146.19 374.74 l
+148.25 377.40 l
+150.31 379.96 l
+152.36 382.40 l
+154.42 384.68 l
+156.48 386.78 l
+158.54 388.67 l
+160.60 390.32 l
+162.66 391.71 l
+164.72 392.82 l
+166.78 393.63 l
+168.84 394.13 l
+170.90 394.31 l
+172.96 394.17 l
+175.01 393.72 l
+177.07 392.95 l
+179.13 391.88 l
+181.19 390.53 l
+183.25 388.92 l
+185.31 387.06 l
+187.37 384.99 l
+189.43 382.73 l
+191.49 380.32 l
+193.55 377.77 l
+195.60 375.13 l
+197.66 372.41 l
+199.72 369.65 l
+201.78 366.88 l
+203.84 364.12 l
+205.90 361.39 l
+207.96 358.71 l
+210.02 356.09 l
+212.08 353.56 l
+214.14 351.11 l
+216.19 348.77 l
+218.25 346.54 l
+220.31 344.42 l
+S
+0.000 0.804 0.000 RG
+55.59 318.22 m
+57.65 318.22 l
+59.71 318.22 l
+61.77 318.22 l
+63.83 318.22 l
+65.89 318.22 l
+67.95 318.22 l
+70.00 318.22 l
+72.06 318.22 l
+74.12 318.23 l
+76.18 318.23 l
+78.24 318.23 l
+80.30 318.23 l
+82.36 318.24 l
+84.42 318.24 l
+86.48 318.25 l
+88.54 318.26 l
+90.59 318.27 l
+92.65 318.28 l
+94.71 318.29 l
+96.77 318.31 l
+98.83 318.33 l
+100.89 318.35 l
+102.95 318.38 l
+105.01 318.41 l
+107.07 318.45 l
+109.13 318.50 l
+111.18 318.56 l
+113.24 318.63 l
+115.30 318.71 l
+117.36 318.81 l
+119.42 318.93 l
+121.48 319.07 l
+123.54 319.23 l
+125.60 319.43 l
+127.66 319.66 l
+129.72 319.93 l
+131.77 320.24 l
+133.83 320.61 l
+135.89 321.04 l
+137.95 321.54 l
+140.01 322.11 l
+142.07 322.77 l
+144.13 323.52 l
+146.19 324.37 l
+148.25 325.34 l
+150.31 326.43 l
+152.36 327.65 l
+154.42 329.02 l
+156.48 330.53 l
+158.54 332.20 l
+160.60 334.03 l
+162.66 336.03 l
+164.72 338.20 l
+166.78 340.54 l
+168.84 343.05 l
+170.90 345.73 l
+172.96 348.57 l
+175.01 351.56 l
+177.07 354.69 l
+179.13 357.96 l
+181.19 361.33 l
+183.25 364.80 l
+185.31 368.35 l
+187.37 371.95 l
+189.43 375.60 l
+191.49 379.26 l
+193.55 382.91 l
+195.60 386.54 l
+197.66 390.12 l
+199.72 393.64 l
+201.78 397.09 l
+203.84 400.43 l
+205.90 403.67 l
+207.96 406.79 l
+210.02 409.78 l
+212.08 412.64 l
+214.14 415.36 l
+216.19 417.94 l
+218.25 420.37 l
+220.31 422.66 l
+S
+Q q 301.00 312.96 177.90 142.04 re W n
+Q q 301.00 312.96 177.90 142.04 re W n
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+307.59 448.77 m
+309.65 448.67 l
+311.71 448.56 l
+313.77 448.44 l
+315.83 448.31 l
+317.89 448.16 l
+319.95 447.99 l
+322.00 447.81 l
+324.06 447.61 l
+326.12 447.39 l
+328.18 447.15 l
+330.24 446.88 l
+332.30 446.59 l
+334.36 446.27 l
+336.42 445.91 l
+338.48 445.52 l
+340.54 445.09 l
+342.59 444.62 l
+344.65 444.10 l
+346.71 443.53 l
+348.77 442.91 l
+350.83 442.23 l
+352.89 441.48 l
+354.95 440.67 l
+357.01 439.78 l
+359.07 438.80 l
+361.13 437.74 l
+363.18 436.59 l
+365.24 435.34 l
+367.30 433.99 l
+369.36 432.52 l
+371.42 430.94 l
+373.48 429.23 l
+375.54 427.39 l
+377.60 425.42 l
+379.66 423.31 l
+381.72 421.06 l
+383.77 418.67 l
+385.83 416.14 l
+387.89 413.46 l
+389.95 410.64 l
+392.01 407.69 l
+394.07 404.61 l
+396.13 401.40 l
+398.19 398.09 l
+400.25 394.67 l
+402.31 391.17 l
+404.36 387.61 l
+406.42 383.99 l
+408.48 380.34 l
+410.54 376.68 l
+412.60 373.03 l
+414.66 369.41 l
+416.72 365.84 l
+418.78 362.35 l
+420.84 358.94 l
+422.90 355.65 l
+424.96 352.48 l
+427.01 349.44 l
+429.07 346.56 l
+431.13 343.83 l
+433.19 341.27 l
+435.25 338.88 l
+437.31 336.65 l
+439.37 334.60 l
+441.43 332.72 l
+443.49 331.01 l
+445.55 329.45 l
+447.60 328.04 l
+449.66 326.78 l
+451.72 325.65 l
+453.78 324.65 l
+455.84 323.76 l
+457.90 322.98 l
+459.96 322.30 l
+462.02 321.70 l
+464.08 321.18 l
+466.14 320.73 l
+468.19 320.35 l
+470.25 320.02 l
+472.31 319.73 l
+S
+Q q
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+307.59 312.96 m 472.31 312.96 l S
+307.59 312.96 m 307.59 306.98 l S
+348.77 312.96 m 348.77 306.98 l S
+389.95 312.96 m 389.95 306.98 l S
+431.13 312.96 m 431.13 306.98 l S
+472.31 312.96 m 472.31 306.98 l S
+BT
+0.000 0.000 0.000 rg
+/F2 1 Tf 10.00 0.00 0.00 10.00 301.89 291.44 Tm (-4) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 343.07 291.44 Tm (-2) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 387.17 291.44 Tm (0) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 428.35 291.44 Tm (2) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 469.53 291.44 Tm (4) Tj
+ET
+301.00 318.22 m 301.00 449.74 l S
+301.00 318.22 m 295.03 318.22 l S
+301.00 344.52 m 295.03 344.52 l S
+301.00 370.82 m 295.03 370.82 l S
+301.00 397.13 m 295.03 397.13 l S
+301.00 423.43 m 295.03 423.43 l S
+301.00 449.74 m 295.03 449.74 l S
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 311.27 Tm (0.0) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 337.57 Tm (0.2) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 363.87 Tm (0.4) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 390.18 Tm (0.6) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 416.48 Tm (0.8) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 442.79 Tm (1.0) Tj
+ET
+301.00 312.96 m
+478.90 312.96 l
+478.90 455.00 l
+301.00 455.00 l
+301.00 312.96 l
+S
+Q q 252.00 252.00 252.00 252.00 re W n
+BT
+0.000 0.000 0.000 rg
+/F3 1 Tf 12.00 0.00 0.00 12.00 335.06 475.19 Tm [(ICC plot f) 20 (or item  I1)] TJ
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 351.05 267.54 Tm (Latent Dimension) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 262.76 341.31 Tm [(Probability to Solv) 25 (e)] TJ
+ET
+Q q 301.00 312.96 177.90 142.04 re W n
+1.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+307.59 319.18 m
+309.65 319.28 l
+311.71 319.39 l
+313.77 319.51 l
+315.83 319.64 l
+317.89 319.79 l
+319.95 319.96 l
+322.00 320.14 l
+324.06 320.33 l
+326.12 320.55 l
+328.18 320.79 l
+330.24 321.06 l
+332.30 321.35 l
+334.36 321.67 l
+336.42 322.02 l
+338.48 322.41 l
+340.54 322.84 l
+342.59 323.30 l
+344.65 323.82 l
+346.71 324.38 l
+348.77 324.99 l
+350.83 325.66 l
+352.89 326.40 l
+354.95 327.20 l
+357.01 328.07 l
+359.07 329.02 l
+361.13 330.05 l
+363.18 331.17 l
+365.24 332.38 l
+367.30 333.69 l
+369.36 335.10 l
+371.42 336.62 l
+373.48 338.25 l
+375.54 339.99 l
+377.60 341.84 l
+379.66 343.81 l
+381.72 345.90 l
+383.77 348.10 l
+385.83 350.41 l
+387.89 352.82 l
+389.95 355.33 l
+392.01 357.92 l
+394.07 360.59 l
+396.13 363.31 l
+398.19 366.06 l
+400.25 368.83 l
+402.31 371.60 l
+404.36 374.33 l
+406.42 377.00 l
+408.48 379.57 l
+410.54 382.03 l
+412.60 384.34 l
+414.66 386.47 l
+416.72 388.39 l
+418.78 390.08 l
+420.84 391.51 l
+422.90 392.66 l
+424.96 393.52 l
+427.01 394.07 l
+429.07 394.30 l
+431.13 394.21 l
+433.19 393.81 l
+435.25 393.09 l
+437.31 392.07 l
+439.37 390.76 l
+441.43 389.18 l
+443.49 387.36 l
+445.55 385.32 l
+447.60 383.09 l
+449.66 380.70 l
+451.72 378.17 l
+453.78 375.54 l
+455.84 372.83 l
+457.90 370.08 l
+459.96 367.31 l
+462.02 364.54 l
+464.08 361.81 l
+466.14 359.12 l
+468.19 356.49 l
+470.25 353.94 l
+472.31 351.48 l
+S
+0.000 0.804 0.000 RG
+307.59 318.22 m
+309.65 318.22 l
+311.71 318.22 l
+313.77 318.22 l
+315.83 318.22 l
+317.89 318.22 l
+319.95 318.22 l
+322.00 318.22 l
+324.06 318.22 l
+326.12 318.22 l
+328.18 318.22 l
+330.24 318.22 l
+332.30 318.23 l
+334.36 318.23 l
+336.42 318.23 l
+338.48 318.23 l
+340.54 318.24 l
+342.59 318.24 l
+344.65 318.25 l
+346.71 318.26 l
+348.77 318.26 l
+350.83 318.28 l
+352.89 318.29 l
+354.95 318.30 l
+357.01 318.32 l
+359.07 318.34 l
+361.13 318.37 l
+363.18 318.40 l
+365.24 318.44 l
+367.30 318.49 l
+369.36 318.55 l
+371.42 318.61 l
+373.48 318.70 l
+375.54 318.79 l
+377.60 318.91 l
+379.66 319.04 l
+381.72 319.20 l
+383.77 319.40 l
+385.83 319.62 l
+387.89 319.88 l
+389.95 320.19 l
+392.01 320.55 l
+394.07 320.97 l
+396.13 321.46 l
+398.19 322.02 l
+400.25 322.66 l
+402.31 323.40 l
+404.36 324.23 l
+406.42 325.18 l
+408.48 326.25 l
+410.54 327.45 l
+412.60 328.80 l
+414.66 330.29 l
+416.72 331.93 l
+418.78 333.74 l
+420.84 335.71 l
+422.90 337.85 l
+424.96 340.17 l
+427.01 342.66 l
+429.07 345.31 l
+431.13 348.12 l
+433.19 351.09 l
+435.25 354.20 l
+437.31 357.45 l
+439.37 360.80 l
+441.43 364.26 l
+443.49 367.80 l
+445.55 371.40 l
+447.60 375.04 l
+449.66 378.69 l
+451.72 382.35 l
+453.78 385.98 l
+455.84 389.58 l
+457.90 393.11 l
+459.96 396.56 l
+462.02 399.92 l
+464.08 403.18 l
+466.14 406.32 l
+468.19 409.33 l
+470.25 412.21 l
+472.31 414.95 l
+S
+Q q 49.00 60.96 177.90 142.04 re W n
+Q q 49.00 60.96 177.90 142.04 re W n
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+55.59 191.42 m
+57.65 190.78 l
+59.71 190.09 l
+61.77 189.33 l
+63.83 188.50 l
+65.89 187.59 l
+67.95 186.61 l
+70.00 185.53 l
+72.06 184.36 l
+74.12 183.09 l
+76.18 181.71 l
+78.24 180.22 l
+80.30 178.62 l
+82.36 176.88 l
+84.42 175.02 l
+86.48 173.03 l
+88.54 170.89 l
+90.59 168.62 l
+92.65 166.20 l
+94.71 163.64 l
+96.77 160.93 l
+98.83 158.09 l
+100.89 155.11 l
+102.95 152.00 l
+105.01 148.78 l
+107.07 145.44 l
+109.13 142.01 l
+111.18 138.49 l
+113.24 134.92 l
+115.30 131.29 l
+117.36 127.64 l
+119.42 123.98 l
+121.48 120.33 l
+123.54 116.72 l
+125.60 113.17 l
+127.66 109.69 l
+129.72 106.30 l
+131.77 103.03 l
+133.83 99.88 l
+135.89 96.88 l
+137.95 94.02 l
+140.01 91.32 l
+142.07 88.80 l
+144.13 86.44 l
+146.19 84.25 l
+148.25 82.23 l
+150.31 80.38 l
+152.36 78.69 l
+154.42 77.17 l
+156.48 75.79 l
+158.54 74.55 l
+160.60 73.45 l
+162.66 72.47 l
+164.72 71.60 l
+166.78 70.84 l
+168.84 70.18 l
+170.90 69.59 l
+172.96 69.09 l
+175.01 68.66 l
+177.07 68.28 l
+179.13 67.96 l
+181.19 67.68 l
+183.25 67.45 l
+185.31 67.25 l
+187.37 67.08 l
+189.43 66.94 l
+191.49 66.82 l
+193.55 66.72 l
+195.60 66.63 l
+197.66 66.56 l
+199.72 66.50 l
+201.78 66.45 l
+203.84 66.41 l
+205.90 66.38 l
+207.96 66.35 l
+210.02 66.33 l
+212.08 66.31 l
+214.14 66.29 l
+216.19 66.28 l
+218.25 66.27 l
+220.31 66.26 l
+S
+Q q
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+55.59 60.96 m 220.31 60.96 l S
+55.59 60.96 m 55.59 54.98 l S
+96.77 60.96 m 96.77 54.98 l S
+137.95 60.96 m 137.95 54.98 l S
+179.13 60.96 m 179.13 54.98 l S
+220.31 60.96 m 220.31 54.98 l S
+BT
+0.000 0.000 0.000 rg
+/F2 1 Tf 10.00 0.00 0.00 10.00 49.89 39.44 Tm (-4) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 91.07 39.44 Tm (-2) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 135.17 39.44 Tm (0) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 176.35 39.44 Tm (2) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 217.53 39.44 Tm (4) Tj
+ET
+49.00 66.22 m 49.00 197.74 l S
+49.00 66.22 m 43.03 66.22 l S
+49.00 92.52 m 43.03 92.52 l S
+49.00 118.82 m 43.03 118.82 l S
+49.00 145.13 m 43.03 145.13 l S
+49.00 171.43 m 43.03 171.43 l S
+49.00 197.74 m 43.03 197.74 l S
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 59.27 Tm (0.0) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 85.57 Tm (0.2) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 111.87 Tm (0.4) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 138.18 Tm (0.6) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 164.48 Tm (0.8) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 190.79 Tm (1.0) Tj
+ET
+49.00 60.96 m
+226.90 60.96 l
+226.90 203.00 l
+49.00 203.00 l
+49.00 60.96 l
+S
+Q q 0.00 0.00 252.00 252.00 re W n
+BT
+0.000 0.000 0.000 rg
+/F3 1 Tf 12.00 0.00 0.00 12.00 83.06 223.19 Tm [(ICC plot f) 20 (or item  I1)] TJ
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 99.05 15.54 Tm (Latent Dimension) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 10.76 89.31 Tm [(Probability to Solv) 25 (e)] TJ
+ET
+Q q 49.00 60.96 177.90 142.04 re W n
+1.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+55.59 72.49 m
+57.65 73.12 l
+59.71 73.80 l
+61.77 74.55 l
+63.83 75.36 l
+65.89 76.25 l
+67.95 77.21 l
+70.00 78.26 l
+72.06 79.40 l
+74.12 80.63 l
+76.18 81.95 l
+78.24 83.38 l
+80.30 84.92 l
+82.36 86.57 l
+84.42 88.33 l
+86.48 90.21 l
+88.54 92.20 l
+90.59 94.31 l
+92.65 96.53 l
+94.71 98.86 l
+96.77 101.30 l
+98.83 103.82 l
+100.89 106.43 l
+102.95 109.11 l
+105.01 111.83 l
+107.07 114.59 l
+109.13 117.37 l
+111.18 120.13 l
+113.24 122.85 l
+115.30 125.50 l
+117.36 128.06 l
+119.42 130.49 l
+121.48 132.76 l
+123.54 134.86 l
+125.60 136.74 l
+127.66 138.38 l
+129.72 139.76 l
+131.77 140.85 l
+133.83 141.65 l
+135.89 142.14 l
+137.95 142.31 l
+140.01 142.16 l
+142.07 141.69 l
+144.13 140.92 l
+146.19 139.84 l
+148.25 138.48 l
+150.31 136.85 l
+152.36 134.99 l
+154.42 132.91 l
+156.48 130.64 l
+158.54 128.22 l
+160.60 125.67 l
+162.66 123.02 l
+164.72 120.31 l
+166.78 117.55 l
+168.84 114.78 l
+170.90 112.02 l
+172.96 109.29 l
+175.01 106.61 l
+177.07 103.99 l
+179.13 101.46 l
+181.19 99.02 l
+183.25 96.69 l
+185.31 94.46 l
+187.37 92.34 l
+189.43 90.34 l
+191.49 88.45 l
+193.55 86.68 l
+195.60 85.03 l
+197.66 83.48 l
+199.72 82.04 l
+201.78 80.71 l
+203.84 79.47 l
+205.90 78.33 l
+207.96 77.28 l
+210.02 76.31 l
+212.08 75.42 l
+214.14 74.60 l
+216.19 73.85 l
+218.25 73.16 l
+220.31 72.53 l
+S
+0.000 0.804 0.000 RG
+55.59 66.26 m
+57.65 66.27 l
+59.71 66.28 l
+61.77 66.29 l
+63.83 66.31 l
+65.89 66.33 l
+67.95 66.35 l
+70.00 66.38 l
+72.06 66.41 l
+74.12 66.45 l
+76.18 66.50 l
+78.24 66.56 l
+80.30 66.63 l
+82.36 66.71 l
+84.42 66.81 l
+86.48 66.93 l
+88.54 67.07 l
+90.59 67.24 l
+92.65 67.44 l
+94.71 67.67 l
+96.77 67.94 l
+98.83 68.26 l
+100.89 68.63 l
+102.95 69.06 l
+105.01 69.56 l
+107.07 70.13 l
+109.13 70.79 l
+111.18 71.55 l
+113.24 72.41 l
+115.30 73.38 l
+117.36 74.47 l
+119.42 75.70 l
+121.48 77.07 l
+123.54 78.59 l
+125.60 80.26 l
+127.66 82.10 l
+129.72 84.11 l
+131.77 86.29 l
+133.83 88.63 l
+135.89 91.15 l
+137.95 93.84 l
+140.01 96.68 l
+142.07 99.68 l
+144.13 102.82 l
+146.19 106.08 l
+148.25 109.46 l
+150.31 112.93 l
+152.36 116.49 l
+154.42 120.09 l
+156.48 123.74 l
+158.54 127.40 l
+160.60 131.05 l
+162.66 134.68 l
+164.72 138.26 l
+166.78 141.78 l
+168.84 145.22 l
+170.90 148.56 l
+172.96 151.79 l
+175.01 154.91 l
+177.07 157.89 l
+179.13 160.75 l
+181.19 163.46 l
+183.25 166.03 l
+185.31 168.46 l
+187.37 170.75 l
+189.43 172.89 l
+191.49 174.89 l
+193.55 176.77 l
+195.60 178.51 l
+197.66 180.12 l
+199.72 181.62 l
+201.78 183.00 l
+203.84 184.28 l
+205.90 185.46 l
+207.96 186.54 l
+210.02 187.53 l
+212.08 188.44 l
+214.14 189.28 l
+216.19 190.04 l
+218.25 190.74 l
+220.31 191.38 l
+S
+Q q 301.00 60.96 177.90 142.04 re W n
+Q q 301.00 60.96 177.90 142.04 re W n
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+307.59 193.86 m
+309.65 193.46 l
+311.71 193.02 l
+313.77 192.54 l
+315.83 192.02 l
+317.89 191.44 l
+319.95 190.81 l
+322.00 190.12 l
+324.06 189.36 l
+326.12 188.54 l
+328.18 187.63 l
+330.24 186.65 l
+332.30 185.58 l
+334.36 184.41 l
+336.42 183.15 l
+338.48 181.77 l
+340.54 180.29 l
+342.59 178.69 l
+344.65 176.96 l
+346.71 175.10 l
+348.77 173.11 l
+350.83 170.98 l
+352.89 168.71 l
+354.95 166.30 l
+357.01 163.74 l
+359.07 161.05 l
+361.13 158.21 l
+363.18 155.23 l
+365.24 152.13 l
+367.30 148.91 l
+369.36 145.58 l
+371.42 142.15 l
+373.48 138.64 l
+375.54 135.06 l
+377.60 131.44 l
+379.66 127.79 l
+381.72 124.13 l
+383.77 120.48 l
+385.83 116.87 l
+387.89 113.31 l
+389.95 109.83 l
+392.01 106.44 l
+394.07 103.16 l
+396.13 100.01 l
+398.19 97.00 l
+400.25 94.13 l
+402.31 91.43 l
+404.36 88.90 l
+406.42 86.53 l
+408.48 84.33 l
+410.54 82.31 l
+412.60 80.45 l
+414.66 78.76 l
+416.72 77.23 l
+418.78 75.84 l
+420.84 74.60 l
+422.90 73.49 l
+424.96 72.51 l
+427.01 71.64 l
+429.07 70.87 l
+431.13 70.20 l
+433.19 69.62 l
+435.25 69.11 l
+437.31 68.67 l
+439.37 68.29 l
+441.43 67.97 l
+443.49 67.69 l
+445.55 67.46 l
+447.60 67.26 l
+449.66 67.09 l
+451.72 66.94 l
+453.78 66.82 l
+455.84 66.72 l
+457.90 66.64 l
+459.96 66.57 l
+462.02 66.51 l
+464.08 66.46 l
+466.14 66.42 l
+468.19 66.38 l
+470.25 66.35 l
+472.31 66.33 l
+S
+Q q
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+307.59 60.96 m 472.31 60.96 l S
+307.59 60.96 m 307.59 54.98 l S
+348.77 60.96 m 348.77 54.98 l S
+389.95 60.96 m 389.95 54.98 l S
+431.13 60.96 m 431.13 54.98 l S
+472.31 60.96 m 472.31 54.98 l S
+BT
+0.000 0.000 0.000 rg
+/F2 1 Tf 10.00 0.00 0.00 10.00 301.89 39.44 Tm (-4) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 343.07 39.44 Tm (-2) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 387.17 39.44 Tm (0) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 428.35 39.44 Tm (2) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 469.53 39.44 Tm (4) Tj
+ET
+301.00 66.22 m 301.00 197.74 l S
+301.00 66.22 m 295.03 66.22 l S
+301.00 92.52 m 295.03 92.52 l S
+301.00 118.82 m 295.03 118.82 l S
+301.00 145.13 m 295.03 145.13 l S
+301.00 171.43 m 295.03 171.43 l S
+301.00 197.74 m 295.03 197.74 l S
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 59.27 Tm (0.0) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 85.57 Tm (0.2) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 111.87 Tm (0.4) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 138.18 Tm (0.6) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 164.48 Tm (0.8) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 190.79 Tm (1.0) Tj
+ET
+301.00 60.96 m
+478.90 60.96 l
+478.90 203.00 l
+301.00 203.00 l
+301.00 60.96 l
+S
+Q q 252.00 0.00 252.00 252.00 re W n
+BT
+0.000 0.000 0.000 rg
+/F3 1 Tf 12.00 0.00 0.00 12.00 335.06 223.19 Tm [(ICC plot f) 20 (or item  I1)] TJ
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 351.05 15.54 Tm (Latent Dimension) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 262.76 89.31 Tm [(Probability to Solv) 25 (e)] TJ
+ET
+Q q 301.00 60.96 177.90 142.04 re W n
+1.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+307.59 70.08 m
+309.65 70.47 l
+311.71 70.91 l
+313.77 71.38 l
+315.83 71.90 l
+317.89 72.47 l
+319.95 73.09 l
+322.00 73.77 l
+324.06 74.51 l
+326.12 75.33 l
+328.18 76.21 l
+330.24 77.17 l
+332.30 78.21 l
+334.36 79.35 l
+336.42 80.57 l
+338.48 81.90 l
+340.54 83.32 l
+342.59 84.86 l
+344.65 86.50 l
+346.71 88.26 l
+348.77 90.13 l
+350.83 92.12 l
+352.89 94.22 l
+354.95 96.44 l
+357.01 98.77 l
+359.07 101.19 l
+361.13 103.72 l
+363.18 106.32 l
+365.24 108.99 l
+367.30 111.72 l
+369.36 114.48 l
+371.42 117.25 l
+373.48 120.01 l
+375.54 122.73 l
+377.60 125.39 l
+379.66 127.95 l
+381.72 130.39 l
+383.77 132.67 l
+385.83 134.77 l
+387.89 136.66 l
+389.95 138.31 l
+392.01 139.70 l
+394.07 140.81 l
+396.13 141.62 l
+398.19 142.13 l
+400.25 142.31 l
+402.31 142.17 l
+404.36 141.72 l
+406.42 140.95 l
+408.48 139.89 l
+410.54 138.54 l
+412.60 136.92 l
+414.66 135.07 l
+416.72 133.00 l
+418.78 130.74 l
+420.84 128.32 l
+422.90 125.78 l
+424.96 123.13 l
+427.01 120.42 l
+429.07 117.66 l
+431.13 114.89 l
+433.19 112.13 l
+435.25 109.40 l
+437.31 106.71 l
+439.37 104.10 l
+441.43 101.56 l
+443.49 99.12 l
+445.55 96.78 l
+447.60 94.55 l
+449.66 92.43 l
+451.72 90.42 l
+453.78 88.53 l
+455.84 86.75 l
+457.90 85.09 l
+459.96 83.54 l
+462.02 82.10 l
+464.08 80.76 l
+466.14 79.52 l
+468.19 78.38 l
+470.25 77.32 l
+472.31 76.35 l
+S
+0.000 0.804 0.000 RG
+307.59 66.23 m
+309.65 66.23 l
+311.71 66.24 l
+313.77 66.24 l
+315.83 66.25 l
+317.89 66.26 l
+319.95 66.27 l
+322.00 66.28 l
+324.06 66.29 l
+326.12 66.31 l
+328.18 66.33 l
+330.24 66.35 l
+332.30 66.38 l
+334.36 66.41 l
+336.42 66.45 l
+338.48 66.50 l
+340.54 66.56 l
+342.59 66.63 l
+344.65 66.71 l
+346.71 66.81 l
+348.77 66.93 l
+350.83 67.07 l
+352.89 67.23 l
+354.95 67.43 l
+357.01 67.66 l
+359.07 67.93 l
+361.13 68.24 l
+363.18 68.61 l
+365.24 69.04 l
+367.30 69.54 l
+369.36 70.11 l
+371.42 70.77 l
+373.48 71.52 l
+375.54 72.37 l
+377.60 73.34 l
+379.66 74.43 l
+381.72 75.65 l
+383.77 77.01 l
+385.83 78.52 l
+387.89 80.19 l
+389.95 82.02 l
+392.01 84.02 l
+394.07 86.19 l
+396.13 88.53 l
+398.19 91.05 l
+400.25 93.72 l
+402.31 96.56 l
+404.36 99.55 l
+406.42 102.68 l
+408.48 105.95 l
+410.54 109.32 l
+412.60 112.79 l
+414.66 116.34 l
+416.72 119.94 l
+418.78 123.59 l
+420.84 127.25 l
+422.90 130.90 l
+424.96 134.53 l
+427.01 138.11 l
+429.07 141.63 l
+431.13 145.08 l
+433.19 148.42 l
+435.25 151.66 l
+437.31 154.78 l
+439.37 157.77 l
+441.43 160.63 l
+443.49 163.35 l
+445.55 165.93 l
+447.60 168.36 l
+449.66 170.65 l
+451.72 172.80 l
+453.78 174.81 l
+455.84 176.69 l
+457.90 178.44 l
+459.96 180.06 l
+462.02 181.56 l
+464.08 182.95 l
+466.14 184.23 l
+468.19 185.41 l
+470.25 186.50 l
+472.31 187.49 l
+S
+Q
+endstream
+endobj
+10 0 obj
+22433
+endobj
+11 0 obj
+<<
+/Type /Page
+/Parent 3 0 R
+/Contents 12 0 R
+/Resources 4 0 R
+>>
+endobj
+12 0 obj
+<<
+/Length 13 0 R
+>>
+stream
+q
+Q q 49.00 312.96 177.90 142.04 re W n
+Q q 49.00 312.96 177.90 142.04 re W n
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+55.59 448.42 m
+57.65 448.28 l
+59.71 448.13 l
+61.77 447.97 l
+63.83 447.78 l
+65.89 447.58 l
+67.95 447.36 l
+70.00 447.11 l
+72.06 446.84 l
+74.12 446.54 l
+76.18 446.22 l
+78.24 445.85 l
+80.30 445.46 l
+82.36 445.02 l
+84.42 444.54 l
+86.48 444.02 l
+88.54 443.44 l
+90.59 442.81 l
+92.65 442.12 l
+94.71 441.36 l
+96.77 440.53 l
+98.83 439.63 l
+100.89 438.65 l
+102.95 437.57 l
+105.01 436.41 l
+107.07 435.14 l
+109.13 433.77 l
+111.18 432.28 l
+113.24 430.68 l
+115.30 428.95 l
+117.36 427.10 l
+119.42 425.10 l
+121.48 422.98 l
+123.54 420.71 l
+125.60 418.29 l
+127.66 415.74 l
+129.72 413.04 l
+131.77 410.20 l
+133.83 407.23 l
+135.89 404.12 l
+137.95 400.90 l
+140.01 397.57 l
+142.07 394.14 l
+144.13 390.63 l
+146.19 387.05 l
+148.25 383.43 l
+150.31 379.78 l
+152.36 376.12 l
+154.42 372.47 l
+156.48 368.86 l
+158.54 365.30 l
+160.60 361.82 l
+162.66 358.43 l
+164.72 355.15 l
+166.78 352.00 l
+168.84 348.99 l
+170.90 346.13 l
+172.96 343.42 l
+175.01 340.89 l
+177.07 338.52 l
+179.13 336.33 l
+181.19 334.30 l
+183.25 332.45 l
+185.31 330.76 l
+187.37 329.22 l
+189.43 327.84 l
+191.49 326.60 l
+193.55 325.49 l
+195.60 324.50 l
+197.66 323.63 l
+199.72 322.87 l
+201.78 322.20 l
+203.84 321.62 l
+205.90 321.11 l
+207.96 320.67 l
+210.02 320.29 l
+212.08 319.97 l
+214.14 319.69 l
+216.19 319.46 l
+218.25 319.26 l
+220.31 319.09 l
+S
+Q q
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+55.59 312.96 m 220.31 312.96 l S
+55.59 312.96 m 55.59 306.98 l S
+96.77 312.96 m 96.77 306.98 l S
+137.95 312.96 m 137.95 306.98 l S
+179.13 312.96 m 179.13 306.98 l S
+220.31 312.96 m 220.31 306.98 l S
+BT
+0.000 0.000 0.000 rg
+/F2 1 Tf 10.00 0.00 0.00 10.00 49.89 291.44 Tm (-4) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 91.07 291.44 Tm (-2) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 135.17 291.44 Tm (0) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 176.35 291.44 Tm (2) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 217.53 291.44 Tm (4) Tj
+ET
+49.00 318.22 m 49.00 449.74 l S
+49.00 318.22 m 43.03 318.22 l S
+49.00 344.52 m 43.03 344.52 l S
+49.00 370.82 m 43.03 370.82 l S
+49.00 397.13 m 43.03 397.13 l S
+49.00 423.43 m 43.03 423.43 l S
+49.00 449.74 m 43.03 449.74 l S
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 311.27 Tm (0.0) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 337.57 Tm (0.2) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 363.87 Tm (0.4) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 390.18 Tm (0.6) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 416.48 Tm (0.8) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 442.79 Tm (1.0) Tj
+ET
+49.00 312.96 m
+226.90 312.96 l
+226.90 455.00 l
+49.00 455.00 l
+49.00 312.96 l
+S
+Q q 0.00 252.00 252.00 252.00 re W n
+BT
+0.000 0.000 0.000 rg
+/F3 1 Tf 12.00 0.00 0.00 12.00 83.06 475.19 Tm [(ICC plot f) 20 (or item  I1)] TJ
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 99.05 267.54 Tm (Latent Dimension) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 10.76 341.31 Tm [(Probability to Solv) 25 (e)] TJ
+ET
+Q q 49.00 312.96 177.90 142.04 re W n
+1.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+55.59 319.53 m
+57.65 319.67 l
+59.71 319.82 l
+61.77 319.98 l
+63.83 320.16 l
+65.89 320.37 l
+67.95 320.59 l
+70.00 320.83 l
+72.06 321.10 l
+74.12 321.40 l
+76.18 321.72 l
+78.24 322.08 l
+80.30 322.48 l
+82.36 322.91 l
+84.42 323.38 l
+86.48 323.90 l
+88.54 324.47 l
+90.59 325.09 l
+92.65 325.77 l
+94.71 326.52 l
+96.77 327.33 l
+98.83 328.21 l
+100.89 329.17 l
+102.95 330.22 l
+105.01 331.35 l
+107.07 332.58 l
+109.13 333.90 l
+111.18 335.33 l
+113.24 336.86 l
+115.30 338.51 l
+117.36 340.26 l
+119.42 342.14 l
+121.48 344.13 l
+123.54 346.23 l
+125.60 348.45 l
+127.66 350.77 l
+129.72 353.20 l
+131.77 355.72 l
+133.83 358.33 l
+135.89 361.00 l
+137.95 363.73 l
+140.01 366.49 l
+142.07 369.26 l
+144.13 372.02 l
+146.19 374.74 l
+148.25 377.40 l
+150.31 379.96 l
+152.36 382.40 l
+154.42 384.68 l
+156.48 386.78 l
+158.54 388.67 l
+160.60 390.32 l
+162.66 391.71 l
+164.72 392.82 l
+166.78 393.63 l
+168.84 394.13 l
+170.90 394.31 l
+172.96 394.17 l
+175.01 393.72 l
+177.07 392.95 l
+179.13 391.88 l
+181.19 390.53 l
+183.25 388.92 l
+185.31 387.06 l
+187.37 384.99 l
+189.43 382.73 l
+191.49 380.32 l
+193.55 377.77 l
+195.60 375.13 l
+197.66 372.41 l
+199.72 369.65 l
+201.78 366.88 l
+203.84 364.12 l
+205.90 361.39 l
+207.96 358.71 l
+210.02 356.09 l
+212.08 353.56 l
+214.14 351.11 l
+216.19 348.77 l
+218.25 346.54 l
+220.31 344.42 l
+S
+0.000 0.804 0.000 RG
+55.59 318.22 m
+57.65 318.22 l
+59.71 318.22 l
+61.77 318.22 l
+63.83 318.22 l
+65.89 318.22 l
+67.95 318.22 l
+70.00 318.22 l
+72.06 318.22 l
+74.12 318.23 l
+76.18 318.23 l
+78.24 318.23 l
+80.30 318.23 l
+82.36 318.24 l
+84.42 318.24 l
+86.48 318.25 l
+88.54 318.26 l
+90.59 318.27 l
+92.65 318.28 l
+94.71 318.29 l
+96.77 318.31 l
+98.83 318.33 l
+100.89 318.35 l
+102.95 318.38 l
+105.01 318.41 l
+107.07 318.45 l
+109.13 318.50 l
+111.18 318.56 l
+113.24 318.63 l
+115.30 318.71 l
+117.36 318.81 l
+119.42 318.93 l
+121.48 319.07 l
+123.54 319.23 l
+125.60 319.43 l
+127.66 319.66 l
+129.72 319.93 l
+131.77 320.24 l
+133.83 320.61 l
+135.89 321.04 l
+137.95 321.54 l
+140.01 322.11 l
+142.07 322.77 l
+144.13 323.52 l
+146.19 324.37 l
+148.25 325.34 l
+150.31 326.43 l
+152.36 327.65 l
+154.42 329.02 l
+156.48 330.53 l
+158.54 332.20 l
+160.60 334.03 l
+162.66 336.03 l
+164.72 338.20 l
+166.78 340.54 l
+168.84 343.05 l
+170.90 345.73 l
+172.96 348.57 l
+175.01 351.56 l
+177.07 354.69 l
+179.13 357.96 l
+181.19 361.33 l
+183.25 364.80 l
+185.31 368.35 l
+187.37 371.95 l
+189.43 375.60 l
+191.49 379.26 l
+193.55 382.91 l
+195.60 386.54 l
+197.66 390.12 l
+199.72 393.64 l
+201.78 397.09 l
+203.84 400.43 l
+205.90 403.67 l
+207.96 406.79 l
+210.02 409.78 l
+212.08 412.64 l
+214.14 415.36 l
+216.19 417.94 l
+218.25 420.37 l
+220.31 422.66 l
+S
+Q q 301.00 312.96 177.90 142.04 re W n
+Q q 301.00 312.96 177.90 142.04 re W n
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+307.59 448.77 m
+309.65 448.67 l
+311.71 448.56 l
+313.77 448.44 l
+315.83 448.31 l
+317.89 448.16 l
+319.95 447.99 l
+322.00 447.81 l
+324.06 447.61 l
+326.12 447.39 l
+328.18 447.15 l
+330.24 446.88 l
+332.30 446.59 l
+334.36 446.27 l
+336.42 445.91 l
+338.48 445.52 l
+340.54 445.09 l
+342.59 444.62 l
+344.65 444.10 l
+346.71 443.53 l
+348.77 442.91 l
+350.83 442.23 l
+352.89 441.48 l
+354.95 440.67 l
+357.01 439.78 l
+359.07 438.80 l
+361.13 437.74 l
+363.18 436.59 l
+365.24 435.34 l
+367.30 433.99 l
+369.36 432.52 l
+371.42 430.94 l
+373.48 429.23 l
+375.54 427.39 l
+377.60 425.42 l
+379.66 423.31 l
+381.72 421.06 l
+383.77 418.67 l
+385.83 416.14 l
+387.89 413.46 l
+389.95 410.64 l
+392.01 407.69 l
+394.07 404.61 l
+396.13 401.40 l
+398.19 398.09 l
+400.25 394.67 l
+402.31 391.17 l
+404.36 387.61 l
+406.42 383.99 l
+408.48 380.34 l
+410.54 376.68 l
+412.60 373.03 l
+414.66 369.41 l
+416.72 365.84 l
+418.78 362.35 l
+420.84 358.94 l
+422.90 355.65 l
+424.96 352.48 l
+427.01 349.44 l
+429.07 346.56 l
+431.13 343.83 l
+433.19 341.27 l
+435.25 338.88 l
+437.31 336.65 l
+439.37 334.60 l
+441.43 332.72 l
+443.49 331.01 l
+445.55 329.45 l
+447.60 328.04 l
+449.66 326.78 l
+451.72 325.65 l
+453.78 324.65 l
+455.84 323.76 l
+457.90 322.98 l
+459.96 322.30 l
+462.02 321.70 l
+464.08 321.18 l
+466.14 320.73 l
+468.19 320.35 l
+470.25 320.02 l
+472.31 319.73 l
+S
+Q q
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+307.59 312.96 m 472.31 312.96 l S
+307.59 312.96 m 307.59 306.98 l S
+348.77 312.96 m 348.77 306.98 l S
+389.95 312.96 m 389.95 306.98 l S
+431.13 312.96 m 431.13 306.98 l S
+472.31 312.96 m 472.31 306.98 l S
+BT
+0.000 0.000 0.000 rg
+/F2 1 Tf 10.00 0.00 0.00 10.00 301.89 291.44 Tm (-4) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 343.07 291.44 Tm (-2) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 387.17 291.44 Tm (0) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 428.35 291.44 Tm (2) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 469.53 291.44 Tm (4) Tj
+ET
+301.00 318.22 m 301.00 449.74 l S
+301.00 318.22 m 295.03 318.22 l S
+301.00 344.52 m 295.03 344.52 l S
+301.00 370.82 m 295.03 370.82 l S
+301.00 397.13 m 295.03 397.13 l S
+301.00 423.43 m 295.03 423.43 l S
+301.00 449.74 m 295.03 449.74 l S
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 311.27 Tm (0.0) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 337.57 Tm (0.2) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 363.87 Tm (0.4) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 390.18 Tm (0.6) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 416.48 Tm (0.8) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 442.79 Tm (1.0) Tj
+ET
+301.00 312.96 m
+478.90 312.96 l
+478.90 455.00 l
+301.00 455.00 l
+301.00 312.96 l
+S
+Q q 252.00 252.00 252.00 252.00 re W n
+BT
+0.000 0.000 0.000 rg
+/F3 1 Tf 12.00 0.00 0.00 12.00 335.06 475.19 Tm [(ICC plot f) 20 (or item  I1)] TJ
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 351.05 267.54 Tm (Latent Dimension) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 262.76 341.31 Tm [(Probability to Solv) 25 (e)] TJ
+ET
+Q q 301.00 312.96 177.90 142.04 re W n
+1.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+307.59 319.18 m
+309.65 319.28 l
+311.71 319.39 l
+313.77 319.51 l
+315.83 319.64 l
+317.89 319.79 l
+319.95 319.96 l
+322.00 320.14 l
+324.06 320.33 l
+326.12 320.55 l
+328.18 320.79 l
+330.24 321.06 l
+332.30 321.35 l
+334.36 321.67 l
+336.42 322.02 l
+338.48 322.41 l
+340.54 322.84 l
+342.59 323.30 l
+344.65 323.82 l
+346.71 324.38 l
+348.77 324.99 l
+350.83 325.66 l
+352.89 326.40 l
+354.95 327.20 l
+357.01 328.07 l
+359.07 329.02 l
+361.13 330.05 l
+363.18 331.17 l
+365.24 332.38 l
+367.30 333.69 l
+369.36 335.10 l
+371.42 336.62 l
+373.48 338.25 l
+375.54 339.99 l
+377.60 341.84 l
+379.66 343.81 l
+381.72 345.90 l
+383.77 348.10 l
+385.83 350.41 l
+387.89 352.82 l
+389.95 355.33 l
+392.01 357.92 l
+394.07 360.59 l
+396.13 363.31 l
+398.19 366.06 l
+400.25 368.83 l
+402.31 371.60 l
+404.36 374.33 l
+406.42 377.00 l
+408.48 379.57 l
+410.54 382.03 l
+412.60 384.34 l
+414.66 386.47 l
+416.72 388.39 l
+418.78 390.08 l
+420.84 391.51 l
+422.90 392.66 l
+424.96 393.52 l
+427.01 394.07 l
+429.07 394.30 l
+431.13 394.21 l
+433.19 393.81 l
+435.25 393.09 l
+437.31 392.07 l
+439.37 390.76 l
+441.43 389.18 l
+443.49 387.36 l
+445.55 385.32 l
+447.60 383.09 l
+449.66 380.70 l
+451.72 378.17 l
+453.78 375.54 l
+455.84 372.83 l
+457.90 370.08 l
+459.96 367.31 l
+462.02 364.54 l
+464.08 361.81 l
+466.14 359.12 l
+468.19 356.49 l
+470.25 353.94 l
+472.31 351.48 l
+S
+0.000 0.804 0.000 RG
+307.59 318.22 m
+309.65 318.22 l
+311.71 318.22 l
+313.77 318.22 l
+315.83 318.22 l
+317.89 318.22 l
+319.95 318.22 l
+322.00 318.22 l
+324.06 318.22 l
+326.12 318.22 l
+328.18 318.22 l
+330.24 318.22 l
+332.30 318.23 l
+334.36 318.23 l
+336.42 318.23 l
+338.48 318.23 l
+340.54 318.24 l
+342.59 318.24 l
+344.65 318.25 l
+346.71 318.26 l
+348.77 318.26 l
+350.83 318.28 l
+352.89 318.29 l
+354.95 318.30 l
+357.01 318.32 l
+359.07 318.34 l
+361.13 318.37 l
+363.18 318.40 l
+365.24 318.44 l
+367.30 318.49 l
+369.36 318.55 l
+371.42 318.61 l
+373.48 318.70 l
+375.54 318.79 l
+377.60 318.91 l
+379.66 319.04 l
+381.72 319.20 l
+383.77 319.40 l
+385.83 319.62 l
+387.89 319.88 l
+389.95 320.19 l
+392.01 320.55 l
+394.07 320.97 l
+396.13 321.46 l
+398.19 322.02 l
+400.25 322.66 l
+402.31 323.40 l
+404.36 324.23 l
+406.42 325.18 l
+408.48 326.25 l
+410.54 327.45 l
+412.60 328.80 l
+414.66 330.29 l
+416.72 331.93 l
+418.78 333.74 l
+420.84 335.71 l
+422.90 337.85 l
+424.96 340.17 l
+427.01 342.66 l
+429.07 345.31 l
+431.13 348.12 l
+433.19 351.09 l
+435.25 354.20 l
+437.31 357.45 l
+439.37 360.80 l
+441.43 364.26 l
+443.49 367.80 l
+445.55 371.40 l
+447.60 375.04 l
+449.66 378.69 l
+451.72 382.35 l
+453.78 385.98 l
+455.84 389.58 l
+457.90 393.11 l
+459.96 396.56 l
+462.02 399.92 l
+464.08 403.18 l
+466.14 406.32 l
+468.19 409.33 l
+470.25 412.21 l
+472.31 414.95 l
+S
+Q q 49.00 60.96 177.90 142.04 re W n
+Q q 49.00 60.96 177.90 142.04 re W n
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+55.59 191.42 m
+57.65 190.78 l
+59.71 190.09 l
+61.77 189.33 l
+63.83 188.50 l
+65.89 187.59 l
+67.95 186.61 l
+70.00 185.53 l
+72.06 184.36 l
+74.12 183.09 l
+76.18 181.71 l
+78.24 180.22 l
+80.30 178.62 l
+82.36 176.88 l
+84.42 175.02 l
+86.48 173.03 l
+88.54 170.89 l
+90.59 168.62 l
+92.65 166.20 l
+94.71 163.64 l
+96.77 160.93 l
+98.83 158.09 l
+100.89 155.11 l
+102.95 152.00 l
+105.01 148.78 l
+107.07 145.44 l
+109.13 142.01 l
+111.18 138.49 l
+113.24 134.92 l
+115.30 131.29 l
+117.36 127.64 l
+119.42 123.98 l
+121.48 120.33 l
+123.54 116.72 l
+125.60 113.17 l
+127.66 109.69 l
+129.72 106.30 l
+131.77 103.03 l
+133.83 99.88 l
+135.89 96.88 l
+137.95 94.02 l
+140.01 91.32 l
+142.07 88.80 l
+144.13 86.44 l
+146.19 84.25 l
+148.25 82.23 l
+150.31 80.38 l
+152.36 78.69 l
+154.42 77.17 l
+156.48 75.79 l
+158.54 74.55 l
+160.60 73.45 l
+162.66 72.47 l
+164.72 71.60 l
+166.78 70.84 l
+168.84 70.18 l
+170.90 69.59 l
+172.96 69.09 l
+175.01 68.66 l
+177.07 68.28 l
+179.13 67.96 l
+181.19 67.68 l
+183.25 67.45 l
+185.31 67.25 l
+187.37 67.08 l
+189.43 66.94 l
+191.49 66.82 l
+193.55 66.72 l
+195.60 66.63 l
+197.66 66.56 l
+199.72 66.50 l
+201.78 66.45 l
+203.84 66.41 l
+205.90 66.38 l
+207.96 66.35 l
+210.02 66.33 l
+212.08 66.31 l
+214.14 66.29 l
+216.19 66.28 l
+218.25 66.27 l
+220.31 66.26 l
+S
+Q q
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+55.59 60.96 m 220.31 60.96 l S
+55.59 60.96 m 55.59 54.98 l S
+96.77 60.96 m 96.77 54.98 l S
+137.95 60.96 m 137.95 54.98 l S
+179.13 60.96 m 179.13 54.98 l S
+220.31 60.96 m 220.31 54.98 l S
+BT
+0.000 0.000 0.000 rg
+/F2 1 Tf 10.00 0.00 0.00 10.00 49.89 39.44 Tm (-4) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 91.07 39.44 Tm (-2) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 135.17 39.44 Tm (0) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 176.35 39.44 Tm (2) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 217.53 39.44 Tm (4) Tj
+ET
+49.00 66.22 m 49.00 197.74 l S
+49.00 66.22 m 43.03 66.22 l S
+49.00 92.52 m 43.03 92.52 l S
+49.00 118.82 m 43.03 118.82 l S
+49.00 145.13 m 43.03 145.13 l S
+49.00 171.43 m 43.03 171.43 l S
+49.00 197.74 m 43.03 197.74 l S
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 59.27 Tm (0.0) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 85.57 Tm (0.2) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 111.87 Tm (0.4) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 138.18 Tm (0.6) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 164.48 Tm (0.8) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 34.66 190.79 Tm (1.0) Tj
+ET
+49.00 60.96 m
+226.90 60.96 l
+226.90 203.00 l
+49.00 203.00 l
+49.00 60.96 l
+S
+Q q 0.00 0.00 252.00 252.00 re W n
+BT
+0.000 0.000 0.000 rg
+/F3 1 Tf 12.00 0.00 0.00 12.00 83.06 223.19 Tm [(ICC plot f) 20 (or item  I1)] TJ
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 99.05 15.54 Tm (Latent Dimension) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 10.76 89.31 Tm [(Probability to Solv) 25 (e)] TJ
+ET
+Q q 49.00 60.96 177.90 142.04 re W n
+1.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+55.59 72.49 m
+57.65 73.12 l
+59.71 73.80 l
+61.77 74.55 l
+63.83 75.36 l
+65.89 76.25 l
+67.95 77.21 l
+70.00 78.26 l
+72.06 79.40 l
+74.12 80.63 l
+76.18 81.95 l
+78.24 83.38 l
+80.30 84.92 l
+82.36 86.57 l
+84.42 88.33 l
+86.48 90.21 l
+88.54 92.20 l
+90.59 94.31 l
+92.65 96.53 l
+94.71 98.86 l
+96.77 101.30 l
+98.83 103.82 l
+100.89 106.43 l
+102.95 109.11 l
+105.01 111.83 l
+107.07 114.59 l
+109.13 117.37 l
+111.18 120.13 l
+113.24 122.85 l
+115.30 125.50 l
+117.36 128.06 l
+119.42 130.49 l
+121.48 132.76 l
+123.54 134.86 l
+125.60 136.74 l
+127.66 138.38 l
+129.72 139.76 l
+131.77 140.85 l
+133.83 141.65 l
+135.89 142.14 l
+137.95 142.31 l
+140.01 142.16 l
+142.07 141.69 l
+144.13 140.92 l
+146.19 139.84 l
+148.25 138.48 l
+150.31 136.85 l
+152.36 134.99 l
+154.42 132.91 l
+156.48 130.64 l
+158.54 128.22 l
+160.60 125.67 l
+162.66 123.02 l
+164.72 120.31 l
+166.78 117.55 l
+168.84 114.78 l
+170.90 112.02 l
+172.96 109.29 l
+175.01 106.61 l
+177.07 103.99 l
+179.13 101.46 l
+181.19 99.02 l
+183.25 96.69 l
+185.31 94.46 l
+187.37 92.34 l
+189.43 90.34 l
+191.49 88.45 l
+193.55 86.68 l
+195.60 85.03 l
+197.66 83.48 l
+199.72 82.04 l
+201.78 80.71 l
+203.84 79.47 l
+205.90 78.33 l
+207.96 77.28 l
+210.02 76.31 l
+212.08 75.42 l
+214.14 74.60 l
+216.19 73.85 l
+218.25 73.16 l
+220.31 72.53 l
+S
+0.000 0.804 0.000 RG
+55.59 66.26 m
+57.65 66.27 l
+59.71 66.28 l
+61.77 66.29 l
+63.83 66.31 l
+65.89 66.33 l
+67.95 66.35 l
+70.00 66.38 l
+72.06 66.41 l
+74.12 66.45 l
+76.18 66.50 l
+78.24 66.56 l
+80.30 66.63 l
+82.36 66.71 l
+84.42 66.81 l
+86.48 66.93 l
+88.54 67.07 l
+90.59 67.24 l
+92.65 67.44 l
+94.71 67.67 l
+96.77 67.94 l
+98.83 68.26 l
+100.89 68.63 l
+102.95 69.06 l
+105.01 69.56 l
+107.07 70.13 l
+109.13 70.79 l
+111.18 71.55 l
+113.24 72.41 l
+115.30 73.38 l
+117.36 74.47 l
+119.42 75.70 l
+121.48 77.07 l
+123.54 78.59 l
+125.60 80.26 l
+127.66 82.10 l
+129.72 84.11 l
+131.77 86.29 l
+133.83 88.63 l
+135.89 91.15 l
+137.95 93.84 l
+140.01 96.68 l
+142.07 99.68 l
+144.13 102.82 l
+146.19 106.08 l
+148.25 109.46 l
+150.31 112.93 l
+152.36 116.49 l
+154.42 120.09 l
+156.48 123.74 l
+158.54 127.40 l
+160.60 131.05 l
+162.66 134.68 l
+164.72 138.26 l
+166.78 141.78 l
+168.84 145.22 l
+170.90 148.56 l
+172.96 151.79 l
+175.01 154.91 l
+177.07 157.89 l
+179.13 160.75 l
+181.19 163.46 l
+183.25 166.03 l
+185.31 168.46 l
+187.37 170.75 l
+189.43 172.89 l
+191.49 174.89 l
+193.55 176.77 l
+195.60 178.51 l
+197.66 180.12 l
+199.72 181.62 l
+201.78 183.00 l
+203.84 184.28 l
+205.90 185.46 l
+207.96 186.54 l
+210.02 187.53 l
+212.08 188.44 l
+214.14 189.28 l
+216.19 190.04 l
+218.25 190.74 l
+220.31 191.38 l
+S
+Q q 301.00 60.96 177.90 142.04 re W n
+Q q 301.00 60.96 177.90 142.04 re W n
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+307.59 193.86 m
+309.65 193.46 l
+311.71 193.02 l
+313.77 192.54 l
+315.83 192.02 l
+317.89 191.44 l
+319.95 190.81 l
+322.00 190.12 l
+324.06 189.36 l
+326.12 188.54 l
+328.18 187.63 l
+330.24 186.65 l
+332.30 185.58 l
+334.36 184.41 l
+336.42 183.15 l
+338.48 181.77 l
+340.54 180.29 l
+342.59 178.69 l
+344.65 176.96 l
+346.71 175.10 l
+348.77 173.11 l
+350.83 170.98 l
+352.89 168.71 l
+354.95 166.30 l
+357.01 163.74 l
+359.07 161.05 l
+361.13 158.21 l
+363.18 155.23 l
+365.24 152.13 l
+367.30 148.91 l
+369.36 145.58 l
+371.42 142.15 l
+373.48 138.64 l
+375.54 135.06 l
+377.60 131.44 l
+379.66 127.79 l
+381.72 124.13 l
+383.77 120.48 l
+385.83 116.87 l
+387.89 113.31 l
+389.95 109.83 l
+392.01 106.44 l
+394.07 103.16 l
+396.13 100.01 l
+398.19 97.00 l
+400.25 94.13 l
+402.31 91.43 l
+404.36 88.90 l
+406.42 86.53 l
+408.48 84.33 l
+410.54 82.31 l
+412.60 80.45 l
+414.66 78.76 l
+416.72 77.23 l
+418.78 75.84 l
+420.84 74.60 l
+422.90 73.49 l
+424.96 72.51 l
+427.01 71.64 l
+429.07 70.87 l
+431.13 70.20 l
+433.19 69.62 l
+435.25 69.11 l
+437.31 68.67 l
+439.37 68.29 l
+441.43 67.97 l
+443.49 67.69 l
+445.55 67.46 l
+447.60 67.26 l
+449.66 67.09 l
+451.72 66.94 l
+453.78 66.82 l
+455.84 66.72 l
+457.90 66.64 l
+459.96 66.57 l
+462.02 66.51 l
+464.08 66.46 l
+466.14 66.42 l
+468.19 66.38 l
+470.25 66.35 l
+472.31 66.33 l
+S
+Q q
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+307.59 60.96 m 472.31 60.96 l S
+307.59 60.96 m 307.59 54.98 l S
+348.77 60.96 m 348.77 54.98 l S
+389.95 60.96 m 389.95 54.98 l S
+431.13 60.96 m 431.13 54.98 l S
+472.31 60.96 m 472.31 54.98 l S
+BT
+0.000 0.000 0.000 rg
+/F2 1 Tf 10.00 0.00 0.00 10.00 301.89 39.44 Tm (-4) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 343.07 39.44 Tm (-2) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 387.17 39.44 Tm (0) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 428.35 39.44 Tm (2) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 469.53 39.44 Tm (4) Tj
+ET
+301.00 66.22 m 301.00 197.74 l S
+301.00 66.22 m 295.03 66.22 l S
+301.00 92.52 m 295.03 92.52 l S
+301.00 118.82 m 295.03 118.82 l S
+301.00 145.13 m 295.03 145.13 l S
+301.00 171.43 m 295.03 171.43 l S
+301.00 197.74 m 295.03 197.74 l S
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 59.27 Tm (0.0) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 85.57 Tm (0.2) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 111.87 Tm (0.4) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 138.18 Tm (0.6) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 164.48 Tm (0.8) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 286.66 190.79 Tm (1.0) Tj
+ET
+301.00 60.96 m
+478.90 60.96 l
+478.90 203.00 l
+301.00 203.00 l
+301.00 60.96 l
+S
+Q q 252.00 0.00 252.00 252.00 re W n
+BT
+0.000 0.000 0.000 rg
+/F3 1 Tf 12.00 0.00 0.00 12.00 335.06 223.19 Tm [(ICC plot f) 20 (or item  I1)] TJ
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 351.05 15.54 Tm (Latent Dimension) Tj
+ET
+BT
+/F2 1 Tf 0.00 10.00 -10.00 0.00 262.76 89.31 Tm [(Probability to Solv) 25 (e)] TJ
+ET
+Q q 301.00 60.96 177.90 142.04 re W n
+1.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+307.59 70.08 m
+309.65 70.47 l
+311.71 70.91 l
+313.77 71.38 l
+315.83 71.90 l
+317.89 72.47 l
+319.95 73.09 l
+322.00 73.77 l
+324.06 74.51 l
+326.12 75.33 l
+328.18 76.21 l
+330.24 77.17 l
+332.30 78.21 l
+334.36 79.35 l
+336.42 80.57 l
+338.48 81.90 l
+340.54 83.32 l
+342.59 84.86 l
+344.65 86.50 l
+346.71 88.26 l
+348.77 90.13 l
+350.83 92.12 l
+352.89 94.22 l
+354.95 96.44 l
+357.01 98.77 l
+359.07 101.19 l
+361.13 103.72 l
+363.18 106.32 l
+365.24 108.99 l
+367.30 111.72 l
+369.36 114.48 l
+371.42 117.25 l
+373.48 120.01 l
+375.54 122.73 l
+377.60 125.39 l
+379.66 127.95 l
+381.72 130.39 l
+383.77 132.67 l
+385.83 134.77 l
+387.89 136.66 l
+389.95 138.31 l
+392.01 139.70 l
+394.07 140.81 l
+396.13 141.62 l
+398.19 142.13 l
+400.25 142.31 l
+402.31 142.17 l
+404.36 141.72 l
+406.42 140.95 l
+408.48 139.89 l
+410.54 138.54 l
+412.60 136.92 l
+414.66 135.07 l
+416.72 133.00 l
+418.78 130.74 l
+420.84 128.32 l
+422.90 125.78 l
+424.96 123.13 l
+427.01 120.42 l
+429.07 117.66 l
+431.13 114.89 l
+433.19 112.13 l
+435.25 109.40 l
+437.31 106.71 l
+439.37 104.10 l
+441.43 101.56 l
+443.49 99.12 l
+445.55 96.78 l
+447.60 94.55 l
+449.66 92.43 l
+451.72 90.42 l
+453.78 88.53 l
+455.84 86.75 l
+457.90 85.09 l
+459.96 83.54 l
+462.02 82.10 l
+464.08 80.76 l
+466.14 79.52 l
+468.19 78.38 l
+470.25 77.32 l
+472.31 76.35 l
+S
+0.000 0.804 0.000 RG
+307.59 66.23 m
+309.65 66.23 l
+311.71 66.24 l
+313.77 66.24 l
+315.83 66.25 l
+317.89 66.26 l
+319.95 66.27 l
+322.00 66.28 l
+324.06 66.29 l
+326.12 66.31 l
+328.18 66.33 l
+330.24 66.35 l
+332.30 66.38 l
+334.36 66.41 l
+336.42 66.45 l
+338.48 66.50 l
+340.54 66.56 l
+342.59 66.63 l
+344.65 66.71 l
+346.71 66.81 l
+348.77 66.93 l
+350.83 67.07 l
+352.89 67.23 l
+354.95 67.43 l
+357.01 67.66 l
+359.07 67.93 l
+361.13 68.24 l
+363.18 68.61 l
+365.24 69.04 l
+367.30 69.54 l
+369.36 70.11 l
+371.42 70.77 l
+373.48 71.52 l
+375.54 72.37 l
+377.60 73.34 l
+379.66 74.43 l
+381.72 75.65 l
+383.77 77.01 l
+385.83 78.52 l
+387.89 80.19 l
+389.95 82.02 l
+392.01 84.02 l
+394.07 86.19 l
+396.13 88.53 l
+398.19 91.05 l
+400.25 93.72 l
+402.31 96.56 l
+404.36 99.55 l
+406.42 102.68 l
+408.48 105.95 l
+410.54 109.32 l
+412.60 112.79 l
+414.66 116.34 l
+416.72 119.94 l
+418.78 123.59 l
+420.84 127.25 l
+422.90 130.90 l
+424.96 134.53 l
+427.01 138.11 l
+429.07 141.63 l
+431.13 145.08 l
+433.19 148.42 l
+435.25 151.66 l
+437.31 154.78 l
+439.37 157.77 l
+441.43 160.63 l
+443.49 163.35 l
+445.55 165.93 l
+447.60 168.36 l
+449.66 170.65 l
+451.72 172.80 l
+453.78 174.81 l
+455.84 176.69 l
+457.90 178.44 l
+459.96 180.06 l
+462.02 181.56 l
+464.08 182.95 l
+466.14 184.23 l
+468.19 185.41 l
+470.25 186.50 l
+472.31 187.49 l
+S
+Q
+endstream
+endobj
+13 0 obj
+22433
+endobj
+14 0 obj
+<<
+/Type /Page
+/Parent 3 0 R
+/Contents 15 0 R
+/Resources 4 0 R
+>>
+endobj
+15 0 obj
+<<
+/Length 16 0 R
+>>
+stream
+q
+Q q 57.60 36.00 432.00 342.00 re W n
+Q q
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+57.60 36.00 m
+489.60 36.00 l
+489.60 378.00 l
+57.60 378.00 l
+57.60 36.00 l
+S
+Q q 0.00 0.00 504.00 378.00 re W n
+Q q
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+57.60 112.00 m 57.60 302.00 l S
+57.60 112.00 m 50.40 112.00 l S
+57.60 175.33 m 50.40 175.33 l S
+57.60 238.67 m 50.40 238.67 l S
+57.60 302.00 m 50.40 302.00 l S
+BT
+0.000 0.000 0.000 rg
+/F2 1 Tf 8.00 0.00 0.00 8.00 36.53 109.13 Tm (I2) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 36.53 172.46 Tm (I1) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 36.53 235.79 Tm (I4) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 36.53 299.13 Tm (I3) Tj
+ET
+57.60 36.00 m 489.60 36.00 l S
+98.33 36.00 m 98.33 28.80 l S
+176.94 36.00 m 176.94 28.80 l S
+255.55 36.00 m 255.55 28.80 l S
+334.16 36.00 m 334.16 28.80 l S
+412.77 36.00 m 412.77 28.80 l S
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 93.77 18.70 Tm (-2) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 172.38 18.70 Tm (-1) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 253.33 18.70 Tm (0) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 331.94 18.70 Tm (1) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 410.55 18.70 Tm (2) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 234.69 7.20 Tm (Latent Dimension) Tj
+ET
+Q q 57.60 36.00 432.00 342.00 re W n
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+73.60 372.30 m 73.60 504.00 l S
+178.26 372.30 m 178.26 504.00 l S
+247.35 372.30 m 247.35 504.00 l S
+300.47 372.30 m 300.47 504.00 l S
+302.46 372.30 m 302.46 504.00 l S
+325.45 372.30 m 325.45 504.00 l S
+426.93 372.30 m 426.93 504.00 l S
+412.42 372.30 m 412.42 504.00 l S
+307.67 112.00 m 405.22 112.00 l S
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 297.50 109.40 Tm (l) Tj 0 Tr
+ET
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 409.46 109.40 Tm (l) Tj 0 Tr
+ET
+BT
+0.000 0.000 0.000 rg
+/F2 1 Tf 8.00 0.00 0.00 8.00 298.24 100.01 Tm (1) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 410.20 100.01 Tm (2) Tj
+ET
+BT
+/F1 1 Tf 2 Tr 7.48 0 0 7.48 353.48 109.40 Tm (l) Tj 0 Tr
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 353.11 119.20 Tm (I2) Tj
+ET
+254.55 175.33 m 419.73 175.33 l S
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 244.39 172.74 Tm (l) Tj 0 Tr
+ET
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 423.97 172.74 Tm (l) Tj 0 Tr
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 245.13 163.35 Tm (1) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 424.70 163.35 Tm (2) Tj
+ET
+BT
+/F1 1 Tf 2 Tr 7.48 0 0 7.48 334.18 172.74 Tm (l) Tj 0 Tr
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 333.81 182.53 Tm (I1) Tj
+ET
+185.46 238.67 m 318.25 238.67 l S
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 175.30 236.07 Tm (l) Tj 0 Tr
+ET
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 322.49 236.07 Tm (l) Tj 0 Tr
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 176.03 226.68 Tm (1) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 323.23 226.68 Tm (2) Tj
+ET
+BT
+/F1 1 Tf 2 Tr 7.48 0 0 7.48 248.89 236.07 Tm (l) Tj 0 Tr
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 248.52 245.87 Tm (I4) Tj
+ET
+80.80 302.00 m 295.26 302.00 l S
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 70.64 299.40 Tm (l) Tj 0 Tr
+ET
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 299.50 299.40 Tm (l) Tj 0 Tr
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 71.38 290.01 Tm (1) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 300.24 290.01 Tm (2) Tj
+ET
+BT
+/F1 1 Tf 2 Tr 7.48 0 0 7.48 185.07 299.40 Tm (l) Tj 0 Tr
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 184.70 309.20 Tm (I3) Tj
+ET
+Q q
+BT
+0.000 0.000 0.000 rg
+/F2 1 Tf 0.00 12.00 -12.00 0.00 502.60 110.33 Tm ( ) Tj
+ET
+BT
+/F2 1 Tf 0.00 12.00 -12.00 0.00 502.60 173.67 Tm ( ) Tj
+ET
+BT
+/F2 1 Tf 0.00 12.00 -12.00 0.00 502.60 237.00 Tm ( ) Tj
+ET
+BT
+/F2 1 Tf 0.00 12.00 -12.00 0.00 502.60 300.33 Tm ( ) Tj
+ET
+Q q 57.60 378.00 432.00 82.80 re W n
+Q q 0.00 378.00 504.00 126.00 re W n
+BT
+0.000 0.000 0.000 rg
+/F3 1 Tf 14.00 0.00 0.00 14.00 215.76 477.37 Tm [(P) 30 (er) 15 (son-Item Map)] TJ
+ET
+BT
+/F2 1 Tf 12.00 0.00 0.00 12.00 267.26 323.28 Tm (ttx) Tj
+ET
+Q q 57.60 378.00 432.00 82.80 re W n
+0.745 0.745 0.745 RG
+3.75 w
+[] 0 d
+2 J
+1 j
+10.00 M
+83.30 381.07 m 83.30 384.90 l S
+173.08 381.07 m 173.08 403.11 l S
+236.61 381.07 m 236.61 414.61 l S
+290.24 381.07 m 290.24 457.73 l S
+341.63 381.07 m 341.63 414.61 l S
+397.79 381.07 m 397.79 429.94 l S
+473.60 381.07 m 473.60 407.90 l S
+Q q
+BT
+0.000 0.000 0.000 rg
+/F2 1 Tf 8.00 0.00 0.00 8.00 25.46 426.61 Tm [(P) 50 (erson)] TJ
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 13.46 416.53 Tm [(P) 40 (ar) 10 (ameter)] TJ
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 10.43 406.45 Tm [(Distr) -15 (ib) 20 (ution)] TJ
+ET
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+57.60 378.00 m
+489.60 378.00 l
+489.60 460.80 l
+57.60 460.80 l
+57.60 378.00 l
+S
+Q q 59.04 73.44 414.72 371.52 re W n
+Q
+endstream
+endobj
+16 0 obj
+4484
+endobj
+17 0 obj
+<<
+/Type /Page
+/Parent 3 0 R
+/Contents 18 0 R
+/Resources 4 0 R
+>>
+endobj
+18 0 obj
+<<
+/Length 19 0 R
+>>
+stream
+q
+Q q 57.60 36.00 432.00 342.00 re W n
+Q q
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+57.60 36.00 m
+489.60 36.00 l
+489.60 378.00 l
+57.60 378.00 l
+57.60 36.00 l
+S
+Q q 0.00 0.00 504.00 378.00 re W n
+Q q
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+57.60 112.00 m 57.60 302.00 l S
+57.60 112.00 m 50.40 112.00 l S
+57.60 175.33 m 50.40 175.33 l S
+57.60 238.67 m 50.40 238.67 l S
+57.60 302.00 m 50.40 302.00 l S
+BT
+0.000 0.000 0.000 rg
+/F2 1 Tf 8.00 0.00 0.00 8.00 36.53 109.13 Tm (I2) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 36.53 172.46 Tm (I1) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 36.53 235.79 Tm (I4) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 36.53 299.13 Tm (I3) Tj
+ET
+57.60 36.00 m 489.60 36.00 l S
+98.33 36.00 m 98.33 28.80 l S
+176.94 36.00 m 176.94 28.80 l S
+255.55 36.00 m 255.55 28.80 l S
+334.16 36.00 m 334.16 28.80 l S
+412.77 36.00 m 412.77 28.80 l S
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 93.77 18.70 Tm (-2) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 172.38 18.70 Tm (-1) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 253.33 18.70 Tm (0) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 331.94 18.70 Tm (1) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 410.55 18.70 Tm (2) Tj
+ET
+BT
+/F2 1 Tf 10.00 0.00 0.00 10.00 234.69 7.20 Tm (Latent Dimension) Tj
+ET
+Q q 57.60 36.00 432.00 342.00 re W n
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+73.60 372.30 m 73.60 504.00 l S
+178.26 372.30 m 178.26 504.00 l S
+247.35 372.30 m 247.35 504.00 l S
+300.47 372.30 m 300.47 504.00 l S
+302.46 372.30 m 302.46 504.00 l S
+325.45 372.30 m 325.45 504.00 l S
+426.93 372.30 m 426.93 504.00 l S
+412.42 372.30 m 412.42 504.00 l S
+307.67 112.00 m 405.22 112.00 l S
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 297.50 109.40 Tm (l) Tj 0 Tr
+ET
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 409.46 109.40 Tm (l) Tj 0 Tr
+ET
+BT
+0.000 0.000 0.000 rg
+/F2 1 Tf 8.00 0.00 0.00 8.00 298.24 100.01 Tm (1) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 410.20 100.01 Tm (2) Tj
+ET
+BT
+/F1 1 Tf 2 Tr 7.48 0 0 7.48 353.48 109.40 Tm (l) Tj 0 Tr
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 353.11 119.20 Tm (I2) Tj
+ET
+254.55 175.33 m 419.73 175.33 l S
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 244.39 172.74 Tm (l) Tj 0 Tr
+ET
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 423.97 172.74 Tm (l) Tj 0 Tr
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 245.13 163.35 Tm (1) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 424.70 163.35 Tm (2) Tj
+ET
+BT
+/F1 1 Tf 2 Tr 7.48 0 0 7.48 334.18 172.74 Tm (l) Tj 0 Tr
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 333.81 182.53 Tm (I1) Tj
+ET
+185.46 238.67 m 318.25 238.67 l S
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 175.30 236.07 Tm (l) Tj 0 Tr
+ET
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 322.49 236.07 Tm (l) Tj 0 Tr
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 176.03 226.68 Tm (1) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 323.23 226.68 Tm (2) Tj
+ET
+BT
+/F1 1 Tf 2 Tr 7.48 0 0 7.48 248.89 236.07 Tm (l) Tj 0 Tr
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 248.52 245.87 Tm (I4) Tj
+ET
+80.80 302.00 m 295.26 302.00 l S
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 70.64 299.40 Tm (l) Tj 0 Tr
+ET
+BT
+/F1 1 Tf 1 Tr 7.48 0 0 7.48 299.50 299.40 Tm (l) Tj 0 Tr
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 71.38 290.01 Tm (1) Tj
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 300.24 290.01 Tm (2) Tj
+ET
+BT
+/F1 1 Tf 2 Tr 7.48 0 0 7.48 185.07 299.40 Tm (l) Tj 0 Tr
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 184.70 309.20 Tm (I3) Tj
+ET
+Q q
+BT
+0.000 0.000 0.000 rg
+/F2 1 Tf 0.00 12.00 -12.00 0.00 502.60 110.33 Tm ( ) Tj
+ET
+BT
+/F2 1 Tf 0.00 12.00 -12.00 0.00 502.60 173.67 Tm ( ) Tj
+ET
+BT
+/F2 1 Tf 0.00 12.00 -12.00 0.00 502.60 237.00 Tm ( ) Tj
+ET
+BT
+/F2 1 Tf 0.00 12.00 -12.00 0.00 502.60 300.33 Tm ( ) Tj
+ET
+Q q 57.60 378.00 432.00 82.80 re W n
+Q q 0.00 378.00 504.00 126.00 re W n
+BT
+0.000 0.000 0.000 rg
+/F3 1 Tf 14.00 0.00 0.00 14.00 215.76 477.37 Tm [(P) 30 (er) 15 (son-Item Map)] TJ
+ET
+BT
+/F2 1 Tf 12.00 0.00 0.00 12.00 267.26 323.28 Tm (ttx) Tj
+ET
+Q q 57.60 378.00 432.00 82.80 re W n
+0.745 0.745 0.745 RG
+3.75 w
+[] 0 d
+2 J
+1 j
+10.00 M
+83.30 381.07 m 83.30 384.90 l S
+173.08 381.07 m 173.08 403.11 l S
+236.61 381.07 m 236.61 414.61 l S
+290.24 381.07 m 290.24 457.73 l S
+341.63 381.07 m 341.63 414.61 l S
+397.79 381.07 m 397.79 429.94 l S
+473.60 381.07 m 473.60 407.90 l S
+Q q
+BT
+0.000 0.000 0.000 rg
+/F2 1 Tf 8.00 0.00 0.00 8.00 25.46 426.61 Tm [(P) 50 (erson)] TJ
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 13.46 416.53 Tm [(P) 40 (ar) 10 (ameter)] TJ
+ET
+BT
+/F2 1 Tf 8.00 0.00 0.00 8.00 10.43 406.45 Tm [(Distr) -15 (ib) 20 (ution)] TJ
+ET
+0.000 0.000 0.000 RG
+0.75 w
+[] 0 d
+1 J
+1 j
+10.00 M
+57.60 378.00 m
+489.60 378.00 l
+489.60 460.80 l
+57.60 460.80 l
+57.60 378.00 l
+S
+Q q 59.04 73.44 414.72 371.52 re W n
+Q
+endstream
+endobj
+19 0 obj
+4484
+endobj
+3 0 obj
+<<
+/Type /Pages
+/Kids [
+5 0 R
+8 0 R
+11 0 R
+14 0 R
+17 0 R
+]
+/Count 5
+/MediaBox [0 0 504 504]
+>>
+endobj
+4 0 obj
+<<
+/ProcSet [/PDF /Text]
+/Font << /F1 21 0 R /F2 22 0 R /F3 23 0 R >>
+/ExtGState << >>
+>>
+endobj
+20 0 obj
+<<
+/Type /Encoding
+/BaseEncoding /WinAnsiEncoding
+/Differences [ 45/minus ]
+>>
+endobj
+21 0 obj
+<<
+/Type /Font
+/Subtype /Type1
+/Name /F1
+/BaseFont /ZapfDingbats
+>>
+endobj
+22 0 obj <<
+/Type /Font
+/Subtype /Type1
+/Name /F2
+/BaseFont /Helvetica
+/Encoding 20 0 R
+>> endobj
+23 0 obj <<
+/Type /Font
+/Subtype /Type1
+/Name /F3
+/BaseFont /Helvetica-Bold
+/Encoding 20 0 R
+>> endobj
+xref
+0 24
+0000000000 65535 f 
+0000000021 00000 n 
+0000000164 00000 n 
+0000074506 00000 n 
+0000074616 00000 n 
+0000000213 00000 n 
+0000000293 00000 n 
+0000020020 00000 n 
+0000020041 00000 n 
+0000020121 00000 n 
+0000042608 00000 n 
+0000042630 00000 n 
+0000042712 00000 n 
+0000065200 00000 n 
+0000065222 00000 n 
+0000065304 00000 n 
+0000069843 00000 n 
+0000069864 00000 n 
+0000069946 00000 n 
+0000074485 00000 n 
+0000074721 00000 n 
+0000074816 00000 n 
+0000074900 00000 n 
+0000074998 00000 n 
+trailer
+<<
+/Size 24
+/Info 1 0 R
+/Root 2 0 R
+>>
+startxref
+75101
+%%EOF
diff --git a/inst/doc/UCML.jpg b/inst/doc/UCML.jpg
new file mode 100755
index 0000000..b9aa6a0
Binary files /dev/null and b/inst/doc/UCML.jpg differ
diff --git a/inst/doc/Z.cls b/inst/doc/Z.cls
new file mode 100755
index 0000000..4a9c8c3
--- /dev/null
+++ b/inst/doc/Z.cls
@@ -0,0 +1,239 @@
+\def\fileversion{1.1}
+\def\filename{Z}
+\def\filedate{2006/10/11}
+%%
+%% Package `Z' to use with LaTeX2e for Z reports
+%% Copyright (C) 2004 Achim Zeileis
+%%
+\NeedsTeXFormat{LaTeX2e}
+\ProvidesClass{Z}[\filedate\space\fileversion\space Z class by Achim Zeileis]
+
+%% options
+\LoadClass[10pt,a4paper,twoside]{article}
+\newif\if at notitle
+\@notitlefalse
+\newif\if at noheadings
+\@noheadingsfalse
+\DeclareOption{notitle}{\@notitletrue}
+\DeclareOption{noheadings}{\@noheadingstrue}
+\ProcessOptions
+
+%% required packages
+\RequirePackage{graphicx,a4wide,color,hyperref,ae,fancyvrb,thumbpdf}
+\RequirePackage[T1]{fontenc}
+\usepackage[authoryear,round,longnamesfirst]{natbib}
+\bibpunct{(}{)}{;}{a}{}{,}
+\bibliographystyle{jss}
+
+%% paragraphs
+\setlength{\parskip}{0.7ex plus0.1ex minus0.1ex}
+\setlength{\parindent}{0em}
+
+%% for all publications
+\newcommand{\Plaintitle}[1]{\def\@Plaintitle{#1}}
+\newcommand{\Shorttitle}[1]{\def\@Shorttitle{#1}}
+\newcommand{\Plainauthor}[1]{\def\@Plainauthor{#1}}
+\newcommand{\Keywords}[1]{\def\@Keywords{#1}}
+\newcommand{\Plainkeywords}[1]{\def\@Plainkeywords{#1}}
+\newcommand{\Abstract}[1]{\def\@Abstract{#1}}
+
+%% defaults
+\author{Firstname Lastname\\Affiliation}
+\title{Title}
+\Abstract{---!!!---an abstract is required---!!!---}
+\Plainauthor{\@author}
+\Plaintitle{\@title}
+\Shorttitle{\@title}
+\Keywords{---!!!---at least one keyword is required---!!!---}
+\Plainkeywords{\@Keywords}
+
+%% Sweave(-like)
+%\DefineVerbatimEnvironment{Sinput}{Verbatim}{fontshape=sl}
+%\DefineVerbatimEnvironment{Soutput}{Verbatim}{}
+%\DefineVerbatimEnvironment{Scode}{Verbatim}{fontshape=sl}
+%\newenvironment{Schunk}{}{}
+%\setkeys{Gin}{width=0.8\textwidth}
+
+%% new \maketitle
+\def\maketitle{
+ \begingroup
+   \def\thefootnote{\fnsymbol{footnote}}
+   \def\@makefnmark{\hbox to 0pt{$^{\@thefnmark}$\hss}}
+   \long\def\@makefntext##1{\parindent 1em\noindent
+			    \hbox to1.8em{\hss $\m at th ^{\@thefnmark}$}##1}
+   \@maketitle \@thanks
+ \endgroup
+ \setcounter{footnote}{0}
+
+ \if at noheadings
+   %% \thispagestyle{empty}
+   %% \markboth{\centerline{\@Shorttitle}}{\centerline{\@Plainauthor}}
+   %% \pagestyle{myheadings}
+ \else
+   \thispagestyle{empty}
+   \markboth{\centerline{\@Shorttitle}}{\centerline{\@Plainauthor}}
+   \pagestyle{myheadings}
+ \fi
+
+ \let\maketitle\relax \let\@maketitle\relax
+ \gdef\@thanks{}\gdef\@author{}\gdef\@title{}\let\thanks\relax
+}
+
+% Author information can be set in various styles:
+% For several authors from the same institution:
+% \author{Author 1 \and ... \and Author n \\
+%     Address line \\ ... \\ Address line}
+% if the names do not fit well on one line use
+%         Author 1 \\ {\bf Author 2} \\ ... \\ {\bf Author n} \\
+% For authors from different institutions:
+% \author{Author 1 \\ Address line \\  ... \\ Address line
+%     \And  ... \And
+%     Author n \\ Address line \\ ... \\ Address line}
+% To start a seperate ``row'' of authors use \AND, as in
+% \author{Author 1 \\ Address line \\  ... \\ Address line
+%     \AND
+%     Author 2 \\ Address line \\ ... \\ Address line \And
+%     Author 3 \\ Address line \\ ... \\ Address line}
+
+\def\@maketitle{\vbox{\hsize\textwidth \linewidth\hsize
+ {\centering
+ {\LARGE\bf \@title\par}
+ \vskip 0.2in plus 1fil minus 0.1in
+ {
+     \def\and{\unskip\enspace{\rm and}\enspace}%
+     \def\And{\end{tabular}\hss \egroup \hskip 1in plus 2fil
+ 	      \hbox to 0pt\bgroup\hss \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\ignorespaces}%
+     \def\AND{\end{tabular}\hss\egroup \hfil\hfil\egroup
+ 	      \vskip 0.1in plus 1fil minus 0.05in
+ 	      \hbox to \linewidth\bgroup\rule{\z@}{10pt} \hfil\hfil
+ 	      \hbox to 0pt\bgroup\hss \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\ignorespaces}
+     \hbox to \linewidth\bgroup\rule{\z@}{10pt} \hfil\hfil
+     \hbox to 0pt\bgroup\hss \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\@author
+     \end{tabular}\hss\egroup
+ \hfil\hfil\egroup}
+ \vskip 0.3in minus 0.1in
+ \hrule
+ \begin{abstract}
+ \@Abstract
+ \end{abstract}}
+ \textit{Keywords}:~\@Keywords.
+ \vskip 0.1in minus 0.05in
+ \hrule
+ \vskip 0.2in minus 0.1in
+}}
+
+%% \def\@maketitle{\vbox{\hsize\textwidth \linewidth\hsize 
+%%  {\centering
+%%  {\LARGE\bf \@title\par}
+%%    \def\And{\end{tabular}\hfil\linebreak[0]\hfil
+%% 	    \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\ignorespaces}%
+%%     \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\@author\end{tabular}%
+%%  \vskip 0.3in minus 0.1in
+%%  \hrule
+%%  \begin{abstract}
+%%  \@Abstract
+%%  \end{abstract}}
+%%  \textit{Keywords}:~\@Keywords.
+%%  \vskip 0.1in minus 0.05in
+%%  \hrule
+%%  \vskip 0.2in minus 0.1in
+%% }}
+
+
+%% sections, subsections, and subsubsections
+\newlength{\preXLskip}
+\newlength{\preLskip}
+\newlength{\preMskip}
+\newlength{\preSskip}
+\newlength{\postMskip}
+\newlength{\postSskip}
+\setlength{\preXLskip}{1.8\baselineskip plus 0.5ex minus 0ex}
+\setlength{\preLskip}{1.5\baselineskip plus 0.3ex minus 0ex}
+\setlength{\preMskip}{1\baselineskip plus 0.2ex minus 0ex}
+\setlength{\preSskip}{.8\baselineskip plus 0.2ex minus 0ex}
+\setlength{\postMskip}{.5\baselineskip plus 0ex minus 0.1ex}
+\setlength{\postSskip}{.3\baselineskip plus 0ex minus 0.1ex}
+
+\newcommand{\jsssec}[2][default]{\vskip \preXLskip%
+  \pdfbookmark[1]{#1}{Section.\thesection.#1}%
+  \refstepcounter{section}%
+  \centerline{\textbf{\Large \thesection. #2}} \nopagebreak
+  \vskip \postMskip \nopagebreak}
+\newcommand{\jsssecnn}[1]{\vskip \preXLskip%
+  \centerline{\textbf{\Large #1}} \nopagebreak
+  \vskip \postMskip \nopagebreak}
+
+\newcommand{\jsssubsec}[2][default]{\vskip \preMskip%
+  \pdfbookmark[2]{#1}{Subsection.\thesubsection.#1}%
+  \refstepcounter{subsection}%
+  \textbf{\large \thesubsection. #2} \nopagebreak
+  \vskip \postSskip \nopagebreak}
+\newcommand{\jsssubsecnn}[1]{\vskip \preMskip%
+  \textbf{\large #1} \nopagebreak
+  \vskip \postSskip \nopagebreak}
+
+\newcommand{\jsssubsubsec}[2][default]{\vskip \preSskip%
+  \pdfbookmark[3]{#1}{Subsubsection.\thesubsubsection.#1}%
+  \refstepcounter{subsubsection}%
+  {\large \textit{#2}} \nopagebreak
+  \vskip \postSskip \nopagebreak}
+\newcommand{\jsssubsubsecnn}[1]{\vskip \preSskip%
+  {\textit{\large #1}} \nopagebreak
+  \vskip \postSskip \nopagebreak}
+
+\newcommand{\jsssimplesec}[2][default]{\vskip \preLskip%
+%%  \pdfbookmark[1]{#1}{Section.\thesection.#1}%
+  \refstepcounter{section}%
+  \textbf{\large #1} \nopagebreak
+  \vskip \postSskip \nopagebreak}
+\newcommand{\jsssimplesecnn}[1]{\vskip \preLskip%
+  \textbf{\large #1} \nopagebreak
+  \vskip \postSskip \nopagebreak}
+
+\renewcommand{\section}{\secdef \jsssec \jsssecnn}
+\renewcommand{\subsection}{\secdef \jsssubsec \jsssubsecnn}
+\renewcommand{\subsubsection}{\secdef \jsssubsubsec \jsssubsubsecnn}
+
+%% colors
+\definecolor{Red}{rgb}{0.7,0,0}
+\definecolor{Blue}{rgb}{0,0,0.8}
+\hypersetup{%
+  hyperindex = {true},
+  colorlinks = {true},
+  linktocpage = {true},
+  plainpages = {false},
+  linkcolor = {Blue},
+  citecolor = {Blue},
+  urlcolor = {Red},
+  pdfstartview = {Fit},
+  pdfpagemode = {UseOutlines},
+  pdfview = {XYZ null null null}
+}
+
+\AtBeginDocument{
+  \hypersetup{%
+    pdfauthor = {\@Plainauthor},
+    pdftitle = {\@Plaintitle},
+    pdfkeywords = {\@Plainkeywords}
+  }
+}
+\if at notitle
+  %% \AtBeginDocument{\maketitle}
+\else
+  \AtBeginDocument{\maketitle}
+\fi
+
+%% commands
+\makeatletter
+\newcommand\code{\bgroup\@makeother\_\@codex}
+\def\@codex#1{{\normalfont\ttfamily\hyphenchar\font=-1 #1}\egroup}
+\makeatother
+%%\let\code=\texttt
+\let\proglang=\textsf
+\newcommand{\pkg}[1]{{\normalfont\fontseries{b}\selectfont #1}}
+\newcommand{\email}[1]{\href{mailto:#1}{\normalfont\texttt{#1}}}
+\newcommand{\doi}[1]{\href{http://dx.doi.org/#1}{\normalfont\texttt{doi:#1}}}
+\newcommand{\E}{\mathsf{E}}
+\newcommand{\VAR}{\mathsf{VAR}}
+\newcommand{\COV}{\mathsf{COV}}
+\newcommand{\Prob}{\mathsf{P}}
diff --git a/inst/doc/eRmvig.R b/inst/doc/eRmvig.R
new file mode 100755
index 0000000..3beec56
--- /dev/null
+++ b/inst/doc/eRmvig.R
@@ -0,0 +1,101 @@
+###################################################
+### chunk number 1: 
+###################################################
+library(eRm)
+data(raschdat1)
+res.rasch <- RM(raschdat1)
+pres.rasch <- person.parameter(res.rasch)
+
+
+###################################################
+### chunk number 2: 
+###################################################
+lrres.rasch <- LRtest(res.rasch, splitcr = "mean", se = TRUE)
+lrres.rasch
+
+
+###################################################
+### chunk number 3: 
+###################################################
+plotGOF(lrres.rasch, beta.subset=c(14,5,18,7,1), tlab="item", conf=list(ia=FALSE,col="blue",lty="dotted"))
+
+
+###################################################
+### chunk number 4: 
+###################################################
+data(lltmdat2)
+W <- matrix(c(1,2,1,3,2,2,2,1,1,1),ncol=2)
+res.lltm <- LLTM(lltmdat2, W)
+summary(res.lltm)
+
+
+###################################################
+### chunk number 5: 
+###################################################
+data(pcmdat2)
+res.rsm <- RSM(pcmdat2)
+thresholds(res.rsm)
+
+
+###################################################
+### chunk number 6: 
+###################################################
+plotICC(res.rsm, mplot=TRUE, legpos=FALSE,ask=FALSE)
+
+
+###################################################
+### chunk number 7: 
+###################################################
+plotICC(res.rsm, mplot=TRUE, legpos=FALSE,ask=FALSE)
+
+
+###################################################
+### chunk number 8: 
+###################################################
+res.pcm <- PCM(pcmdat2)
+plotPImap(res.pcm, sorted = TRUE)
+
+
+###################################################
+### chunk number 9: 
+###################################################
+res.pcm <- PCM(pcmdat2)
+plotPImap(res.pcm, sorted = TRUE)
+
+
+###################################################
+### chunk number 10: 
+###################################################
+pres.pcm <- person.parameter(res.pcm)
+itemfit(pres.pcm)
+
+
+###################################################
+### chunk number 11: 
+###################################################
+lr<- 2*(res.pcm$loglik-res.rsm$loglik)
+df<- res.pcm$npar-res.rsm$npar
+pvalue<-1-pchisq(lr,df)
+cat("LR statistic: ", lr, "  df =",df, "  p =",pvalue, "\n")
+
+
+###################################################
+### chunk number 12: 
+###################################################
+data(lpcmdat)
+grouplpcm <- rep(1:2, each = 10)
+
+
+###################################################
+### chunk number 13: 
+###################################################
+reslpcm <- LPCM(lpcmdat, mpoints = 2, groupvec = grouplpcm, sum0 = FALSE)
+model.matrix(reslpcm)
+
+
+###################################################
+### chunk number 14: 
+###################################################
+reslpcm
+
+
diff --git a/inst/doc/eRmvig.Rnw b/inst/doc/eRmvig.Rnw
new file mode 100755
index 0000000..68afe9f
--- /dev/null
+++ b/inst/doc/eRmvig.Rnw
@@ -0,0 +1,1006 @@
+%\VignetteIndexEntry{eRm Basics}
+
+\documentclass[article]{Z}
+\usepackage{amsmath, thumbpdf}
+\usepackage{Sweave}
+\usepackage{graphicx}
+
+\author{Patrick Mair\\Wirtschaftsuniversit\"at Wien \And
+        Reinhold Hatzinger\\Wirtschaftsuniversit\"at Wien}
+\Plainauthor{Patrick Mair, Reinhold Hatzinger}
+
+\title{Extended Rasch Modeling: The R Package \pkg{eRm}}
+\Plaintitle{Extended Rasch Modeling: The R Package eRm}
+\Shorttitle{The R Package \pkg{eRm}}
+
+\Abstract{
+
+This package vignette is an update of the \pkg{eRm} papers published
+in a special issue on Psychometrics in the Journal of Statistical
+Software and in Psychology Science \citep{Mair+Hatzinger:2007,
+Mair+Hatzinger:2007b}.  Since the publication of these papers various
+extensions and additional features have been incorporated into the
+package.  We start with a methodological introduction to extended
+Rasch models followed by a general program description and application
+topics.  The package allows for the computation of simple Rasch models,
+rating scale models, partial credit models and linear extensions of
+these.  The incorporation of such linear structures allows for modeling
+the effects of covariates and enables the analysis of repeated
+categorical measurements.  The item parameter estimation is
+performed by means of CML; for the person parameters we use ordinary ML.
+The estimation routines work for incomplete data matrices as well.
+Based on these estimators, item-wise and global goodness-of-fit
+statistics are described and various plots are presented.  }
+
+\Keywords{eRm package, Rasch model, LLTM, RSM, LRSM, PCM, LPCM, CML estimation}
+
+%\Volume{20}
+%\Issue{9}
+%\Month{April}
+%\Year{2007}
+%FIXME%
+%% \Submitdate{2004-06-21}
+%% \Acceptdate{2004-12-04}
+
+%\Address{
+%  Patrick Mair\\
+%  Department f\"ur Statistik und Mathematik\\
+%  Wirtschaftsuniversit\"at Wien\\
+%  A-1090 Wien, Austria\\
+%  E-mail: \email{patrick.mair at wu-wien.ac.at}\\
+%  URL: \url{http://statmath.wu-wien.ac.at/~mair/}
+%}
+
+\begin{document}
+
+\section{Introduction}
+
+\citet{Ro:99} claimed in his article that ``even though the Rasch model
+has been existing for such a long time, 95\% of the current tests in
+psychology are still constructed by using methods from classical test
+theory'' (p. 140).  Basically, he quotes the following reasons why the
+Rasch model (RM) is being rarely used:  The Rasch model in its original form
+\citep{Ra:60}, which was limited to dichotomous items, is arguably too
+restrictive for practical testing purposes.  Thus, researchers should
+focus on extended Rasch models.  In addition, Rost argues that there is
+a lack of user-friendly software for the computation of such models.
+Hence, there is a need for a comprehensive, user-friendly software
+routine.  Corresponding recent discussions can be found in
+\citet{Kub:05} and \citet{Bor:06}.
+
+In addition to the RM, the models that can be computed by means of the \pkg {eRm} package are:
+the linear logistic test model \citep{Scheib:72}, the rating scale model
+\citep{And:78}, the linear rating scale model \citep{FiPa:91}, the
+partial credit model \citep{Mast:82}, and the linear partial credit
+model \citep{GlVe:89,FiPo:94}.  These models and their main
+characteristics are presented in Section \ref{sec:erm}.
+
+Concerning parameter estimation, these models have an important feature
+in common:  Separability of item and person parameters.  This implies
+that the item parameters $\mathbf{\beta}$ can be estimated without
+estimating the person parameters achieved by conditioning the likelihood
+on the sufficient person raw score.  This conditional maximum likelihood
+(CML) approach is described in Section \ref{sec:cml}.
+
+Several diagnostic tools and tests to evaluate model fit are presented in Section \ref{Gof}.
+
+In Section \ref{sec:pack}, the corresponding implementation in
+\proglang{R} \citep{R:06} is described by means of several examples.
+The \pkg{eRm} package uses a design matrix approach which allows
+to reparameterize the item parameters to model common characteristics of
+the items or to enable the
+user to impose repeated measurement designs as well as group contrasts.
+By combining these types of contrasts one allows that the item parameters
+may differ over time with respect to certain subgroups.  To illustrate
+the flexibility of the \pkg{eRm} package some examples are given to show
+how suitable design matrices can be constructed.
+
+%----------------- end introduction ----------------
+\section{Extended Rasch models}
+\label{sec:erm}
+
+\subsection{General expressions}
+Briefly after the first publication of the basic Rasch Model \citep{Ra:60}, the author worked on polytomous generalizations which can be found in \citet{Ra:61}. \citet{And:95} derived the representations below which are based on Rasch's general expression for polytomous data. The data matrix is denoted as $\mathbf{X}$ with the persons in the rows and the items in the columns. In total there are $v=1,...,n$ persons and $i=1,...,k$ items. A single element in the data matrix $\mathbf{X}$ is [...]
+
+\begin{equation}
+\label{eq1}
+    P(X_{vi}=h)=\frac{\exp[\phi_h(\theta_v+\beta_i)+\omega_h]}{\sum_{l=0}^{m_i} \exp[\phi_l (\theta_v+\beta_i)+\omega_l]}
+\end{equation}
+
+or
+
+\begin{equation}
+\label{eq2}
+    P(X_{vi}=h)=\frac{\exp[\phi_h \theta_v+\beta_{ih}]}{\sum_{l=0}^{m_i} \exp[\phi_l \theta_v+\beta_{il}]}.
+\end{equation}
+
+Here, $\phi_h$ are scoring functions for the item parameters, $\theta_v$ are the uni-dimensional person parameters, and $\beta_i$ are the item parameters. In Equation \ref{eq1}, $\omega_h$ corresponds to category parameters, whereas in Equation \ref{eq2} $\beta_{ih}$ are the item-category parameters. The meaning of these parameters will be discussed in detail below. Within the framework of these two equations, numerous models have been suggested that retain the basic properties of the Ra [...]
+
+
+\subsection{Representation of extended Rasch models}
+\label{Rep}
+For the ordinary Rasch model for dichotomous items, Equation \ref{eq1} reduces to
+\begin{equation}
+\label{eq:rasch}
+  P(X_{vi}=1)=\frac{\exp(\theta_v - \beta_i)}{1+\exp(\theta_v-\beta_i)}.
+\end{equation}
+The main assumptions, which hold as well for the generalizations presented in this paper, are: uni-dimensionality of the latent trait, sufficiency of the raw score, local independence, and parallel item characteristic curves (ICCs). Corresponding explanations can be found, e.g., in \citet{Fisch:74} and mathematical derivations and proofs in \citet{Fisch:95a}.
+
+\begin{figure}[hbt]
+\centering
+\includegraphics[height=60mm, width=40mm]{modelhierarchy.pdf}
+\caption{\label{fig1} Model hierarchy}
+\end{figure}
+
+For dichotomous items, \citet{Scheib:72} proposed the (even more restricted) linear logistic test model (LLTM), later formalized by \citet{Fisch:73}, by splitting up the item parameters into the linear combination
+
+\begin{equation}
+\label{eq4}
+  \beta_i=\sum_{j=1}^p w_{ij} \eta_j.
+\end{equation}
+
+\citet{Scheib:72} explained the dissolving process of items in a test for logics (``Mengenrechentest") by so-called ``cognitive operations" $\eta_j$ such as negation, disjunction, conjunction, sequence, intermediate result, permutation, and material. Note that the weights $w_{ij}$ for item $i$ and operation $j$ have to be fixed a priori. Further elaborations about the cognitive operations can be found in \citet[p.~361ff.]{Fisch:74}. Thus, from this perspective the LLTM is more parsimonou [...]
+
+Though, there exists another way to look at the LLTM: A generalization of the basic Rasch model in terms of repeated measures and group contrasts. It should be noted that both types of reparameterization also apply to the linear rating scale model (LRSM) and the linear partial credit model (LPCM) with respect to the basic rating scale model (RSM) and the partial credit model (PCM) presented below. Concerning the LLTM, the possibility to use it as a generalization of the Rasch model for r [...]
+
+At this point we will focus on a simple polytomous generalization of the Rasch model, the RSM \citep{And:78}, where each item $I_i$ must have the same number of categories. Pertaining to Equation \ref{eq1}, $\phi_h$ may be set to $h$ with $h=0,...,m$. Since in the RSM the number of item categories is constant, $m$ is used instead of $m_i$. Hence, it follows that
+
+\begin{equation}
+\label{eq5}
+    P(X_{vi}=h)=\frac{\exp[h(\theta_v+\beta_i)+\omega_h]}{\sum_{l=0}^m \exp[l(\theta_v+ \beta_i)+\omega_l]},
+\end{equation}
+
+with $k$ item parameters $\beta_1,...,\beta_k$ and $m+1$ category parameters $\omega_0,...,\omega_m$. This parameterization causes a scoring of the response categories $C_h$ which is constant over the single items. Again, the item parameters can be split up in a linear combination as in Equation \ref{eq4}. This leads to the LRSM proposed by \citet{FiPa:91}.
+
+Finally, the PCM developed by \citet{Mast:82} and its linear extension, the LPCM \citep{FiPo:94}, are presented. The PCM assigns one parameter $\beta_{ih}$ to each $I_i \times C_h$ combination for $h=0,...,m_i$. Thus, the constant scoring property must not hold over the items and in addition, the items can have different numbers of response categories denoted by $m_i$. Therefore, the PCM can be regarded as a generalization of the RSM and the probability for a response of person $v$ on ca [...]
+
+\begin{equation}
+\label{eq6}
+    P(X_{vih}=1)=\frac{\exp[h\theta_v + \beta_{ih}]}{\sum_{l=0}^{m_i}\exp[l\theta_v + \beta_{il}]}.
+\end{equation}
+
+It is obvious that (\ref{eq6}) is a simplification of (\ref{eq2}) in terms of $\phi_h = h$. As for the LLTM and the LRSM, the LPCM is defined by reparameterizing the item parameters of the basic model, i.e.,
+
+\begin{equation}
+\label{eq:lpcmeta}
+  \beta_{ih}=\sum_{j=1}^p w_{ihj}\eta_j.
+\end{equation}
+
+These six models constitute a hierarchical order as displayed in Figure \ref{fig1}. This hierarchy is the base for a unified CML approach presented in the next section. It is outlined again that the linear extension models can be regarded either as generalizations or as more restrictive formulations pertaining to the underlying base model. The hierarchy for the basic model is straightforward: The RM allows only items with two categories, thus each item is represented by one parameter $\b [...]
+
+To conclude, the most general model is the LPCM. All other models can be considered as simplifications of Equation \ref{eq6} combined with Equation \ref{eq:lpcmeta}. As a consequence, once an estimation procedure is established for the LPCM, this approach can be used for any of the remaining models. This is what we quote as \textit{unified CML approach}. The corresponding likelihood equations follow in Section \ref{sec:cml}.
+
+\subsection{The concept of virtual items}
+\label{sec:design}
+When operating with longitudinal models, the
+main research question  is whether an individual's test
+performance changes over time. The most intuitive way would be to
+look at the shift in ability $\theta_v$ across time points. Such
+models are presented e.g. in \citet{Mi:85}, \citet{Glas:1992}, and
+discussed by \citet{Ho:95}.
+
+Yet there exists another look onto time dependent changes, as presented in \citet[p~158ff.]{Fisch:95b}: The
+person parameters are fixed over time and instead of them the item
+parameters change. The basic idea is that one item $I_i$ that is presented at two different times to the same person $S_v$
+is regarded as a pair of \textit{virtual items}. Within the framework of extended Rasch models, any change in $\theta_v$ occuring between the testing occasions can be described without loss of generality as a change of the item parameters, instead of describing change in terms of the person parameter. Thus, with only two measurement points, $I_i$ with the corresponding parameter $\beta_i$ generates two virtual items $I_r$ and $I_s$ with associated item parameters $\beta^{\ast}_r$ and $\b [...]
+
+Correspondingly, for each measurement point $t$ we have a vector of
+\textit{virtual item parameters} $\boldsymbol{\beta}^{\ast(t)}$ of
+length $k$. These are linear reparameterizations of the original
+$\boldsymbol{\beta}^{(t)}$, and thus the CML approach can be used
+for estimation. In general, for a simple LLTM with two measurement points the design
+matrix $\boldsymbol{W}$ is of the form as given in Table \ref{tab1}.
+
+\begin{table}
+\centering
+\[
+\begin{array}{c|c|rrrr|r}
+& & \eta_1 & \eta_2 & \hdots & \eta_k & \eta_{k+1}\\
+\hline
+\textrm{Time 1} & \beta_1^{\ast(1)} & 1 & 0 & 0 & 0 & 0\\
+& \beta_2^{\ast(1)} & 0 & 1 & 0 & 0 & 0\\
+& \vdots        &   &   & \ddots& & \vdots\\
+& \beta_{k}^{\ast(1)} & 1 & 0 & 0 & 1 & 0\\
+\hline
+\textrm{Time 2} & \beta_{k+1}^{\ast(2)} & 1 & 0 & 0 & 0 & 1\\
+& \beta_{k+2}^{\ast(2)} & 0 & 1 & 0 & 0 & 1\\
+& \vdots        &   &   & \ddots& & \vdots\\
+& \beta_{2k}^{\ast(2)} & 1 & 0 & 0 & 1 & 1\\
+\end{array}
+\]
+\caption{\label{tab1}A design matrix for an LLTM with two timepoints.}
+\end{table}
+
+The parameter vector $\boldsymbol{\beta}^{\ast(1)}$ represents the
+item parameters for the first test occasion,
+$\boldsymbol{\beta}^{\ast(2)}$ the parameters for the second
+occasion. It might be of interest whether these vectors differ. The
+corresponding trend contrast is $\eta_{k+1}$. Due to this contrast,
+the number of original $\beta$-parameters is doubled by introducing
+the $2k$ virtual item parameters. If we assume a constant shift for
+all item parameters, it is only necessary to estimate
+$\hat{\boldsymbol{\eta}}'=(\hat{\eta}_1,...,\hat{\eta}_{k+1})$
+where $\hat{\eta}_{k+1}$ gives the amount of shift. Since according to (\ref{eq4}), the vector
+$\hat{\boldsymbol{\beta}}^\ast$ is just a linear combination of
+$\hat{\boldsymbol{\eta}}$.
+
+As mentioned in the former section, when using models with linear
+extensions it is possible to impose group contrasts. By doing this,
+one allows that the item difficulties are different across
+subgroups. However, this is possible only for models with repeated
+measurements and virtual items since otherwise the introduction of a
+group contrast leads to overparameterization and the group effect
+cannot be estimated by using CML.
+
+Table \ref{tab2} gives an example for a repeated measurement design
+where the effect of a treatment is to be evaluated by comparing item
+difficulties regarding a control and a treatment group. The number
+of virtual parameters is doubled compared to the model matrix given
+in Table \ref{tab1}.
+
+\begin{table}[h]
+  \centering
+\[
+\begin{array}{c|c|c|rrrr|rrr}
+& & & \eta_1 & \eta_2 & \hdots & \eta_k & \eta_{k+1} & \eta_{k+2} \\
+\hline
+\textrm{Time 1} & \textrm{Group 1} & \beta_1^{\ast(1)} & 1 & 0 & 0 & 0 & 0 &  0\\
+& & \beta_2^{\ast(1)} & 0 & 1 & 0 & 0 & 0&  0\\
+& & \vdots        &   &   & \ddots& &\vdots &\vdots\\
+& & \beta_{k}^{\ast(1)} & 1 & 0 & 0 & 1 & 0 & 0\\
+\cline{2-9}
+& \textrm{Group 2} & \beta_{k+1}^{\ast(1)} & 1 & 0 & 0 & 0 & 0 & 0\\
+& & \beta_{k+2}^{\ast(1)} & 0 & 1 & 0 & 0 & 0 & 0\\
+& & \vdots        &   &   & \ddots& &\vdots & \vdots\\
+& & \beta_{2k}^{\ast(1)} & 1 & 0 & 0 & 1 & 0& 0\\
+\hline
+\textrm{Time 2} & \textrm{Group 1} & \beta_1^{\ast(2)} & 1 & 0 & 0 & 0 & 1 & 0\\
+& & \beta_2^{\ast(2)} & 0 & 1 & 0 & 0 & 1 & 0\\
+& & \vdots        &   &   & \ddots& &\vdots &\vdots\\
+& & \beta_{k}^{\ast(2)} & 1 & 0 & 0 & 1 & 1 & 0\\
+\cline{2-9}
+& \textrm{Group 2} & \beta_{k+1}^{\ast(2)} & 1 & 0 & 0 & 0 & 1 & 1\\
+& & \beta_{k+2}^{\ast(2)} & 0 & 1 & 0 & 0 & 1 & 1\\
+& & \vdots        &   &   & \ddots& &\vdots  & \vdots\\
+& & \beta_{2k}^{\ast(2)} & 1 & 0 & 0 & 1 & 1 & 1\\
+\end{array} \]
+\caption{\label{tab2} Design matrix for a repeated measurements design with treatment and control group.}
+\end{table}
+
+Again, $\eta_{k+1}$ is the parameter that refers to the time
+contrast, and $\eta_{k+2}$ is a group effect within
+measurement point 2. More examples are given in Section \ref{sec:pack}
+and further explanations can be found in \citet{Fisch:95b},
+\citet{FiPo:94}, and in the software manual for the LPCM-Win program
+by \citet{FiPS:98}.
+
+By introducing the concept of virtual persons, \pkg{eRm} allows for the computation of the linear logistic test model with relaxed assumptions \citep[LLRA][]{Fisch:77}. Corresponding explanations will be given in a subsequent version of this vignette.
+
+
+%------------------------ end extended Rasch models --------------------------
+
+\section{Estimation of item and person parameters}
+\label{sec:cml}
+
+\subsection{CML for item parameter estimation}
+The main idea behind the CML estimation is that the person's raw score $r_v=\sum_{i=1}^k x_{vi}$ is a sufficient statistic. Thus, by conditioning the likelihood onto $\boldsymbol{r}'=(r_1,...,r_n)$, the person parameters $\boldsymbol{\theta}$, which in this context are nuisance parameters, vanish from the likelihood equation, thus, leading to consistently estimated item parameters $\hat{\boldsymbol{\beta}}$.
+
+Some restrictions have to be imposed on the parameters to ensure identifiability. This can be achieved, e.g., by setting certain parameters to zero depending on the model. In the Rasch model one item parameter has to be fixed to 0. This parameter may be considered as baseline difficulty. In addition, in the RSM the category parameters $\omega_0$ and $\omega_1$ are also constrained to 0. In the PCM all parameters representing the first category, i.e. $\beta_{i0}$ with $i=1,\ldots,k$, and  [...]
+
+At this point, for the LPCM the likelihood equations with corresponding first and second order derivatives are presented (i.e. \textit{unified CML equations}). In the first version of the \pkg {eRm} package numerical approximations of the Hessian matrix are used. However, to ensure numerical accuracy and to speed up the estimation process, it is planned to implement the analytical solution as given below.
+
+The conditional log-likelihood equation for the LPCM is
+
+\begin{equation}
+\label{eq:cmll}
+    \log L_c = \sum_{i=1}^k \sum_{h=1}^{m_i} x_{+ih} \sum_{j=1}^p w_{ihj} \eta_j - \sum_{r=1}^{r_{max}} n_r \log \gamma_r.
+\end{equation}
+
+The maximal raw score is denoted by $r_{max}$ whereas the number of subjects with the same raw score is quoted as $n_r$. Alternatively, by going down to an individual level, the last sum over $r$ can be replaced by $\sum_{v=1}^n \log \gamma_{r_v}$. It is straightforward to show that the LPCM as well as the other extended Rasch models, define an exponential family  \citep{And:83}. Thus, the raw score $r_v$ is minimally sufficient for $\theta_v$ and the item totals $x_{.ih}$ are minimally  [...]
+
+Crucial expressions are the $\gamma$-terms which are known as \textit{elementary symmetric functions}. More details about these terms are given in the next section. However, in the \pkg {eRm} package the numerically stable \textit{summation algorithm} as suggested by \citet{And:72} is implemented. \citet{FiPo:94} adopted this algorithm for the LPCM and devised also the first order derivative for computing the corresponding derivative of $\log L_c$:
+
+\begin{equation}
+\label{eq:dcml}
+\frac{\partial\log L_c}{\partial\eta_a} = \sum_{i=1}^k \sum_{h=1}^{m_i} w_{iha}\left(x_{+ih} - \epsilon_{ih} \sum_{r=1}^{r_{max}} n_r \frac{ \gamma_{r}^{(i)}}{\gamma_r}\right).
+\end{equation}
+
+It is important to mention that for the CML-representation, the multiplicative Rasch expression is used throughout equations \ref{eq1} to \ref{eq:lpcmeta}, i.e., $\epsilon_i=\exp(-\beta_i)$ for the person parameter. Therefore, $\epsilon_{ih}$ corresponds to the reparameterized item $\times$ category parameter whereas $\epsilon_{ih} > 0$. Furthermore, $\gamma_{r}^{(i)}$ are the first order derivatives of the $\gamma$-functions with respect to item $i$. The index $a$ in $\eta_a$ denotes th [...]
+
+For the second order derivative of $\log L_c$, two cases have to be distinguished: the derivatives for the off-diagonal elements and the derivatives for the main diagonal elements. The item categories with respect to the item index $i$ are coded with $h_i$, and those referring to item $l$ with $h_l$. The second order derivatives of the $\gamma$-functions with respect to items $i$ and $l$ are denoted by $\gamma_r^{(i,l)}$. The corresponding likelihood expressions are
+\begin{align}
+\label{eq:2dcml}
+\frac{\partial\log L_c}{\partial\eta_a \eta_b} = & -\sum_{i=1}^k \sum_{h_i=1}^{m_i} w_{ih_ia}w_{ih_ib}\epsilon_{ih_i} \sum_{r=1}^{r_{max}} n_r \frac{\log \gamma_{r-h_i}}{\gamma_r}\\
+& -\sum_{i=1}^k \sum_{h_i=1}^{m_i} \sum_{l=1}^k \sum_{h_l=1}^{m_l} w_{ih_ia}w_{lh_lb} \left[\epsilon_{ih_i} \epsilon_{lh_l} \left( \sum_{r=1}^{r_{max}} n_r \frac{\gamma_{r}^{(i)}\gamma_{r}^{(l)}}{\gamma_r^2} - \sum_{r=1}^{r_{max}} n_r \frac{\gamma_{r}^{(i,l)}}{\gamma_r}\right)\right]
+\notag
+\end{align}
+for $a\neq b$, and
+\begin{align}
+\label{eq:2dcmlab}
+\frac{\partial\log L_c}{\partial\eta_a^2} = & -\sum_{i=1}^k \sum_{h_i=1}^{m_i} w_{ih_ia}^2 \epsilon_{ih_i} \sum_{r=1}^{r_{max}} n_r \frac{\log \gamma_{r-h_i}}{\gamma_r}\\
+& -\sum_{i=1}^k \sum_{h_i=1}^{m_i} \sum_{l=1}^k \sum_{h_l=1}^{m_l} w_{ih_ia}w_{lh_la}\epsilon_{ih_i} \epsilon_{lh_l}\sum_{r=1}^{r_{max}} n_r \frac{\gamma_{r-h_i}^{(i)}\gamma_{r-h_l}^{(l)}}{\gamma_r^2}
+\notag
+\end{align}
+for $a=b$.
+
+To solve the likelihood equations with respect to $\mathbf{\hat{\eta}}$, a Newton-Raphson algorithm is applied. The update within each iteration step $s$ is performed by
+
+\begin{equation}
+\label{eq:iter}
+\boldsymbol{\hat{\eta}}_s=\boldsymbol{\hat{\eta}}_{s-1}-\mathbf{H}_{s-1}^{-1} \boldsymbol{\delta}_{s-1}.
+\end{equation}
+
+The starting values are $\boldsymbol{\hat{\eta}}_0=\mathbf{0}$.
+$\mathbf{H}_{s-1}^{-1}$ is the inverse of the Hessian matrix composed by
+the elements given in Equation \ref{eq:2dcml} and \ref{eq:2dcmlab} and
+$\boldsymbol{\delta}_{s-1}$ is the gradient at iteration $s-1$ as
+specified in Equation \ref{eq:dcml}.  The iteration stops if the
+likelihood difference $\left|\log L_c^{(s)} - \log L_c^{(s-1)}
+\right|\leq \varphi$ where $\varphi$ is a predefined (small) iteration
+limit.  Note that in the current version (\Sexpr{packageDescription("eRm", fields = "Version")})
+$\mathbf{H}$ is
+approximated numerically by using the \pkg{nlm} Newton-type algorithm
+provided in the \pkg{stats} package.  The analytical solution as given
+in Equation \ref{eq:2dcml} and \ref{eq:2dcmlab} will be implemented in
+the subsequent version of \pkg{eRm}.
+
+
+\subsection{Mathematical properties of the CML estimates}
+\label{sec:mpcml}
+A variety of estimation approaches for IRT models in general  and
+for the Rasch model in particular are available: The \emph{joint
+maximum likelihood} (JML) estimation as proposed by \citet{Wright+Panchapakesan:1969}
+which is not recommended since the estimates are not consistent
+\citep[see e.g.][]{Haberman:77}. The basic reason for that is that the
+person parameters $\boldsymbol{\theta}$ are nuisance parameters; the
+larger the sample size, the larger the number of parameters.
+
+A well-known alternative is the \emph{marginal maximum likelihood}
+(MML) estimation \citep{Bock+Aitkin:1981}: A distribution $g(\theta)$ for
+the person parameters is assumed and the resulting situation
+corresponds to a mixed-effects ANOVA: Item difficulties can be
+regarded as fixed effects and person abilities as random effects.
+Thus, IRT models fit into the framework of \emph{generalized linear
+mixed models} (GLMM) as elaborated in \citet{deBoeck+Wilson:2004}. By
+integrating over the ability distribution the random nuisance
+parameters can be removed from the likelihood equations. This leads
+to consistent estimates of the item parameters. Further discussions
+of the MML approach with respect to the CML method will follow.
+
+For the sake of completeness, some other methods for the estimation
+of the item parameters are the following: \citet{CAnd:07} propose
+a Pseudo-ML approach, \citet{Molenaar:1995} and \citet{Linacre:2004} give an
+overview of various (heuristic) non-ML methods, Bayesian
+techniques can be found in \citet[Chapter 7]{BaKi:04}, and for nonparameteric approaches it is referred to \citet{LeVe:86}.
+
+However, back to CML, the main idea behind this approach is the
+assumption that the raw score $r_v$ is a minimal sufficient
+statistic for $\theta_v$. Starting from the equivalent
+multiplicative expression of Equation \ref{eq1} with
+$\xi_v=\exp(\theta_v)$ and $\epsilon_i=\exp(-\beta_i)$, i.e.,
+\begin{equation}
+\label{eq7}
+  P(X_{vi}=1)=\frac{\xi_v \epsilon_i}{1+\xi_v \epsilon_i},
+\end{equation}
+the following likelihood for the response pattern $\boldsymbol{x}_v$
+for a certain subject $v$ results:
+\begin{equation}
+\label{eq8}
+  P(\boldsymbol{x}_v|\xi_v,\boldsymbol{\epsilon})=\prod_{i=1}^k \frac{(\xi_v \epsilon_i)^{x_{vi}}}{1+\xi_v \epsilon_i}=
+  \frac{{\theta_v}^{r_v} \prod_{i=1}^k {\epsilon_i}^{x_{vi}}}{\prod_{i=1}^k (1+\xi_v \epsilon_i)}.
+\end{equation}
+Using the notation $\boldsymbol{y}=(y_1,\ldots ,y_k)$ for all
+possible response patterns with $\sum_{i=1}^k y_i=r_v$,  the
+probability for a fixed raw score $r_v$ is
+\begin{equation}
+\label{eq9}
+  P(r_v|\xi_v,\boldsymbol{\epsilon})=\sum_{\boldsymbol{y}|r_v} \prod_{i=1}^k \frac{(\xi_v \epsilon_i)^{x_{vi}}}{1+\xi_v \epsilon_i}=\frac{{\theta_v}^{r_v} \sum_{\boldsymbol{y}|r_v}  \prod_{i=1}^k {\epsilon_i}^{x_{vi}}}{\prod_{i=1}^k (1+\xi_v \epsilon_i)}.
+\end{equation}
+The crucial term with respect to numerical solutions of the
+likelihood equations is the second term in the numerator:
+\begin{equation}
+\label{eq:gamma}
+  \gamma_r(\epsilon_i) \equiv \sum_{\boldsymbol{y}|r_v} \prod_{i=1}^k {\epsilon_i}^{x_{vi}}
+\end{equation}
+These are the \emph{elementary symmetric functions}  (of order $r$).
+An overview of efficient computational algorithms and corresponding
+simulation studies can be found in \citet{Li:94}. The \pkg{eRm}
+package uses the summation algorithm as proposed by \citet{And:72}.
+
+Finally, by collecting the different raw scores into the vector
+$\boldsymbol{r}$ the conditional probability of observing response
+pattern $\boldsymbol{x}_v$ with given raw score $r_v$ is
+\begin{equation}
+\label{eq:xraw}
+  P(\boldsymbol{x}_v|r_v,\boldsymbol{\epsilon})=\frac{P(\boldsymbol{x}_v|\xi_v,\boldsymbol{\epsilon})}{P(r_v|\xi_v,\boldsymbol{\epsilon})} \,.
+\end{equation}
+By taking the product over the persons (independence  assumption),
+the (conditional) likelihood expression for the whole sample becomes
+\begin{equation}
+\label{eq:likall}
+  L(\boldsymbol{\epsilon}|\boldsymbol{r})=P(\boldsymbol{x}|\boldsymbol{r},\boldsymbol{\epsilon})=\prod_{v=1}^n \frac{\prod_{i=1}^k {\epsilon_i}^{x_{vi}}}{\gamma_{r_v}}.
+\end{equation}
+With respect to raw score frequencies $n_r$ and by reintroducing the
+$\beta$-parameters, (\ref{eq:likall}) can be reformulated as
+\begin{equation}
+\label{eq12a}
+  L(\boldsymbol{\beta}|\boldsymbol{r})= \frac{\exp \left(\sum_{i=1}^k x_{+i}\beta_i \right)}{\prod_{r=0}^k
+  \gamma_r^{n_r}} \,,
+\end{equation}
+where $x_{+i}$ are the item raw scores. It is obvious  that by
+conditioning the likelihood on the raw scores $\boldsymbol{r}$, the
+person parameters completely vanished from the expression. As a
+consequence, the parameters $\boldsymbol{\hat{\beta}}$ can be
+estimated without knowledge of the subject's abilities. This issue
+is referred to as \emph{person-free item assessment} and we will
+discuss this topic within the context of specific objectivity in the
+next section.
+
+Pertaining to asymptotic issues, it can be shown that under mild
+regularity conditions \citep{Pf:94} the CML estimates are
+consistent for $n\rightarrow \infty$ and $k$ fixed, unbiased,
+asymptotically efficient, and normally distributed
+\citep{Andersen:1970}. For the computation of a Rasch model,
+comparatively small samples are sufficient to get reliable estimates
+\citep{Fischer:1988}. Whether the MML estimates are unbiased depends
+on the correct specification of the ability distribution
+$g(\theta)$. In case of an incorrect assumption, the estimates are
+biased which is surely a drawback of this method. If $g(\theta)$ is
+specified appropriately, the CML and MML estimates are
+asymptotically equivalent \citep{Pf:94}.
+
+\citet{Fischer:1981} elaborates on the conditions for the existence and
+the uniqueness of the CML estimates. The crucial condition for the
+data matrix is that $\boldsymbol{X}$ has to be
+\emph{well-conditioned}. To introduce this issue it is convenient to
+look at a matrix which is \emph{ill-conditioned}: A matrix is
+ill-conditioned if there exists a partition of the items into two
+nonempty subsets such that all of a group of subjects responded
+correctly to items $i+1,\ldots,k$ ($\boldsymbol{X}_2$) and all of
+all other subjects failed for items $1,\ldots,i$
+($\boldsymbol{X}_3$), i.e.,
+\begin{table}[h]
+\centering
+\[
+\boldsymbol{X}=
+\left(
+\begin{array}{c|c}
+\boldsymbol{X}_1 & \boldsymbol{X}_2\\
+\hline
+\boldsymbol{X}_3 & \boldsymbol{X}_4\\
+\end{array}
+\right)
+=
+\left(
+\begin{array}{ccc|ccc}
+& & & 1 & \ldots & 1 \\
+& \boldsymbol{X}_1 & & \vdots & \ddots & \vdots \\
+& & & 1 & \ldots & 1 \\
+\hline
+0 & \ldots & 0 & & & \\
+\vdots & \ddots & \vdots & & \boldsymbol{X}_4 & \\
+0 & \ldots & 0 & & & \\
+\end{array}
+\right)
+\]
+\end{table}
+
+Thus, following the definition in \citet{Fischer:1981}: $\boldsymbol{X}$
+will be called \emph{well-conditioned} iff in every possible
+partition of the items into two nonempty subsets some subject has
+given response 1 on some item in the first set and response 0 on
+some item in the second set. In this case a unique solution for the
+CML estimates $\boldsymbol{\hat{\beta}}$  exists.
+
+This issue is important for structurally incomplete designs which
+often  occur in practice; different subsets of items are presented
+to different groups of persons $g=1,\ldots,G$ where $G\leq n$. As a
+consequence, the likelihood values have to be computed for each
+group separately and the joint likelihood is the product over the
+single group likelihoods. Hence, the likelihood in Equation
+\ref{eq12a} becomes
+\begin{equation}
+\label{eq:glik}
+L(\boldsymbol{\beta}|\boldsymbol{r})=\prod_{g=1}^G \frac{\exp \left(\sum_{i=1}^k x_{+i}\beta_i \right)}{\prod_{r=0}^k {\gamma_{g,r}}^{n_{g,r}}}
+\end{equation}
+This also implies the necessity to compute the elementary symmetric
+functions separately for each group. The \pkg{eRm} package can
+handle such structurally incomplete designs.
+
+From the elaborations above it is obvious that from an
+asymptotical point of view the CML estimates are at least as good
+as the MML estimates. In the past, computational problems (speed,
+numerical accuracy) involved in calculating the elementary symmetric
+functions limited the practical usage of the CML approach \citep[see e.g.][]{Gustafsson:1980}.
+Nowadays, these issues are less crucial due to increased computer power.
+
+In some cases MML estimation has advantages not shared  by CML: MML
+leads to finite person parameters even for persons with zero and
+perfect raw score, and such persons are not removed from the
+estimation process \citep{Molenaar:1995}. On the other hand the
+consideration of such persons does not seem meaningful from a
+substantial point of view since the person parameters are not
+reliable anymore -- for such subjects the test is too difficult or
+too easy, respectively. Thus, due to these covering effects, a
+corresponding ability estimation is not feasible. However, if the
+research goal is to find ability distributions such persons should
+be regarded and MML can handle this.
+
+When estimates for the person parameters are of interest some care
+has to be taken if the CML method is used since person parameters
+cancel from the estimation equations. Usually, they are estimated
+(once having obtained values for the item parameters) by inserting
+$\boldsymbol{\hat{\beta}}$ (or equivalently
+$\boldsymbol{\hat{\epsilon}}$) into Equation \ref{eq8} and
+solving with respect to $\boldsymbol{\theta}$. Alternatively,
+Bayesian procedures are applicable \citep{Hoijtink+Boomsma:1995}. It is again
+pointed out that each person in the sample gets an own parameter
+even though limited by the number of different raw scores.
+
+\subsection{CML and specific objectivity}
+In general, the Rasch model can be regarded as a measurement model:
+Starting from the (nominally scaled) 0/1-data matrix
+$\boldsymbol{X}$, the person raw scores $r_v$ are on an ordinal
+level. They, in turn, are used to estimate the item parameters
+$\boldsymbol{\beta}$ which are on an interval scale provided that
+the Rasch model holds.
+
+Thus, Rasch models allow for comparisons between objects on an
+interval level. Rasch reasoned on requirements to be fulfilled such
+that a specific proposition within this context can be regarded as
+``scientific''. His conclusions were that a basic requirement is the
+``objectivity'' of comparisons \citep{Ra:61}. This claim
+contrasts assumptions met in \emph{classical test theory} (CTT). A
+major advantage of the Rasch model over CTT models is the
+\emph{sample independence} of the results. The relevant concepts in
+CTT are based on a linear model for the ``true score'' leading to
+some indices, often correlation coefficients, which in turn depend
+on the observed data. This is a major drawback in CTT. According to
+\citet{Fisch:74}, sample independence in IRT models has the
+following implications:
+\begin{itemize}
+  \item The person-specific results (i.e., essentially $\boldsymbol{\theta}$) do not depend on the assignment of a person to a certain subject group nor on the selected test items from an item pool $\Psi$.
+  \item Changes in the skills of a person on the latent trait can be determined independently from its base level and independently from the selected item subset $\psi \subset \Psi$.
+  \item From both theoretical and practical perspective the requirement for representativeness of the sample is obsolete in terms of a true random selection process.
+\end{itemize}
+Based on these requirements for parameter comparisons, \citet{Ra:77}
+introduced the term \emph{specific objectivity}: \emph{objective}
+because any comparison of a pair of parameters is independent of any
+other parameters or comparisons; \emph{specifically objective}
+because the comparison made was relative to some specified frame of
+reference \citep{Andrich:88}. In other words, if specific
+objectivity holds, two persons $v$ and $w$ with corresponding
+parameters $\theta_v$ and $\theta_w$, are comparable independently
+from the remaining persons in the sample and independently from the
+presented item subset $\psi$. In turn, for two items $i$ and $j$
+with parameters $\beta_i$ and $\beta_j$, the comparison of these
+items can be accomplished independently from the remaining items in
+$\Psi$ and independently from the persons in the sample.
+
+The latter is crucial since it reflects completely what is called
+sample independence. If we think not only of comparing $\beta_i$ and
+$\beta_j$ but rather to estimate these parameters, we achieve a
+point where specific objectivity requires a procedure which is able
+to provide estimates $\boldsymbol{\hat{\beta}}$ that do not
+depend on the sample. This implies that
+$\boldsymbol{\hat{\beta}}$ should be computable without the
+involvement of $\boldsymbol{\theta}$. CML estimation fulfills this requirement: By
+conditioning on the sufficient raw score vector $\boldsymbol{r}$,
+$\boldsymbol{\theta}$ disappears from the likelihood equation and
+$L(\boldsymbol{\beta}|\boldsymbol{r})$ can be solved without
+knowledge of $\boldsymbol{\theta}$. This issue is referred to as
+\emph{separability of item and person parameters} \citep[see e.g.][]{Wright+Masters:1982}. Furthermore, separability implies that no specific distribution needs to be assumed, either for the person or for the item parameters \citep{Rost:2000}. MML estimation requires such assumptions. At this point it is clear that CML estimation is
+the only estimation method within the Rasch measurement context
+fulfilling the requirement of \emph{person-free item calibration}
+and, thus, it maps the epistemological theory of specific
+objectivity to a statistical maximum likelihood framework. Note that
+strictly speaking any statistical result based on sample
+observations is sample-dependent because any result depends at least
+on the sample size \citep{Fischer:1987}. The estimation of the item
+parameters is ``sample-independent'', a term indicating the fact that
+the actually obtained sample of a certain population is not of
+relevance for the statistical inference on these parameters
+\citep[][p. 23]{Kubinger:1989}.
+
+\subsection{Estimation of person parameters}
+CML estimation for person parameters is not recommended due to computational issues. The \pkg{eRm} package provides two methods for this estimation. The first is ordinary ML where the CML-based item parameters are plugged into the joint ML equation. The likelihood is optimized with respect to $\boldsymbol{\theta}$. \citet{And:95} gives a general formulation of this ML estimate with $r_v=r$ and $\theta_v=\theta$:
+\begin{equation}
+\label{eq17}
+    r - \sum_{i=1}^k \sum_{h=1}^{m_i} \frac{h \exp(h \theta+\hat{\beta}_{ih})}{\sum_{l=0}^{m_i}\exp(l \theta+\hat{\beta}_{il})}=0
+\end{equation}
+
+\citet{Warm:1989} proposed a weighted likelihood estimation (WLE) which is more accurate compared to ML. For the dichotomous Rasch model the expression to be solved with respect to $\boldsymbol{\theta}$ is
+\begin{equation}
+P(\theta_v|\boldsymbol{x}_v, \hat{\boldsymbol{\beta}}) \propto \frac{\exp(r_v\theta_v)}{\prod_i (1+\exp(\theta_v-\hat{\beta}_i))}\sum_i p_{vi}(1-p_{vi})
+\end{equation}
+Again, the item parameter vector $\hat{\boldsymbol{\beta}}$ is used from CML. This approach will be implemented in a subsequent \pkg{eRm} version. Additional explanations and simulation studies regarding person parameter estimation can be found in \citet{Hoijtink+Boomsma:1995}.
+
+%----------------- end parameter estimation -----------------
+
+\section{Testing extended Rasch models}
+\label{Gof}
+
+Testing IRT models involves two parts: First, item- and person-wise
+statistics can be examined; in particular item-fit and person-fit
+statistics. Secondly, based on CML properties, various model tests
+can be derived \citep[see][]{Glas+Verhelst:1995a,
+Glas+Verhelst:1995b}.
+
+\subsection{Item-fit and person-fit statistics}
+
+Commonly in IRT, items and persons are excluded  due to item-fit and
+person-fit statistics. Both are residual based measures: The
+observed data matrix $\mathbf{X}$ is compared with the model
+probability matrix $\mathbf{P}$. Computing standardized residuals
+for all observations gives the $n \times k$ residual matrix
+$\mathbf{R}$. The squared column sums correspond to item-fit
+statistics and the squared row sums to person-fit statistics both of
+which are $\chi^2$-distributed with the corresponding degrees of
+freedom. Based on these quantities unweighted (\textsl{outfit}) and
+weighted (\textsl{infit}) mean-square statistics can also be used to
+evaluate item and person fit \citep[see
+e.g.][]{Wright+Masters:1982}.
+
+\subsection{A Wald test for item elimination}
+A helpful implication of CML estimates is that subsequent test
+statistics are readily obtained and model tests are easy to carry
+out. Basically, we have to distinguish between test on item level
+and global model tests.
+
+On item level, sample independence reflects the property that by
+splitting up the sample in, e.g., two parts, the corresponding
+parameter vectors $\boldsymbol{\hat{\beta}}^{(1)}$ and
+$\boldsymbol{\hat{\beta}}^{(2)}$ should be the same. Thus,  when
+we want to achieve Rasch model fit those items have to be
+eliminated from the test which differ in the subsamples. This
+important issue in test calibration can be examined, e.g., by using
+a graphical model test. \citet{FiSch:70} propose a $N(0,1)$-distributed
+test statistic which compares the item parameters for two subgroups:
+\begin{equation}
+\label{eq:wald}
+  z=\frac{\beta_i^{(1)}-\beta_i^{(2)}}{\sqrt{Var_i^{(1)}+Var_i^{(2)}}}
+\end{equation}
+The variance term in the denominator is based on Fisher's function of ``information in the sample''.
+However, as \citet{Glas+Verhelst:1995a} point out
+discussing their Wald-type test that this term can be extracted directly
+from the variance-covariance matrix of the CML estimates. This Wald approach is provided in \pkg{eRm} by means of the function \code{Waldtest()}.
+
+\subsection{Andersen's likelihood-ratio test}
+In the \pkg {eRm} package the likelihood ratio test statistic $LR$, initially proposed by \citet{And:73} is computed for the RM, the RSM, and the PCM. For the models with linear extensions, $LR$ has to be computed separately for each measurement point and subgroup.
+\begin{equation}
+\label{eq15}
+LR = 2\left(\sum_{g=1}^G \log L_c(\boldsymbol{\hat{\eta}}_g;\boldsymbol{X}_g)-\log L_c(\boldsymbol{\hat{\eta}};\boldsymbol{X})\right)
+\end{equation}
+The underlying principle of this test statistic is that of \textit{subgroup homogeneity} in Rasch models: for arbitrary disjoint subgroups $g=1,...,G$ the parameter estimates $\boldsymbol{\hat{\eta}}_g$ have to be the same. $LR$ is asymptotically $\chi^2$-distributed with $df$ equal to the number of parameters estimated in the subgroups minus the number of parameters in the total data set. For the sake of computational efficiency, the \pkg {eRm} package performs a person raw score median [...]
+
+\subsection{Nonparametric (``exact'') Tests}
+Based on the package \pkg{RaschSampler} by
+\citet{Verhelst+Hatzinger+Mair:2007} several Rasch model tests as
+proposed by \citet{Ponocny:2001} are provided.
+
+\subsection{Martin-L\"of Test}
+Applying the LR principle to subsets of items, Martin-L\"of \citep[1973, see][]{Glas+Verhelst:1995a} suggested a statistic to
+evaluate if two groups of items are homogeneous, i.e.,
+to test the unidimensionality axiom.
+%-------------------------- end goodness-of-fit ------------------
+
+%---------------------------- APPLIED SECTION ----------------------------
+\section{The eRm package and application examples}
+\label{sec:pack}
+The underlying idea of the \pkg {eRm} package is to provide a user-friendly
+flexible tool to compute extended Rasch models. This implies, amongst others,
+an automatic generation of the design matrix $\mathbf{W}$. However, in order to
+test specific hypotheses the user may specify $\mathbf{W}$ allowing the package
+to be flexible enough for computing IRT-models beyond their regular applications.
+In the following subsections, various examples are provided pertaining to different model and design
+matrix scenarios. Due to intelligibility matters, the artificial data sets are kept rather small. A detailed description in German of applications of various extended Rasch models using the \pkg{eRm} package can be found in \citet{Poinstingl+Mair+Hatzinger:07}.
+
+\subsection{Structure of the eRm package}
+Embedding \pkg{eRm} into the flexible framework of \proglang{R} is a
+crucial benefit over existing stand-alone programs like WINMIRA
+\citep{Davier:1998}, LPCM-WIN \citep{FiPS:98}, and others.
+
+Another important issue in the development phase was that the
+package should be flexible enough to allow for CML compatible
+polytomous generalizations of the basic Rasch model such as the RSM
+and the PCM. In addition, by introducing a design matrix concept
+linear extensions of these basic models should be applicable. This
+approach resulted in including the LLTM, the LRSM and the LPCM as
+the most general model into the \pkg{eRm} package. For the latter
+model the CML estimation was implemented which can be used for the
+remaining models as well. A corresponding
+graphical representation is given in Figure \ref{fig:body}.
+
+\begin{figure}[hbt]
+\begin{center}
+    \includegraphics[width=13.7cm, height=6.5cm]{UCML.jpg}
+    \caption{\label{fig:body}Bodywork of the \pkg{eRm} routine}
+\end{center}
+\end{figure}
+
+An important benefit of the package with respect to linearly
+extended models is that for certain models the design matrix
+$\boldsymbol{W}$ can be generated automatically (LPCM-WIN, \citealp{FiPS:98}, also allows for specifying design matrices but in
+case of more complex models this can become a tedious task and the
+user must have a thorough understanding of establishing proper
+design structures). For repeated measurement models time contrasts
+in the \pkg{eRm} can be simply specified by defining the number of
+measurement points, i.e., {\tt mpoints}. To regard group contrasts
+like, e.g., treatment and control groups, a corresponding vector
+({\tt groupvec}) can be specified that denotes which person belongs
+to which group. However, $\boldsymbol{W}$ can also be defined by the
+user.
+
+A recently added feature of the routine is the option to allow for
+structurally missing values. This is required, e.g., in situations
+when different subsets of items are presented to different groups of
+subjects as described in Section \ref{sec:mpcml}. These person groups
+are identified automatically: In the data matrix $\boldsymbol{X}$,
+those items which are not presented to a certain subject are
+declared as \code{NA}s, as usual in \proglang{R}.
+
+After solving the CML equations by the Newton-Raphson method, the
+output of the routine consists of the ``basic'' parameter estimates
+$\boldsymbol{\hat{\eta}}$, the corresponding variance-covariance
+matrix, and consequently the vector with the standard errors.
+Furthermore, the ordinary item parameter estimates
+$\boldsymbol{\hat{\beta}}$ are computed by using the linear
+transformation
+$\boldsymbol{\hat{\beta}}=\boldsymbol{W}\boldsymbol{\hat{\eta}}$.
+For ordinary Rasch models these basic parameters correspond to the
+item easiness. For the RM, the RSM, and the PCM, however, we display
+$\boldsymbol{\hat{\eta}}$ as $\boldsymbol{-\hat{\eta}}$, i.e., as difficulty.
+It has to be mentioned that the CML equation is
+solved with the restriction that one item parameter has to be fixed
+to zero (we use
+ $\beta_1=0$). For the sake of interpretability, the resulting
+estimates $\boldsymbol{\hat{\beta}}$ can easily be transformed
+into ``sum-zero'' restricted $\boldsymbol{\hat{\beta}^*}$ by
+applying
+$\hat{\beta}_i^*=\hat{\beta}_i-\sum_i{\hat{\beta}_i}/k$.
+This transformation is also used for the graphical model test.
+
+\subsection{Example 1: Rasch model}
+We start the example section
+with a  simple Rasch model based on a $100 \times 30$ data matrix.
+First, we estimate the item parameters using the function
+\code{RM()} and then the person parameters with
+\code{person.parameters()}.
+
+<<>>=
+library(eRm)
+data(raschdat1)
+res.rasch <- RM(raschdat1)
+pres.rasch <- person.parameter(res.rasch)
+@
+
+Then we use Andersen's LR-test for goodness-of-fit with mean split criterion:
+<<>>=
+lrres.rasch <- LRtest(res.rasch, splitcr = "mean", se = TRUE)
+lrres.rasch
+@
+
+We see that the model fits and a graphical  representation of this
+result (subset of items only) is given in Figure \ref{fig:GOF} by means
+of a goodness-of-fit plot with confidence ellipses.
+
+\begin{figure}[hbt]
+\begin{center}
+<<fig = TRUE>>=
+plotGOF(lrres.rasch, beta.subset=c(14,5,18,7,1), tlab="item", conf=list(ia=FALSE,col="blue",lty="dotted"))
+@
+\caption{\label{fig:GOF} Goodness-of-fit plot for some items with confidence ellipses.}
+\end{center}
+\end{figure}
+
+To be able to draw confidence ellipses it is necessary to set \code{se = TRUE} when computing the LR-test.
+
+\subsection{Example 2: LLTM as a restricted Rasch model}
+As mentioned in Section \ref{Rep}, also the models with the linear extensions on
+the item parameters can be seen as special cases of their underlying basic model.
+In fact, the LLTM as presented below and following the original idea by \citet{Scheib:72},
+is a restricted RM, i.e. the number of estimated parameters is smaller compared to a Rasch model. The data matrix
+$\mathbf{X}$ consists of $n=15$ persons and $k=5$ items. Furthermore, we specify a design matrix $\mathbf{W}$ (following Equation \ref{eq4}) with specific weight elements $w_{ij}$.
+
+<<>>=
+data(lltmdat2)
+W <- matrix(c(1,2,1,3,2,2,2,1,1,1),ncol=2)
+res.lltm <- LLTM(lltmdat2, W)
+summary(res.lltm)
+@
+
+The \code{summary()} method provides point estimates and standard
+errors for the basic parameters and for the resulting item
+parameters. Note that item parameters in \pkg{eRm} are always
+estimated as easiness parameters according to equations \ref{eq1}
+and \ref{eq2} but not \ref{eq:rasch}. If the sign is switched, the
+user gets difficulty parameters (the standard errors remain the
+same, of course). However,
+all plotting functions \code{plotGOF}, \code{plotICC},
+\code{plotjointICC}, and \code{plotPImap}, as well as the function
+\code{thresholds} display the difficulty parameters. The same applies
+for the basic parameters $\eta$ in the output of the RM, RSM, and PCM.
+
+\subsection{Example 3: RSM and PCM}
+Again, we provide an artificial data set now with $n=300$ persons and $k=4$ items;
+each of them with $m+1=3$ categories. We start with the estimation of an RSM and, subsequently,
+we calculate the corresponding category-intersection parameters using the function \code{thresholds()}.
+
+<<>>=
+data(pcmdat2)
+res.rsm <- RSM(pcmdat2)
+thresholds(res.rsm)
+@
+
+The location parameter is basically the item difficulty and the thresholds are the points in the
+ICC plot given in Figure \ref{fig:ICC} where the category curves intersect:
+
+<<fig = FALSE>>=
+plotICC(res.rsm, mplot=TRUE, legpos=FALSE,ask=FALSE)
+@
+
+\begin{figure}[hbt]
+\begin{center}
+<<fig = TRUE, echo=FALSE>>=
+plotICC(res.rsm, mplot=TRUE, legpos=FALSE,ask=FALSE)
+@
+\caption{\label{fig:ICC} ICC plot for an RSM.}
+\end{center}
+\end{figure}
+
+The RSM restricts the threshold distances to be the same across all items.
+This strong assumption can be relaxed using a PCM. The results are represented in a person-item map
+(see Figure \ref{fig:PImap}).
+
+<<fig=FALSE>>=
+res.pcm <- PCM(pcmdat2)
+plotPImap(res.pcm, sorted = TRUE)
+@
+
+\begin{figure}[hbt]
+\begin{center}
+<<fig=TRUE,echo=FALSE>>=
+res.pcm <- PCM(pcmdat2)
+plotPImap(res.pcm, sorted = TRUE)
+@
+\caption{\label{fig:PImap} Person-Item map for a PCM.}
+\end{center}
+\end{figure}
+
+After estimating the person parameters we can check the item-fit statistics.
+<<>>=
+pres.pcm <- person.parameter(res.pcm)
+itemfit(pres.pcm)
+@
+
+A likelihood ratio test comparing the RSM and the PCM indicates that the PCM provides a better fit.
+%Since none of the items is significant we can conclude that the data fit the PCM.
+
+<<>>=
+lr<- 2*(res.pcm$loglik-res.rsm$loglik)
+df<- res.pcm$npar-res.rsm$npar
+pvalue<-1-pchisq(lr,df)
+cat("LR statistic: ", lr, "  df =",df, "  p =",pvalue, "\n")
+@
+
+
+\subsection{An LPCM for repeated measurements in different groups}
+The most complex example refers to an LPCM with two measurement points.
+In addition, the hypothesis is of interest whether the treatment has an effect.
+The corresponding contrast is the last column in $\mathbf{W}$ below.
+
+First, the data matrix $\mathbf{X}$ is specified. We assume an artificial test consisting of $k=3$ items
+which was presented twice to the subjects. The first 3 columns in $\mathbf{X}$ correspond
+to the first test occasion, whereas the last 3 to the second occasion.
+Generally, the first $k$ columns correspond to the first test occasion, the next $k$ columns for the second, etc.
+In total, there are $n=20$ subjects. Among these, the first 10 persons belong to the first group (e.g., control),
+and the next 10 persons to the second group (e.g., treatment). This is specified
+by a group vector:
+
+<<>>=
+data(lpcmdat)
+grouplpcm <- rep(1:2, each = 10)
+@
+
+Again, $\boldsymbol{W}$ is generated automatically. In general, for such designs
+the generation of $\boldsymbol{W}$ consists first of the item contrasts,
+followed by the time contrasts and finally by the group main effects except for
+the first measurement point (due to identifiability issues, as already described).
+
+<<>>=
+reslpcm <- LPCM(lpcmdat, mpoints = 2, groupvec = grouplpcm, sum0 = FALSE)
+model.matrix(reslpcm)
+@
+
+The parameter estimates are the following:
+
+<<echo = FALSE>>=
+reslpcm
+@
+
+Testing whether the $\eta$-parameters equal 0 is mostly not of relevance for those
+parameters referring to the items (in this example $\eta_1,...,\eta_8$).
+But for the remaining contrasts, $H_0: \eta_9=0$ (implying no general time effect)
+can not be rejected ($p=.44$), whereas hypothesis $H_0: \eta_{10}=0$ has to be rejected
+($p=.004$) when applying a $z$-test.
+This suggests that there is a significant treatment effect over the measurement points.
+If a user wants to perform additional tests such as a Wald test for the equivalence
+of two $\eta$-parameters, the \code{vcov} method can be applied to get the
+variance-covariance matrix.
+
+\section{Additional topics}
+
+This section will be extended successively with new developments and
+components which do not directly relate to the modeling core of
+\pkg{eRm} but may prove to be useful add-ons.
+
+\subsection{The eRm simulation module}
+A recent \pkg{eRm} development is the implementation of a simulation module to generate 0-1 matrices for different Rasch scenarios. In this article we give a brief overview about the functionality and for more detailed descriptions (within the context of model testing) it is referred to \citet{Mair:2006} and \citet{Suarez+Glas:2003}.
+
+For each scenario the user has the option either to assign $\boldsymbol{\theta}$ and $\boldsymbol{\beta}$ as vectors to the simulation function (e.g. by drawing parameters from a uniform distribution) or to let the function draw the parameters from a $N(0,1)$ distribution. The first scenario is the simulation of Rasch homogenous data by means of the function \code{sim.rasch()}. The parameter values are plugged into equation \ref{eq:rasch} and it results the matrix $\mathbf{P}$ of model p [...]
+\begin{equation*}
+x_{vi} = \left\{
+ \begin{array}{rl}
+  1 & \text{if } p^{\star}_{vi} \leq p_{vi}\\
+  0 & \text{if } p^{\star}_{vi} > p_{vi}\\
+ \end{array} \right.
+\end{equation*}
+Alternatively, the user can specify a fixed cutpoint $p^{\star}:=p^{\star}_{vi}$ (e.g. $p^{\star} = 0.5$) and make the decision according to the same rule. This option is provided by means of the \code{cutpoint} argument. Caution is advised when using this deterministic option since it likely leads to ill-conditioned data matrices.
+
+The second scenario in this module regards the violation of the parallel ICC assumption which leads to the two-parameter logistic model (2-PL) proposed by \citet{Birnbaum:1968}:
+\begin{equation}
+\label{eq:2pl}
+  P(X_{vi}=1)=\frac{\exp(\alpha_i(\theta_v - \beta_i))}{1+\exp(\alpha_i(\theta_v-\beta_i))}.
+\end{equation}
+The parameter $\alpha_i$ denotes the item discrimination which for the Rasch model is 1 across all items. Thus, each item score gets a weight and the raw scores are not sufficient anymore. The function for simulating 2-PL data is \code{sim.2pl()} and if $\boldsymbol{\alpha}$ is not specified by the user by means of the argument \code{discrim}, the discrimination parameters are drawn from a log-normal distribution. The reasons for using this particular kind of distribution are the followi [...]
+ns of the dispersion parameter $\sigma^2$. A value of $\sigma^2 = .50$ already denotes a strong violation. The lower $\sigma^2$, the closer the values lie around 1. In this case the $\alpha_i$ are close to the Rasch slopes.
+
+Using the function \code{sim.xdim()} the unidimensionality assumptions is violated. This function allows for the simulation of multidimensional Rasch models as for instance given \citet{Glas:1992} and \citet{Adams+Wilson+Wang:1997}. Multidimensionality implies that one single item measures more than one latent construct. Let us denote the number of these latent traits by $D$. Consequently, each person has a vector of ability parameters $\boldsymbol{\theta}_v$ of length $D$. These vectors [...]
+ is not provided by the user, \code{sim.xdim()} generates $\mathbf{Z}$ such that each $\mathbf{z}_i$ contains only one nonzero element, which indicates the assigned dimension. This corresponds to the \emph{between-item multidimensional model} \citep{Adams+Wilson+Wang:1997}. However, in any case the person part of the model is $\mathbf{z}_i^T \boldsymbol{\theta}_v$ which replaces $\theta_v$ in Equation \ref{eq:rasch}.
+
+Finally, locally dependent item responses can be produced by means of the function \code{sim.locdep()}. Local dependence implies the introduction of pair-wise item correlations $\delta_{ij}$. If these correlations are constant across items, the argument \code{it.cor} can be a single value $\delta$. A value $\delta = 0$ corresponds to the Rasch model whereas $\delta = 1$ leads to the strongest violation. Alternatively, for different pair-wise item correlations, the user can specify a VC-m [...]
+\begin{equation}
+P(X_{vi}=1|X_{vj}=x_{vj})=\frac{\exp(\theta_v - \beta_i + x_{vj}\delta_{ij})}{1+\exp(\theta_v-\beta_i + x_{vj}\delta_{ij})}.
+\end{equation}
+This model was proposed by \citet{Jannarone:1986} and is suited to model locally dependent item responses.
+
+
+\section{Discussion and outlook}
+\label{sec:disc}
+
+Here we give a brief outline of future \pkg{eRm} developments. The
+CML estimation  approach, in combination with the EM-algorithm, can
+also be used to estimate \textit{mixed Rasch models} (MIRA). The
+basic idea behind such models is that the extended Rasch model holds
+within subpopulations of individuals, but with different parameter
+values for each subgroup. Corresponding elaborations are given in
+\citet{RoDa:95}.
+
+In Rasch models the item discrimination parameter $\alpha_i$ is
+always fixed  to 1 and thus it does not appear in the basic
+equation. Allowing for different discrimination parameters across
+items leads to the two-parameter logistic model as given in Equation
+\ref{eq:2pl}. In this model the raw scores are not sufficient
+statistics anymore and hence CML can not be applied. 2-PL models can
+be estimated by means of the \pkg{ltm} package \citep{Riz:06}.
+However, \citet{Verhelst+Glas:1995} formulated the one parameter
+logistic model (OPLM) where the $\alpha_i$ do not vary across the
+items but are unequal to one. The basic strategy to estimate OPLM is
+a three-step approach: First, the item parameters of the Rasch model
+are computed. Then, discrimination parameters are computed under
+certain restrictions. Finally, using these discrimination weights,
+the item parameters for the OPLM are estimated using CML. This is a
+more flexible version of the Rasch model in terms of different
+slopes.
+
+To conclude, the \pkg{eRm} package is a tool to estimate extended
+Rasch models for unidimensional traits.  The generalizations towards
+different numbers of item categories, linear extensions to allow for
+introducing item covariates and/or trend and optionally group
+contrasts are important issues when examining item behavior and
+person performances in tests.  This improves the feasibility of IRT
+models with respect to a wide variety of application areas.
+
+\bibliography{eRmvig}
+
+\end{document}
diff --git a/inst/doc/eRmvig.bib b/inst/doc/eRmvig.bib
new file mode 100755
index 0000000..a0bbf07
--- /dev/null
+++ b/inst/doc/eRmvig.bib
@@ -0,0 +1,695 @@
+ at article{Ro:99,
+   author = {J. Rost},
+   year = {1999},
+   TITLE  = {Was ist aus dem Rasch-Modell geworden? [What Happened with the Rasch Model?]},
+   JOURNAL = {Psychologische Rundschau},
+   VOLUME = {50},
+   PAGES = {140-156}
+}
+
+ at article{Scheib:72,
+   author = {H. Scheiblechner},
+   year = {1972},
+   TITLE  = {{Das Lernen und L\"osen komplexer Denkaufgaben. [The learning and solving of complex reasoning items.]}},
+   JOURNAL = {Zeitschrift f\"ur Experimentelle und Angewandte Psychologie},
+   VOLUME = {3},
+   PAGES = {456-506}
+}
+
+ at article{And:78,
+   author = {D. Andrich},
+   year = {1978},
+   TITLE  = {A rating formulation for ordered response categories},
+   JOURNAL = {Psychometrika},
+   VOLUME = {43},
+   PAGES = {561-573}
+}
+
+ at article{FiPa:91,
+   author = {G. H. Fischer and P. Parzer},
+   year = {1991},
+   TITLE  = {An extension of the rating scale model with an application to the measurement of change},
+   JOURNAL = {Psychometrika},
+   VOLUME = {56},
+   PAGES = {637-651}
+}
+
+ at article{Mast:82,
+   author = {G. N. Masters},
+   year = {1982},
+   TITLE  = {A Rasch model for partial credit scoring},
+   JOURNAL = {Psychometrika},
+   VOLUME = {47},
+   PAGES = {149-174}
+}
+
+ at article{FiPo:94,
+   author = {G. H. Fischer and I. Ponocny},
+   year = {1994},
+   TITLE  = {An extension of the partial credit model with an application to the measurement of change},
+   JOURNAL = {Psychometrika},
+   VOLUME = {59},
+   PAGES = {177-192}
+}
+
+ at article{LeVe:86,
+   author = {J. de Leeuw and N. Verhelst},
+   year = {1986},
+   TITLE  = {Maximum likelihood estimation in generalized Rasch models},
+   JOURNAL = {Journal of Educational Statistics},
+   VOLUME = {11},
+   PAGES = {183-196}
+}
+
+ at article{Ra:77,
+   author = {G. Rasch},
+   year = {1977},
+   TITLE  = {On specific objectivity: An attempt at formalising the request for generality and validity of scientific statements},
+   JOURNAL = {Danish Yearbook of Philosophy},
+   VOLUME = {14},
+   PAGES = {58-94}
+}
+
+ at article{GlVe:89,
+   author = {C. A. W. Glas and N. Verhelst},
+   year = {1989},
+   TITLE  = {Extensions of the partial credit model},
+   JOURNAL = {Psychometrika},
+   VOLUME = {54},
+   PAGES = {635-659}
+}
+
+ at article{Mi:85,
+   author = {R. J. Mislevy},
+   year = {1985},
+   TITLE  = {Estimation of latent group effects},
+   JOURNAL = {Journal of the American Statistical Association},
+   VOLUME = {80},
+   PAGES = {993-997}
+}
+
+ at article{Li:94,
+   author = {M. Liou},
+   year = {1994},
+   TITLE  = {More on the computation of higher-order derivatives of the elementary symmetric functions in the Rasch model},
+   JOURNAL = {Applied Psychological Measurement},
+   VOLUME = {18},
+   PAGES = {53-62}
+}
+
+
+ at article{And:72,
+   author = {E. B. Andersen},
+   year = {1972},
+   TITLE  = {The numerical solution of a set of conditional estimation equations},
+   JOURNAL = {Journal of the Royal Statistical Society, Series B},
+   VOLUME = {34},
+   PAGES = {42-54}
+}
+
+ at article{And:73,
+   author = {E. B. Andersen},
+   year = {1973},
+   TITLE  = {A goodness of fit test for the Rasch model},
+   JOURNAL = {Psychometrika},
+   VOLUME = {38},
+   PAGES = {123-140}
+}
+
+ at article{Fisch:73,
+   author = {G. H. Fischer},
+   year = {1973},
+   TITLE  = {The linear logistic test model as an instrument in educational research},
+   JOURNAL = {Acta Psychologica},
+   VOLUME = {37},
+   PAGES = {359-374}
+}
+
+ at article{Riz:06,
+   author = {D. Rizopoulos},
+   year = {2006},
+   TITLE  = {\pkg{ltm}: An \proglang{R} package for latent variable modeling and item response theory analyses},
+   JOURNAL = {Journal of Statistical Software},
+   VOLUME = {17},
+   number = {5},
+   pages = {1-25},
+   url = {http://www.jstatsoft.org/v17/i05/}
+}
+
+ at article{Bor:06,
+   author = {D. Borsboom},
+   year = {2006},
+   TITLE  = {The attack of the psychometricians},
+   JOURNAL = {Psychometrika},
+   VOLUME = {71},
+   PAGES = {425-440}
+}
+
+ at article{Kub:05,
+   author = {K. D. Kubinger},
+   year = {2005},
+   TITLE  = {Psychological test calibration using the Rasch model: Some critical suggestions on traditional approaches},
+   JOURNAL = {International Journal of Testing},
+   VOLUME = {5},
+   PAGES = {377-394}
+}
+
+ at article{CAnd:07,
+   author = {C. Anderson and Z. Li and J. Vermunt},
+   year = {2007},
+   TITLE  = {Estimation of models in the Rasch family for polytomous items and multiple latent variables},
+   JOURNAL = {Journal of Statistical Software},
+   VOLUME = {20},
+   number = {6},
+   PAGES = {},
+   url = {http://www.jstatsoft.org/v20/i06/}
+}
+
+ at BOOK{Ra:60,
+   AUTHOR = {Rasch, G.},
+   YEAR   = {1960},
+   TITLE  = {Probabilistic Models for some Intelligence and Attainment Tests},
+   PUBLISHER = {Danish Institute for Educational Research},
+   EDITION = {},
+   ADDRESS = {Copenhagen}
+}
+
+ at BOOK{Fisch:74,
+   AUTHOR = {Fischer, G. H.},
+   YEAR   = {1974},
+   TITLE  = {Einf\"uhrung in die Theorie psychologischer Tests [Introduction to Mental Test Theory]},
+   PUBLISHER = {Huber},
+   EDITION = {},
+   ADDRESS = {Bern}
+}
+
+ at BOOK{BaKi:04,
+   AUTHOR = {Baker, F. B. and Kim, S.},
+   YEAR   = {2004},
+   TITLE  = {Item Response Theory: Parameter Estimation Techniques},
+   PUBLISHER = {Dekker},
+   EDITION = {2nd},
+   ADDRESS = {New York}
+}
+
+ at BOOK{FiPS:98,
+   AUTHOR = {Fischer, G. H. and Ponocny-Seliger, E.},
+   YEAR   = {1998},
+   TITLE  = {Structural Rasch Modeling: Handbook of the Usage of LPCM-WIN 1.0},
+   PUBLISHER = {ProGAMMA},
+   EDITION = {},
+   ADDRESS = {Groningen}
+}
+
+
+ at INCOLLECTION{Ra:61,
+   AUTHOR = {Rasch, G.},
+   YEAR   = {1961},
+   TITLE  = {On General Laws and the Meaning of Measurement in Psychology.},
+   BOOKTITLE = {Proceedings of the IV. Berkeley Symposium on Mathematical Statistics and Probability, Vol. IV},
+   PAGES = {321-333},
+   EDITOR = {},
+   PUBLISHER = {University of California Press}, 
+   ADDRESS = {Berkeley}
+}
+
+ at INCOLLECTION{Fisch:95a,
+   AUTHOR = {Fischer, G. H.},
+   YEAR   = {1995},
+   TITLE  = {Derivations of the Rasch Model},
+   BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications},
+   PAGES = {15-38},
+   EDITOR = {G.H. Fischer and I.W. Molenaar},
+   PUBLISHER = {Springer}, 
+   ADDRESS = {New York}
+}
+
+ at INCOLLECTION{Fisch:95b,
+   AUTHOR = {Fischer, G. H.},
+   YEAR   = {1995},
+   TITLE  = {Linear Logistic Models for Change},
+   BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications},
+   PAGES = {157-180},
+   EDITOR = {G.H. Fischer and I.W. Molenaar},
+   PUBLISHER = {Springer}, 
+   ADDRESS = {New York}
+}
+
+ at INCOLLECTION{Linacre:2004,
+   AUTHOR = {Linacre, J. M.},
+   YEAR   = {2004},
+   TITLE  = {Estimation Methods for \uppercase{R}asch Measures},
+   BOOKTITLE = {Introduction to \uppercase{R}asch Measurement},
+   PAGES = {25-48},
+   EDITOR = {E. V. {Smith Jr.} and R. M. Smith},
+   PUBLISHER = {JAM Press}, 
+   ADDRESS = {Maple Grove, MN}
+}
+
+ at INCOLLECTION{And:95,
+   AUTHOR = {Andersen, E. B.},
+   YEAR   = {1995},
+   TITLE  = {Polytomous Rasch Models and their Estimation},
+   BOOKTITLE = {Rasch models: Foundations, recent developments, and applications},
+   PAGES = {271-292},
+   EDITOR = {G.H. Fischer and I.W. Molenaar},
+   PUBLISHER = {Springer}, 
+   ADDRESS = {New York}
+}
+
+ at INCOLLECTION{Molenaar:1995,
+   AUTHOR = {Molenaar, I.},
+   YEAR   = {1995},
+   TITLE  = {Estimation of Item Parameters},
+   BOOKTITLE = {Rasch models: Foundations, recent developments, and applications},
+   PAGES = {39-51},
+   EDITOR = {G.H. Fischer and I.W. Molenaar},
+   PUBLISHER = {Springer}, 
+   ADDRESS = {New York}
+}
+
+ at article{Bock+Aitkin:1981,
+   author = {R. D. Bock and M. Aitkin},
+   year = {1981},
+   TITLE  = {Marginal maximum likelihood estimation of item parameters: an application of an \uppercase{EM} algorithm},
+   JOURNAL = {Psychometrika},
+   VOLUME = {46},
+   PAGES = {443-459}
+}
+
+ at article{Haberman:77,
+   author = {S. J. Haberman},
+   year = {1977},
+   TITLE  = {Maximum likelihood estimates in exponential response models},
+   JOURNAL = {The Annals of Statistics},
+   VOLUME = {5},
+   PAGES = {815-841}
+}
+
+ at article{Wright+Panchapakesan:1969,
+   author = {B. D. Wright and N. Panchapakesan},
+   year = {1969},
+   TITLE  = {A procedure for sample-free item analysis},
+   JOURNAL = {Educational and Psychological Measurement},
+   VOLUME = {29},
+   PAGES = {23-48}
+}
+
+ at BOOK{Wright+Masters:1982,
+   AUTHOR = {Wright, B. D. and Masters, G. N.},
+   YEAR   = {1982},
+   TITLE  = {Rating scale analysis: \uppercase{R}asch measurement},
+   PUBLISHER = {Mesa Press},
+   EDITION = {},
+   ADDRESS = {Chicago}
+}
+
+ at BOOK{Andrich:88,
+   AUTHOR = {Andrich, D.},
+   YEAR   = {1988},
+   TITLE  = {Rasch Models for Measurement (Sage University paper series on quantitative applications in the social sciences)},
+   PUBLISHER = {Sage},
+   EDITION = {},
+   ADDRESS = {Newbury Park, CA}
+}
+
+ at INCOLLECTION{FiJr:92,
+   AUTHOR = {Fisher Jr., W. P.},
+   YEAR   = {1992},
+   TITLE  = {Objectivity in Measurement: A Philosophical History of \uppercase{R}asch's Separability Theorem},
+   BOOKTITLE = {Objective Measurement: Theory into Practice, Volume 1},
+   PAGES = {29-60},
+   EDITOR = {M. Wilson},
+   PUBLISHER = {Ablex}, 
+   ADDRESS = {Norwood, NJ}
+}
+
+ at INCOLLECTION{Rost:2000,
+   AUTHOR = {Rost, J.},
+   YEAR   = {2000},
+   TITLE  = {The Growing Family of \uppercase{R}asch Models},
+   BOOKTITLE = {Essays on item response theory},
+   PAGES = {25-42},
+   EDITOR = {A. Boomsma and M.A.J. van Duijn and T.A.B. Snijders},
+   PUBLISHER = {Springer}, 
+   ADDRESS = {New York}
+}
+
+ at article{Fischer:1987,
+    author = {G. H. Fischer},
+    title = {Applying the principles of specific objectivity and of generalizability to the measurement of change},
+    year = {1987},
+    journal = {Psychometrika},
+    volume = {52},
+    pages = {565-587},
+}
+
+ at BOOK{Davier:1998,
+   AUTHOR = {{von Davier}, M.},
+   YEAR   = {1998},
+   TITLE  = {\uppercase{WINMIRA}: A \uppercase{W}indows program for mixed \uppercase{R}asch models},
+   PUBLISHER = {IPN},
+   EDITION = {},
+   ADDRESS = {Kiel}
+}
+
+ at INCOLLECTION{Kubinger:1989,
+   AUTHOR = {Kubinger, K. D.},
+   YEAR   = {1989},
+   TITLE  = {Aktueller \uppercase{S}tand und kritische \uppercase{W}\"urdigung der \uppercase{P}robabilistischen \uppercase{T}esttheorie. [\uppercase{C}urrent status and critical appreciation of probabilistic test theory]},
+   BOOKTITLE = {Moderne \uppercase{T}esttheorie: Ein Abriss samt neuesten Beitr\"agen},
+   PAGES = {19-83},
+   EDITOR = {K.D. Kubinger},
+   PUBLISHER = {Beltz}, 
+   ADDRESS = {Weinheim}
+}
+
+
+ at INCOLLECTION{Glas:1992,
+   AUTHOR = {Glas, C. A. W.},
+   YEAR   = {1992},
+   TITLE  = {A Rasch Model with a Multivariate Distribution of Ability},
+   BOOKTITLE = {Objective Measurement: Theory into Practice, Volume 1},
+   PAGES = {236-258},
+   EDITOR = {M. Wilson},
+   PUBLISHER = {Ablex}, 
+   ADDRESS = {Norwood, NJ}
+}
+
+ at INCOLLECTION{Ho:95,
+   AUTHOR = {Hoijtink, H.},
+   YEAR   = {1995},
+   TITLE  = {Linear and Repeated Measures Models for the Person Parameter},
+   BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications},
+   PAGES = {203-214},
+   EDITOR = {G.H. Fischer and I.W. Molenaar},
+   PUBLISHER = {Springer}, 
+   ADDRESS = {New York}
+}
+
+ at article{Fischer:1981,
+   author = {G. H. Fischer},
+   year = {1981},
+   TITLE  = {On the existence and uniqueness of maximum-likelihood estimates in the \uppercase{R}asch model},
+   JOURNAL = {Psychometrika},
+   VOLUME = {46},
+   PAGES = {59-77}
+}
+
+ at INCOLLECTION{Fischer:1988,
+   AUTHOR = {Fischer, G. H.},
+   YEAR   = {1988},
+   TITLE  = {Spezifische \uppercase{O}bjektivit\"at: \uppercase{E}ine wissenschaftstheoretische \uppercase{G}rundlage des \uppercase{R}asch-\uppercase{M}odells. [\uppercase{S}pecific objectivity: \uppercase{A}n epistemological foundation of the \uppercase{R}asch model.]},
+   BOOKTITLE = {Moderne Testtheorie},
+   PAGES = {87-111},
+   EDITOR = {K.D. Kubinger},
+   PUBLISHER = {Beltz}, 
+   ADDRESS = {Weinheim}
+}
+
+ at INCOLLECTION{And:83,
+   AUTHOR = {Andersen, E. B.},
+   YEAR   = {1983},
+   TITLE  = {A General Latent Structure Model for Contingency Table Data},
+   BOOKTITLE = {Principles of Modern Psychological Measurement},
+   PAGES = {117-138},
+   EDITOR = {H. Wainer and S. Messik},
+   PUBLISHER = {Erlbaum}, 
+   ADDRESS = {Hillsdale, NJ}
+}
+
+ at article{Andersen:1970,
+   author = {E. B. Andersen},
+   year = {1970},
+   TITLE  = {Asymptotic properties of conditional maximum likelihood estimators},
+   JOURNAL = {Journal of the Royal Statistical Society, Series B},
+   VOLUME = {32},
+   PAGES = {283-301}
+}
+
+ at INCOLLECTION{Glas+Verhelst:1995b,
+   AUTHOR = {Glas, C. A. W. and Verhelst, N.},
+   YEAR   = {1995},
+   TITLE  = {Tests of Fit for Polytomous \uppercase{R}asch Models},
+   BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications},
+   PAGES = {325-352},
+   EDITOR = {G.H. Fischer and I.W. Molenaar},
+   PUBLISHER = {Springer}, 
+   ADDRESS = {New York}
+}
+
+ at BOOK{deBoeck+Wilson:2004,
+   AUTHOR = {{de Boeck}, P. and Wilson, M.},
+   YEAR   = {2004},
+   TITLE  = {Explanatory item response models: A generalized linear and nonlinear approach},
+   PUBLISHER = {Springer},
+   EDITION = {},
+   ADDRESS = {New York}
+}
+
+
+ at article{VedB:01,
+   author = {T. Verguts and P. {De Boeck}},
+   year = {2001},
+   TITLE  = {Some \uppercase{M}antel-\uppercase{H}aenszel tests of \uppercase{R}asch model assumptions},
+   JOURNAL = {British Journal of Mathematical and Statistical Psychology},
+   VOLUME = {54},
+   PAGES = {21-37}
+}
+
+ at INCOLLECTION{Glas+Verhelst:1995a,
+   AUTHOR = {Glas, C. A. W. and Verhelst, N.},
+   YEAR   = {1995},
+   TITLE  = {Testing the \uppercase{R}asch model},
+   BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications},
+   PAGES = {69-96},
+   EDITOR = {G.H. Fischer and I.W. Molenaar},
+   PUBLISHER = {Springer}, 
+   ADDRESS = {New York}
+}
+
+ at INCOLLECTION{Sm:04,
+   AUTHOR = {Smith, R. M.},
+   YEAR   = {2004},
+   TITLE  = {Fit Analysis in Latent Trait Measurement Models.},
+   BOOKTITLE = {Introduction to Rasch Measurement},
+   PAGES = {73-92},
+   EDITOR = {E. S. Smith and R. M. Smith},
+   PUBLISHER = {JAM Press}, 
+   ADDRESS = {Maple Grove, MN}
+}
+
+ at INCOLLECTION{Fisch:77,
+   AUTHOR = {Fischer, G. H.},
+   YEAR   = {1977},
+   TITLE  = {Linear Logistic Trait Models: Theory and Application},
+   BOOKTITLE = {Structural Models of Thinking and Learning},
+   PAGES = {203-225},
+   EDITOR = {H. Spada and W. F. Kempf},
+   PUBLISHER = {Huber}, 
+   ADDRESS = {Bern}
+}
+
+ at INCOLLECTION{RoDa:95,
+   AUTHOR = {Rost, J. and von Davier, M.},
+   YEAR   = {1995},
+   TITLE  = {Polytomous Mixed Rasch Models},
+   BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications},
+   PAGES = {371-382},
+   EDITOR = {G.H. Fischer and I.W. Molenaar},
+   PUBLISHER = {Springer}, 
+   ADDRESS = {New York}
+}
+
+ at INCOLLECTION{Verhelst+Glas:1995,
+   AUTHOR = {N. Verhelst and C. A. W. Glas},
+   YEAR   = {1995},
+   TITLE  = {The one parameter logistic test model},
+   BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications},
+   PAGES = {215-238},
+   EDITOR = {G.H. Fischer and I.W. Molenaar},
+   PUBLISHER = {Springer}, 
+   ADDRESS = {New York}
+}
+
+ at INCOLLECTION{Pf:94,
+   AUTHOR = {Pfanzagl, J.},
+   YEAR   = {1994},
+   TITLE  = {On Item Parameter Estimation in Certain Latent Trait Models},
+   BOOKTITLE = {Contributions to Mathematical Psychology, Psychometrics, and Methodology},
+   PAGES = {249-263},
+   EDITOR = {G.H. Fischer and D. Laming},
+   PUBLISHER = {Springer}, 
+   ADDRESS = {New York}
+}
+
+ at article{Gustafsson:1980,
+   author = {J. Gustafsson},
+   year = {1980},
+   TITLE  = {Testing and obtaining fit of data to the \uppercase{R}asch model},
+   JOURNAL = {British Journal of Mathematical and Statistical Psychology},
+   VOLUME = {33},
+   PAGES = {205-233}
+}
+
+ at Manual{R:06,
+       title        = {R: A Language and Environment for Statistical
+                       Computing},
+       author       = {{R Development Core Team}},
+       organization = {R Foundation for Statistical Computing},
+       address      = {Vienna, Austria},
+       year         = 2007,
+       note         = {{ISBN} 3-900051-07-0},
+       url          = {http://www.R-project.org}
+     }
+     
+ at article{Mair+Hatzinger:2007,
+   author = {P. Mair and R. Hatzinger},
+   year = {2007},
+   TITLE  = {Extended \uppercase{R}asch Modeling: The e\uppercase{R}m package for the application of \uppercase{IRT} models in \uppercase{R}},
+   JOURNAL = {Journal of Statistical Software},
+   VOLUME = {20(9)},
+   PAGES = {1-20}
+}
+
+ at article{Warm:1989,
+   author = {T. A. Warm},
+   year = {1989},
+   TITLE  = {Weighted likelihood estimation of ability in item response theory},
+   JOURNAL = {Psychometrika},
+   VOLUME = {54},
+   PAGES = {427-450}
+}
+
+ at article{Ponocny:2001,
+   author = {I. Ponocny},
+   year = {2001},
+   TITLE  = {Nonparametric goodness-of-fit tests for the \uppercase{R}asch model.},
+   JOURNAL = {Psychometrika},
+   VOLUME = {66},
+   PAGES = {437-460}
+}
+
+ at INCOLLECTION{Birnbaum:1968,
+   AUTHOR = {Birnbaum, A.},
+   YEAR   = {1968},
+   TITLE  = {Some latent trait models and their use in inferring an examinee's ability},
+   BOOKTITLE = {Statistical theories of mental test scores},
+   PAGES = {395-479},
+   EDITOR = {F. M. Lord and M. R. Novick},
+   PUBLISHER = {Addison-Wesley}, 
+   ADDRESS = {Reading, MA}
+}
+
+ at article{Verhelst+Hatzinger+Mair:2007,
+   author = {N. Verhelst and R. Hatzinger and P. Mair},
+   year = {2007},
+   TITLE  = {The \uppercase{R}asch sampler},
+   JOURNAL = {Journal of Statistical Software},
+   VOLUME = {20(4)},
+   PAGES = {1-14}
+}
+
+ at article{FiSch:70,
+   author = {G. H. Fischer and H. H. Scheiblechner},
+   year = {1970},
+   TITLE  = {Algorithmen und \uppercase{P}rogramme f\"ur das probabilistische \uppercase{T}estmodell von \uppercase{R}asch. [\uppercase{A}lgorithms and programs for \uppercase{R}asch's probabilistic test model.]},
+   JOURNAL = {Psychologische Beitr\"age},
+   VOLUME = {12},
+   PAGES = {23-51}
+}
+
+ at article{Suarez+Glas:2003,
+   author = {J. C. Su\'arez-Falc\'on and C. A. W. Glas},
+   year = {2003},
+   TITLE  = {Evaluation of global testing procedures for item fit to the \uppercase{R}asch model.},
+   JOURNAL = {British Journal of Mathematical and Statistical Psychology},
+   VOLUME = {56},
+   PAGES = {127-143}
+}
+
+ at article{Adams+Wilson+Wang:1997,
+   author = {R. J. Adams and M. Wilson and W. C. Wang},
+   year = {1997},
+   TITLE  = {The multidimensional random coefficients multinomial logit model},
+   JOURNAL = {Applied Psychological Measurement},
+   VOLUME = {21},
+   PAGES = {1-23}
+}
+
+ at article{Jannarone:1986,
+   author = {R. J. Jannarone},
+   year = {1986},
+   TITLE  = {Conjunctive item response theory model kernels},
+   JOURNAL = {Psychometrika},
+   VOLUME = {51},
+   PAGES = {357-373}
+}
+
+ at article{,
+   author = {},
+   year = {},
+   TITLE  = {},
+   JOURNAL = {},
+   VOLUME = {},
+   PAGES = {}
+}
+
+ at article{Mair+Hatzinger:2007b,
+   author = {P. Mair and R. Hatzinger},
+   year = {2007},
+   TITLE  = {\uppercase{CML} based estimation of extended \uppercase{R}asch models with the e\uppercase{R}m package in \uppercase{R}},
+   JOURNAL = {Psychology Science},
+   VOLUME = {49},
+   PAGES = {26-43}
+}
+
+ at INCOLLECTION{Hoijtink+Boomsma:1995,
+   AUTHOR = {H. Hoijtink and A. Boomsma},
+   YEAR   = {1995},
+   TITLE  = {On person parameter estimation in the dichotomous \uppercase{R}asch model},
+   BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications},
+   PAGES = {53-68},
+   EDITOR = {G.H. Fischer and I.W. Molenaar},
+   PUBLISHER = {Springer}, 
+   ADDRESS = {New York}
+}
+
+
+ at BOOK{Poinstingl+Mair+Hatzinger:07,
+   AUTHOR = {Poinstingl, H. and Mair, P. and Hatzinger, R.},
+   YEAR   = {2007},
+   TITLE  = {Manual zum \uppercase{S}oftwarepackage e\uppercase{R}m: Anwendung des \uppercase{R}asch-\uppercase{M}odells},
+   PUBLISHER = {Pabst Science Publishers},
+   EDITION = {},
+   ADDRESS = {Lengerich}
+}
+
+ at MastersThesis{Mair:2006,
+   Author = {P. Mair},
+   School = {Department of Psychology, University of Vienna},
+   Title = {Simulation Studies for Goodness-of-Fit Statistics in Item Response Theory},
+   Year = {2006}
+}
+
+
+
+ at INCOLLECTION{,
+   AUTHOR = {},
+   YEAR   = {},
+   TITLE  = {},
+   BOOKTITLE = {},
+   PAGES = {},
+   EDITOR = {},
+   PUBLISHER = {}, 
+   ADDRESS = {}
+}
+
+ at BOOK{,
+   AUTHOR = {},
+   YEAR   = {},
+   TITLE  = {},
+   PUBLISHER = {},
+   EDITION = {},
+   ADDRESS = {}
+}
\ No newline at end of file
diff --git a/inst/doc/eRmvig.pdf b/inst/doc/eRmvig.pdf
new file mode 100755
index 0000000..d9d1b73
Binary files /dev/null and b/inst/doc/eRmvig.pdf differ
diff --git a/inst/doc/index.html b/inst/doc/index.html
new file mode 100755
index 0000000..408101d
--- /dev/null
+++ b/inst/doc/index.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><title>R: eRm vignettes</title>
+<link rel="stylesheet" type="text/css" href="../../R.css">
+</head><body>
+<h2>Vignettes of package eRm </h2>
+<dl>
+<dt><a href="eRmvig.pdf">eRmvig.pdf</a>:
+<dd> eRm Basics
+</dl>
+</body></html>
diff --git a/inst/doc/jss.bst b/inst/doc/jss.bst
new file mode 100755
index 0000000..a5b0e78
--- /dev/null
+++ b/inst/doc/jss.bst
@@ -0,0 +1,1647 @@
+%%
+%% This is file `jss.bst',
+%% generated with the docstrip utility.
+%%
+%% The original source files were:
+%%
+%% merlin.mbs  (with options: `ay,nat,nm-rvx,keyxyr,dt-beg,yr-par,note-yr,tit-qq,bt-qq,atit-u,trnum-it,vol-bf,volp-com,num-xser,isbn,issn,edpar,pp,ed,xedn,xand,etal-it,revdata,eprint,url,url-blk,doi,nfss')
+%% ----------------------------------------
+%% *** Journal of Statistical Software ***
+%% 
+%% Copyright 1994-2004 Patrick W Daly
+ % ===============================================================
+ % IMPORTANT NOTICE:
+ % This bibliographic style (bst) file has been generated from one or
+ % more master bibliographic style (mbs) files, listed above.
+ %
+ % This generated file can be redistributed and/or modified under the terms
+ % of the LaTeX Project Public License Distributed from CTAN
+ % archives in directory macros/latex/base/lppl.txt; either
+ % version 1 of the License, or any later version.
+ % ===============================================================
+ % Name and version information of the main mbs file:
+ % \ProvidesFile{merlin.mbs}[2004/02/09 4.13 (PWD, AO, DPC)]
+ %   For use with BibTeX version 0.99a or later
+ %-------------------------------------------------------------------
+ % This bibliography style file is intended for texts in ENGLISH
+ % This is an author-year citation style bibliography. As such, it is
+ % non-standard LaTeX, and requires a special package file to function properly.
+ % Such a package is    natbib.sty   by Patrick W. Daly
+ % The form of the \bibitem entries is
+ %   \bibitem[Jones et al.(1990)]{key}...
+ %   \bibitem[Jones et al.(1990)Jones, Baker, and Smith]{key}...
+ % The essential feature is that the label (the part in brackets) consists
+ % of the author names, as they should appear in the citation, with the year
+ % in parentheses following. There must be no space before the opening
+ % parenthesis!
+ % With natbib v5.3, a full list of authors may also follow the year.
+ % In natbib.sty, it is possible to define the type of enclosures that is
+ % really wanted (brackets or parentheses), but in either case, there must
+ % be parentheses in the label.
+ % The \cite command functions as follows:
+ %   \citet{key} ==>>                Jones et al. (1990)
+ %   \citet*{key} ==>>               Jones, Baker, and Smith (1990)
+ %   \citep{key} ==>>                (Jones et al., 1990)
+ %   \citep*{key} ==>>               (Jones, Baker, and Smith, 1990)
+ %   \citep[chap. 2]{key} ==>>       (Jones et al., 1990, chap. 2)
+ %   \citep[e.g.][]{key} ==>>        (e.g. Jones et al., 1990)
+ %   \citep[e.g.][p. 32]{key} ==>>   (e.g. Jones et al., p. 32)
+ %   \citeauthor{key} ==>>           Jones et al.
+ %   \citeauthor*{key} ==>>          Jones, Baker, and Smith
+ %   \citeyear{key} ==>>             1990
+ %---------------------------------------------------------------------
+
+ENTRY
+  { address
+    archive
+    author
+    booktitle
+    chapter
+    collaboration
+    doi
+    edition
+    editor
+    eid
+    eprint
+    howpublished
+    institution
+    isbn
+    issn
+    journal
+    key
+    month
+    note
+    number
+    numpages
+    organization
+    pages
+    publisher
+    school
+    series
+    title
+    type
+    url
+    volume
+    year
+  }
+  {}
+  { label extra.label sort.label short.list }
+INTEGERS { output.state before.all mid.sentence after.sentence after.block }
+FUNCTION {init.state.consts}
+{ #0 'before.all :=
+  #1 'mid.sentence :=
+  #2 'after.sentence :=
+  #3 'after.block :=
+}
+STRINGS { s t}
+FUNCTION {output.nonnull}
+{ 's :=
+  output.state mid.sentence =
+    { ", " * write$ }
+    { output.state after.block =
+        { add.period$ write$
+          newline$
+          "\newblock " write$
+        }
+        { output.state before.all =
+            'write$
+            { add.period$ " " * write$ }
+          if$
+        }
+      if$
+      mid.sentence 'output.state :=
+    }
+  if$
+  s
+}
+FUNCTION {output}
+{ duplicate$ empty$
+    'pop$
+    'output.nonnull
+  if$
+}
+FUNCTION {output.check}
+{ 't :=
+  duplicate$ empty$
+    { pop$ "empty " t * " in " * cite$ * warning$ }
+    'output.nonnull
+  if$
+}
+FUNCTION {fin.entry}
+{ add.period$
+  write$
+  newline$
+}
+
+FUNCTION {new.block}
+{ output.state before.all =
+    'skip$
+    { after.block 'output.state := }
+  if$
+}
+FUNCTION {new.sentence}
+{ output.state after.block =
+    'skip$
+    { output.state before.all =
+        'skip$
+        { after.sentence 'output.state := }
+      if$
+    }
+  if$
+}
+FUNCTION {add.blank}
+{  " " * before.all 'output.state :=
+}
+
+FUNCTION {date.block}
+{
+  new.block
+}
+
+FUNCTION {not}
+{   { #0 }
+    { #1 }
+  if$
+}
+FUNCTION {and}
+{   'skip$
+    { pop$ #0 }
+  if$
+}
+FUNCTION {or}
+{   { pop$ #1 }
+    'skip$
+  if$
+}
+FUNCTION {non.stop}
+{ duplicate$
+   "}" * add.period$
+   #-1 #1 substring$ "." =
+}
+
+STRINGS {z}
+FUNCTION {remove.dots}
+{ 'z :=
+  ""
+  { z empty$ not }
+  { z #1 #1 substring$
+    z #2 global.max$ substring$ 'z :=
+    duplicate$ "." = 'pop$
+      { * }
+    if$
+  }
+  while$
+}
+FUNCTION {new.block.checkb}
+{ empty$
+  swap$ empty$
+  and
+    'skip$
+    'new.block
+  if$
+}
+FUNCTION {field.or.null}
+{ duplicate$ empty$
+    { pop$ "" }
+    'skip$
+  if$
+}
+FUNCTION {emphasize}
+{ duplicate$ empty$
+    { pop$ "" }
+    { "\emph{" swap$ * "}" * }
+  if$
+}
+FUNCTION {bolden}
+{ duplicate$ empty$
+    { pop$ "" }
+    { "\textbf{" swap$ * "}" * }
+  if$
+}
+FUNCTION {tie.or.space.prefix}
+{ duplicate$ text.length$ #3 <
+    { "~" }
+    { " " }
+  if$
+  swap$
+}
+
+FUNCTION {capitalize}
+{ "u" change.case$ "t" change.case$ }
+
+FUNCTION {space.word}
+{ " " swap$ * " " * }
+ % Here are the language-specific definitions for explicit words.
+ % Each function has a name bbl.xxx where xxx is the English word.
+ % The language selected here is ENGLISH
+FUNCTION {bbl.and}
+{ "and"}
+
+FUNCTION {bbl.etal}
+{ "et~al." }
+
+FUNCTION {bbl.editors}
+{ "eds." }
+
+FUNCTION {bbl.editor}
+{ "ed." }
+
+FUNCTION {bbl.edby}
+{ "edited by" }
+
+FUNCTION {bbl.edition}
+{ "edition" }
+
+FUNCTION {bbl.volume}
+{ "volume" }
+
+FUNCTION {bbl.of}
+{ "of" }
+
+FUNCTION {bbl.number}
+{ "number" }
+
+FUNCTION {bbl.nr}
+{ "no." }
+
+FUNCTION {bbl.in}
+{ "in" }
+
+FUNCTION {bbl.pages}
+{ "pp." }
+
+FUNCTION {bbl.page}
+{ "p." }
+
+FUNCTION {bbl.eidpp}
+{ "pages" }
+
+FUNCTION {bbl.chapter}
+{ "chapter" }
+
+FUNCTION {bbl.techrep}
+{ "Technical Report" }
+
+FUNCTION {bbl.mthesis}
+{ "Master's thesis" }
+
+FUNCTION {bbl.phdthesis}
+{ "Ph.D. thesis" }
+
+MACRO {jan} {"January"}
+
+MACRO {feb} {"February"}
+
+MACRO {mar} {"March"}
+
+MACRO {apr} {"April"}
+
+MACRO {may} {"May"}
+
+MACRO {jun} {"June"}
+
+MACRO {jul} {"July"}
+
+MACRO {aug} {"August"}
+
+MACRO {sep} {"September"}
+
+MACRO {oct} {"October"}
+
+MACRO {nov} {"November"}
+
+MACRO {dec} {"December"}
+
+MACRO {acmcs} {"ACM Computing Surveys"}
+
+MACRO {acta} {"Acta Informatica"}
+
+MACRO {cacm} {"Communications of the ACM"}
+
+MACRO {ibmjrd} {"IBM Journal of Research and Development"}
+
+MACRO {ibmsj} {"IBM Systems Journal"}
+
+MACRO {ieeese} {"IEEE Transactions on Software Engineering"}
+
+MACRO {ieeetc} {"IEEE Transactions on Computers"}
+
+MACRO {ieeetcad}
+ {"IEEE Transactions on Computer-Aided Design of Integrated Circuits"}
+
+MACRO {ipl} {"Information Processing Letters"}
+
+MACRO {jacm} {"Journal of the ACM"}
+
+MACRO {jcss} {"Journal of Computer and System Sciences"}
+
+MACRO {scp} {"Science of Computer Programming"}
+
+MACRO {sicomp} {"SIAM Journal on Computing"}
+
+MACRO {tocs} {"ACM Transactions on Computer Systems"}
+
+MACRO {tods} {"ACM Transactions on Database Systems"}
+
+MACRO {tog} {"ACM Transactions on Graphics"}
+
+MACRO {toms} {"ACM Transactions on Mathematical Software"}
+
+MACRO {toois} {"ACM Transactions on Office Information Systems"}
+
+MACRO {toplas} {"ACM Transactions on Programming Languages and Systems"}
+
+MACRO {tcs} {"Theoretical Computer Science"}
+FUNCTION {bibinfo.check}
+{ swap$
+  duplicate$ missing$
+    {
+      pop$ pop$
+      ""
+    }
+    { duplicate$ empty$
+        {
+          swap$ pop$
+        }
+        { swap$
+          pop$
+        }
+      if$
+    }
+  if$
+}
+FUNCTION {bibinfo.warn}
+{ swap$
+  duplicate$ missing$
+    {
+      swap$ "missing " swap$ * " in " * cite$ * warning$ pop$
+      ""
+    }
+    { duplicate$ empty$
+        {
+          swap$ "empty " swap$ * " in " * cite$ * warning$
+        }
+        { swap$
+          pop$
+        }
+      if$
+    }
+  if$
+}
+FUNCTION {format.eprint}
+{ eprint duplicate$ empty$
+    'skip$
+    { "\eprint"
+      archive empty$
+        'skip$
+        { "[" * archive * "]" * }
+      if$
+      "{" * swap$ * "}" *
+    }
+  if$
+}
+FUNCTION {format.url}
+{ url empty$
+    { "" }
+    { "\urlprefix\url{" url * "}" * }
+  if$
+}
+
+STRINGS  { bibinfo}
+INTEGERS { nameptr namesleft numnames }
+
+FUNCTION {format.names}
+{ 'bibinfo :=
+  duplicate$ empty$ 'skip$ {
+  's :=
+  "" 't :=
+  #1 'nameptr :=
+  s num.names$ 'numnames :=
+  numnames 'namesleft :=
+    { namesleft #0 > }
+    { s nameptr
+      "{vv~}{ll}{ jj}{ f{}}"
+      format.name$
+      remove.dots
+      bibinfo bibinfo.check
+      't :=
+      nameptr #1 >
+        {
+          namesleft #1 >
+            { ", " * t * }
+            {
+              "," *
+              s nameptr "{ll}" format.name$ duplicate$ "others" =
+                { 't := }
+                { pop$ }
+              if$
+              t "others" =
+                {
+                  " " * bbl.etal emphasize *
+                }
+                { " " * t * }
+              if$
+            }
+          if$
+        }
+        't
+      if$
+      nameptr #1 + 'nameptr :=
+      namesleft #1 - 'namesleft :=
+    }
+  while$
+  } if$
+}
+FUNCTION {format.names.ed}
+{
+  'bibinfo :=
+  duplicate$ empty$ 'skip$ {
+  's :=
+  "" 't :=
+  #1 'nameptr :=
+  s num.names$ 'numnames :=
+  numnames 'namesleft :=
+    { namesleft #0 > }
+    { s nameptr
+      "{f{}~}{vv~}{ll}{ jj}"
+      format.name$
+      remove.dots
+      bibinfo bibinfo.check
+      't :=
+      nameptr #1 >
+        {
+          namesleft #1 >
+            { ", " * t * }
+            {
+              "," *
+              s nameptr "{ll}" format.name$ duplicate$ "others" =
+                { 't := }
+                { pop$ }
+              if$
+              t "others" =
+                {
+
+                  " " * bbl.etal emphasize *
+                }
+                { " " * t * }
+              if$
+            }
+          if$
+        }
+        't
+      if$
+      nameptr #1 + 'nameptr :=
+      namesleft #1 - 'namesleft :=
+    }
+  while$
+  } if$
+}
+FUNCTION {format.key}
+{ empty$
+    { key field.or.null }
+    { "" }
+  if$
+}
+
+FUNCTION {format.authors}
+{ author "author" format.names
+    duplicate$ empty$ 'skip$
+    { collaboration "collaboration" bibinfo.check
+      duplicate$ empty$ 'skip$
+        { " (" swap$ * ")" * }
+      if$
+      *
+    }
+  if$
+}
+FUNCTION {get.bbl.editor}
+{ editor num.names$ #1 > 'bbl.editors 'bbl.editor if$ }
+
+FUNCTION {format.editors}
+{ editor "editor" format.names duplicate$ empty$ 'skip$
+    {
+      " " *
+      get.bbl.editor
+   "(" swap$ * ")" *
+      *
+    }
+  if$
+}
+FUNCTION {format.isbn}
+{ isbn "isbn" bibinfo.check
+  duplicate$ empty$ 'skip$
+    {
+      new.block
+      "ISBN " swap$ *
+    }
+  if$
+}
+
+FUNCTION {format.issn}
+{ issn "issn" bibinfo.check
+  duplicate$ empty$ 'skip$
+    {
+      new.block
+      "ISSN " swap$ *
+    }
+  if$
+}
+
+FUNCTION {format.doi}
+{ doi "doi" bibinfo.check
+  duplicate$ empty$ 'skip$
+    {
+      new.block
+      "\doi{" swap$ * "}" *
+    }
+  if$
+}
+FUNCTION {format.note}
+{
+ note empty$
+    { "" }
+    { note #1 #1 substring$
+      duplicate$ "{" =
+        'skip$
+        { output.state mid.sentence =
+          { "l" }
+          { "u" }
+        if$
+        change.case$
+        }
+      if$
+      note #2 global.max$ substring$ * "note" bibinfo.check
+    }
+  if$
+}
+
+FUNCTION {format.title}
+{ title
+  "title" bibinfo.check
+  duplicate$ empty$ 'skip$
+    {
+      "\enquote{" swap$ *
+      add.period$ "}" *
+    }
+  if$
+}
+FUNCTION {end.quote.btitle}
+{ booktitle empty$
+    'skip$
+    { before.all 'output.state := }
+  if$
+}
+FUNCTION {format.full.names}
+{'s :=
+ "" 't :=
+  #1 'nameptr :=
+  s num.names$ 'numnames :=
+  numnames 'namesleft :=
+    { namesleft #0 > }
+    { s nameptr
+      "{vv~}{ll}" format.name$
+      't :=
+      nameptr #1 >
+        {
+          namesleft #1 >
+            { ", " * t * }
+            {
+              s nameptr "{ll}" format.name$ duplicate$ "others" =
+                { 't := }
+                { pop$ }
+              if$
+              t "others" =
+                {
+                  " " * bbl.etal emphasize *
+                }
+                {
+                  numnames #2 >
+                    { "," * }
+                    'skip$
+                  if$
+                  bbl.and
+                  space.word * t *
+                }
+              if$
+            }
+          if$
+        }
+        't
+      if$
+      nameptr #1 + 'nameptr :=
+      namesleft #1 - 'namesleft :=
+    }
+  while$
+}
+
+FUNCTION {author.editor.key.full}
+{ author empty$
+    { editor empty$
+        { key empty$
+            { cite$ #1 #3 substring$ }
+            'key
+          if$
+        }
+        { editor format.full.names }
+      if$
+    }
+    { author format.full.names }
+  if$
+}
+
+FUNCTION {author.key.full}
+{ author empty$
+    { key empty$
+         { cite$ #1 #3 substring$ }
+          'key
+      if$
+    }
+    { author format.full.names }
+  if$
+}
+
+FUNCTION {editor.key.full}
+{ editor empty$
+    { key empty$
+         { cite$ #1 #3 substring$ }
+          'key
+      if$
+    }
+    { editor format.full.names }
+  if$
+}
+
+FUNCTION {make.full.names}
+{ type$ "book" =
+  type$ "inbook" =
+  or
+    'author.editor.key.full
+    { type$ "proceedings" =
+        'editor.key.full
+        'author.key.full
+      if$
+    }
+  if$
+}
+
+FUNCTION {output.bibitem}
+{ newline$
+  "\bibitem[{" write$
+  label write$
+  ")" make.full.names duplicate$ short.list =
+     { pop$ }
+     { * }
+   if$
+  "}]{" * write$
+  cite$ write$
+  "}" write$
+  newline$
+  ""
+  before.all 'output.state :=
+}
+
+FUNCTION {n.dashify}
+{
+  't :=
+  ""
+    { t empty$ not }
+    { t #1 #1 substring$ "-" =
+        { t #1 #2 substring$ "--" = not
+            { "--" *
+              t #2 global.max$ substring$ 't :=
+            }
+            {   { t #1 #1 substring$ "-" = }
+                { "-" *
+                  t #2 global.max$ substring$ 't :=
+                }
+              while$
+            }
+          if$
+        }
+        { t #1 #1 substring$ *
+          t #2 global.max$ substring$ 't :=
+        }
+      if$
+    }
+  while$
+}
+
+FUNCTION {word.in}
+{ bbl.in capitalize
+  " " * }
+
+FUNCTION {format.date}
+{ year "year" bibinfo.check duplicate$ empty$
+    {
+      "empty year in " cite$ * "; set to ????" * warning$
+       pop$ "????"
+    }
+    'skip$
+  if$
+  extra.label *
+  before.all 'output.state :=
+  " (" swap$ * ")" *
+}
+FUNCTION {format.btitle}
+{ title "title" bibinfo.check
+  duplicate$ empty$ 'skip$
+    {
+      emphasize
+    }
+  if$
+}
+FUNCTION {either.or.check}
+{ empty$
+    'pop$
+    { "can't use both " swap$ * " fields in " * cite$ * warning$ }
+  if$
+}
+FUNCTION {format.bvolume}
+{ volume empty$
+    { "" }
+    { bbl.volume volume tie.or.space.prefix
+      "volume" bibinfo.check * *
+      series "series" bibinfo.check
+      duplicate$ empty$ 'pop$
+        { swap$ bbl.of space.word * swap$
+          emphasize * }
+      if$
+      "volume and number" number either.or.check
+    }
+  if$
+}
+FUNCTION {format.number.series}
+{ volume empty$
+    { number empty$
+        { series field.or.null }
+        { series empty$
+            { number "number" bibinfo.check }
+            { output.state mid.sentence =
+                { bbl.number }
+                { bbl.number capitalize }
+              if$
+              number tie.or.space.prefix "number" bibinfo.check * *
+              bbl.in space.word *
+              series "series" bibinfo.check *
+            }
+          if$
+        }
+      if$
+    }
+    { "" }
+  if$
+}
+
+FUNCTION {format.edition}
+{ edition duplicate$ empty$ 'skip$
+    {
+      output.state mid.sentence =
+        { "l" }
+        { "t" }
+      if$ change.case$
+      "edition" bibinfo.check
+      " " * bbl.edition *
+    }
+  if$
+}
+INTEGERS { multiresult }
+FUNCTION {multi.page.check}
+{ 't :=
+  #0 'multiresult :=
+    { multiresult not
+      t empty$ not
+      and
+    }
+    { t #1 #1 substring$
+      duplicate$ "-" =
+      swap$ duplicate$ "," =
+      swap$ "+" =
+      or or
+        { #1 'multiresult := }
+        { t #2 global.max$ substring$ 't := }
+      if$
+    }
+  while$
+  multiresult
+}
+FUNCTION {format.pages}
+{ pages duplicate$ empty$ 'skip$
+    { duplicate$ multi.page.check
+        {
+          bbl.pages swap$
+          n.dashify
+        }
+        {
+          bbl.page swap$
+        }
+      if$
+      tie.or.space.prefix
+      "pages" bibinfo.check
+      * *
+    }
+  if$
+}
+FUNCTION {format.journal.pages}
+{ pages duplicate$ empty$ 'pop$
+    { swap$ duplicate$ empty$
+        { pop$ pop$ format.pages }
+        {
+          ", " *
+          swap$
+          n.dashify
+          "pages" bibinfo.check
+          *
+        }
+      if$
+    }
+  if$
+}
+FUNCTION {format.journal.eid}
+{ eid "eid" bibinfo.check
+  duplicate$ empty$ 'pop$
+    { swap$ duplicate$ empty$ 'skip$
+      {
+          ", " *
+      }
+      if$
+      swap$ *
+      numpages empty$ 'skip$
+        { bbl.eidpp numpages tie.or.space.prefix
+          "numpages" bibinfo.check * *
+          " (" swap$ * ")" * *
+        }
+      if$
+    }
+  if$
+}
+FUNCTION {format.vol.num.pages}
+{ volume field.or.null
+  duplicate$ empty$ 'skip$
+    {
+      "volume" bibinfo.check
+    }
+  if$
+  bolden
+  number "number" bibinfo.check duplicate$ empty$ 'skip$
+    {
+      swap$ duplicate$ empty$
+        { "there's a number but no volume in " cite$ * warning$ }
+        'skip$
+      if$
+      swap$
+      "(" swap$ * ")" *
+    }
+  if$ *
+  eid empty$
+    { format.journal.pages }
+    { format.journal.eid }
+  if$
+}
+
+FUNCTION {format.chapter.pages}
+{ chapter empty$
+    'format.pages
+    { type empty$
+        { bbl.chapter }
+        { type "l" change.case$
+          "type" bibinfo.check
+        }
+      if$
+      chapter tie.or.space.prefix
+      "chapter" bibinfo.check
+      * *
+      pages empty$
+        'skip$
+        { ", " * format.pages * }
+      if$
+    }
+  if$
+}
+
+FUNCTION {bt.enquote}
+{ duplicate$ empty$ 'skip$
+  { "\enquote{" swap$ *
+    non.stop
+      { ",} " * }
+      { "}, " * }
+    if$
+  }
+  if$
+}
+FUNCTION {format.booktitle}
+{
+  booktitle "booktitle" bibinfo.check
+  bt.enquote
+}
+FUNCTION {format.in.ed.booktitle}
+{ format.booktitle duplicate$ empty$ 'skip$
+    {
+      editor "editor" format.names.ed duplicate$ empty$ 'pop$
+        {
+          " " *
+          get.bbl.editor
+          "(" swap$ * "), " *
+          * swap$
+          * }
+      if$
+      word.in swap$ *
+    }
+  if$
+}
+FUNCTION {format.thesis.type}
+{ type duplicate$ empty$
+    'pop$
+    { swap$ pop$
+      "t" change.case$ "type" bibinfo.check
+    }
+  if$
+}
+FUNCTION {format.tr.number}
+{ number "number" bibinfo.check
+  type duplicate$ empty$
+    { pop$ bbl.techrep }
+    'skip$
+  if$
+  "type" bibinfo.check
+  swap$ duplicate$ empty$
+    { pop$ "t" change.case$ }
+    { tie.or.space.prefix * * }
+  if$
+}
+FUNCTION {format.article.crossref}
+{
+  word.in
+  " \cite{" * crossref * "}" *
+}
+FUNCTION {format.book.crossref}
+{ volume duplicate$ empty$
+    { "empty volume in " cite$ * "'s crossref of " * crossref * warning$
+      pop$ word.in
+    }
+    { bbl.volume
+      capitalize
+      swap$ tie.or.space.prefix "volume" bibinfo.check * * bbl.of space.word *
+    }
+  if$
+  " \cite{" * crossref * "}" *
+}
+FUNCTION {format.incoll.inproc.crossref}
+{
+  word.in
+  " \cite{" * crossref * "}" *
+}
+FUNCTION {format.org.or.pub}
+{ 't :=
+  ""
+  address empty$ t empty$ and
+    'skip$
+    {
+      t empty$
+        { address "address" bibinfo.check *
+        }
+        { t *
+          address empty$
+            'skip$
+            { ", " * address "address" bibinfo.check * }
+          if$
+        }
+      if$
+    }
+  if$
+}
+FUNCTION {format.publisher.address}
+{ publisher "publisher" bibinfo.warn format.org.or.pub
+}
+
+FUNCTION {format.organization.address}
+{ organization "organization" bibinfo.check format.org.or.pub
+}
+
+FUNCTION {article}
+{ output.bibitem
+  format.authors "author" output.check
+  author format.key output
+  format.date "year" output.check
+  date.block
+  format.title "title" output.check
+  new.block
+  crossref missing$
+    {
+      journal
+      "journal" bibinfo.check
+      emphasize
+      "journal" output.check
+      format.vol.num.pages output
+    }
+    { format.article.crossref output.nonnull
+      format.pages output
+    }
+  if$
+  format.issn output
+  format.doi output
+  new.block
+  format.note output
+  format.eprint output
+  format.url output
+  fin.entry
+}
+FUNCTION {book}
+{ output.bibitem
+  author empty$
+    { format.editors "author and editor" output.check
+      editor format.key output
+    }
+    { format.authors output.nonnull
+      crossref missing$
+        { "author and editor" editor either.or.check }
+        'skip$
+      if$
+    }
+  if$
+  format.date "year" output.check
+  date.block
+  format.btitle "title" output.check
+  crossref missing$
+    { format.bvolume output
+      new.block
+      format.number.series output
+      new.sentence
+      format.publisher.address output
+    }
+    {
+      new.block
+      format.book.crossref output.nonnull
+    }
+  if$
+  format.edition output
+  format.isbn output
+  format.doi output
+  new.block
+  format.note output
+  format.eprint output
+  format.url output
+  fin.entry
+}
+FUNCTION {booklet}
+{ output.bibitem
+  format.authors output
+  author format.key output
+  format.date "year" output.check
+  date.block
+  format.title "title" output.check
+  new.block
+  howpublished "howpublished" bibinfo.check output
+  address "address" bibinfo.check output
+  format.isbn output
+  format.doi output
+  new.block
+  format.note output
+  format.eprint output
+  format.url output
+  fin.entry
+}
+
+FUNCTION {inbook}
+{ output.bibitem
+  author empty$
+    { format.editors "author and editor" output.check
+      editor format.key output
+    }
+    { format.authors output.nonnull
+      crossref missing$
+        { "author and editor" editor either.or.check }
+        'skip$
+      if$
+    }
+  if$
+  format.date "year" output.check
+  date.block
+  format.btitle "title" output.check
+  crossref missing$
+    {
+      format.bvolume output
+      format.chapter.pages "chapter and pages" output.check
+      new.block
+      format.number.series output
+      new.sentence
+      format.publisher.address output
+    }
+    {
+      format.chapter.pages "chapter and pages" output.check
+      new.block
+      format.book.crossref output.nonnull
+    }
+  if$
+  format.edition output
+  crossref missing$
+    { format.isbn output }
+    'skip$
+  if$
+  format.doi output
+  new.block
+  format.note output
+  format.eprint output
+  format.url output
+  fin.entry
+}
+
+FUNCTION {incollection}
+{ output.bibitem
+  format.authors "author" output.check
+  author format.key output
+  format.date "year" output.check
+  date.block
+  format.title "title" output.check
+  new.block
+  crossref missing$
+    { format.in.ed.booktitle "booktitle" output.check
+      end.quote.btitle
+      format.bvolume output
+      format.number.series output
+      format.chapter.pages output
+      new.sentence
+      format.publisher.address output
+      format.edition output
+      format.isbn output
+    }
+    { format.incoll.inproc.crossref output.nonnull
+      format.chapter.pages output
+    }
+  if$
+  format.doi output
+  new.block
+  format.note output
+  format.eprint output
+  format.url output
+  fin.entry
+}
+FUNCTION {inproceedings}
+{ output.bibitem
+  format.authors "author" output.check
+  author format.key output
+  format.date "year" output.check
+  date.block
+  format.title "title" output.check
+  new.block
+  crossref missing$
+    { format.in.ed.booktitle "booktitle" output.check
+      end.quote.btitle
+      format.bvolume output
+      format.number.series output
+      format.pages output
+      new.sentence
+      publisher empty$
+        { format.organization.address output }
+        { organization "organization" bibinfo.check output
+          format.publisher.address output
+        }
+      if$
+      format.isbn output
+      format.issn output
+    }
+    { format.incoll.inproc.crossref output.nonnull
+      format.pages output
+    }
+  if$
+  format.doi output
+  new.block
+  format.note output
+  format.eprint output
+  format.url output
+  fin.entry
+}
+FUNCTION {conference} { inproceedings }
+FUNCTION {manual}
+{ output.bibitem
+  format.authors output
+  author format.key output
+  format.date "year" output.check
+  date.block
+  format.btitle "title" output.check
+  organization address new.block.checkb
+  organization "organization" bibinfo.check output
+  address "address" bibinfo.check output
+  format.edition output
+  format.doi output
+  new.block
+  format.note output
+  format.eprint output
+  format.url output
+  fin.entry
+}
+
+FUNCTION {mastersthesis}
+{ output.bibitem
+  format.authors "author" output.check
+  author format.key output
+  format.date "year" output.check
+  date.block
+  format.btitle
+  "title" output.check
+  new.block
+  bbl.mthesis format.thesis.type output.nonnull
+  school "school" bibinfo.warn output
+  address "address" bibinfo.check output
+  format.doi output
+  new.block
+  format.note output
+  format.eprint output
+  format.url output
+  fin.entry
+}
+
+FUNCTION {misc}
+{ output.bibitem
+  format.authors output
+  author format.key output
+  format.date "year" output.check
+  date.block
+  format.title output
+  new.block
+  howpublished "howpublished" bibinfo.check output
+  format.doi output
+  new.block
+  format.note output
+  format.eprint output
+  format.url output
+  fin.entry
+}
+FUNCTION {phdthesis}
+{ output.bibitem
+  format.authors "author" output.check
+  author format.key output
+  format.date "year" output.check
+  date.block
+  format.btitle
+  "title" output.check
+  new.block
+  bbl.phdthesis format.thesis.type output.nonnull
+  school "school" bibinfo.warn output
+  address "address" bibinfo.check output
+  format.doi output
+  new.block
+  format.note output
+  format.eprint output
+  format.url output
+  fin.entry
+}
+
+FUNCTION {proceedings}
+{ output.bibitem
+  format.editors output
+  editor format.key output
+  format.date "year" output.check
+  date.block
+  format.btitle "title" output.check
+  format.bvolume output
+  format.number.series output
+  new.sentence
+  publisher empty$
+    { format.organization.address output }
+    { organization "organization" bibinfo.check output
+      format.publisher.address output
+    }
+  if$
+  format.isbn output
+  format.issn output
+  format.doi output
+  new.block
+  format.note output
+  format.eprint output
+  format.url output
+  fin.entry
+}
+
+FUNCTION {techreport}
+{ output.bibitem
+  format.authors "author" output.check
+  author format.key output
+  format.date "year" output.check
+  date.block
+  format.title
+  "title" output.check
+  new.block
+  format.tr.number emphasize output.nonnull
+  institution "institution" bibinfo.warn output
+  address "address" bibinfo.check output
+  format.doi output
+  new.block
+  format.note output
+  format.eprint output
+  format.url output
+  fin.entry
+}
+
+FUNCTION {unpublished}
+{ output.bibitem
+  format.authors "author" output.check
+  author format.key output
+  format.date "year" output.check
+  date.block
+  format.title "title" output.check
+  format.doi output
+  new.block
+  format.note "note" output.check
+  format.eprint output
+  format.url output
+  fin.entry
+}
+
+FUNCTION {default.type} { misc }
+READ
+FUNCTION {sortify}
+{ purify$
+  "l" change.case$
+}
+INTEGERS { len }
+FUNCTION {chop.word}
+{ 's :=
+  'len :=
+  s #1 len substring$ =
+    { s len #1 + global.max$ substring$ }
+    's
+  if$
+}
+FUNCTION {format.lab.names}
+{ 's :=
+  "" 't :=
+  s #1 "{vv~}{ll}" format.name$
+  s num.names$ duplicate$
+  #2 >
+    { pop$
+      " " * bbl.etal emphasize *
+    }
+    { #2 <
+        'skip$
+        { s #2 "{ff }{vv }{ll}{ jj}" format.name$ "others" =
+            {
+              " " * bbl.etal emphasize *
+            }
+            { bbl.and space.word * s #2 "{vv~}{ll}" format.name$
+              * }
+          if$
+        }
+      if$
+    }
+  if$
+}
+
+FUNCTION {author.key.label}
+{ author empty$
+    { key empty$
+        { cite$ #1 #3 substring$ }
+        'key
+      if$
+    }
+    { author format.lab.names }
+  if$
+}
+
+FUNCTION {author.editor.key.label}
+{ author empty$
+    { editor empty$
+        { key empty$
+            { cite$ #1 #3 substring$ }
+            'key
+          if$
+        }
+        { editor format.lab.names }
+      if$
+    }
+    { author format.lab.names }
+  if$
+}
+
+FUNCTION {editor.key.label}
+{ editor empty$
+    { key empty$
+        { cite$ #1 #3 substring$ }
+        'key
+      if$
+    }
+    { editor format.lab.names }
+  if$
+}
+
+FUNCTION {calc.short.authors}
+{ type$ "book" =
+  type$ "inbook" =
+  or
+    'author.editor.key.label
+    { type$ "proceedings" =
+        'editor.key.label
+        'author.key.label
+      if$
+    }
+  if$
+  'short.list :=
+}
+
+FUNCTION {calc.label}
+{ calc.short.authors
+  short.list
+  "("
+  *
+  year duplicate$ empty$
+  short.list key field.or.null = or
+     { pop$ "" }
+     'skip$
+  if$
+  *
+  'label :=
+}
+
+FUNCTION {sort.format.names}
+{ 's :=
+  #1 'nameptr :=
+  ""
+  s num.names$ 'numnames :=
+  numnames 'namesleft :=
+    { namesleft #0 > }
+    { s nameptr
+      "{vv{ } }{ll{ }}{  f{ }}{  jj{ }}"
+      format.name$ 't :=
+      nameptr #1 >
+        {
+          "   "  *
+          namesleft #1 = t "others" = and
+            { "zzzzz" * }
+            { t sortify * }
+          if$
+        }
+        { t sortify * }
+      if$
+      nameptr #1 + 'nameptr :=
+      namesleft #1 - 'namesleft :=
+    }
+  while$
+}
+
+FUNCTION {sort.format.title}
+{ 't :=
+  "A " #2
+    "An " #3
+      "The " #4 t chop.word
+    chop.word
+  chop.word
+  sortify
+  #1 global.max$ substring$
+}
+FUNCTION {author.sort}
+{ author empty$
+    { key empty$
+        { "to sort, need author or key in " cite$ * warning$
+          ""
+        }
+        { key sortify }
+      if$
+    }
+    { author sort.format.names }
+  if$
+}
+FUNCTION {author.editor.sort}
+{ author empty$
+    { editor empty$
+        { key empty$
+            { "to sort, need author, editor, or key in " cite$ * warning$
+              ""
+            }
+            { key sortify }
+          if$
+        }
+        { editor sort.format.names }
+      if$
+    }
+    { author sort.format.names }
+  if$
+}
+FUNCTION {editor.sort}
+{ editor empty$
+    { key empty$
+        { "to sort, need editor or key in " cite$ * warning$
+          ""
+        }
+        { key sortify }
+      if$
+    }
+    { editor sort.format.names }
+  if$
+}
+FUNCTION {presort}
+{ calc.label
+  label sortify
+  "    "
+  *
+  type$ "book" =
+  type$ "inbook" =
+  or
+    'author.editor.sort
+    { type$ "proceedings" =
+        'editor.sort
+        'author.sort
+      if$
+    }
+  if$
+  #1 entry.max$ substring$
+  'sort.label :=
+  sort.label
+  *
+  "    "
+  *
+  title field.or.null
+  sort.format.title
+  *
+  #1 entry.max$ substring$
+  'sort.key$ :=
+}
+
+ITERATE {presort}
+SORT
+STRINGS { last.label next.extra }
+INTEGERS { last.extra.num number.label }
+FUNCTION {initialize.extra.label.stuff}
+{ #0 int.to.chr$ 'last.label :=
+  "" 'next.extra :=
+  #0 'last.extra.num :=
+  #0 'number.label :=
+}
+FUNCTION {forward.pass}
+{ last.label label =
+    { last.extra.num #1 + 'last.extra.num :=
+      last.extra.num int.to.chr$ 'extra.label :=
+    }
+    { "a" chr.to.int$ 'last.extra.num :=
+      "" 'extra.label :=
+      label 'last.label :=
+    }
+  if$
+  number.label #1 + 'number.label :=
+}
+FUNCTION {reverse.pass}
+{ next.extra "b" =
+    { "a" 'extra.label := }
+    'skip$
+  if$
+  extra.label 'next.extra :=
+  extra.label
+  duplicate$ empty$
+    'skip$
+    { "{\natexlab{" swap$ * "}}" * }
+  if$
+  'extra.label :=
+  label extra.label * 'label :=
+}
+EXECUTE {initialize.extra.label.stuff}
+ITERATE {forward.pass}
+REVERSE {reverse.pass}
+FUNCTION {bib.sort.order}
+{ sort.label
+  "    "
+  *
+  year field.or.null sortify
+  *
+  "    "
+  *
+  title field.or.null
+  sort.format.title
+  *
+  #1 entry.max$ substring$
+  'sort.key$ :=
+}
+ITERATE {bib.sort.order}
+SORT
+FUNCTION {begin.bib}
+{ preamble$ empty$
+    'skip$
+    { preamble$ write$ newline$ }
+  if$
+  "\begin{thebibliography}{" number.label int.to.str$ * "}" *
+  write$ newline$
+  "\newcommand{\enquote}[1]{``#1''}"
+  write$ newline$
+  "\providecommand{\natexlab}[1]{#1}"
+  write$ newline$
+  "\providecommand{\url}[1]{\texttt{#1}}"
+  write$ newline$
+  "\providecommand{\urlprefix}{URL }"
+  write$ newline$
+  "\expandafter\ifx\csname urlstyle\endcsname\relax"
+  write$ newline$
+  "  \providecommand{\doi}[1]{doi:\discretionary{}{}{}#1}\else"
+  write$ newline$
+  "  \providecommand{\doi}{doi:\discretionary{}{}{}\begingroup \urlstyle{rm}\Url}\fi"
+  write$ newline$
+  "\providecommand{\eprint}[2][]{\url{#2}}"
+  write$ newline$
+}
+EXECUTE {begin.bib}
+EXECUTE {init.state.consts}
+ITERATE {call.type$}
+FUNCTION {end.bib}
+{ newline$
+  "\end{thebibliography}" write$ newline$
+}
+EXECUTE {end.bib}
+%% End of customized bst file
+%%
+%% End of file `jss.bst'.
diff --git a/inst/doc/modelhierarchy.pdf b/inst/doc/modelhierarchy.pdf
new file mode 100755
index 0000000..d1a7d96
Binary files /dev/null and b/inst/doc/modelhierarchy.pdf differ
diff --git a/man/IC.Rd b/man/IC.Rd
new file mode 100755
index 0000000..21907d7
--- /dev/null
+++ b/man/IC.Rd
@@ -0,0 +1,45 @@
+\name{IC}
+\alias{IC}
+\alias{IC.ppar}
+\title{Information criteria}
+\description{Computation of information criteria such as AIC, BIC, and cAIC based on
+unconditional (joint), marginal, and conditional log-likelihood}
+\usage{
+\method{IC}{ppar}(object)
+}
+
+\arguments{
+  \item{object}{Object of class \code{ppar} (from \code{person.parameter()}).}
+}
+
+\details{
+The joint log-likelihood is established by summation of the logarithms of the estimated
+solving probabilities. The marginal log-likelihood can be computed directly from the
+conditional log-likelihood (see vignette for details).
+}
+
+\value{
+The function \code{IC} returns an object of class \code{ICr} containing:
+  \item{ICtable}{Matrix containing log-likelihood values, number of parameters, AIC, BIC, and
+   cAIC for the joint, marginal, and conditional log-likelihood.}
+}
+
+\seealso{
+    \code{\link{LRtest.Rm}}
+}
+\examples{
+
+#IC's for Rasch model
+data(raschdat2)
+res <- RM(raschdat2)             #Rasch model
+pres <- person.parameter(res)    #Person parameters
+IC(pres)
+
+#IC's for RSM
+data(rsmdat)
+res <- RSM(rsmdat)
+pres <- person.parameter(res)
+IC(pres)
+
+}
+\keyword{models}
diff --git a/man/LLTM.Rd b/man/LLTM.Rd
new file mode 100755
index 0000000..11eb14d
--- /dev/null
+++ b/man/LLTM.Rd
@@ -0,0 +1,90 @@
+\name{LLTM}
+\alias{LLTM}
+\title{Estimation of linear logistic test models}
+\description{
+  This function computes the parameter estimates of a linear logistic test model (LLTM)
+  for binary item responses by using CML estimation.
+}
+\usage{
+LLTM(X, W, mpoints = 1, groupvec = 1, se = TRUE, sum0 = TRUE,
+   etaStart)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{X}{Input 0/1 data matrix or data frame; rows represent individuals (N in total),
+  columns represent items. Missing values have to be inserted as \code{NA}.}
+  \item{W}{Design matrix for the LLTM. If omitted, the function will compute W automatically.}
+  \item{mpoints}{Number of measurement points.}
+  \item{groupvec}{Vector of length N which determines the group membership of each subject,
+  starting from 1. If \code{groupvec=1}, no group contrasts are imposed.}
+  \item{se}{If \code{TRUE}, the standard errors are computed.}
+  \item{sum0}{If \code{TRUE}, the parameters are normalized to sum-0 by specifying
+  an appropriate \code{W}. If \code{FALSE}, the first parameter is restricted to 0.}
+  \item{etaStart}{A vector of starting values for the eta parameters can be specified. If missing, the 0-vector is used.}
+}
+\details{
+  Through appropriate definition of \code{W} the LLTM can be viewed as a more parsimonous
+  Rasch model, on the one hand, e.g. by imposing some cognitive base operations
+  to solve the items. One the other hand, linear extensions of the Rasch model
+  such as group comparisons and repeated measurement designs can be computed.
+  If more than one measurement point is examined, the item responses for the 2nd, 3rd, etc.
+  measurement point are added column-wise in X.
+
+  If \code{W} is user-defined, it is nevertheless necessary to
+  specify \code{mpoints} and \code{groupvec}. It is important that first the time contrasts and
+  then the group contrasts have to be imposed.
+
+  Available methods for LLTM-objects are:\cr
+   \code{print}, \code{coef},
+  \code{model.matrix}, \code{vcov},\code{summary}, \code{logLik}, \code{person.parameters}.
+}
+\value{
+  Returns an object of class \code{eRm} containing:
+
+  \item{loglik}{Conditional log-likelihood.}
+  \item{iter}{Number of iterations.}
+  \item{npar}{Number of parameters.}
+  \item{convergence}{See \code{code} output in \code{\link{nlm}}.}
+  \item{etapar}{Estimated basic item parameters.}
+  \item{se.eta}{Standard errors of the estimated basic parameters.}
+  \item{betapar}{Estimated item (easiness) parameters.}
+  \item{se.beta}{Standard errors of item parameters.}
+  \item{hessian}{Hessian matrix if \code{se = TRUE}.}
+  \item{W}{Design matrix.}
+  \item{X}{Data matrix.}
+  \item{X01}{Dichotomized data matrix.}
+  \item{groupvec}{Group membership vector.}
+  \item{call}{The matched call.}
+}
+\references{
+Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations,
+Recent Developments, and Applications. Springer.
+
+Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for
+the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
+
+Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models
+with the eRm package in R. Psychology Science, 49, 26-43.
+}
+\author{Patrick Mair, Reinhold Hatzinger}
+%\note{}
+\seealso{\code{\link{LRSM}},\code{\link{LPCM}}}
+\examples{
+
+#LLTM for 2 measurement points
+#100 persons, 2*15 items, W generated automatically
+data(lltmdat1)
+res1 <- LLTM(lltmdat1, mpoints = 2)
+print(res1)
+summary(res1)
+
+#Reparameterized Rasch model as LLTM (more parsimonious)
+data(lltmdat2)
+W <- matrix(c(1,2,1,3,2,2,2,1,1,1),ncol=2)              #design matrix
+res2 <- LLTM(lltmdat2, W = W)
+print(res2)
+summary(res2)
+
+}
+
+\keyword{models}
diff --git a/man/LPCM.Rd b/man/LPCM.Rd
new file mode 100755
index 0000000..b3de4dd
--- /dev/null
+++ b/man/LPCM.Rd
@@ -0,0 +1,81 @@
+\name{LPCM}
+\alias{LPCM}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Estimation of linear partial credit models}
+\description{
+  This function computes the parameter estimates of a linear partial credit model (LPCM)
+  for polytomous item responses by using CML estimation.
+}
+\usage{
+LPCM(X, W , mpoints = 1, groupvec = 1, se = TRUE, sum0 = TRUE,
+   etaStart)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{X}{Input data matrix or data frame; rows represent individuals (N in total),
+  columns represent items. Missing values are inserted as \code{NA}.}
+  \item{W}{Design matrix for the LPCM. If omitted, the function will compute W automatically.}
+  \item{mpoints}{Number of measurement points.}
+  \item{groupvec}{Vector of length N which determines the group membership of each subject, starting from 1.}
+  \item{se}{If \code{TRUE}, the standard errors are computed.}
+  \item{sum0}{If \code{TRUE}, the parameters are normalized to sum-0 by specifying
+  an appropriate \code{W}. If \code{FALSE}, the first parameter is restricted to 0.}
+  \item{etaStart}{A vector of starting values for the eta parameters can be specified. If missing, the 0-vector is used.}
+}
+\details{
+  Through appropriate definition of \code{W} the LPCM can be viewed as a more parsimonious
+  PCM, on the one hand, e.g. by imposing some cognitive base operations
+  to solve the items. On the other hand, linear extensions of the Rasch model
+  such as group comparisons and repeated measurement designs can be computed.
+  If more than one measurement point is examined, the item responses for the 2nd, 3rd, etc.
+  measurement point are added column-wise in X.
+
+  If \code{W} is user-defined, it is nevertheless necessary to
+  specify \code{mpoints} and \code{groupvec}. It is important that first the time contrasts and
+  then the group contrasts have to be imposed.
+
+  Available methods for LPCM-objects are:\cr
+  \code{print}, \code{coef},
+  \code{model.matrix}, \code{vcov},\code{summary}, \code{logLik}, \code{person.parameters}.
+}
+\value{
+  Returns an object of class \code{eRm} containing:
+
+  \item{loglik}{Conditional log-likelihood.}
+  \item{iter}{Number of iterations.}
+  \item{npar}{Number of parameters.}
+  \item{convergence}{See \code{code} output in \code{\link{nlm}}.}
+  \item{etapar}{Estimated basic item parameters.}
+  \item{se.eta}{Standard errors of the estimated basic item parameters.}
+  \item{betapar}{Estimated item (easiness) parameters.}
+  \item{se.beta}{Standard errors of item parameters.}
+  \item{hessian}{Hessian matrix if \code{se = TRUE}.}
+  \item{W}{Design matrix.}
+  \item{X}{Data matrix.}
+  \item{X01}{Dichotomized data matrix.}
+  \item{groupvec}{Group membership vector.}
+  \item{call}{The matched call.}
+}
+\references{
+Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations,
+Recent Developments, and Applications. Springer.
+
+Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
+
+Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43.
+}
+\author{Patrick Mair, Reinhold Hatzinger}
+%\note{}
+\seealso{\code{\link{LRSM}},\code{\link{LLTM}}}
+\examples{
+
+#LPCM for two measurement points and two subject groups
+#20 subjects, 2*3 items
+data(lpcmdat)
+G <- c(rep(1,10),rep(2,10))                   #group vector
+res <- LPCM(lpcmdat, mpoints = 2, groupvec = G)
+print(res)
+summary(res)
+}
+
+\keyword{models}
diff --git a/man/LRSM.Rd b/man/LRSM.Rd
new file mode 100755
index 0000000..572c777
--- /dev/null
+++ b/man/LRSM.Rd
@@ -0,0 +1,80 @@
+\name{LRSM}
+\alias{LRSM}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Estimation of linear rating scale models}
+\description{
+  This function computes the parameter estimates of a linear rating scale model (LRSM)
+  for polytomous item responses by using CML estimation.
+}
+\usage{
+LRSM(X, W , mpoints = 1, groupvec = 1, se = TRUE, sum0 = TRUE,
+   etaStart)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{X}{Input data matrix or data frame; rows represent individuals (N in total), columns represent items. Missing values are inserted as \code{NA}.}
+  \item{W}{Design matrix for the LRSM. If omitted, the function will compute W automatically.}
+  \item{mpoints}{Number of measurement points.}
+  \item{groupvec}{Vector of length N which determines the group membership of each subject, starting from 1}
+  \item{se}{If \code{TRUE}, the standard errors are computed.}
+  \item{sum0}{If \code{TRUE}, the parameters are normalized to sum-0 by specifying
+  an appropriate \code{W}. If \code{FALSE}, the first parameter is restricted to 0.}
+  \item{etaStart}{A vector of starting values for the eta parameters can be specified. If missing, the 0-vector is used.}
+}
+\details{
+  Through appropriate definition of \code{W} the LRSM can be viewed as a more parsimonious
+  RSM, on the one hand, e.g. by imposing some cognitive base operations
+  to solve the items. On the other hand, linear extensions of the Rasch model
+  such as group comparisons and repeated measurement designs can be computed.
+  If more than one measurement point is examined, the item responses for the 2nd, 3rd, etc.
+  measurement point are added column-wise in X.
+
+  If \code{W} is user-defined, it is nevertheless necessary to
+  specify \code{mpoints} and \code{groupvec}. It is important that first the time contrasts and
+  then the group contrasts have to be imposed.
+
+  Available methods for LRSM-objects are:
+  \code{print}, \code{coef},
+  \code{model.matrix}, \code{vcov},\code{summary}, \code{logLik}, \code{person.parameters}.
+}
+\value{
+  Returns an object of class \code{eRm} containing:
+
+  \item{loglik}{Conditional log-likelihood.}
+  \item{iter}{Number of iterations.}
+  \item{npar}{Number of parameters.}
+  \item{convergence}{See \code{code} output in \code{\link{nlm}}.}
+  \item{etapar}{Estimated basic item parameters (item and category parameters).}
+  \item{se.eta}{Standard errors of the estimated basic item parameters.}
+  \item{betapar}{Estimated item (easiness) parameters.}
+  \item{se.beta}{Standard errors of item parameters.}
+  \item{hessian}{Hessian matrix if \code{se = TRUE}.}
+  \item{W}{Design matrix.}
+  \item{X}{Data matrix.}
+  \item{X01}{Dichotomized data matrix.}
+  \item{groupvec}{Group membership vector.}
+  \item{call}{The matched call.}
+}
+\references{
+Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations,
+Recent Developments, and Applications. Springer.
+
+Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
+
+Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43.
+}
+\author{Patrick Mair, Reinhold Hatzinger}
+%\note{}
+\seealso{\code{\link{LLTM}},\code{\link{LPCM}}}
+\examples{
+
+#LRSM for two measurement points
+#20 subjects, 2*3 items, W generated automatically,
+#first parameter set to 0, no standard errors computed.
+
+data(lrsmdat)
+res <- LRSM(lrsmdat, mpoints = 2, groupvec = 1, sum0 = FALSE, se = FALSE)
+print(res)
+}
+
+\keyword{models}
diff --git a/man/LRtest.Rd b/man/LRtest.Rd
new file mode 100755
index 0000000..a30bbaf
--- /dev/null
+++ b/man/LRtest.Rd
@@ -0,0 +1,136 @@
+\name{LRtest}
+\alias{LRtest.Rm}
+\alias{LRtest}
+\alias{print.LR}
+\alias{summary.LR}
+\alias{plotGOF}
+\alias{plotGOF.LR}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Computation of Andersen's LR-test.}
+\description{This LR-test is based on subject subgroup splitting.
+}
+\usage{
+\method{LRtest}{Rm}(object, splitcr = "median", se = FALSE)
+\method{plotGOF}{LR}(x, beta.subset = "all", main="Graphical Model Check",
+   xlab = NULL, ylab = NULL, tlab = "item",
+   ylim = c(-3, 3), xlim = c(-3, 3), type = "p", pos = "4",
+   conf = NULL, ctrline = NULL, ...)
+%\method{print}{LR}(x,...)
+%\method{summary}{LR}(object,...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{object}{Object of class \code{Rm}.}
+  \item{splitcr}{Split criterion for subject raw score splitting. \code{all.r} corresponds to a
+  full raw score split, \code{median} uses the median as split criterion, \code{mean} performs a mean-split.
+  Optionally \code{splitcr} can also be a vector which assigns each person to a
+  certain subgroup (e.g., following an external criterion). This vector can be numeric, character or a factor.}
+  \item{se}{If \code{TRUE} standard errors for beta's are computed.}
+
+%Arguments for \code{plotGOF}:
+  \item{x}{Object of class \code{LR}. Also used for visualizing the fit of single items.}
+  \item{beta.subset}{If \code{"all"}, all items are plotted. Otherwise numeric subset vector can be specified.}
+  \item{main}{Main title of the plot.}
+  \item{xlab}{Label on x-axis, default gives name of \code{splitcr} and level.}
+  \item{ylab}{Label on y-axis, default gives name of \code{splitcr} and level.}
+  \item{tlab}{Specification of item labels: \code{"item"} prints the item names, \code{"number"} gives integers
+       corresponding to order of the beta parameters, if \code{"none"} no labels are printed.
+       \code{"identify"} allows for an interactive labelling. Initially no labels are printed, after clicking
+       close to an item point the corresponding label is added. The identification process is terminated by clicking
+       the second button and selecting 'Stop' from the menu, or from the 'Stop' menu on the graphics window.
+       For more information and basic operation see \code{\link{identify}}.
+       }
+  \item{xlim}{Limits on x-axis.}
+  \item{ylim}{Limits on y-axis.}
+  \item{type}{Plotting type.(see \code{\link{plot}})}
+  \item{pos}{Position of the item label (see \code{\link{text}})}
+  \item{conf}{for plotting confidence ellipses for the item parameters. If \code{conf=NULL}
+             (the default) no ellipses are drawn. Otherwise, \code{conf} must be
+             specified as a list with optional elements: \code{gamma}, is
+             the confidence level (numeric), \code{col} and \code{lty}, colour and linetype (see \code{\link{par}}),
+             and \code{ia}, logical, if the ellipses are to be drawn interactively (cf.
+             \code{tlab="identify"} above). If \code{conf} is specified as an empty list, %\code{conf=list()},
+             the default values \code{conf=list(gamma=0.95, col="red", lty="dashed", ia=FALSE)}
+             will be used. See example below. To use \code{conf}, the LR object \code{x} has
+             to be generated using the option \code{se=TRUE} in \code{LRtest()}.
+             }
+  \item{ctrline}{for plotting confidence bands (control lines, cf., e.g., Wright and Stone, 1999).
+             If \code{ctrline=NULL}
+             (the default) no lines are drawn. Otherwise, \code{ctrline} must be
+             specified as a list with optional elements: \code{gamma}, is
+             the confidence level (numeric), \code{col} and \code{lty}, colour and linetype (see \code{\link{par}}).
+             If \code{ctrline} is specified as \code{ctrline=list()},
+             the default values \code{conf=list(gamma=0.95, col="blue", lty="solid")}
+             will be used. See examples below. To use \code{ctrline}, the LR object \code{x} has
+             to be generated using the option \code{se=TRUE} in \code{LRtest()}.
+             }
+  \item{...}{Additional parameters.}
+
+}
+\details{If the data set contains missing values and \code{mean} or \code{median} is specified as split criterion,
+         means or medians are calculated for each missing value subgroup and consequently used for raw score splitting.
+
+         When using interactive selection for both labelling of single points (\code{tlab = "identify"} and
+         drawing confidence ellipses at certain points (\code{ia = TRUE}) then first all plotted points are labelled
+         and afterwards all ellipses are generated. Both identification processes can be terminated
+         by clicking the second (right) mouse button and selecting `Stop' from the menu, or from the `Stop'
+         menu on the graphics window.
+
+         \code{summary} and \code{print} methods are available for objects of class \code{LR}.
+}
+\value{
+\code{LRtest} returns an object of class \code{LR} containing:
+  \item{LR}{LR-value.}
+  \item{df}{Degrees of freedom of the test statistic.}
+  \item{Chisq}{Chi-square value with corresponding df.}
+  \item{pvalue}{P-value of the test.}
+  \item{likgroup}{Log-likelihood values for the subgroups}
+  \item{betalist}{List of beta parameters for the subgroups.}
+  \item{selist}{List of standard errors of beta's.}
+  \item{etalist}{List of eta parameters for the subgroups.}
+  \item{spl.gr}{Names and levels for \code{splitcr}.}
+  \item{call}{The matched call.}
+}
+\references{
+Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations, Recent Developments, and Applications. Springer.
+
+Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
+
+Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43.
+
+Wright, B.D.,  and Stone, M.H. (1999). Measurement essentials. Wide Range Inc., Wilmington.
+        (\url{http://www.rasch.org/measess/me-all.pdf} 28Mb).
+}
+
+\author{Patrick Mair, Reinhold Hatzinger}
+%\note{}
+\seealso{\code{\link{Waldtest}}}
+\examples{
+
+# LR-test on dichotomous Rasch model with user-defined split
+splitvec <- sample(1:3, 100, replace = TRUE)
+data(raschdat1)
+res <- RM(raschdat1)
+lrres <- LRtest(res, splitcr = splitvec)
+lrres
+summary(lrres)
+
+\dontrun{
+# goodness-of-fit plot with interactive labelling of items
+plotGOF(lrres, tlab = "identify")
+}
+
+# LR-test with mean split, standard errors for beta's
+lrres2 <- LRtest(res, splitcr = "mean", se = TRUE)
+
+# goodness-of-fit plot
+# additional 95 percent control line with user specified style
+plotGOF(lrres2, ctrline=list(gamma=0.95, col="red", lty="dashed"))
+
+
+# goodness-of-fit plot for items 1, 14, 24, and 25
+# additional 95 percent confidence ellipses, default style
+plotGOF(lrres2, beta.subset=c(14,25,24,1), conf=list())
+}
+
+\keyword{models}
diff --git a/man/MLoef.Rd b/man/MLoef.Rd
new file mode 100755
index 0000000..649cfd3
--- /dev/null
+++ b/man/MLoef.Rd
@@ -0,0 +1,71 @@
+\name{MLoef}
+\alias{MLoef}
+\alias{print.MLoef}
+\alias{summary.MLoef}
+\title{Computation of Martin-Loef's LR-Test}
+\description{This LR-Test is based on item subgroup splitting.}
+\usage{
+MLoef(robj, splitcr = "median")
+}
+\arguments{
+  \item{robj}{Object of class \code{Rm}.}
+  \item{splitcr}{Split criterion to define two groups of items.
+    \code{"median"} and \code{"mean"} split items in two groups based on their
+    items' raw scores. \code{splitcr} can also be a vector of length k (where k
+    denotes the number of items) that takes two distinct values to define groups
+    used for the Martin-Loef Test.}
+}
+\details{The function can handle missing values, as long as every subject has at
+  least 2 valid responses in each group of items.
+
+  If the split criterion is \code{"median"} or \code{"mean"} and one or more items'
+  raw scores equal the median or the mean, respectively, \code{MLoef} will issue a warning
+  that those items are assigned to the lower raw score group. \code{summary.MLoef}
+  gives detailed information about the allocation of all items.
+
+  \code{summary} and \code{print} methods are available for objects of class
+  \code{MLoef}.
+}
+\value{
+  \code{MLoef} returns an object of class \code{MLoef} containing:
+    \item{X01}{binary data matrix of \code{robj}}
+    \item{model}{model of \code{robj}}
+    \item{LR}{LR-value}
+    \item{df}{degrees of freedom of the test statistic}
+    \item{p.value}{p-value of the test}
+    \item{L0}{log-likelihood of the Rasch model}
+    \item{L1}{log-likelihood of group 1}
+    \item{L2}{log-likelihood of group 2}
+    \item{theta.table.RM}{vector of persons' raw scores of the Rasch model}
+    \item{theta.table.MLoef}{tabulation of persons' raw scores in the two groups}
+    \item{items1}{list of items in group 1}
+    \item{items2}{list of items in group 2}
+    \item{k}{number of items in groups 1 and 2}
+    \item{splitcr}{submitted split criterion}
+    \item{split.vector}{binary allocation of items to groups}
+    \item{warning}{items equalling median or mean for the respective split criteria}
+    \item{call}{the matched call}
+}
+\references{
+Fischer, G. H., and Molenaar, I. (1995). Rasch Models -- Foundations, Recent Developments, and Applications. Springer.
+
+Rost, J. (2004). Lehrbuch Testtheorie -- Testkonstruktion. Bern: Huber.
+}
+\author{Marco Maier, Reinhold Hatzinger}
+%\note{}
+\seealso{\code{\link{LRtest}}, \code{\link{Waldtest}}}
+\examples{
+# Martin-Loef-test on dichotomous Rasch model using "median"
+# and a user-defined split
+splitvec <- c(1, 1, 1, 1, 0, 1, 0, 0, 1, 0)
+
+res <- RM(raschdat1[,1:10])
+
+MLoef.1 <- MLoef(res, splitcr = "median")
+MLoef.2 <- MLoef(res, splitcr = splitvec)
+
+MLoef.1
+
+summary(MLoef.2)
+}
+\keyword{models}
diff --git a/man/NPtest.Rd b/man/NPtest.Rd
new file mode 100755
index 0000000..9625297
--- /dev/null
+++ b/man/NPtest.Rd
@@ -0,0 +1,167 @@
+\name{NPtest}
+\Rdversion{1.1}
+\alias{NPtest}
+\title{function to perform nonparametric Rasch model tests}
+\description{A variety of nonparametric tests as proposed by Ponocny (2001) are implemented. The function operates on
+    random binary matrices that have been generated using an
+    MCMC algorithm (Verhelst, 2008) from the RaschSampler package (Hatzinger, Mair, and Verhelst, 2009).
+}
+\usage{
+NPtest(obj, n=NULL, method = "T1", ...)
+}
+\arguments{
+  \item{obj}{
+     A binary data matrix (or data frame) or
+     an object containing the output from the \code{\link[RaschSampler]{RaschSampler}} package.
+  }
+  \item{n}{
+     If \code{obj} is a matrix or a data frame, \code{n} is the number of sampled matrices
+     (default is 500).
+  }
+  \item{method}{
+     One of the test statistics. See details below.
+  }
+  \item{\dots}{
+     Further arguments for specifying the statistics functions. See details below.
+  }
+}
+\details{
+     The function uses the  \code{\link[RaschSampler]{RaschSampler}} package. On input the user has to supply
+     either a binary data matrix or a RaschSampler output object. If the input is a data matrix, the RaschSampler
+     is called with default values (i.e., \code{rsctrl(burn_in = 256, n_eff = n, step = 32)}, see \code{\link[RaschSampler]{rsctrl}}),
+     where \code{n} may be specified by the user (otherwise it is 500). The starting values for the random number generators are chosen
+     randomly using system time.
+     Methods other than those listed below can easily be implemented using the RaschSampler package directly.
+
+     The currently implemented methods (following Ponocny's notation of \emph{T}-statistics) and their options are:
+   \describe{
+     \item{\bold{T1:}}{\code{method = "T1"}, no further option}\cr
+     Checks for local dependence via increased inter-item correlations. For all item pairs
+     cases are counted with equal responses on both items.
+
+     \item{\bold{T2:}}{\code{method = "T2", idx = NULL, stat = "var"}}\cr
+     \code{idx} \ldots vector of indexes specifying items which define a subscale, e.g., \code{idx = c(1, 5, 7)}\cr
+     \code{stat} \ldots one of \code{"var"} (variance), \code{"mad1"} (mean absolute deviation),
+     \code{"mad2"} (median absolute deviation), \code{"range"} (range)\cr
+     Checks for local dependence within model deviating subscales via increased
+     dispersion of subscale person rawscores.
+
+     \item{\bold{T4:}}{\code{method = "T4", idx = NULL, group = NULL, alternative = "high"}}\cr
+     \code{idx} \ldots vector of indexes specifying items which define a subscale, e.g., \code{idx = c(1, 5, 7)}\cr
+     \code{group} \ldots logical vector defining a subject group, e.g., \code{group = (age >= 15 && age < 30)}\cr
+     \code{alternative} \ldots one of \code{"high"} or \code{"low"}. Specifies the alternative hypothesis.\cr
+     Checks for group anomalies (DIF) via too high (low) raw scores on item(s) for specified group.
+
+     \item{\bold{T7:}}{\code{method = "T7", idx = NULL}}\cr
+     \code{idx} \ldots vector of indexes specifying items which define a subscale, e.g., \code{idx = c(1, 5, 7)}\cr
+     Checks for lower discrimination (2PL) in item subscale via counting cases with response 1 on more
+     difficult and 0 on easier items. The test is global for the subscale, i.e. all subscale items are evaluated
+     using a single statistic.
+
+     \item{\bold{T7a:}}{\code{method = "T7a", idx = NULL}}\cr
+     \code{idx} \ldots vector of indexes specifying items to investigate, e.g., \code{idx = c(1, 5, 7)}\cr
+     Checks for lower discrimination (2PL) of an item compared to another item  via counting cases with response 1 on more
+     difficult and 0 on easier item. The test is performed pairwise, i.e. a statistic is calculated for each item pair.
+
+     \item{\bold{T10:}}{\code{method = "T10", splitcr="median"}}\cr
+     \code{splitcr} \ldots split criterion for subject raw score splitting. \code{"median"} uses the median as split criterion,
+     \code{"mean"} performs a mean-split. Optionally \code{splitcr} can also be a vector which assigns each person to
+     one of two subgroups (e.g., following an external criterion). This vector can be numeric, character, logical or a factor.\cr
+     Global test for subgroup-invariance. Checks for different item difficulties in two subgroups (for details see Ponocny, 2001).
+
+     \item{\bold{T11:}}{\code{method = "T11"}, no further option}\cr
+     Global test for local dependence. The statistic calculates the sum of absolute deviations between the observed inter-item correlations
+     and the expected correlations.
+   }
+}
+\value{
+Depends on the method used. For each method a list is returned. The returned objects are of class
+\code{T1obj}, \code{T2obj}, \code{T4obj}, \code{T7obj}, \code{T7aobj}, \code{T10obj}, \code{T11obj} corresponding to the method used.
+The main output element is \code{prop} giving the one-sided p-value, i.e., the number of statistics from the sampled matrices which are equal
+or exceed the statistic based on the observed data. For \emph{T1} and \emph{T7a} \code{prop} is a vector.
+}
+\references{
+Ponocny, I. (2001) Nonparametric goodness-of-fit tests for the Rasch model. Psychometrika,  Volume 66, Number 3\cr
+Verhelst, N. D. (2008) An Efficient MCMC Algorithm to Sample Binary
+Matrices with Fixed Marginals. Psychometrika, Volume 73, Number 4\cr
+Verhelst, N. D., Hatzinger, R., and Mair, P. (2007) The Rasch Sampler, Journal of Statistical Software, Vol. 20, Issue 4, Feb 2007
+}
+\author{
+Reinhold Hatzinger
+}
+%\note{
+%Maybe notes appear here
+%}
+\seealso{
+    \code{\link[RaschSampler]{RaschSampler}}
+}
+\examples{
+### Preparation:
+
+# data for examples below
+data(raschdat1)
+X<-raschdat1
+
+# generate 100 random matrices based on original data matrix
+rmat<-rsampler(X,rsctrl(burn_in=100, n_eff=100, seed=123))
+
+## the following examples can also directly be used by setting
+## rmat <- raschdat1
+## without calling rsampler() first, e.g.,
+t1<-NPtest(raschdat1, n=100, method="T1")
+
+### Examples:
+
+##---- T1 ------------------------------------------------------
+t1<-NPtest(rmat,method="T1")
+# choose a different alpha for selecting displayed values
+print(t1,alpha=0.01)
+
+
+##---- T2 ------------------------------------------------------
+t21<-NPtest(rmat,method="T2",idx=1:5) # default is variance
+t21
+
+t22<-NPtest(rmat,method="T2",idx=c(1,22,5,27,6,9,11),stat="mad1")
+t22
+
+##---- T4 ------------------------------------------------------
+age<-sample(20:90, 100, replace=TRUE)
+# group must be a logical vector
+#   (value of TRUE is used for group selection)
+age<-age<30
+t41<-NPtest(rmat,method="T4",idx=1:3,group=age)
+t41
+
+sex<-gl(2,50)
+# group can also be a logical expression  (generating a vector)
+t42<-NPtest(rmat,method="T4",idx=c(1,4,5,6),group=sex==1)
+t42
+
+##---- T7, T7a --------------------------------------------------
+# simultaneous test for all items in subscale
+t7<-NPtest(rmat,method="T7",idx=1:3)
+t7
+
+# test for item-pairs
+t7a<-NPtest(rmat,method="T7a",idx=c(1,3,5)) # test for item-pairs
+t7a
+
+##---- T10 ------------------------------------------------------
+t101<-NPtest(rmat,method="T10")  # default split criterion is "median"
+t101
+
+split<-runif(100)
+t102<-NPtest(rmat,method="T10",splitcr=split>0.5)
+t102
+
+t103<-NPtest(rmat,method="T10",splitcr=sex)
+t103
+
+##---- T11 ------------------------------------------------------
+t11<-NPtest(rmat,method="T11")
+t11
+}
+
+\keyword{htest}
+\keyword{nonparametric}
diff --git a/man/PCM.Rd b/man/PCM.Rd
new file mode 100755
index 0000000..abcce1a
--- /dev/null
+++ b/man/PCM.Rd
@@ -0,0 +1,67 @@
+\name{PCM}
+\alias{PCM}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Estimation of partial credit models}
+\description{
+  This function computes the parameter estimates of a partial credit model for polytomous
+  item responses by using CML estimation. }
+\usage{
+PCM(X, W, se = TRUE, sum0 = TRUE, etaStart)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{X}{Input data matrix or data frame with item responses (starting from 0); rows represent individuals, columns represent items. Missing values are inserted as \code{NA}.}
+  \item{W}{Design matrix for the PCM. If omitted, the function will compute W automatically.}
+  \item{se}{If \code{TRUE}, the standard errors are computed.}
+  \item{sum0}{If \code{TRUE}, the parameters are normed to sum-0 by specifying
+  an appropriate \code{W}. If \code{FALSE}, the first parameter is restricted to 0.}
+  \item{etaStart}{A vector of starting values for the eta parameters can be specified. If missing, the 0-vector is used.}
+}
+\details{
+  Through specification in W, the parameters of the categories with 0 responses
+  are set to 0 as well as the first category of the first item. Available methods
+  for PCM-objects are:\cr
+  \code{print}, \code{coef}, \code{model.matrix},
+  \code{vcov}, \code{plot}, \code{summary}, \code{logLik}, \code{person.parameters},
+  \code{plotICC}, \code{LRtest}.
+}
+\value{
+  Returns an object of class \code{Rm, eRm} containing:
+
+  \item{loglik}{Conditional log-likelihood.}
+  \item{iter}{Number of iterations.}
+  \item{npar}{Number of parameters.}
+  \item{convergence}{See \code{code} output in \code{\link{nlm}}.}
+  \item{etapar}{Estimated basic item difficulty parameters.}
+  \item{se.eta}{Standard errors of the estimated basic item parameters.}
+  \item{betapar}{Estimated item-category (easiness) parameters.}
+  \item{se.beta}{Standard errors of item parameters.}
+  \item{hessian}{Hessian matrix if \code{se = TRUE}.}
+  \item{W}{Design matrix.}
+  \item{X}{Data matrix.}
+  \item{X01}{Dichotomized data matrix.}
+  \item{call}{The matched call.}
+}
+\references{
+Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations,
+Recent Developments, and Applications. Springer.
+
+Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
+
+Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43.
+}
+\author{Patrick Mair, Reinhold Hatzinger}
+%\note{}
+\seealso{\code{\link{RM}},\code{\link{RSM}},\code{\link{LRtest}}
+}
+\examples{
+
+##PCM with 10 subjects, 3 items
+data(pcmdat)
+res <- PCM(pcmdat)
+res
+summary(res)                #eta and beta parameters with CI
+thresholds(res)             #threshold parameters
+}
+
+\keyword{models}
diff --git a/man/RM.Rd b/man/RM.Rd
new file mode 100755
index 0000000..799b848
--- /dev/null
+++ b/man/RM.Rd
@@ -0,0 +1,79 @@
+\name{RM}
+\alias{RM}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Estimation of Rasch Models}
+\description{
+  This function computes the parameter estimates of a Rasch model for binary item responses by using CML estimation.
+}
+\usage{
+RM(X, W, se = TRUE, sum0 = TRUE, etaStart)
+}
+\arguments{
+  \item{X}{Input 0/1 data matrix or data frame; rows represent individuals, columns represent items. Missing values are inserted as \code{NA}.}
+  \item{W}{Design matrix for the Rasch model. If omitted, the function will compute W automatically.}
+  \item{se}{If \code{TRUE}, the standard errors are computed.}
+  \item{sum0}{If \code{TRUE}, the parameters are normed to sum-0 by specifying
+  an appropriate \code{W}. If \code{FALSE}, the first parameter is restricted to 0.}
+  \item{etaStart}{A vector of starting values for the eta parameters can be specified. If missing, the 0-vector is used.}
+}
+\details{
+  For estimating the item parameters the CML method is used.
+  Available methods for RM-objects are:\cr
+  \code{print}, \code{coef}, \code{model.matrix},
+  \code{vcov}, \code{summary}, \code{logLik}, \code{person.parameter}, \code{LRtest},
+  \code{Waldtest}, \code{plotICC}, \code{plotjointICC}.
+}
+\value{
+  Returns an object of class \code{dRm, Rm, eRm} and contains the log-likelihood value, the parameter estimates and their standard errors.
+
+  \item{loglik}{Conditional log-likelihood.}
+  \item{iter}{Number of iterations.}
+  \item{npar}{Number of parameters.}
+  \item{convergence}{See \code{code} output in \code{\link{nlm}}.}
+  \item{etapar}{Estimated basic item difficulty parameters.}
+  \item{se.eta}{Standard errors of the estimated basic item parameters.}
+  \item{betapar}{Estimated item (easiness) parameters.}
+  \item{se.beta}{Standard errors of item parameters.}
+  \item{hessian}{Hessian matrix if \code{se = TRUE}.}
+  \item{W}{Design matrix.}
+  \item{X}{Data matrix.}
+  \item{X01}{Dichotomized data matrix.}
+  \item{call}{The matched call.}
+}
+
+\references{
+Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations,
+Recent Developments, and Applications. Springer.
+
+Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
+
+Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43.
+}
+\author{Patrick Mair, Reinhold Hatzinger}
+%\note{}
+\seealso{\code{\link{RSM}},\code{\link{PCM}}, \code{\link{LRtest}}, \code{\link{Waldtest}}
+}
+\examples{
+
+# Rasch model with beta.1 restricted to 0
+data(raschdat1)
+res <- RM(raschdat1, sum0 = FALSE)
+print(res)
+summary(res)
+res$W                                       #generated design matrix
+
+# Rasch model with sum-0 beta restriction; no standard errors computed
+res <- RM(raschdat1, se = FALSE, sum0 = TRUE)
+print(res)
+summary(res)
+res$W                                       #generated design matrix
+
+#Rasch model with missing values
+data(raschdat2)
+res <- RM(raschdat2)
+print(res)
+summary(res)
+
+}
+
+\keyword{models}
diff --git a/man/RSM.Rd b/man/RSM.Rd
new file mode 100755
index 0000000..fa71764
--- /dev/null
+++ b/man/RSM.Rd
@@ -0,0 +1,67 @@
+\name{RSM}
+\alias{RSM}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Estimation of rating scale models}
+\description{
+  This function computes the parameter estimates of a rating scale model for polytomous
+  item responses by using CML estimation. }
+\usage{
+RSM(X, W, se = TRUE, sum0 = TRUE, etaStart)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{X}{Input data matrix or data frame with item responses (starting from 0); rows represent individuals, columns represent items. Missing values are inserted as \code{NA}.}
+  \item{W}{Design matrix for the RSM. If omitted, the function will compute W automatically.}
+  \item{se}{If \code{TRUE}, the standard errors are computed.}
+  \item{sum0}{If \code{TRUE}, the parameters are normed to sum-0 by specifying
+  an appropriate \code{W}. If \code{FALSE}, the first parameter is restricted to 0.}
+  \item{etaStart}{A vector of starting values for the eta parameters can be specified. If missing, the 0-vector is used.}
+}
+\details{
+  The design matrix approach transforms the RSM into a partial credit model
+  and estimates the corresponding basic parameters by using CML.
+  Available methods for RSM-objects are \code{print}, \code{coef}, \code{model.matrix},
+  \code{vcov}, \code{summary}, \code{logLik}, \code{person.parameters}, \code{plotICC}, \code{LRtest}.
+}
+\value{
+  Returns an object of class \code{Rm, eRm} and contains the log-likelihood value,
+  the parameter estimates and their standard errors.
+
+  \item{loglik}{Conditional log-likelihood.}
+  \item{iter}{Number of iterations.}
+  \item{npar}{Number of parameters.}
+  \item{convergence}{See \code{code} output in \code{\link{nlm}}.}
+  \item{etapar}{Estimated basic item difficulty parameters (item and category parameters).}
+  \item{se.eta}{Standard errors of the estimated basic item parameters.}
+  \item{betapar}{Estimated item-category (easiness) parameters.}
+  \item{se.beta}{Standard errors of item parameters.}
+  \item{hessian}{Hessian matrix if \code{se = TRUE}.}
+  \item{W}{Design matrix.}
+  \item{X}{Data matrix.}
+  \item{X01}{Dichotomized data matrix.}
+  \item{call}{The matched call.}
+}
+\references{
+Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations,
+Recent Developments, and Applications. Springer.
+
+Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
+
+Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43.
+}
+\author{Patrick Mair, Reinhold Hatzinger}
+%\note{}
+
+\seealso{\code{\link{RM}},\code{\link{PCM}},\code{\link{LRtest}}
+}
+\examples{
+
+##RSM with 10 subjects, 3 items
+data(rsmdat)
+res <- RSM(rsmdat)
+res
+summary(res)                            #eta and beta parameters with CI
+thresholds(res)                         #threshold parameters
+}
+
+\keyword{models}
diff --git a/man/Waldtest.Rd b/man/Waldtest.Rd
new file mode 100755
index 0000000..562498c
--- /dev/null
+++ b/man/Waldtest.Rd
@@ -0,0 +1,64 @@
+\name{Waldtest}
+\alias{Waldtest}
+\alias{Waldtest.Rm}
+\alias{print.wald}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Item-Specific Wald Test}
+\description{Performs a Wald test on item-level by splitting subjects into subgroups.
+}
+\usage{
+\method{Waldtest}{Rm}(object, splitcr = "median")
+\method{print}{wald}(x,...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{object}{Object of class \code{RM}.}
+  \item{splitcr}{Split criterion for subject raw score splitting. \code{median}
+  uses the median as split criterion, \code{mean} performs a mean-split.
+  Optionally \code{splitcr} can also be a dichotomous vector which assigns each person to a
+  certain subgroup (e.g., following an external criterion). This vector can be numeric, character or a factor. }
+  \item{x}{Object of class \code{wald}.}
+  \item{...}{Further arguments passed to or from other methods. They are ignored in this function.}
+}
+\details{Items are eliminated if they do not have the same number of categories in each subgroup.
+To avoid this problem, for RSM and PCM it is suggested to use a random or another user-defined split.
+If the data set contains missing values and \code{mean} or \code{median} is specified as splitcriterion,
+means or medians are calculated for each missing value subgroup and consequently used for raw score splitting.}
+\value{
+Returns an object of class \code{wald} containing:
+  \item{coef.table}{Data frame with test statistics, z- and p-values.}
+  \item{betapar1}{Beta parameters for first subgroup}
+  \item{se.beta1}{Standard errors for first subgroup}
+  \item{betapar2}{Beta parameters for second subgroup}
+  \item{se.beta2}{Standard errors for second subgroup}
+  \item{spl.gr}{Names and levels for \code{splitcr}.}
+  \item{call}{The matched call.}
+}
+\references{
+Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations,
+Recent Developments, and Applications. Springer.
+
+Fischer, G. H., and Scheiblechner, H. (1970). Algorithmen und Programme fuer das
+probabilistische Testmodell von Rasch [Algorithms and programs for Rasch's
+probabilistic test model]. Psychologische Beitraege, 12, 23-51.
+}
+\author{Patrick Mair, Reinhold Hatzinger}
+%\note{}
+\seealso{\code{\link{LRtest}}}
+\examples{
+
+#Wald test for Rasch model with user-defined subject split
+data(raschdat2)
+res <- RM(raschdat2)
+splitvec <- sample(1:2,25,replace=TRUE)
+Waldtest(res, splitcr = splitvec)
+
+#Wald test for RSM eliminates 4 items (with median split)
+data(rsmdat)
+res <- RSM(rsmdat)
+Waldtest(res)
+
+}
+
+\keyword{models}
diff --git a/man/eRm-package.Rd b/man/eRm-package.Rd
new file mode 100755
index 0000000..4c8a345
--- /dev/null
+++ b/man/eRm-package.Rd
@@ -0,0 +1,66 @@
+\name{eRm-package}
+\alias{eRm-package}
+\alias{eRm}
+\docType{package}
+\title{
+extended Rasch modeling
+}
+\description{
+This package estimates extended Rasch models, i.e. the
+ordinary Rasch model for dichotomous data (RM), the linear logistic test model (LLTM),
+the rating scale model (RSM) and its linear extension (LRSM), the partial credit model (PCM)
+and its linear extension (LPCM). The parameters are estimated by conditional maximum
+likelihood (CML). Missing values are allowed in the data matrix. Additional features
+are the estimation of the person parameters, LR-Model test, item-specific Wald test,
+Martin-Loef test, nonparametric Monte-Carlo tests,
+itemfit and personfit statistics, various ICC plots. An eRm platform is provided at
+http://r-forge.r-project.org/projects/erm/.
+}
+\details{
+\tabular{ll}{
+Package: \tab eRm\cr
+Type: \tab Package\cr
+Version: \tab 0.12-0\cr
+Date: \tab 2010-04-07\cr
+License: \tab GPL\cr
+}
+The basic input units for the functions are the person-item matrix X and the design matrix W.
+Missing values in X are coded with \code{NA}.
+By default, W is generated automatically, but it can be specified by the user as well.
+The function call of the basic models can be achieved through \code{RM(X, W)},
+\code{RSM(X, W)}, and \code{PCM(X, W)}.
+
+The linear extensions provide the possibility to fit a more restricted model than its basic complement,
+such as \code{LLTM(X, W)}, \code{LRSM(X, W)},\code{LPCM(X, W)}, but
+also a generalization by imposing repeated measurement designs and group contrasts. These models can
+be estimated by using, e.g.,
+\code{LLTM(X, W, mpoints = 2, groupvec = G)},\cr
+\code{LRSM(X, W, mpoints = 2, groupvec = G)}, and\cr
+\code{LPCM(X, W, mpoints = 2, groupvec = G)}. \cr
+\code{mpoints} specifies the number of measurement or time points,
+\code{G} is a vector with the group membership for each subject
+ordered according to the rows of the data matrix.
+
+\code{RM} produces an object belonging to the classes \code{dRM}, \code{RM}, and
+\code{eRm}. \code{PCM} and \code{RSM} produce objects belonging to the classes
+\code{RM} and \code{eRm}, whereas results of \code{LLTM}, \code{LRSM}, and \code{LPCM} are objects of class \code{eRm}.
+
+The \code{eRm} package contains functions from the packages \code{sna} and \code{ROCR}.
+Thanks to Carter T. Butts and Tobias Sing et al.
+}
+\author{Patrick Mair, Reinhold Hatzinger, Marco Maier
+
+Maintainer: Patrick Mair <patrick.mair at wu.ac.at>
+}
+\references{
+Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations,
+Recent Developments, and Applications. Springer.
+
+Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for
+the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
+
+Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models
+with the eRm package in R. Psychology Science, 49, 26-43.
+}
+\keyword{models}
+
diff --git a/man/gofIRT.Rd b/man/gofIRT.Rd
new file mode 100755
index 0000000..624f7a0
--- /dev/null
+++ b/man/gofIRT.Rd
@@ -0,0 +1,60 @@
+\name{gofIRT}
+\alias{gofIRT}
+\alias{gofIRT.ppar}
+\alias{summary.gof}
+\alias{print.gof}
+
+
+
+\title{Various model tests and fit indices}
+
+\description{This function computes various model tests and fit indices for objects of class \code{ppar}: Collapsed deviance, Casewise deviance, Rost's LR-test, Hosmer-Lemeshow test, R-Squared measures, confusion matrix, ROC analysis.
+}
+
+\usage{
+\method{gofIRT}{ppar}(object, groups.hl = 10, cutpoint = 0.5)
+}
+
+\arguments{
+  \item{object}{Object of class \code{ppar} (from \code{person.parameter()}).}
+  \item{groups.hl}{Number of groups for Hosmer-Lemeshow test (see details).} 
+  \item{cutpoint}{Numeric value between 0 and 1 for computing the 0-1 model matrix from the estimated probabilities}
+
+
+}
+
+
+\details{So far these test statistics are implemented only for dichotomous models without NA's. The Hosmer-Lemeshow test is computed by splitting the response vector into percentiles, e.g. \code{groups.hl = 10} corresponds to decile splitting. 
+}
+\value{
+The function \code{gofIRT} returns an object of class \code{gof} containing:
+
+  \item{test.table}{Output for model tests.}
+  \item{R2}{List with R-squared measures.}
+  \item{classifier}{Confusion matrix, accuracy, sensitivity, specificity.}
+  \item{AUC}{Area under ROC curve.}
+  \item{Gini}{Gini coefficient.}
+  \item{ROC}{FPR and TPR for different cutpoints.}
+  \item{opt.cut}{Optimal cutpoint determined by ROC analysis.}
+  \item{predobj}{Prediction output from ROC analysis (\code{ROCR} package)}
+} 
+
+\references{
+Mair, P., Reise, S. P., and Bentler, P. M. (2008). IRT goodness-of-fit using approaches from logistic regression. UCLA Statistics Preprint Series. 
+}
+
+\seealso{
+    \code{\link{itemfit.ppar}},\code{\link{personfit.ppar}},\code{\link{LRtest}}
+}
+\examples{
+
+#Goodness-of-fit for a Rasch model
+data(raschdat1)
+res <- RM(raschdat1)
+pres <- person.parameter(res)
+gof.res <- gofIRT(pres)
+gof.res
+summary(gof.res)
+
+}
+\keyword{models}
diff --git a/man/itemfit.ppar.Rd b/man/itemfit.ppar.Rd
new file mode 100755
index 0000000..0e20621
--- /dev/null
+++ b/man/itemfit.ppar.Rd
@@ -0,0 +1,84 @@
+\name{itemfit.ppar}
+\alias{itemfit.ppar}
+\alias{itemfit}
+\alias{personfit.ppar}
+\alias{personfit}
+\alias{residuals.ppar}
+\alias{pmat.ppar}
+\alias{pmat}
+\alias{print.ifit}
+\alias{print.pfit}
+\alias{print.resid}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Residuals, Personfit and Itemfit Statistics}
+\description{\code{pmat} computes the theoretical person-item matrix with solving
+probabilities for each category (except 0th). \code{residuals} computes the squared and standardized residuals based on
+the observed and the expected person-item matrix. Chi-square based itemfit and personfit
+statistics can be obtained by using \code{itemfit} and \code{personfit}.
+}
+\usage{
+\method{pmat}{ppar}(object)
+\method{residuals}{ppar}(object,...)
+\method{itemfit}{ppar}(object)
+\method{personfit}{ppar}(object)
+\method{print}{ifit}(x, visible = TRUE, ...)
+\method{print}{pfit}(x, visible = TRUE, ...)
+\method{print}{resid}(x, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{object}{Object of class \code{ppar}, derived from \code{person.parameter}.}
+  \item{x}{Object of class \code{ifit}, \code{pfit}, or \code{resid}.}
+  \item{visible}{if \code{FALSE}, returns the matrix of fit statistics that otherwise would be printed.}
+  \item{...}{Further arguments passed to or from other methods. They are ignored in this function.}
+}
+%\details{}
+\value{
+%Function \code{pmat}:
+  \item{pmat}{Matrix of theoretical probabilities for each category except 0th (from function \code{pmat}).}
+
+%Function \code{itemfit} returns a list of class \code{ifit} with components:
+  \item{i.fit}{Chi-squared itemfit statistics (from function \code{itemfit}).}
+  \item{i.df}{Degrees of freedom for itemfit statistics (from function \code{itemfit}).}
+  \item{st.res}{Standardized residuals (from function \code{itemfit}).}
+  \item{i.outfitMSQ}{Outfit mean-square statistics (from function \code{itemfit}).}
+  \item{i.infitMSQ}{Infit mean-square statistics (from function \code{itemfit}).}
+
+%Function \code{personfit} returns a list of class \code{pfit} with components:
+  \item{p.fit}{Chi-squared personfit statistics (from function \code{personfit}).}
+  \item{p.df}{Degrees of freedom for personfit statistics (from function \code{personfit}).}
+  \item{st.res}{Standardized residuals (from function \code{personfit}).}
+  \item{p.outfitMSQ}{Outfit mean-square statistics (from function \code{personfit}).}
+  \item{p.infitMSQ}{Infit mean-square statistics (from function \code{personfit}).}
+
+}
+\references{
+Smith Jr., E. V., and Smith, R. M. (2004). Introduction to Rasch Measurement.
+JAM press.
+
+Wright, B.D., and Masters, G.N. Computation of OUTFIT and INFIT Statistics.
+Rasch Measurement Transactions, 1990, 3:4 p.84-5
+}
+\author{Patrick Mair, Reinhold Hatzinger}
+%\note{}
+\seealso{\code{\link{person.parameter}}
+}
+\examples{
+
+# Rasch model, estimation of item and person parameters
+data(raschdat2)
+res <- RM(raschdat2)
+p.res <- person.parameter(res)
+
+# Matrix with expected probabilities and corresponding residuals
+pmat(p.res)
+residuals(p.res)
+
+#Itemfit
+itemfit(p.res)
+
+#Personfit
+personfit(p.res)
+
+}
+\keyword{models}
diff --git a/man/person.parameter.Rd b/man/person.parameter.Rd
new file mode 100755
index 0000000..69dde8b
--- /dev/null
+++ b/man/person.parameter.Rd
@@ -0,0 +1,102 @@
+\name{person.parameter}
+\alias{person.parameter}
+\alias{person.parameter.eRm}
+\alias{summary.ppar}
+\alias{print.ppar}
+\alias{plot.ppar}
+\alias{coef.ppar}
+\alias{logLik.ppar}
+\alias{print.logLik.ppar}
+\alias{confint.ppar}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Estimation of Person Parameters}
+\description{Maximum likelihood estimation of the person parameters with spline
+interpolation for non-observed and 0/full responses. Extraction of information criteria such
+as AIC, BIC, and cAIC based on unconditional log-likelihood.}
+\usage{
+\method{person.parameter}{eRm}(object)
+\method{summary}{ppar}(object, ...)
+\method{print}{ppar}(x, ...)
+\method{plot}{ppar}(x, xlab = "Person Raw Scores",
+   ylab = "Person Parameters (Theta)", main = NULL, ...)
+\method{coef}{ppar}(object, ...)
+\method{logLik}{ppar}(object, ...)
+\method{confint}{ppar}(object, parm, level = 0.95, ...)
+}
+
+\arguments{
+  \item{object}{Object of class \code{eRm} in \code{person.parameter} and object of class \code{ppar} in \code{IC}.}
+
+Arguments for \code{print} and \code{plot} methods:
+  \item{x}{Object of class \code{ppar}.}
+  \item{xlab}{Label of the x-axis.}
+  \item{ylab}{Label of the y-axis.}
+  \item{main}{Title of the plot.}
+  \item{...}{Further arguments to be passed to or from other methods. They are ignored in this function.}
+
+Arguments for \code{confint}:
+  \item{parm}{Parameter specification (ignored).}
+  \item{level}{Alpha-level.}
+}
+\details{If the data set contains missing values, person parameters are estimated
+for each missing value subgroup.
+}
+\value{
+The function \code{person.parameter} returns an object of class \code{ppar} containing:
+  \item{loglik}{Log-likelihood of the collapsed data (for faster estimation persons with the same raw score are collapsed).}
+  \item{npar}{Number of parameters.}
+  \item{niter}{Number of iterations.}
+  \item{thetapar}{Person parameter estimates.}
+  \item{se.theta}{Standard errors of the person parameters.}
+  \item{hessian}{Hessian matrix.}
+  \item{theta.table}{Matrix with person parameters (ordered according to original data)
+         including NA pattern group.}
+  \item{pers.ex}{Indices with persons excluded due to 0/full raw score}
+  \item{X.ex}{Data matrix with persons excluded}
+  \item{gmemb}{NA group membership vector (0/full persons excluded)}
+
+
+The function \code{coef} returns a vector of the person parameter estimates for each person (i.e., the first column
+  of \code{theta.table}).
+
+The function \code{logLik} returns an object of class \code{loglik.ppar} containing:
+  \item{loglik}{Log-likelihood of the collapsed data (see above).}
+  \item{df}{Degrees of freedom.}
+
+}
+
+
+\references{
+Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations,
+Recent Developements, and Applications. Springer.
+
+Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20.
+
+Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43.
+}
+\author{Patrick Mair, Reinhold Hatzinger}
+%\note{}
+\seealso{
+    \code{\link{itemfit.ppar}},\code{\link{personfit.ppar}}
+}
+\examples{
+
+#Person parameter estimation of a rating scale model
+data(rsmdat)
+res <- RSM(rsmdat)
+pres <- person.parameter(res)
+print(pres)
+summary(pres)
+plot(pres)
+
+#Person parameter estimation for a Rasch model with missing values
+data(raschdat2)
+res <- RM(raschdat2, se = FALSE) #Rasch model without standard errors
+pres <- person.parameter(res)
+print(pres)                      #person parameters
+summary(pres)
+logLik(pres)                     #log-likelihood of person parameter estimation
+
+
+}
+\keyword{models}
diff --git a/man/plotICC.Rd b/man/plotICC.Rd
new file mode 100755
index 0000000..740de54
--- /dev/null
+++ b/man/plotICC.Rd
@@ -0,0 +1,124 @@
+\name{plotICC}
+\alias{plotICC}
+\alias{plotICC.Rm}
+\alias{plotjointICC}
+\alias{plotjointICC.dRm}
+
+\title{ICC Plots}
+\description{Plot functions for visualizing the item characteristic curves}
+\usage{
+\method{plotICC}{Rm}(object, item.subset = "all", empICC = NULL, empCI = NULL,
+   mplot = NULL, xlim = c(-4, 4), ylim = c(0, 1),
+   xlab = "Latent Dimension", ylab = "Probability to Solve", main=NULL,
+   col = NULL, lty = 1, legpos = "left", ask = TRUE, ...)
+\method{plotjointICC}{dRm}(object, item.subset = "all", legend = TRUE,
+   xlim = c(-4, 4), ylim = c(0, 1), xlab = "Latent Dimension",
+   ylab = "Probability to Solve", lty = 1, legpos = "left",
+   main="ICC plot",col=NULL,...)
+}
+\arguments{
+  \item{object}{object of class \code{Rm} or \code{dRm}}
+  \item{item.subset}{Subset of items to be plotted. Either a numeric vector indicating
+        the column in \code{X} or a character vector indicating the column name.
+        If \code{"all"} (default), all items are plotted.}
+  \item{empICC}{Plotting the empirical ICCs for objects of class \code{dRm}.
+        If \code{empICC=NULL}
+        (the default) the empirical ICC is not drawn. Otherwise, \code{empICC} must be
+        specified as a list where the first element must be one of
+        \code{"raw"}, \code{"loess"}, \code{"tukey"}, \code{"kernel"}. The other optional elements are
+        \code{smooth} (numeric), \code{type} (line type for empirical ICCs,
+        useful values are \code{"p"} (default), \code{"l"}, and \code{"b"},
+          see graphics parameter \code{type} in \code{\link{plot.default}}),
+        \code{pch}, \code{col}, and \code{lty}, plotting `character', colour and linetype
+        (see \code{\link{par}}). See details and examples below.
+  }
+  \item{empCI}{Plotting confidence intervals for the empirical ICCs.
+        If \code{empCI=NULL} (the default) no confidence intervals are drawn.
+        Otherwise, by specifying \code{empCI} as a list gives `exact' confidence
+        intervals for each point of the empirical ICC.
+        The optional elements of this list are \code{gamma}, the confidence level,
+        \code{col}, colour, and \code{lty}, line type. If \code{empCI} is specified
+        as an empty list,
+        the default values \code{empCI=list(gamma=0.95,col="red",lty="dotted")}
+        will be used.
+  }
+  \item{mplot}{if \code{NULL} the default setting is in effect. For models of class \code{dRm} this
+        is \code{mplot = TRUE}, i.e.,
+        the ICCs for up to 4 items are plotted in one figure. For \code{Rm}
+        models the default is \code{FALSE} (each item in one figure) but may be set to \code{TRUE}.
+  }
+  \item{xlab}{Label of the x-axis.}
+  \item{ylab}{Label of the y-axis.}
+  \item{xlim}{Range of person parameters.}
+  \item{ylim}{Range for probability to solve.}
+  \item{legend}{If \code{TRUE}, legend is provided, otherwise the ICCs are labeled.}
+  \item{col}{If not specified or \code{NULL}, line colors are determined automatically.
+             Otherwise, a scalar or vector with appropriate color specifications may be supplied
+             (see \code{\link{par}}).}
+  \item{lty}{Line type.}
+  \item{main}{Title of the plot.}
+  \item{legpos}{Position of the legend with possible values  \code{"bottomright"},
+               \code{"bottom"}, \code{"bottomleft"}, \code{"left"}, \code{"topleft"}, \code{"top"},
+               \code{"topright"}, \code{"right"} and \code{"center"}.
+               If \code{FALSE} no legend is displayed.}
+  \item{ask}{If \code{TRUE} (the default) and the \code{R} session is interactive the user is asked for input,
+             before a new figure is drawn. \code{FALSE} is only useful if automated figure export is
+             in effect, e.g., when using \code{\link{Sweave}}.}
+  \item{\ldots}{Additional plot parameters.}
+}
+\details{Empirical ICCs for objects of class \code{dRm} can be plotted using the option \code{empICC}, a
+         list where the first element specifies the type of calculation of the empirical values.
+         If \code{empICC=list("raw", other specifications)}
+         relative frequencies of the positive responses are calculated for each rawscore group and plotted
+         at the position of the corresponding person parameter. The other options use the default versions
+         of various smoothers: \code{"tukey"} (see \code{\link{smooth}}), \code{"loess"} (see \code{\link{loess}}),
+         and \code{"kernel"} (see \code{\link{ksmooth}}). For \code{"loess"} and \code{"kernel"} a further
+         element, \code{smooth},
+         may be specified to control the span (default is 0.75) or the bandwidth (default is 0.5),
+         respectively. For example, the specification could be \code{empirical = list("loess", smooth=0.9)}
+         or \code{empirical = list("kernel",smooth=2)}.
+         Higher values result in smoother estimates of the empirical ICCs.
+
+         The optional confidence intervals are obtained by a procedure first given in
+         Clopper and Pearson (1934) based on the beta distribution (see \code{\link{binom.test}}).
+
+
+}
+\note{For most of the plot options see  \code{\link{plot}} and \code{\link{par}}.}
+%\value{}
+%\references{}
+\author{Patrick Mair, Reinhold Hatzinger}
+%\note{}
+\seealso{\code{\link{plotGOF}}}
+\examples{
+# Rating scale model, ICC plot for all items
+data(rsmdat)
+rsm.res <- RSM(rsmdat)
+thresholds(rsm.res)
+plotICC(rsm.res)
+
+# now items 1 to 4 in one figure without legends
+plotICC(rsm.res, item.subset = 1:4, mplot = TRUE, legpos = FALSE)
+
+# Rasch model for items 1 to 8 from raschdat1
+# empirical ICCs displaying relative frequencies (default settings)
+data(raschdat1)
+rm8.res <- RM(raschdat1[,1:8])
+plotICC(rm8.res, empICC=list("raw"))
+
+# the same but using different plotting styles
+plotICC(rm8.res, empICC=list("raw",type="b",col="blue",lty="dotted"))
+
+# kernel-smoothed empirical ICCs using bandwidth = 3
+plotICC(rm8.res, empICC = list("kernel",smooth=3))
+
+# raw empirical ICCs with confidence intervals
+# displaying only items 2,3,7,8
+plotICC(rm8.res, item.subset=c(2,3,7,8), empICC=list("raw"), empCI=list())
+
+# Joint ICC plot for items 2, 6, 8, and 15 for a Rasch model
+data(raschdat1)
+res <- RM(raschdat1)
+plotjointICC(res, item.subset = c(2,6,8,15), legpos = "left")
+}
+\keyword{models}
diff --git a/man/plotPImap.Rd b/man/plotPImap.Rd
new file mode 100755
index 0000000..ac536b2
--- /dev/null
+++ b/man/plotPImap.Rd
@@ -0,0 +1,53 @@
+\name{plotPImap}
+\alias{plotPImap}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Person-Item Map}
+\description{
+    A person-item map displays the location of item (and threshold) parameters
+    as well as the distribution of person parameters along the latent dimension.
+    Person-item maps are useful to compare the range and position of the item measure distribution
+    (lower panel) to the range and position of the person measure distribution (upper panel).
+    Items should ideally be located along the whole scale to meaningfully measure
+    the `ability' of all persons.
+}
+\usage{
+plotPImap(object, item.subset = "all", sorted = FALSE,
+   main = "Person-Item Map", latdim = "Latent Dimension",
+   pplabel = "Person\nParameter\nDistribution", cex.gen = 0.7,
+   xrange = NULL, warn.ord = TRUE, irug = TRUE)
+}
+\arguments{
+  \item{object}{Object of class \code{Rm} or \code{dRm}}
+  \item{item.subset}{Subset of items to be plotted. Either a numeric vector indicating
+       the column in \code{X} or a character vector indicating the column name.
+       If \code{"all"}, all items are plotted. The number of items to be plotted must be > 1.}
+  \item{sorted}{ If \code{TRUE}, the items are sorted in increasing order according to their location
+       on the latent dimension.}
+  \item{main}{Main title of the plot.}
+  \item{latdim}{Label of the x-axis, i.e., the latent dimension.}
+  \item{pplabel}{Title for the upper panel displaying the person parameter distribution}
+  \item{cex.gen}{\code{cex} as a graphical parameter
+       specifies a numerical value giving the amount by which plotting text and symbols should be
+       magnified relative to the default. Here \code{cex.gen} applies to all text labels. The default is 0.7.}
+  \item{xrange}{Range for the x-axis}
+  \item{warn.ord}{If \code{TRUE} (the default) asterisks are displayed in the right margin of the lower
+       panel to indicate nonordinal threshold locations for polytomous items.}
+  \item{irug}{If \code{TRUE} (the default), all thresholds are plotted below the person distribution
+       to indicate where the included items are most informative.}
+}
+\details{
+  Item locations are displayed with bullets, threshold locations with circles.
+}
+%\value{}
+\references{Bond, T.G., and Fox Ch.M. (2007) Applying the Rasch Model. Fundamental Measurement in the Human Sciences.
+2nd Edition. Lawrence Erlbaum Associates.
+}
+\author{Patrick Mair, Reinhold Hatzinger}
+%\note{}
+%\seealso{}
+\examples{
+data(pcmdat)
+res<-PCM(pcmdat)
+plotPImap(res, sorted=TRUE)
+}
+\keyword{models}
diff --git a/man/predict.ppar.Rd b/man/predict.ppar.Rd
new file mode 100755
index 0000000..e214347
--- /dev/null
+++ b/man/predict.ppar.Rd
@@ -0,0 +1,38 @@
+\name{predict.ppar}
+\alias{predict.ppar}
+
+\title{Predict methods}
+\description{Returns data matrix based on model probabilities. So far implemented for dichotomous models only.}
+\usage{
+\method{predict}{ppar}(object, cutpoint = "randomized", ...)
+}
+
+\arguments{
+  \item{object}{Object of class \code{ppar} (from \code{person.parameter()}).}
+  \item{cutpoint}{Either a numeric value between 0 and 1 or \code{"randomized"} for randomized 0-1 assignment (see details)}
+  \item{...}{Additional arguments ignored}
+}
+
+\details{
+A randomized assignment implies that for each cell an additional random number is drawn. If the model probability is larger than this value, the person gets 1 on this particular item, if smaller, 0 is assigned. Alternatively, a numeric probability cutpoint can be assigned and the 0-1 scoring is carried out according to the same rule.
+}
+\value{
+Returns data matrix based on model probabilities
+}
+
+\author{Patrick Mair, Reinhold Hatzinger}
+%\note{}
+\seealso{
+    \code{\link{gofIRT.ppar}}
+}
+
+\examples{
+
+#Model-based data matrix for RSM
+data(raschdat2)
+res <- RM(raschdat2)
+pres <- person.parameter(res)
+predict(pres)
+
+}
+\keyword{models}
diff --git a/man/print.eRm.Rd b/man/print.eRm.Rd
new file mode 100755
index 0000000..bed420d
--- /dev/null
+++ b/man/print.eRm.Rd
@@ -0,0 +1,64 @@
+\name{print.eRm}
+\alias{print.eRm}
+\alias{summary.eRm}
+\alias{vcov.eRm}
+\alias{model.matrix.eRm}
+\alias{coef.eRm}
+\alias{logLik.eRm}
+\alias{confint.eRm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Methods for extended Rasch models}
+\description{Several methods for objects of class \code{eRm}.}
+\usage{
+\method{print}{eRm}(x, ...)
+\method{summary}{eRm}(object, ...)
+\method{coef}{eRm}(object, parm="beta", ...)
+\method{model.matrix}{eRm}(object, ...)
+\method{vcov}{eRm}(object, ...)
+\method{logLik}{eRm}(object, ...)
+\method{confint}{eRm}(object, parm = "beta", level = 0.95, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{x}{Object of class \code{eRm}.}
+  \item{object}{Object of class \code{eRm}.}
+  \item{parm}{Either \code{"eta"} or \code{"beta"}.}
+  \item{level}{Alpha-level.}
+  \item{...}{Further arguments to be passed to or from other methods. They are ignored in this function.}
+}
+\details{
+  The \code{print} method displays  the value of
+  the log-likelihood, parameter estimates (basic parameters eta) and their standard errors.
+  For RM, RSM, and PCM models, the etas are difficulty parameters, for the LLTM, LRSM,
+  LPCM the sign of the parameters depend on the design matrix and are easiness effects by default.
+  The \code{summary} method additionally gives the full set of item parameters beta as
+  easiness parameters for all models.
+
+  Print methods are also available for the functions \code{logLik} and \code{confint}
+  (see below).
+}
+
+\value{
+The methods below are extractor functions and return various quantities:
+  \code{vcov} returns the variance-covariance matrix of the parameter estimates,
+  \code{coef} a vector of estimates of the eta or beta basic parameters,
+  \code{model.matrix} the design matrix,
+  \code{logLik} an object with elements \code{loglik} and \code{df} containing
+  the log-likelihood value and df.
+  \code{confint} a matrix of confidence interval values for eta or beta.
+}
+\author{Patrick Mair, Reinhold Hatzinger}
+
+\examples{
+data(raschdat1)
+res <- RM(raschdat1)
+print(res)
+summary(res)
+coef(res)
+vcov(res)
+model.matrix(res)
+logLik(res)
+}
+
+\keyword{models}
+
diff --git a/man/raschdat.Rd b/man/raschdat.Rd
new file mode 100755
index 0000000..32b876e
--- /dev/null
+++ b/man/raschdat.Rd
@@ -0,0 +1,30 @@
+\name{raschdat1}
+\alias{raschdat1}
+\alias{raschdat2}
+\alias{lltmdat1}
+\alias{lltmdat2}
+\alias{pcmdat}
+\alias{pcmdat2}
+\alias{lpcmdat}
+\alias{rsmdat}
+\alias{lrsmdat}
+
+\docType{data}
+\title{Data for Computing Extended Rasch Models}
+\description{Artificial data sets for computing extended Rasch models.
+}
+\usage{data(raschdat1)}
+\format{Numeric matrices with subjects as rows, items as columns, missing values as \code{NA}.
+}
+\examples{
+data(raschdat1)
+data(raschdat2)
+data(lltmdat1)
+data(lltmdat2)
+data(pcmdat)
+data(pcmdat2)
+data(lpcmdat)
+data(rsmdat)
+data(lrsmdat)
+}
+\keyword{datasets}
diff --git a/man/sim.2pl.Rd b/man/sim.2pl.Rd
new file mode 100755
index 0000000..218e3ab
--- /dev/null
+++ b/man/sim.2pl.Rd
@@ -0,0 +1,61 @@
+\name{sim.2pl}
+\alias{sim.2pl}
+
+\title{Simulation of 2-pl data}
+\description{This utility function returns a 0-1 matrix violating the
+  parallel ICC assumption in the Rasch model.
+}
+\usage{
+sim.2pl(persons, items, discrim = 0.25, seed = NULL,
+   cutpoint = "randomized")
+}
+
+\arguments{
+  \item{persons}{Either a vector of person parameters or an integer indicating
+  the number of persons (see details).}
+  \item{items}{Either a vector of item parameters or an integer indicating the number of items (see details).}
+  \item{discrim}{Standard deviation on the log scale.}
+  \item{seed}{A seed for the random number generator can be set.}
+  \item{cutpoint}{Either \code{"randomized"} for a randomized transformation of the model probability matrix into the model 0-1 matrix or a numeric value between 0 and 1 (see details).}
+}
+
+\details{If \code{persons} and/or \code{items} (using single integers) are specified to
+determine the number of subjects or items, the corresponding
+parameter vector is drawn from N(0,1). The \code{cutpoint} argument refers to the
+transformation of the theoretical probabilities into a 0-1 data matrix. A randomized
+assignment implies that for each cell an additional random number is drawn.
+If the model probability is larger than this value, the person gets 1 on this particular
+item, if smaller, 0 is assigned. Alternatively, a numeric probability cutpoint can be
+assigned and the 0-1 scoring is carried out according to the same rule.
+
+The \code{discrim} argument can be specified either as a vector of length \code{items}
+defining the item discrimination parameters in the 2-PL (e.g., \code{c(1,1,0.5,1,1.5)}),
+or as a single value. In that case, the discrimination parameters are drawn from a lognormal
+distribution with
+\code{meanlog = 0}, where the specified
+value in \code{discrim} refers to the standard deviation on the log-scale.
+The larger the values, the stronger the degree of Rasch violation. Reasonable values are up to 0.5.
+If 0, the data are Rasch homogeneous.
+}
+
+\references{
+Su\'arez-Falc\'on, J. C., & Glas, C. A. W. (2003). Evaluation of global testing procedures for
+   item fit to the Rasch model. British Journal of Mathematical and Statistical Psychology,
+   56, 127-143.
+}
+
+\seealso{\code{\link{sim.rasch}}, \code{\link{sim.locdep}}, \code{\link{sim.xdim}}}
+\examples{
+
+#simulating 2-PL data
+#500 persons, 10 items, sdlog = 0.30, randomized cutpoint
+X <- sim.2pl(500, 10, discrim = 0.30)
+
+#item and discrimination parameters from uniform distribution,
+#cutpoint fixed
+dpar <- runif(50, 0, 2)
+ipar <- runif(50, -1.5, 1.5)
+X <- sim.2pl(500, ipar, dpar, cutpoint = 0.5)
+}
+
+\keyword{models}
diff --git a/man/sim.locdep.Rd b/man/sim.locdep.Rd
new file mode 100755
index 0000000..fbf83fa
--- /dev/null
+++ b/man/sim.locdep.Rd
@@ -0,0 +1,55 @@
+\name{sim.locdep}
+\alias{sim.locdep}
+
+\title{Simulation of locally dependent items}
+\description{This utility function returns a 0-1 matrix violating the
+local independence assumption.
+}
+\usage{
+sim.locdep(persons, items, it.cor = 0.25, seed = NULL,
+   cutpoint = "randomized")
+}
+
+\arguments{
+  \item{persons}{Either a vector of person parameters or an integer indicating the number of persons (see details).}
+  \item{items}{Either a vector of item parameters or an integer indicating the number of items (see details).}
+  \item{it.cor}{Either a single correlation value between 0 and 1 or a positive semi-definite VC matrix.}
+  \item{seed}{A seed for the random number generator can be set.}
+  \item{cutpoint}{Either \code{"randomized"} for a randomized transformation of the model probability matrix into the model 0-1 matrix or a numeric value between 0 and 1 (see details).}
+}
+
+\details{If \code{persons} or \code{items} is an integer value, the corresponding parameter vector
+is drawn from N(0,1). The \code{cutpoint} argument refers to the transformation of the theoretical
+probabilities into a 0-1 data matrix. A randomized assignment implies that for each cell an
+additional random number is drawn. If the model probability is larger than this value,
+the person gets 1 on this particular item, if smaller, 0 is assigned. Alternatively, a numeric probability cutpoint can be assigned and the 0-1 scoring is carried out according to the same rule.
+
+The argument \code{it.cor} reflects the pair-wise inter-item correlation. If this should be constant
+across the items, a single value between 0 (i.e. Rasch model) and 1 (strong violation) can be specified.
+Alternatively, a symmetric VC-matrix of dimension number of items can be defined.
+
+}
+
+\references{
+Jannarone, R. J. (1986). Conjunctive item response theory kernels. Psychometrika, 51,
+357-373.
+
+Su\'arez-Falc\'on, J. C., & Glas, C. A. W. (2003). Evaluation of global testing procedures for
+   item fit to the Rasch model. British Journal of Mathematical and Statistical Psychology,
+   56, 127-143.
+}
+
+\seealso{\code{\link{sim.rasch}}, \code{\link{sim.2pl}}, \code{\link{sim.xdim}}}
+\examples{
+
+#simulating locally-dependent data
+#500 persons, 10 items, inter-item correlation of 0.5
+X <- sim.locdep(500, 10, it.cor = 0.5)
+
+#500 persons, 4 items, correlation matrix specified
+sigma <- matrix(c(1,0.2,0.2,0.3,0.2,1,0.4,0.1,0.2,0.4,1,0.8,0.3,0.1,0.8,1),
+   ncol = 4)
+X <- sim.locdep(500, 4, it.cor = sigma)
+}
+
+\keyword{models}
diff --git a/man/sim.rasch.Rd b/man/sim.rasch.Rd
new file mode 100755
index 0000000..c2497a9
--- /dev/null
+++ b/man/sim.rasch.Rd
@@ -0,0 +1,40 @@
+\name{sim.rasch}
+\alias{sim.rasch}
+
+\title{Simulation of Rasch homogeneous data}
+\description{This utility function returns a 0-1 matrix which fits the Rasch model.
+}
+\usage{
+sim.rasch(persons, items, seed = NULL, cutpoint = "randomized")
+}
+
+\arguments{
+  \item{persons}{Either a vector of person parameters or an integer indicating the number of persons (see details)}
+  \item{items}{Either a vector of item parameters or an integer indicating the number of items (see details)}
+  \item{seed}{A seed for the random number generator can be set.}
+  \item{cutpoint}{Either \code{"randomized"} for a randomized transformation of the model probability matrix into the model 0-1 matrix or a numeric value between 0 and 1 (see details)}
+}
+
+\details{If \code{persons} or \code{items} is an integer value, the corresponding parameter vector is drawn from N(0,1). The \code{cutpoint} argument refers to the transformation of the theoretical probabilities into a 0-1 data matrix. A randomized assignment implies that for each cell an additional random number is drawn. If the model probability is larger than this value, the person gets 1 on this particular item, if smaller, 0 is assigned. Alternatively, a numeric probability cutpoint can be assigned and the 0-1 scoring is carried out according to the same rule.
+}
+
+\references{
+Su\'arez-Falc\'on, J. C., & Glas, C. A. W. (2003). Evaluation of global testing procedures for
+   item fit to the Rasch model. British Journal of Mathematical and Statistical Psychology,
+   56, 127-143.
+}
+
+\seealso{\code{\link{sim.xdim}}, \code{\link{sim.locdep}}, \code{\link{sim.2pl}}}
+\examples{
+
+#simulating Rasch homogeneous data
+#100 persons, 10 items, parameter drawn from N(0,1)
+X <- sim.rasch(100, 10)
+
+#person parameters drawn from uniform distribution, fixed cutpoint
+ppar <- runif(100,-2,2)
+X <- sim.rasch(ppar, 10, cutpoint = 0.5)
+}
+
+\keyword{models}
+
diff --git a/man/sim.xdim.Rd b/man/sim.xdim.Rd
new file mode 100755
index 0000000..3c64977
--- /dev/null
+++ b/man/sim.xdim.Rd
@@ -0,0 +1,64 @@
+\name{sim.xdim}
+\alias{sim.xdim}
+
+\title{Simulation of multidimensional binary data}
+\description{This utility function simulates a 0-1 matrix violating the
+  unidimensionality assumption in the Rasch model.
+}
+\usage{
+sim.xdim(persons, items, Sigma, weightmat, seed = NULL,
+   cutpoint = "randomized")
+}
+
+\arguments{
+  \item{persons}{Either a matrix (each column corresponds to a dimension) of person parameters or an integer indicating the number of persons (see details).}
+  \item{items}{Either a vector of item parameters or an integer indicating the number of items (see details).}
+  \item{Sigma}{A positive-definite symmetric matrix specifying the covariance matrix of the variables.}
+  \item{weightmat}{Matrix for item-weights for each dimension (columns).}
+  \item{seed}{A seed for the random number generator can be set.}
+  \item{cutpoint}{Either \code{"randomized"} for a randomized transformation of the model probability matrix into the model 0-1 matrix or a numeric value between 0 and 1 (see details).}
+}
+
+\details{If \code{persons} is specified as matrix, \code{Sigma} is ignored. If \code{items} is
+an integer value, the corresponding parameter vector is drawn from N(0,1).
+The \code{cutpoint} argument refers to the transformation of the theoretical probabilities
+into a 0-1 data matrix. A randomized assignment implies that for each cell an additional random
+number is drawn. If the model probability is larger than this value, the person gets 1 on
+this particular item, if smaller, 0 is assigned. Alternatively, a numeric probability
+cutpoint can be assigned and the 0-1 scoring is carried out according to the same rule.
+
+If \code{weightmat} is not specified, a random indicator matrix is generated where each item is a measurement
+of only one dimension. For instance, the first row for a 3D-model could be (0,1,0) which means
+that the first item measures the second dimension only. This corresponds to the between-item
+multidimensional model presented by Adams et al. (1997).
+
+\code{Sigma} reflects the VC-structure for the person parameters drawn from a multivariate
+standard normal distribution. Thus, the diagonal elements are typically 1 and the lower the
+covariances in the off-diagonal, the stronger the model violation.
+}
+
+\references{
+Adams, R. J., Wilson, M., & Wang, W. C. (1997). The multidimensional random coefficients
+   multinomial logit model. Applied Psychological Measurement, 21, 1-23.
+
+Glas, C. A. W. (1992). A Rasch model with a multivariate distribution of ability. In M.
+   Wilson (Ed.), Objective Measurement: Foundations, Recent Developments, and
+   Applications (pp. 236-258). Norwood, NJ: Ablex.
+}
+
+\seealso{\code{\link{sim.rasch}}, \code{\link{sim.locdep}}, \code{\link{sim.2pl}}}
+\examples{
+
+# 500 persons, 10 items, 3 dimensions, random weights.
+Sigma <- matrix(c(1, 0.01, 0.01, 0.01, 1, 0.01, 0.01, 0.01, 1), 3)
+X <- sim.xdim(500, 10, Sigma)
+
+#500 persons, 10 items, 2 dimensions, weights fixed to 0.5
+itemvec <- runif(10, -2, 2)
+Sigma <- matrix(c(1, 0.05, 0.05, 1), 2)
+weights <- matrix(0.5, ncol = 2, nrow = 10)
+X <- sim.xdim(500, itemvec, Sigma, weightmat = weights)
+
+}
+\keyword{models}
+
diff --git a/man/stepwiseIt.Rd b/man/stepwiseIt.Rd
new file mode 100755
index 0000000..a6d951e
--- /dev/null
+++ b/man/stepwiseIt.Rd
@@ -0,0 +1,68 @@
+\name{stepwiseIt}
+\alias{stepwiseIt}
+\alias{stepwiseIt.eRm}
+\alias{print.step}
+
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Stepwise item elimination}
+\description{This function eliminates items stepwise according to one of the following 
+criteria: itemfit, Wald test, Andersen's LR-test
+}
+\usage{
+\method{stepwiseIt}{eRm}(object, criterion = list("itemfit"), alpha = 0.05, verbose = TRUE, maxstep = NA)
+}
+
+\arguments{
+  \item{object}{Object of class \code{eRm}.}
+  \item{criterion}{List with either \code{"itemfit"}, \code{"Waldtest"} or \code{"LRtest"} as first element. 
+  Optionally, for the Waldtest and LRtest a second element containing the split criterion can be specified (see details).}
+  \item{alpha}{Significance level.}
+  \item{verbose}{If \code{TRUE} intermediate results are printed out. }
+  \item{maxstep}{Maximum number of elimination steps. If \code{NA} the procedure stops when the itemset is Rasch homogeneous.}
+}
+
+\details{If \code{criterion = list("itemfit")} the elimination stops when none of the p-values 
+in itemfit is significant. Within each step the item with the largest chi-squared 
+itemfit value is excluded.
+
+If \code{criterion = list("Waldtest")} the elimination stops when none of the p-values 
+resulting from the Wald test is significant. Within each step the item with the largest z-value in 
+Wald test is excluded. 
+
+If \code{criterion = list("LRtest")} the elimination stops when Andersen's LR-test is not
+significant. Within each step the item with the largest z-value in Wald test is excluded. 
+}
+
+\value{
+The function returns an object of class \code{step} containing:
+  \item{X}{Reduced data matrix (bad items eliminated)}
+  \item{fit}{Object of class \code{eRm} with the final item parameter elimination}
+  \item{it.elim}{Vector containing the names of the eliminated items}
+  \item{res.wald}{Elimination results for Wald test criterion}
+  \item{res.itemfit}{Elimination results for itemfit criterion}
+  \item{res.LR}{Elimination results for LR-test criterion}
+  \item{nsteps}{Number of elimination steps}
+}
+
+\seealso{ \code{\link{LRtest.Rm}}, \code{\link{Waldtest.Rm}}, \code{\link{itemfit.ppar}}
+}
+\examples{
+
+## 2pl-data, 100 persons, 10 items
+set.seed(123)
+X <- sim.2pl(500, 10, 0.4)
+res <- RM(X)
+
+## elimination according to itemfit
+stepwiseIt(res, criterion = list("itemfit"))      
+
+## Wald test based on mean splitting
+stepwiseIt(res, criterion = list("Waldtest","mean")) 
+
+## Andersen LR-test based on random split
+set.seed(123)
+groupvec <- sample(1:3, 500, replace = TRUE)
+stepwiseIt(res, criterion = list("LRtest",groupvec))
+
+}
+\keyword{models}
diff --git a/man/thresholds.Rd b/man/thresholds.Rd
new file mode 100755
index 0000000..94ac389
--- /dev/null
+++ b/man/thresholds.Rd
@@ -0,0 +1,75 @@
+\name{thresholds}
+\alias{thresholds}
+\alias{thresholds.eRm}
+\alias{print.threshold}
+\alias{summary.threshold}
+\alias{confint.threshold}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Computation of item-category threshold parameters.}
+\description{This function transforms the beta parameters into threshold
+parameters. These can be interpreted by means of log-odds as visualized in ICC plots.
+}
+\usage{
+\method{thresholds}{eRm}(object)
+\method{print}{threshold}(x, ...)
+\method{summary}{threshold}(object, ...)
+\method{confint}{threshold}(object, parm, level = 0.95, ...)
+}
+
+\arguments{
+Arguments for \code{thresholds}:
+  \item{object}{Object of class \code{eRm}.}
+
+Arguments for \code{print}, \code{summary}, and \code{confint} methods:
+  \item{x}{Object of class \code{threshold}.}
+  \item{parm}{Parameter specification (ignored).}
+  \item{level}{Alpha-level.}
+  \item{...}{Further arguments to be passed to methods. They are ignored.}
+}
+\details{For dichotomous models (i.e., RM and LLTM) threshold parameters are not computed.
+The \code{print} method returns a location parameter for each item which is the
+mean of the corresponding threshold parameters. For LPCM and LRSM the thresholds are
+computed for each design matrix block (i.e., measurement point/group) separately
+(PCM and RSM have only 1 block).}
+
+\value{
+The function \code{thresholds} returns an object of class \code{threshold} containing:
+  \item{threshpar}{Vector with threshold parameters.}
+  \item{se.thresh}{Vector with standard errors.}
+  \item{threshtable}{Data frame with location and threshold parameters.}
+}
+
+\references{
+Andrich, D. (1978). Application of a psychometric rating model to ordered categories which are scored with successive integers. Applied Psychological Measurement, 2, 581-594.
+}
+\seealso{
+      \code{\link{plotICC.Rm}}
+}
+\examples{
+
+#Threshold parameterization for a rating scale model
+data(rsmdat)
+res <- RSM(rsmdat)
+th.res <- thresholds(res)
+th.res
+confint(th.res)
+summary(th.res)
+
+#Threshold parameters for a PCM with ICC plot
+data(pcmdat)
+res <- PCM(pcmdat)
+th.res <- thresholds(res)
+th.res
+plotICC(res)
+
+#Threshold parameters for a LPCM:
+#Block 1: t1, g1; Block 2: t1, g2; ...; Block 6: t2,g3
+
+data(lpcmdat)
+G <- c(rep(1,7),rep(2,7),rep(3,6)) # group vector for 3 groups
+res <- LPCM(lpcmdat, mpoints = 2, groupvec = G)
+th.res <- thresholds(res)
+th.res
+
+}
+\keyword{models}
diff --git a/src/components.c b/src/components.c
new file mode 100755
index 0000000..1e28883
--- /dev/null
+++ b/src/components.c
@@ -0,0 +1,68 @@
+/*
+######################################################################
+#
+# components.c
+#
+# copyright (c) 2004, Carter T. Butts <buttsc at uci.edu>
+# Last Modified 11/26/04
+# Licensed under the GNU General Public License version 2 (June, 1991)
+#
+# Part of the R/sna package
+#
+# This file contains routines related to the identification of 
+# components.
+#
+######################################################################
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <R.h>
+#include "components.h"
+
+void component_dist_R(double *g, double *pn, double *memb)
+/*
+Determine component memberships in g.  The memberships are stored in memb,
+which must be a zero-initialized vector of length *pn.
+
+g is an n-by-n adjacency matrix stored column-major (R layout), so
+g[nod+i*n] is the entry in row nod, column i; the graph is presumably
+undirected/symmetric -- confirm with the R-side caller.  On return,
+memb[i] holds the 1-based (as a double) component number of node i.
+Entry point for R's .C() interface, hence all-pointer arguments.
+*/
+{
+  char *visited;    /*Per-node BFS state: 0=unseen, 1=visit now, 2=visit next level, 3=done*/
+  long int n,v,nod,i,s1count;
+  double comp=0.0;  /*Running component label, incremented per new component*/
+
+  /*Set up stuff*/
+  n=*pn;
+  /*Allocate memory for visited list*/
+  visited=(char *)R_alloc(n,sizeof(char));  /*R-managed; reclaimed after .C returns*/
+  /*Cycle through each node, performing a BFS*/
+  for(v=0;v<n;v++){
+    if(memb[v]==0.0){   /*Ignore nodes w/known membership*/
+      comp++;           /*Increment component counter*/
+      for(i=0;i<n;i++)  /*Mark all nodes unvisited*/
+        visited[i]=0;
+      s1count=0;        /*Count of nodes in state 1 (current BFS frontier)*/
+      visited[v]++;     /*Mark v as "to be visited"*/
+      s1count++;
+      memb[v]=comp;     /*v belongs to new component*/
+      while(s1count){
+        while(s1count){
+          /*Find next node to be visited, change state*/
+          for(nod=v;visited[nod]!=1;nod++); /*Only OK b/c s1count>0*/
+          visited[nod]=3;               /*Mark as visited*/
+          s1count--;
+          memb[nod]=comp;               /*Set membership to comp*/
+          for(i=v+1;i<n;i++)            /*Walk the unvisited neighborhood*/
+            /*Indices <= v may be skipped: every node the outer loop has
+              already passed carries a nonzero membership by then*/
+            if((g[nod+i*n]!=0.0)&&(visited[i]==0)){
+              visited[i]=2;               /*Visit this next time*/
+            }
+        } /*Continue until we run out of nodes at this level*/
+        /*Mark all "to-be-visited" nodes as visitable*/
+        for(i=v+1;i<n;i++)
+          if(visited[i]==2){
+            visited[i]=1;
+            s1count++;
+          }
+      } /*Keep going until all nodes are accounted for*/
+    }
+  }
+}
diff --git a/src/components.h b/src/components.h
new file mode 100755
index 0000000..1803149
--- /dev/null
+++ b/src/components.h
@@ -0,0 +1,34 @@
+/*
+######################################################################
+#
+# components.h
+#
+# copyright (c) 2004, Carter T. Butts <buttsc at uci.edu>
+# Last Modified 11/26/04
+# Licensed under the GNU General Public License version 2 (June, 1991)
+#
+# Part of the R/sna package
+#
+# This file contains headers for components.c.
+#
+######################################################################
+*/
+#ifndef COMPONENTS_H
+#define COMPONENTS_H
+
+/*DECLARATIONS/INCLUSIONS---------------------------------------------------*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <R.h>
+
+
+/*INTERNAL ROUTINES---------------------------------------------------------*/
+
+
+/*R-CALLABLE ROUTINES-------------------------------------------------------*/
+
+void component_dist_R(double *g, double *pn, double *memb);
+
+
+#endif
diff --git a/src/geodist.c b/src/geodist.c
new file mode 100755
index 0000000..14b338a
--- /dev/null
+++ b/src/geodist.c
@@ -0,0 +1,74 @@
+/*
+######################################################################
+#
+# geodist.c
+#
+# copyright (c) 2004, Carter T. Butts <buttsc at uci.edu>
+# Last Modified 11/21/04
+# Licensed under the GNU General Public License version 2 (June, 1991)
+#
+# Part of the R/sna package
+#
+# This file contains routines related to the computation of geodesics.
+#
+######################################################################
+*/
+ 
+#include <stdio.h>
+#include <stdlib.h>
+#include <R.h>
+#include "geodist.h"
+
+void geodist_R(double *g, double *pn, double *gd, double *sigma)
+/*
+Compute geodesics for the graph in g.  The geodesic distances are stored in
+gd, and the path counts in sigma (both being nxn matrices).  Note that these
+should be initialized to all infs and all 0s, respectively.
+
+g is an n-by-n edge-value matrix in column-major (R) order; a zero entry
+means "no edge".  On return gd[v+i*n] holds the distance found from source
+v to node i (left at its initial Inf when i is unreachable) and sigma[v+i*n]
+the accumulated count of equally short paths.  The search expands one BFS
+level at a time from each source, summing edge values along the way.
+Entry point for R's .C() interface, hence all-pointer arguments.
+*/
+{
+  char *visited;   /*Per-node BFS state: 0=unseen, 1=visit now, 2=visit next level, 3=done*/
+  long int n,v,i,nod,s1count;
+
+  /*Set up stuff*/
+  n=*pn;
+  /*Allocate memory for visited list*/
+  visited=(char *)R_alloc(n,sizeof(char));  /*R-managed; reclaimed after .C returns*/
+  /*Cycle through each node, performing a BFS*/
+  for(v=0;v<n;v++){
+    /*Clear the visit list*/
+    for(i=0;i<n;i++)
+      visited[i]=0; 
+    s1count=0;       /*Count of nodes in state 1 (current BFS frontier)*/
+    /*Start with the source node*/
+    nod=v;
+    visited[nod]=1;
+    s1count++;
+    sigma[v+v*n]=1.0;  /*One trivial path from v to itself...*/
+    gd[v+v*n]=0.0;     /*...at distance zero*/
+    /*Now, conduct the trace*/
+    while(s1count>0){
+      while(s1count>0){
+        /*Find the next visitable node, and change its state*/
+        for(nod=0;visited[nod]!=1;nod++); /*Only OK b/c s1count>0*/
+        visited[nod]=3;
+        s1count--;
+        for(i=0;i<n;i++)   /*Walk the unvisited neighborhood*/
+          if((g[nod+i*n]!=0.0)&&((visited[i]==0)||(visited[i]==2))){
+            if(visited[i]==0)  /*If j is unvisited, visit it next time*/
+              visited[i]=2;
+            /*Relaxation test: with gd preset to Inf this also covers the
+              first discovery of i; ">=" lets equal-length paths add counts*/
+            if(gd[v+i*n]-gd[v+nod*n]>=g[nod+i*n]){
+              gd[v+i*n]=gd[v+nod*n]+g[nod+i*n];  /*Geodist is nod's+g*/
+              sigma[v+i*n]+=sigma[v+nod*n];      /*Add to path count*/
+            }
+          }
+      } /*Continue until we run out of nodes for this iteration*/
+      /*Mark all "to-be-visited" nodes as visitable*/
+      for(i=0;i<n;i++)
+        if(visited[i]==2){
+          visited[i]=1;
+          s1count++;
+        }
+    } /*Keep going until all nodes are accounted for*/
+  } 
+}
+
diff --git a/src/geodist.h b/src/geodist.h
new file mode 100755
index 0000000..354b61e
--- /dev/null
+++ b/src/geodist.h
@@ -0,0 +1,34 @@
+/*
+######################################################################
+#
+# geodist.h
+#
+# copyright (c) 2004, Carter T. Butts <buttsc at uci.edu>
+# Last Modified 11/21/04
+# Licensed under the GNU General Public License version 2 (June, 1991)
+#
+# Part of the R/sna package
+#
+# This file contains headers for geodist.c.
+#
+######################################################################
+*/
+#ifndef GEODIST_H
+#define GEODIST_H
+
+/*DECLARATIONS/INCLUSIONS---------------------------------------------------*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <R.h>
+
+
+/*INTERNAL ROUTINES---------------------------------------------------------*/
+
+
+/*R-CALLABLE ROUTINES-------------------------------------------------------*/
+
+void geodist_R(double *g, double *pn, double *gd, double *sigma);
+
+
+#endif

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-erm.git



More information about the debian-science-commits mailing list