[cg3] 01/02: Imported Upstream version 1.0.0~r12254

Tino Didriksen tinodidriksen-guest at moszumanska.debian.org
Mon Jun 26 09:57:50 UTC 2017


This is an automated email from the git hooks/post-receive script.

tinodidriksen-guest pushed a commit to branch master
in repository cg3.

commit 6dedb6ba7a68400e8a966003ef55bbe98ed111da
Author: Tino Didriksen <tino at didriksen.cc>
Date:   Mon Jun 26 09:56:30 2017 +0000

    Imported Upstream version 1.0.0~r12254
---
 CMakeLists.txt                              |  23 +-
 ChangeLog                                   | 951 +++++++++++++---------------
 README.md                                   |   8 +-
 TODO                                        |   4 +
 clang-format.pl                             |   2 +-
 cmake.sh                                    |   3 +-
 emacs/cg.el                                 | 119 ++--
 manual/cmdreference.xml                     |  25 +-
 manual/compatibility.xml                    |   6 +-
 manual/grammar.xml                          |  20 +
 manual/installation.xml                     |  26 +-
 manual/man/cg-comp.1                        |   8 +-
 manual/man/cg-conv.1                        |   6 +-
 manual/man/cg-proc.1                        |   4 +-
 manual/man/cg3-autobin.pl.1                 |   4 +-
 manual/man/vislcg3.1                        |  12 +-
 manual/manual.xml                           |   2 +-
 manual/probabilistic.xml                    |   4 +-
 manual/rules.xml                            |  42 +-
 manual/sets.xml                             |  21 +-
 manual/tags.xml                             |  16 +-
 newsletters/2017-05-20.txt                  |  49 ++
 scripts/CG3_External.pm                     |  14 +-
 scripts/cg-strictify                        | 202 ++++++
 scripts/cg3-autobin.pl.in                   |  12 +-
 scripts/external.pl                         |   2 +-
 scripts/external_text.pl                    |  16 +-
 scripts/profile-revisions.php               |  12 +-
 src/AST.hpp                                 |  11 +-
 src/ApertiumApplicator.cpp                  |  76 +--
 src/ApertiumApplicator.hpp                  |   3 +-
 src/BinaryGrammar.cpp                       |   2 +-
 src/BinaryGrammar.hpp                       |   6 +-
 src/BinaryGrammar_read.cpp                  |  38 +-
 src/BinaryGrammar_read_10043.cpp            |  20 +-
 src/BinaryGrammar_write.cpp                 |  58 +-
 src/CMakeLists.txt                          |  16 +-
 src/Cohort.cpp                              |  50 +-
 src/Cohort.hpp                              |  10 +-
 src/CohortIterator.cpp                      |  21 +-
 src/CohortIterator.hpp                      |   2 +-
 src/ContextualTest.cpp                      |   8 +-
 src/ContextualTest.hpp                      |   4 +-
 src/FSTApplicator.cpp                       |  28 +-
 src/FSTApplicator.hpp                       |   2 +-
 src/FormatConverter.cpp                     |   2 +-
 src/FormatConverter.hpp                     |   2 +-
 src/Grammar.cpp                             | 272 ++++----
 src/Grammar.hpp                             |  13 +-
 src/GrammarApplicator.cpp                   | 153 ++---
 src/GrammarApplicator.hpp                   |  36 +-
 src/GrammarApplicator_matchSet.cpp          | 139 ++--
 src/GrammarApplicator_reflow.cpp            | 111 ++--
 src/GrammarApplicator_runContextualTest.cpp | 120 ++--
 src/GrammarApplicator_runGrammar.cpp        |  56 +-
 src/GrammarApplicator_runRules.cpp          | 784 ++++++++++++++---------
 src/GrammarWriter.cpp                       |  71 ++-
 src/GrammarWriter.hpp                       |   2 +-
 src/IGrammarParser.hpp                      |   2 +-
 src/MatxinApplicator.cpp                    |  36 +-
 src/MatxinApplicator.hpp                    |   2 +-
 src/MweSplitApplicator.cpp                  |  34 +-
 src/MweSplitApplicator.hpp                  |   2 +-
 src/NicelineApplicator.cpp                  |  44 +-
 src/NicelineApplicator.hpp                  |   2 +-
 src/PlaintextApplicator.cpp                 |  29 +-
 src/PlaintextApplicator.hpp                 |   4 +-
 src/Reading.cpp                             |   8 +-
 src/Reading.hpp                             |   6 +-
 src/Relabeller.cpp                          |  58 +-
 src/Relabeller.hpp                          |  10 +-
 src/Rule.cpp                                |  22 +-
 src/Rule.hpp                                |  64 +-
 src/Set.cpp                                 |   4 +-
 src/Set.hpp                                 |   6 +-
 src/SingleWindow.cpp                        |  12 +-
 src/SingleWindow.hpp                        |   2 +-
 src/Strings.cpp                             |  12 +-
 src/Strings.hpp                             |   8 +-
 src/Tag.cpp                                 |  36 +-
 src/Tag.hpp                                 |  16 +-
 src/TagTrie.hpp                             |  32 +-
 src/TextualParser.cpp                       | 182 ++++--
 src/TextualParser.hpp                       |   9 +-
 src/Window.cpp                              |  28 +-
 src/Window.hpp                              |   2 +-
 src/bloomish.hpp                            |   6 +-
 src/cg-mwesplit.cpp                         |   4 +-
 src/cg-relabel.cpp                          |   4 +-
 src/cg3.h                                   |   7 +-
 src/cg_comp.cpp                             |   2 +-
 src/cg_conv.cpp                             |   9 +-
 src/cg_proc.cpp                             |   7 +-
 src/flat_unordered_map.hpp                  |   4 +-
 src/flat_unordered_set.hpp                  |   4 +-
 src/inlines.hpp                             |  79 ++-
 src/interval_vector.hpp                     |   4 +-
 src/istream.hpp                             |   4 +-
 src/libcg3.cpp                              |  44 +-
 src/main.cpp                                |  43 +-
 src/options.hpp                             |   6 +-
 src/options_conv.hpp                        |   6 +-
 src/parser_helpers.hpp                      |  48 +-
 src/process.hpp                             |   2 +-
 src/scoped_stack.hpp                        |   2 +-
 src/sorted_vector.hpp                       |  19 +-
 src/stdafx.hpp                              |  19 +-
 src/test_libcg3.c                           |   2 +-
 src/uextras.cpp                             |   2 +-
 src/uextras.hpp                             |   4 +-
 src/version.hpp                             |  20 +-
 test/Apertium/T_Flush/expected.txt          | Bin 0 -> 105 bytes
 test/Apertium/T_Flush/grammar.cg3           |   7 +
 test/Apertium/T_Flush/input.txt             | Bin 0 -> 147 bytes
 test/Apertium/T_Flush/run.pl                |  61 ++
 test/T_Append/expected.txt                  |   6 +-
 test/T_Append/grammar.cg3                   |   2 +-
 test/T_Dependency_Loops/grammar.cg3         |   2 +-
 test/T_External/run.pl                      |   2 +-
 test/T_JumpExecute/expected.txt             |  18 +-
 test/T_MapAdd_Different/expected.txt        |   6 +-
 test/T_MapAdd_Different/grammar.cg3         |   8 +-
 test/T_MapAdd_Different/grammar.cg3b.10043  | Bin 907 -> 0 bytes
 test/T_MapAdd_Different/input.txt           |   2 +-
 test/T_Movement/expected.txt                |  12 +-
 test/T_Movement/grammar.cg3                 |   2 +
 test/T_Movement/grammar.cg3b.10043          | Bin 1574 -> 0 bytes
 test/T_Movement/input.txt                   |  14 +-
 test/T_MweSplit/run.pl                      |   4 +-
 test/T_RelabelList/run.pl                   |   2 +-
 test/T_RelabelList_Apertium/run.pl          |   2 +-
 test/T_RelabelSet/run.pl                    |   2 +-
 test/T_RemCohort/expected.txt               |  40 +-
 test/T_RemCohort/grammar.cg3                |   3 +
 test/T_RemCohort/grammar.cg3b.10043         | Bin 2239 -> 0 bytes
 test/T_RemCohort/input.txt                  |  22 +-
 test/T_SetOps/expected.txt                  |  18 +-
 test/T_SetOps/grammar.cg3                   |   3 +-
 test/T_SetOps/grammar.cg3b.10043            | Bin 1437 -> 0 bytes
 test/T_SubReadings_Apertium/run.pl          |   2 +-
 test/T_SubstituteNil/grammar.cg3            |   5 +-
 test/T_SubstituteNil/grammar.cg3b.10043     | Bin 543 -> 0 bytes
 test/T_SubstituteNil/input.txt              |   2 +-
 test/clean.sh                               |   2 +-
 test/runall.pl                              |   4 +-
 145 files changed, 3029 insertions(+), 1976 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1830150..8012920 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -40,9 +40,19 @@ if(MSVC)
 	set(CMAKE_C_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE})
 else()
 	set(_FLAGS_COMMON "-Wall -Wextra -Wno-missing-field-initializers -Wno-deprecated -Wno-unused-parameter -fPIC")
-	if((CMAKE_COMPILER_IS_GNUCXX AND NOT ${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 4.4) OR (CMAKE_CXX_COMPILER_ID STREQUAL "Clang"))
-		set(_FLAGS_COMMON "${_FLAGS_COMMON} -Wno-unused-result")
+
+	include(CheckCXXCompilerFlag)
+	foreach(flag "-Wno-unused-result" "-flto")
+		string(REGEX REPLACE "[^A-Za-z0-9]" "-" _flag ${flag})
+		CHECK_CXX_COMPILER_FLAG(${flag} COMPILER_SUPPORTS_${_flag})
+		if(COMPILER_SUPPORTS_${_flag})
+			set(_FLAGS_COMMON "${_FLAGS_COMMON} ${flag}")
+		endif()
+	endforeach()
+	if(COMPILER_SUPPORTS_flto)
+		set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} -flto")
 	endif()
+
 	set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${_FLAGS_COMMON} -fvisibility-inlines-hidden")
 	set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g3")
 	set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3")
@@ -51,12 +61,10 @@ else()
 	set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -O3")
 
 	# Enable latest possible C++ standard
-	include(CheckCXXCompilerFlag)
 	foreach(flag "-std=c++17" "-std=c++1z" "-std=c++14" "-std=c++1y" "-std=c++11" "-std=c++0x")
 		string(REGEX REPLACE "[^a-z0-9]" "-" _flag ${flag})
 		CHECK_CXX_COMPILER_FLAG(${flag} COMPILER_SUPPORTS_${_flag})
 		if(COMPILER_SUPPORTS_${_flag})
-			message(STATUS "Enabling ${flag}")
 			set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}")
 			break()
 		endif()
@@ -78,6 +86,12 @@ else()
 		PATTERN ".svn" EXCLUDE)
 endif()
 
+include(TestBigEndian)
+TEST_BIG_ENDIAN(BIG_ENDIAN)
+if(BIG_ENDIAN)
+	add_definitions(-DBIG_ENDIAN)
+endif()
+
 if(WIN32)
 	add_definitions(-DUNICODE -D_UNICODE -D_SECURE_SCL=0 -D_ITERATOR_DEBUG_LEVEL=0 -D_CRT_SECURE_NO_DEPRECATE -DWIN32_LEAN_AND_MEAN -DVC_EXTRALEAN -DNOMINMAX)
 	include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include/win32")
@@ -94,6 +108,7 @@ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/src)
 
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/scripts/cg3-autobin.pl.in ${CMAKE_CURRENT_BINARY_DIR}/scripts/cg3-autobin.pl @ONLY)
 install(PROGRAMS "${CMAKE_CURRENT_BINARY_DIR}/scripts/cg3-autobin.pl" DESTINATION bin)
+install(PROGRAMS "${CMAKE_CURRENT_SOURCE_DIR}/scripts/cg-strictify" DESTINATION bin)
 
 set(ELISPDIR "share/emacs/site-lisp" CACHE PATH "Where to install Emacs Lisp files")
 install(FILES "${CMAKE_CURRENT_SOURCE_DIR}/emacs/cg.el" DESTINATION ${ELISPDIR})
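
Besides probing the optional warning and LTO flags with CHECK_CXX_COMPILER_FLAG, the CMakeLists.txt changes above make the build detect byte order with TEST_BIG_ENDIAN and pass -DBIG_ENDIAN on big-endian hosts. That define only records the host byte order; acting on it is left to the C++ sources. A minimal sketch of the usual consumer side follows; the helper names are hypothetical and this is not claimed to mirror cg3's BinaryGrammar code.

    // Hypothetical helpers: swap bytes when the build declared the host
    // big-endian, so on-disk data stays little-endian on every platform.
    #include <cstdint>

    inline uint32_t swap32(uint32_t v) {
        return (v >> 24) | ((v >> 8) & 0x0000FF00u) |
               ((v << 8) & 0x00FF0000u) | (v << 24);
    }

    // Host order to on-disk little-endian order (self-inverse, so it also
    // converts little-endian data back to host order).
    inline uint32_t to_le32(uint32_t v) {
    #ifdef BIG_ENDIAN
        return swap32(v);
    #else
        return v;
    #endif
    }
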
diff --git a/ChangeLog b/ChangeLog
index 218f6c3..255f497 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,459 @@
+2017-06-21  tino
+
+	* [r12250] src/FSTApplicator.cpp, src/GrammarApplicator.cpp,
+	  src/GrammarApplicator_runContextualTest.cpp,
+	  src/MweSplitApplicator.cpp, src/NicelineApplicator.cpp,
+	  src/PlaintextApplicator.cpp, src/Strings.cpp, src/Tag.cpp,
+	  src/inlines.hpp, src/parser_helpers.hpp, src/version.hpp:
+	  constexpr cleanup
+
+2017-06-14  tino
+
+	* [r12243] src/Tag.cpp: length() -> size()
+	* [r12242] src/AST.hpp, src/ApertiumApplicator.cpp,
+	  src/BinaryGrammar_read.cpp, src/BinaryGrammar_read_10043.cpp,
+	  src/BinaryGrammar_write.cpp, src/Cohort.cpp,
+	  src/CohortIterator.cpp, src/FSTApplicator.cpp, src/Grammar.cpp,
+	  src/GrammarApplicator.cpp, src/GrammarApplicator_matchSet.cpp,
+	  src/GrammarApplicator_reflow.cpp,
+	  src/GrammarApplicator_runContextualTest.cpp,
+	  src/GrammarApplicator_runGrammar.cpp,
+	  src/GrammarApplicator_runRules.cpp, src/GrammarWriter.cpp,
+	  src/MatxinApplicator.cpp, src/MweSplitApplicator.cpp,
+	  src/NicelineApplicator.cpp, src/PlaintextApplicator.cpp,
+	  src/Reading.cpp, src/Rule.cpp, src/SingleWindow.cpp, src/Tag.cpp,
+	  src/TextualParser.cpp, src/Window.cpp, src/cg_proc.cpp,
+	  src/libcg3.cpp, src/main.cpp, src/parser_helpers.hpp: Many
+	  foreach() -> range-for
+	* [r12240] src/GrammarApplicator.cpp, src/GrammarApplicator.hpp,
+	  src/GrammarApplicator_matchSet.cpp,
+	  src/GrammarApplicator_reflow.cpp, src/Reading.hpp, src/Tag.hpp,
+	  src/TextualParser.cpp, src/inlines.hpp, src/parser_helpers.hpp,
+	  test/T_MapAdd_Different/expected.txt,
+	  test/T_MapAdd_Different/grammar.cg3: Temporary hack for
+	  quick'n'dirty ordered tag check
+
+2017-06-13  tino
+
+	* [r12239] src/Grammar.hpp, src/Relabeller.cpp, src/Relabeller.hpp,
+	  src/inlines.hpp: Hasher for UString
+	* [r12238] src/BinaryGrammar.hpp, src/Cohort.hpp,
+	  src/ContextualTest.hpp, src/Grammar.hpp, src/GrammarWriter.cpp,
+	  src/Relabeller.cpp, src/Relabeller.hpp, src/Rule.hpp,
+	  src/Set.hpp, src/Tag.hpp, src/TextualParser.cpp,
+	  src/TextualParser.hpp, src/bloomish.hpp, src/cg-mwesplit.cpp,
+	  src/cg-relabel.cpp, src/cg_conv.cpp, src/flat_unordered_map.hpp,
+	  src/flat_unordered_set.hpp, src/interval_vector.hpp,
+	  src/libcg3.cpp, src/sorted_vector.hpp, src/stdafx.hpp,
+	  src/version.hpp: More Boost -> C++11
+	* [r12237] src/ApertiumApplicator.cpp, src/BinaryGrammar_write.cpp,
+	  src/Cohort.cpp, src/ContextualTest.cpp, src/Grammar.cpp,
+	  src/GrammarApplicator.cpp, src/GrammarApplicator_matchSet.cpp,
+	  src/GrammarApplicator_reflow.cpp,
+	  src/GrammarApplicator_runContextualTest.cpp,
+	  src/GrammarApplicator_runRules.cpp, src/GrammarWriter.cpp,
+	  src/MweSplitApplicator.cpp, src/NicelineApplicator.cpp,
+	  src/Relabeller.cpp, src/Relabeller.hpp, src/Set.cpp, src/Set.hpp,
+	  src/SingleWindow.cpp, src/Tag.hpp, src/TagTrie.hpp,
+	  src/TextualParser.cpp, src/libcg3.cpp, src/stdafx.hpp:
+	  BOOST_FOREACH -> range-for
+	* [r12236] src/BinaryGrammar_read_10043.cpp,
+	  src/BinaryGrammar_write.cpp, src/Cohort.cpp, src/Grammar.cpp,
+	  src/GrammarApplicator.cpp, src/GrammarApplicator_matchSet.cpp,
+	  src/GrammarApplicator_reflow.cpp,
+	  src/GrammarApplicator_runGrammar.cpp,
+	  src/GrammarApplicator_runRules.cpp, src/GrammarWriter.cpp,
+	  src/MweSplitApplicator.cpp, src/Relabeller.cpp,
+	  src/TextualParser.cpp, src/stdafx.hpp: BOOST_AUTO -> auto
+	* [r12235] ChangeLog, manual/rules.xml,
+	  src/GrammarApplicator_runRules.cpp, src/Rule.hpp,
+	  src/Strings.cpp, src/Strings.hpp, src/TextualParser.cpp,
+	  src/version.hpp, test/T_Append/expected.txt,
+	  test/T_Append/grammar.cg3, test/T_MapAdd_Different/expected.txt,
+	  test/T_MapAdd_Different/grammar.cg3,
+	  test/T_MapAdd_Different/grammar.cg3b.10043,
+	  test/T_MapAdd_Different/input.txt: Add BEFORE|AFTER to
+	  ADD/MAP/COPY to control where tags are inserted
+
+2017-06-12  tino
+
+	* [r12233] src/Relabeller.cpp: Make rule.name a UString instead of
+	  UChar*
+	* [r12232] src/GrammarWriter.cpp: Make rule.name a UString instead
+	  of UChar*
+	* [r12231] src/BinaryGrammar_write.cpp, src/GrammarApplicator.cpp,
+	  src/GrammarWriter.cpp, src/Rule.cpp, src/Rule.hpp,
+	  src/TextualParser.cpp, src/cg_proc.cpp, src/main.cpp: Make
+	  rule.name a UString instead of UChar*
+
+2017-06-08  tino
+
+	* [r12225] src/GrammarApplicator_runRules.cpp: Tighten seen_barrier
+	  break to only the contextual target
+
+2017-05-31  tino
+
+	* [r12219] src/GrammarApplicator_runContextualTest.cpp: *c barrier
+	  now only skips branches instead of killing the whole search
+	* [r12218] src/GrammarApplicator_runContextualTest.cpp,
+	  src/TextualParser.cpp: Barrier is now ok for self tests
+
+2017-05-30  tino
+
+	* [r12213] src/cg3.h, src/libcg3.cpp: Add 3 functions from Paul
+	  Meurer <paul.meurer at uni.no>
+
+2017-05-23  tino
+
+	* [r12203] scripts/cg3-autobin.pl.in: Forgot some are optional
+
+2017-05-22  tino
+
+	* [r12202] src/AST.hpp, src/TextualParser.cpp,
+	  src/TextualParser.hpp, src/parser_helpers.hpp,
+	  test/T_Dependency_Loops/grammar.cg3: Added LIST-TAGS as shorthand
+	  for gobbling up all LIST X = X; sets
+
+2017-05-19  tino
+
+	* [r12200] ChangeLog, newsletters/2017-05-20.txt,
+	  scripts/cg3-autobin.pl.in, src/GrammarApplicator_runRules.cpp,
+	  src/version.hpp: Release 1.0.0
+	* [r12199] manual/cmdreference.xml, manual/grammar.xml,
+	  manual/tags.xml, scripts/cg-strictify, scripts/cg3-autobin.pl.in:
+	  Document cg-strictify; Use env to find perl; Make Getopt::Long
+	  case-sensitive
+
+2017-05-18  tino
+
+	* [r12198] CMakeLists.txt, scripts/cg-strictify, src/Strings.cpp,
+	  src/Strings.hpp, src/TextualParser.cpp, src/TextualParser.hpp:
+	  Add cg-strictify script to help with STRICT-TAG'ing existing
+	  grammars; Add strict-regex and strict-icase options
+
+2017-05-15  tino
+
+	* [r12197] manual/tags.xml, src/TextualParser.cpp: Nicer
+	  strict-tags
+	* [r12196] src/FSTApplicator.cpp: Handle empty tags
+
+2017-05-13  tino
+
+	* [r12194] ChangeLog, src/version.hpp: Forgot to bump binary rev
+	  with floating point support
+	* [r12193] src/GrammarApplicator_runRules.cpp: Correct iterator
+	  after RemCohort
+
+2017-05-11  tino
+
+	* [r12190] CMakeLists.txt, manual/cmdreference.xml,
+	  manual/probabilistic.xml, manual/tags.xml,
+	  src/BinaryGrammar_read.cpp, src/BinaryGrammar_read_10043.cpp,
+	  src/BinaryGrammar_write.cpp, src/Cohort.cpp, src/Cohort.hpp,
+	  src/FSTApplicator.cpp, src/GrammarApplicator_matchSet.cpp,
+	  src/Tag.cpp, src/Tag.hpp, src/inlines.hpp, src/options_conv.hpp:
+	  Switch numeric tags to double-precision floating point; Change
+	  cg-conv to not multiply weights by default; Detect endianness at
+	  build time
+
+2017-05-09  tino
+
+	* [r12189] src/PlaintextApplicator.cpp: Add <cg-conv> tags to
+	  automatic readings to allow easy removal
+	* [r12188] src/PlaintextApplicator.cpp,
+	  src/PlaintextApplicator.hpp, src/cg_conv.cpp,
+	  src/options_conv.hpp: Option --add-tags to minimally analyze
+	  plain text input, defaults to off
+
+2017-05-07  tino
+
+	* [r12187] README.md, manual/installation.xml,
+	  manual/man/cg-comp.1, manual/man/cg-conv.1, manual/man/cg-proc.1,
+	  manual/man/cg3-autobin.pl.1, manual/man/vislcg3.1: https links;
+	  Point to nightly builds instead
+
+2017-05-03  tino
+
+	* [r12178] src/GrammarApplicator.hpp,
+	  src/GrammarApplicator_runContextualTest.cpp: pS incorrectly acted
+	  as p*
+
+2017-04-28  tino
+
+	* [r12161] src/GrammarApplicator.hpp,
+	  src/GrammarApplicator_runContextualTest.cpp: dep_deep_seen now a
+	  composite key
+
+2017-04-25  tino
+
+	* [r12146] src/sorted_vector.hpp: Committed wrong file before
+	* [r12145] manual/rules.xml, src/GrammarApplicator_runRules.cpp,
+	  src/parser_helpers.hpp, test/T_Movement/expected.txt,
+	  test/T_Movement/input.txt: Completely revamp how Move WithChild
+	  is done to satisfy tree thinking
+
+2017-04-21  tino
+
+	* [r12139] src/GrammarApplicator_runRules.cpp: If Move does
+	  nothing, don't mark state as dirty
+	* [r12135] src/GrammarApplicator_matchSet.cpp: Don't crash on
+	  capturing patterns in dep targets
+	* [r12134] src/GrammarApplicator_runRules.cpp,
+	  test/T_Movement/expected.txt, test/T_Movement/grammar.cg3,
+	  test/T_Movement/grammar.cg3b.10043, test/T_Movement/input.txt:
+	  Clear non-match cache on Repeat
+
+2017-04-20  unhammer
+
+	* [r12133] src/ApertiumApplicator.cpp: fix cg-proc -z again
+	  
+	  runningWithNullFlush isn't set until
+	  runGrammarOnTextWrapperNullFlush
+	* [r12132] test/Apertium/T_Flush,
+	  test/Apertium/T_Flush/expected.txt,
+	  test/Apertium/T_Flush/grammar.cg3,
+	  test/Apertium/T_Flush/input.txt, test/Apertium/T_Flush/run.pl:
+	  test cg-proc -z (NUL-flushing with timeout)
+
+2017-04-19  tino
+
+	* [r12131] manual/rules.xml: Document AddCohort WithChild
+	* [r12130] src/GrammarApplicator_runRules.cpp,
+	  src/TextualParser.cpp, test/T_RemCohort/expected.txt,
+	  test/T_RemCohort/grammar.cg3,
+	  test/T_RemCohort/grammar.cg3b.10043, test/T_RemCohort/input.txt:
+	  Allow WithChild for AddCohort
+
+2017-04-17  tino
+
+	* [r12128] src/ApertiumApplicator.cpp, src/ApertiumApplicator.hpp:
+	  Count lines correctly; Add cohort count to aid finding the error
+	  location
+
+2017-04-06  tino
+
+	* [r12125] src/GrammarApplicator_runRules.cpp: Only prevent edges'
+	  children from being moved if there's a conflict
+	* [r12120] src/GrammarApplicator_runRules.cpp: Prevent edges'
+	  children from being moved
+	* [r12119] src/GrammarApplicator_runRules.cpp: Prevent edges from
+	  being moved at all; Bail out if invalid moves are detected.
+
+2017-04-05  tino
+
+	* [r12114] src/GrammarApplicator_runRules.cpp: min() cast
+	* [r12113] src/GrammarApplicator_runRules.cpp: Warn about
+	  move/switch accidents
+	* [r12112] src/GrammarApplicator.hpp,
+	  src/GrammarApplicator_runRules.cpp, src/inlines.hpp,
+	  src/main.cpp, src/options.hpp: Add optional range to --trace that
+	  will stop execution if hit
+	* [r12111] src/GrammarApplicator_runContextualTest.cpp,
+	  src/GrammarApplicator_runRules.cpp, src/bloomish.hpp,
+	  src/istream.hpp: Visual Studio 2017 silently added the retarded
+	  UTF-8 BOM
+
+2017-04-04  tino
+
+	* [r12108] src/GrammarApplicator_runContextualTest.cpp,
+	  test/runall.pl: Fix left/right dependencies after movement; VS15
+
+2017-04-03  tino
+
+	* [r12098] src/GrammarApplicator_runRules.cpp, src/bloomish.hpp,
+	  src/istream.hpp: fill() casts
+	* [r12095] src/GrammarApplicator_runRules.cpp: Move Withchild
+	  should still move the target even if no children match, and ditto
+	  edges
+
+2017-03-08  tino
+
+	* [r12046] src/options.hpp: +used
+
+2017-02-20  tino
+
+	* [r12010] src/Grammar.cpp, src/Grammar.hpp, src/TextualParser.cpp:
+	  Let actualizing set operators work on more complex sets (but not
+	  really complex sets yet)
+	* [r12009] manual/compatibility.xml: Correction to - gotcha
+	* [r12008] manual/manual.xml, manual/sets.xml, src/AST.hpp,
+	  src/ApertiumApplicator.cpp, src/ApertiumApplicator.hpp,
+	  src/BinaryGrammar.cpp, src/BinaryGrammar.hpp,
+	  src/BinaryGrammar_read.cpp, src/BinaryGrammar_write.cpp,
+	  src/Cohort.cpp, src/Cohort.hpp, src/CohortIterator.cpp,
+	  src/CohortIterator.hpp, src/ContextualTest.cpp,
+	  src/ContextualTest.hpp, src/FSTApplicator.cpp,
+	  src/FSTApplicator.hpp, src/FormatConverter.cpp,
+	  src/FormatConverter.hpp, src/Grammar.cpp, src/Grammar.hpp,
+	  src/GrammarApplicator.cpp, src/GrammarApplicator.hpp,
+	  src/GrammarApplicator_matchSet.cpp,
+	  src/GrammarApplicator_reflow.cpp,
+	  src/GrammarApplicator_runContextualTest.cpp,
+	  src/GrammarApplicator_runGrammar.cpp,
+	  src/GrammarApplicator_runRules.cpp, src/GrammarWriter.cpp,
+	  src/GrammarWriter.hpp, src/IGrammarParser.hpp,
+	  src/MatxinApplicator.cpp, src/MatxinApplicator.hpp,
+	  src/MweSplitApplicator.cpp, src/MweSplitApplicator.hpp,
+	  src/NicelineApplicator.cpp, src/NicelineApplicator.hpp,
+	  src/PlaintextApplicator.cpp, src/PlaintextApplicator.hpp,
+	  src/Reading.cpp, src/Reading.hpp, src/Relabeller.cpp,
+	  src/Relabeller.hpp, src/Rule.cpp, src/Rule.hpp, src/Set.cpp,
+	  src/Set.hpp, src/SingleWindow.cpp, src/SingleWindow.hpp,
+	  src/Strings.cpp, src/Strings.hpp, src/Tag.cpp, src/Tag.hpp,
+	  src/TagTrie.hpp, src/TextualParser.cpp, src/TextualParser.hpp,
+	  src/Window.cpp, src/Window.hpp, src/bloomish.hpp,
+	  src/cg-mwesplit.cpp, src/cg-relabel.cpp, src/cg3.h,
+	  src/cg_comp.cpp, src/cg_conv.cpp, src/cg_proc.cpp,
+	  src/flat_unordered_map.hpp, src/flat_unordered_set.hpp,
+	  src/inlines.hpp, src/interval_vector.hpp, src/istream.hpp,
+	  src/libcg3.cpp, src/main.cpp, src/options.hpp,
+	  src/options_conv.hpp, src/parser_helpers.hpp, src/process.hpp,
+	  src/scoped_stack.hpp, src/sorted_vector.hpp, src/stdafx.hpp,
+	  src/test_libcg3.c, src/uextras.cpp, src/uextras.hpp,
+	  src/version.hpp, test/T_SetOps/expected.txt,
+	  test/T_SetOps/grammar.cg3, test/T_SetOps/grammar.cg3b.10043:
+	  Added a true mathematical set difference operator \ and renamed
+	  the existing - operator to Except
+	* [r12007] TODO, src/GrammarApplicator_matchSet.cpp,
+	  src/GrammarApplicator_runContextualTest.cpp: Work around OR'ed
+	  tests followed by OR'ed tests
+
+2017-02-16  tino
+
+	* [r12003] manual/tags.xml: () does not need escaping
+
+2017-02-01  unhammer
+
+	* [r11990] emacs/cg.el: :type's for some defcustom's
+	* [r11989] emacs/cg.el: lexical-binding: t; 0.3.0
+	  
+	  and underscore some unused-but-required vars
+
+2016-12-23  tino
+
+	* [r11928] src/TextualParser.cpp: Error on stand-alone o and O
+
+2016-12-19  tino
+
+	* [r11924] cmake.sh: Env tweak
+	* [r11923] cmake.sh, scripts/CG3_External.pm, scripts/external.pl,
+	  scripts/external_text.pl, test/T_External/run.pl,
+	  test/T_MweSplit/run.pl, test/T_RelabelList/run.pl,
+	  test/T_RelabelList_Apertium/run.pl, test/T_RelabelSet/run.pl,
+	  test/T_SubReadings_Apertium/run.pl, test/clean.sh,
+	  test/runall.pl: Use env to find handlers
+
+2016-11-30  tino
+
+	* [r11902] src/GrammarWriter.cpp: Better handling of binary
+	  grammars, w.r.t. sub-readings and (soft-)delimiters
+	* [r11901] src/GrammarApplicator_runRules.cpp,
+	  src/GrammarWriter.cpp, src/main.cpp: Allow writing out binary
+	  grammars by giving sets sequential names
+
+2016-11-17  tino
+
+	* [r11883] src/GrammarApplicator_runGrammar.cpp: Less verbose
+
+2016-10-31  tino
+
+	* [r11861] src/GrammarApplicator_runGrammar.cpp: Handle malformed
+	  data that has U+0085 instead of ellipsis
+	* [r11860] TODO, src/GrammarApplicator_runRules.cpp,
+	  test/T_JumpExecute/expected.txt: Fix Jump skipping rules
+
+2016-10-13  tino
+
+	* [r11783] src/GrammarApplicator_runRules.cpp: Trace SUB:*
+
+2016-10-10  tino
+
+	* [r11775] src/GrammarApplicator_runRules.cpp: Fix segfault
+
+2016-08-10  tino
+
+	* [r11705] src/GrammarApplicator_runRules.cpp, src/Rule.hpp: Coding
+	  style
+	* [r11704] clang-format.pl, scripts/profile-revisions.php: Scripts
+
+2016-08-01  unhammer
+
+	* [r11703] emacs/cg.el: commentary, todo's
+
+2016-07-25  tino
+
+	* [r11687] scripts/cg3-autobin.pl.in: Digest::SHA1 is dead and
+	  gone, use Digest::SHA
+
+2016-07-21  tino
+
+	* [r11682] CMakeLists.txt, src/CMakeLists.txt: Use LTO if available
+
+2016-07-14  tino
+
+	* [r11677] ChangeLog, TODO, manual/rules.xml,
+	  src/GrammarApplicator_runRules.cpp, src/Rule.hpp,
+	  src/Strings.cpp, src/Strings.hpp, src/version.hpp,
+	  test/T_SubstituteNil/grammar.cg3,
+	  test/T_SubstituteNil/grammar.cg3b.10043,
+	  test/T_SubstituteNil/input.txt: Implement and document rule flag
+	  REPEAT
+
+2016-07-13  unhammer
+
+	* [r11674] emacs/cg.el: `C-c [a-z]' reserved for user bindings
+	* [r11673] emacs/cg.el: derive cg-mode from prog-mode, less
+	  boilerplate
+	  
+	  also enables company-dabbrev-code which gives better completions
+
+2016-06-23  tino
+
+	* [r11656] ChangeLog, src/GrammarApplicator_runRules.cpp,
+	  src/version.hpp, test/T_MweSplit/run.pl: Correctly fail trying to
+	  get negative sub-reading when there are none; Fix MWE test to
+	  ignore ^M due to piping artifacts
+
+2016-06-07  tino
+
+	* [r11630] src/GrammarApplicator_runRules.cpp: Endless loop print
+	  window
+
+2016-06-06  tino
+
+	* [r11628] src/GrammarApplicator_runRules.cpp,
+	  src/MweSplitApplicator.cpp: More endless loop prevention
+
+2016-05-26  tino
+
+	* [r11624] src/MweSplitApplicator.cpp, src/MweSplitApplicator.hpp,
+	  src/cg-mwesplit.cpp: Formatting
+
+2016-05-26  unhammer
+
+	* [r11623] src/CMakeLists.txt, src/MweSplitApplicator.cpp,
+	  src/MweSplitApplicator.hpp, src/cg-mwesplit.cpp, test/T_MweSplit,
+	  test/T_MweSplit/expected.txt, test/T_MweSplit/input.txt,
+	  test/T_MweSplit/run.pl: cg-mwesplit: new binary for splitting
+	  multiwords
+	  
+	  like https://github.com/unhammer/cg-mwesplit but rewritten to
+	  inherit GrammarApplicator and use the standard vislcg3 parsing.
+	  
+	  Takes no options, just stdin and stdout:
+	  
+	  $ cg-mwesplit < infile > outfile
+	  
+	  More typically, it’ll be in a pipeline after hfst-tokenise and
+	  some step that disambiguates multiwords using vislcg3:
+	  
+	  $ echo words go here | hfst-tokenise --gtd tokeniser.pmhfst |
+	  vislcg3 -g mwe-dis.cg3 | cg-mwesplit
+
+2016-05-25  tino
+
+	* [r11621] ChangeLog, src/version.hpp: Let's call this a release
+
 2016-05-24  tino
 
 	* [r11620] src/Cohort.cpp, src/Cohort.hpp, src/CohortIterator.cpp,
@@ -1371,498 +1827,3 @@
 	* [r10047] emacs/cg.el: more rule flags to highlight
 	* [r10046] emacs/cg.el: hl UNMAP UNSAFE
 
-2014-07-15  tino
-
-	* [r10045] src/version.hpp: Bump binary grammar revision
-
-2014-07-14  tino
-
-	* [r10044] src/all_vislcg3.cpp: Eliminate CompositeTag
-	* [r10043] ChangeLog, TODO, src/BinaryGrammar_read.cpp,
-	  src/BinaryGrammar_write.cpp, src/CMakeLists.txt, src/Cohort.cpp,
-	  src/CompositeTag.cpp, src/CompositeTag.hpp, src/Grammar.cpp,
-	  src/Grammar.hpp, src/GrammarApplicator.cpp,
-	  src/GrammarApplicator.hpp, src/GrammarApplicator_matchSet.cpp,
-	  src/GrammarApplicator_runRules.cpp, src/GrammarWriter.cpp,
-	  src/Set.cpp, src/Set.hpp, src/Tag.hpp, src/TagTrie.hpp,
-	  src/TextualParser.cpp, src/TextualParser.hpp, src/main.cpp,
-	  src/version.hpp, test/T_Unification/expected.txt: Use a trie for
-	  tag matching (6% faster normal, 70% faster worst); Eliminate
-	  CompositeTag; Bump binary revision
-
-2014-07-13  tino
-
-	* [r10038] src/GrammarApplicator_matchSet.cpp: Unification of plain
-	  tags don't need special handling
-	* [r10037] src/Grammar.cpp, src/Set.hpp, src/Tag.hpp: Minor stuff
-
-2014-07-11  tino
-
-	* [r10036] scripts/profile-revisions-tally.php,
-	  scripts/profile-revisions.php, src/TextualParser.cpp: Flush;
-	  Profiling
-	* [r10035] src/Grammar.cpp, src/GrammarApplicator.hpp,
-	  src/GrammarApplicator_matchSet.cpp,
-	  src/GrammarApplicator_runRules.cpp, src/stdafx.hpp: Eliminate
-	  uint32Set in favour of uint32SortedVector
-	* [r10034] ChangeLog, src/CMakeLists.txt, src/version.hpp: Flat is
-	  good
-	* [r10033] src/BinaryGrammar_write.cpp, src/Grammar.cpp,
-	  src/Grammar.hpp, src/GrammarApplicator.hpp,
-	  src/GrammarApplicator_reflow.cpp,
-	  src/GrammarApplicator_runGrammar.cpp,
-	  src/GrammarApplicator_runRules.cpp, src/GrammarWriter.hpp,
-	  src/SingleWindow.cpp, src/SingleWindow.hpp, src/Window.hpp,
-	  src/flat_unordered_map.hpp, src/flat_unordered_set.hpp,
-	  src/stdafx.hpp: More flat unordered
-	* [r10032] src/Grammar.cpp, src/GrammarApplicator.cpp, src/Tag.hpp,
-	  src/flat_unordered_map.hpp, src/flat_unordered_set.hpp: New
-	  flat_unordered_map to hopefully reduce allocations
-	* [r10029] TODO, src/CohortIterator.cpp, src/CompositeTag.hpp,
-	  src/Grammar.cpp, src/Grammar.hpp,
-	  src/GrammarApplicator_matchSet.cpp,
-	  src/GrammarApplicator_runRules.cpp, src/Tag.hpp,
-	  src/TextualParser.cpp, src/sorted_vector.hpp, win32/libgen.c:
-	  Eliminate TagSet in favour of TagSortedVector
-
-2014-06-22  tino
-
-	* [r10016] ChangeLog, src/ApertiumApplicator.cpp,
-	  src/GrammarApplicator.hpp, src/GrammarApplicator_matchSet.cpp,
-	  src/GrammarApplicator_runRules.cpp, src/version.hpp: Double cache
-	  sizes => double runtimes, so undo that
-
-2014-06-17  tino
-
-	* [r10007] ChangeLog, src/version.hpp: Forgot revision
-	* [r10006] src/ApertiumApplicator.cpp, src/BinaryGrammar_read.cpp,
-	  src/BinaryGrammar_write.cpp, src/Cohort.hpp,
-	  src/FSTApplicator.cpp, src/Grammar.cpp, src/Grammar.hpp,
-	  src/GrammarApplicator.cpp, src/GrammarApplicator.hpp,
-	  src/GrammarApplicator_matchSet.cpp,
-	  src/GrammarApplicator_reflow.cpp,
-	  src/GrammarApplicator_runGrammar.cpp,
-	  src/GrammarApplicator_runRules.cpp, src/GrammarWriter.cpp,
-	  src/NicelineApplicator.cpp, src/PlaintextApplicator.cpp,
-	  src/Reading.cpp, src/Reading.hpp, src/Rule.hpp,
-	  src/TextualParser.cpp, src/libcg3.cpp, src/main.cpp,
-	  src/options.hpp, test/T_BasicSubstitute/expected.txt,
-	  test/T_BasicSubstitute/grammar.cg3: Allow modifiers i, r, ri on
-	  rule wordforms; Fix a bug where Substituted wordforms did not
-	  update the valid rules list
-	* [r10005] src/ApertiumApplicator.cpp: Better logic for printing /
-	* [r10004] ChangeLog, TODO, src/ApertiumApplicator.cpp,
-	  src/GrammarApplicator.hpp, src/GrammarApplicator_matchSet.cpp,
-	  src/GrammarApplicator_runRules.cpp, src/flat_unordered_set.hpp,
-	  src/version.hpp: cg-proc -t output ¬ before delayed/deleted
-	  readings; Embiggen some caches to eliminate collisions
-
-2014-06-10  tino
-
-	* [r9987] src/Rule.hpp, src/TextualParser.cpp: Detect ENCL_*
-	  overlap by counting bits
-	* [r9985] src/TextualParser.cpp: Throw an error for malformed /X
-	  positions
-
-2014-06-09  tino
-
-	* [r9984] manual/subreadings.xml: Document /*
-
-2014-06-07  tino
-
-	* [r9974] ChangeLog, src/ContextualTest.hpp,
-	  src/GrammarApplicator.hpp, src/TextualParser.cpp,
-	  src/version.hpp, test/T_SubReadings_CG/expected.txt,
-	  test/T_SubReadings_CG/grammar.cg3: Allow /* in contextual tests
-	  to look at any sub-reading
-
-2014-06-04  unhammer
-
-	* [r9959] emacs/cg.el: list of ADD/REMOVE/etc. only defined one
-	  place
-
-2014-05-30  tino
-
-	* [r9944] manual/installation.xml: Homebrew workaround
-
-2014-05-28  tino
-
-	* [r9942] debian, dist: Moved packaging scripts
-	* [r9941] ChangeLog, src/GrammarApplicator_runContextualTest.cpp,
-	  src/version.hpp: Fix @-N
-
-2014-05-27  tino
-
-	* [r9940] dist/build-debian-ubuntu.sh, dist/dist-debian-ubuntu.pl:
-	  Build script for nightly fully automatic publishing
-	* [r9939] ChangeLog, src/CMakeLists.txt, src/version.hpp: rpath
-	  changes for *nix to allow for relocatable packages on all
-	  platforms
-	* [r9938] src/CMakeLists.txt: Fix OS X @rpath to allow testing
-
-2014-05-26  tino
-
-	* [r9937] src/CMakeLists.txt: Fix OS X to use @rpath for finding
-	  libcg3-private.dylib
-
-2014-05-21  tino
-
-	* [r9934] dist/dist-debian-ubuntu.pl: Produce identical .tar.bz2
-	  across packaging changes
-	* [r9933] CMakeLists.txt, ChangeLog, dist/dist-debian-ubuntu.pl,
-	  src/version.hpp: Fix pkgconfig; Unlink before write
-	* [r9932] CMakeLists.txt, debian/cg3.install: Install cg.el to
-	  global cg3-mode.el
-	* [r9931] debian/control: Dependency resolution is weird
-
-2014-05-20  tino
-
-	* [r9930] ChangeLog, src/version.hpp: Version bump, 'cause
-	  Launchpad
-	* [r9929] debian/control, dist/dist-debian-ubuntu.pl: Version
-	  trickery
-	* [r9928] CMake/DebSourcePPA.cmake, CMakeLists.txt,
-	  dist/dist-debian-ubuntu.pl: Better Debian/Ubuntu packaging
-	  script; CMake 2.8.9+ required
-	* [r9927] CMakeLists.txt, ChangeLog, scripts/cg3-autobin.pl,
-	  scripts/cg3-autobin.pl.in, src/version.hpp, update-revision.pl:
-	  Set cg3-autobin.pl revision during build instead of manually
-	* [r9926] CMakeLists.txt, debian/control, debian/libcg3-0.install,
-	  debian/libcg3-0.symbols, debian/libcg3-dev.install,
-	  src/CMakeLists.txt: Multiarch support
-	* [r9925] CMakeLists.txt, ChangeLog, scripts/cg3-autobin.pl,
-	  src/Cohort.cpp, src/GrammarApplicator.hpp,
-	  src/GrammarApplicator_reflow.cpp,
-	  src/GrammarApplicator_runGrammar.cpp, src/Reading.cpp,
-	  src/version.hpp, test/T_RemCohort/expected.txt: Handle readings
-	  with multiple mappings and sub-readings; Drop Ubuntu 12.10 from
-	  PPA
-
-2014-05-19  tino
-
-	* [r9924] debian/control, debian/libcg3-0.symbols: Symbols
-	* [r9923] CMakeLists.txt, debian/changelog, debian/control,
-	  debian/libcg3-0.install, src/CMakeLists.txt, src/libcg3.cpp:
-	  CMake version, NO_SONAME, visibility
-	* [r9922] src/CMakeLists.txt: Split libcg3 further into a private
-	  and public part
-
-2014-05-12  tino
-
-	* [r9899] CMakeLists.txt, debian/cg3.install: Install man pages,
-	  now that they're up-to-date
-
-2014-05-10  tino
-
-	* [r9897] cg3.pc.in: Revert -0
-	* [r9896] cg3.pc.in, debian/changelog, debian/control, debian/docs,
-	  debian/watch: Debian fixes
-
-2014-05-08  tino
-
-	* [r9894] cmake.sh, manual/installation.xml,
-	  scripts/profile-revisions.php: Document that ldconfig is needed
-
-2014-05-07  tino
-
-	* [r9893] debian/control, debian/libcg3-0.lintian-overrides,
-	  src/CMakeLists.txt: Make soname match package name, since that
-	  depends on public C API version
-	* [r9892] ChangeLog, LICENSE, debian/cg3.install, debian/cg3.links,
-	  debian/cg3.lintian-overrides, debian/changelog, debian/control,
-	  debian/libcg3-0.install, debian/libcg3-0.lintian-overrides,
-	  debian/libcg3-0.postinst, debian/libcg3.install,
-	  debian/libcg3.postinst, manual/man, manual/man/cg-comp.1,
-	  manual/man/cg-conv.1, manual/man/cg-proc.1,
-	  manual/man/cg3-autobin.pl.1, manual/man/vislcg3.1,
-	  scripts/cg3-autobin.pl, src/CMakeLists.txt, src/cg-comp.1,
-	  src/cg-proc.1, src/version.hpp, src/vislcg3.1: Man pages,
-	  symlinks, overrides, oh my...
-
-2014-05-06  tino
-
-	* [r9891] debian, debian/cg3.install, debian/changelog,
-	  debian/compat, debian/control, debian/copyright,
-	  debian/libcg3-dev.install, debian/libcg3.install,
-	  debian/libcg3.postinst, debian/rules, debian/source,
-	  debian/source/format, src/CMakeLists.txt: Debian packaging is a
-	  nightmare
-	* [r9890] CMakeLists.txt: Force -fPIC for ancient setups
-	* [r9889] CMakeLists.txt, TODO, src/CMakeLists.txt: Build objects
-	  once and reuse for static/shared library
-
-2014-04-25  tino
-
-	* [r9873] src/stdafx.hpp: cycle.h doesn't know all platforms (such
-	  as ARM), so fall back on clock()
-
-2014-04-20  tino
-
-	* [r9867] todo.sh: Wipe TODO.list before regenerate
-
-2014-04-18  tino
-
-	* [r9864] CMakeLists.txt, ChangeLog, manual/installation.xml,
-	  scripts/cg3-autobin.pl, src/version.hpp: Ubuntu 14.04 has ICU 52,
-	  so depend on that instead of 4.8.
-
-2014-04-10  tino
-
-	* [r9853] cmake.sh: cmake.sh now accepts --prefix X and --prefix=X
-
-2014-04-08  tino
-
-	* [r9848] src/cg_proc.cpp: Clean up flags to be bool instead of
-	  int, and just use them directly.
-
-2014-04-02  tino
-
-	* [r9840] ChangeLog, manual/installation.xml,
-	  scripts/cg3-autobin.pl, src/GrammarApplicator_matchSet.cpp,
-	  src/GrammarApplicator_reflow.cpp,
-	  src/GrammarApplicator_runContextualTest.cpp,
-	  src/TextualParser.cpp, src/version.hpp: Fix context S to actually
-	  test self, even if offset is non-zero
-
-2014-03-24  tino
-
-	* [r9815] ChangeLog, manual/dependencies.xml,
-	  scripts/cg3-autobin.pl, src/GrammarApplicator_matchSet.cpp,
-	  src/GrammarApplicator_runRules.cpp, src/main.cpp,
-	  src/version.hpp, test/T_RegExp/grammar.cg3: Fix parsing non-UTF-8
-	  cmdline args; Fix readings' plain cache being used with special
-	  sets
-
-2014-03-11  tino
-
-	* [r9768] src/CMakeLists.txt: make test
-
-2014-03-07  tino
-
-	* [r9765] src/GrammarWriter.cpp: Remember subreadings and
-	  static-tags in textual writer
-	* [r9764] ChangeLog, TODO, scripts/cg3-autobin.pl,
-	  src/GrammarApplicator.hpp,
-	  src/GrammarApplicator_runContextualTest.cpp,
-	  src/GrammarApplicator_runRules.cpp, src/version.hpp,
-	  test/T_BasicDependency/grammar.cg3: Barriers now work for c*
-	  tests
-
-2014-03-06  unhammer
-
-	* [r9763] emacs/cg.el: todo
-
-2014-03-06  tino
-
-	* [r9760] ChangeLog, manual/rules.xml, scripts/cg3-autobin.pl,
-	  src/GrammarApplicator.cpp, src/GrammarApplicator.hpp,
-	  src/GrammarApplicator_runGrammar.cpp,
-	  src/GrammarApplicator_runRules.cpp, src/Rule.hpp,
-	  src/SingleWindow.hpp, src/Strings.cpp, src/Strings.hpp,
-	  src/Window.cpp, src/version.hpp, test/T_Variables/expected.txt,
-	  test/T_Variables/input.txt: Set/RemVariable now optionally takes
-	  OUTPUT to commit the changes to the stream.
-
-2014-03-04  tino
-
-	* [r9759] ChangeLog, TODO, manual/dependencies.xml,
-	  scripts/cg3-autobin.pl, src/CMakeLists.txt,
-	  src/CohortIterator.cpp, src/CohortIterator.hpp,
-	  src/ContextualTest.hpp, src/GrammarApplicator.cpp,
-	  src/GrammarApplicator.hpp,
-	  src/GrammarApplicator_runContextualTest.cpp,
-	  src/TextualParser.cpp, src/version.hpp,
-	  test/T_BasicDependency/expected.txt,
-	  test/T_BasicDependency/grammar.cg3: Implement ancestor search
-	  with 'pp'; Made right/left operators work with p, *p, pp; CMake
-	  bails out of Boost is older than 1.48.0
-
-2014-02-18  tino
-
-	* [r9709] newsletters/2014-02-18.txt: Newsletter
-	* [r9708] ChangeLog, scripts/cg3-autobin.pl, src/version.hpp: CG-3
-	  Release 0.9.8.9708
-
-2014-02-17  tino
-
-	* [r9707] src/BinaryGrammar_read.cpp, src/BinaryGrammar_write.cpp,
-	  src/version.hpp: Track binary format separately from main
-	  revision - remember to bump CG3_FEATURE_REV when adding a new
-	  feature.
-
-2014-02-12  tino
-
-	* [r9705] CMakeLists.txt, ChangeLog, TODO, scripts/cg3-autobin.pl,
-	  src/ApertiumApplicator.cpp, src/version.hpp: Apertium unique_tags
-	  check; Remove /GR- for VC++
-
-2014-02-03  tino
-
-	* [r9692] CMakeLists.txt, scripts/cg3-autobin.pl,
-	  src/CMakeLists.txt, src/version.hpp: Disable TCMalloc for OS X
-
-2014-02-02  tino
-
-	* [r9690] ChangeLog, scripts/cg3-autobin.pl, src/version.hpp:
-	  Revision bump
-
-2014-01-29  tino
-
-	* [r9684] CMakeLists.txt: PPAs can now be built for Ubuntu 14.04
-
-2014-01-28  tino
-
-	* [r9668] CMakeLists.txt: Ubuntu 13.04 is EOL, so drop from PPA
-
-2014-01-22  tino
-
-	* [r9647] src/options.hpp: Default has been UTF-8 for ages - better
-	  say so here as well.
-	* [r9646] scripts/profile-revisions.php, vapply.sh, vparse.sh:
-	  NDEBUG
-
-2014-01-21  tino
-
-	* [r9645] CMakeLists.txt, ChangeLog, scripts/cg3-autobin.pl,
-	  src/ApertiumApplicator.cpp, src/CMakeLists.txt, src/Cohort.hpp,
-	  src/CompositeTag.cpp, src/ContextualTest.cpp, src/Grammar.cpp,
-	  src/Grammar.hpp, src/GrammarApplicator.cpp,
-	  src/GrammarApplicator.hpp, src/GrammarApplicator_matchSet.cpp,
-	  src/GrammarApplicator_reflow.cpp,
-	  src/GrammarApplicator_runRules.cpp, src/Reading.cpp,
-	  src/Rule.hpp, src/Set.cpp, src/Tag.cpp, src/TextualParser.cpp,
-	  src/flat_unordered_set.hpp, src/inlines.hpp, src/libcg3.cpp,
-	  src/version.hpp: Added flat_unordered_set to reduce memory usage;
-	  Set NDEBUG for Release builds; Bumped binary revision
-
-2014-01-20  tino
-
-	* [r9640] src/GrammarApplicator.hpp,
-	  src/GrammarApplicator_matchSet.cpp,
-	  src/GrammarApplicator_runRules.cpp, src/interval_vector.hpp:
-	  Minor cleanup
-
-2014-01-14  tino
-
-	* [r9637] CMakeLists.txt, ChangeLog, scripts/cg3-autobin.pl,
-	  src/version.hpp: Explicitly depend on Boost 1.48.0 for Ubuntu
-	  12.04
-	* [r9635] ChangeLog, manual/grammar.xml, scripts/cg3-autobin.pl,
-	  src/Cohort.cpp, src/Cohort.hpp, src/CompositeTag.hpp,
-	  src/Grammar.cpp, src/GrammarApplicator.cpp,
-	  src/GrammarApplicator.hpp, src/GrammarApplicator_matchSet.cpp,
-	  src/GrammarApplicator_reflow.cpp,
-	  src/GrammarApplicator_runContextualTest.cpp,
-	  src/GrammarApplicator_runRules.cpp, src/NicelineApplicator.cpp,
-	  src/Reading.hpp, src/Set.cpp, src/Set.hpp, src/Tag.hpp,
-	  src/TextualParser.cpp, src/libcg3.cpp, src/sorted_vector.hpp,
-	  src/stdafx.hpp, src/version.hpp, test/T_Unification/expected.txt,
-	  test/T_Unification/grammar.cg3, test/T_Unification/input.txt: Fix
-	  unification (11% slower); Use more Boost flat_map/set and
-	  sorted_vector (8% faster)
-
-2014-01-09  tino
-
-	* [r9595] ChangeLog, scripts/cg3-autobin.pl, src/version.hpp:
-	  Revision bump
-	* [r9594] test/T_AnyMinusSome/run.pl, test/T_Barrier/run.pl,
-	  test/T_BasicAppend/run.pl, test/T_BasicContextTest/run.pl,
-	  test/T_BasicDelimit/run.pl, test/T_BasicDependency/run.pl,
-	  test/T_BasicIff/run.pl, test/T_BasicSelect/run.pl,
-	  test/T_CG2Compat/run.pl, test/T_CarefulBarrier/run.pl,
-	  test/T_DelayAndDelete/run.pl, test/T_Dependency_Loops/run.pl,
-	  test/T_Dependency_OutOfRange/run.pl,
-	  test/T_DontMatchEmptySet/run.pl, test/T_EndlessSelect/run.pl,
-	  test/T_External/run.pl, test/T_InputCommands/run.pl,
-	  test/T_InputMarkup/run.pl, test/T_JumpExecute/run.pl,
-	  test/T_MapAdd_Different/run.pl, test/T_MapThenRemove/run.pl,
-	  test/T_MapThenSelect/run.pl, test/T_MappingPrefix/run.pl,
-	  test/T_Movement/run.pl, test/T_MultipleSections/run.pl,
-	  test/T_NegatedContextTest/run.pl, test/T_NotContextTest/run.pl,
-	  test/T_NumericalTags/run.pl, test/T_OmniWithBarrier/run.pl,
-	  test/T_Omniscan/run.pl, test/T_OriginPassing/run.pl,
-	  test/T_Parentheses/run.pl, test/T_RegExp/run.pl,
-	  test/T_Relations/run.pl, test/T_RemCohort/run.pl,
-	  test/T_RemoveSingleTag/run.pl, test/T_ScanningTests/run.pl,
-	  test/T_SectionRanges/run.pl, test/T_Sections/run.pl,
-	  test/T_SetOp_FailFast/run.pl, test/T_SetOps/run.pl,
-	  test/T_SetParentChild/run.pl, test/T_SoftDelimiters/run.pl,
-	  test/T_SpaceInForms/run.pl, test/T_SubReadings_Apertium/run.pl,
-	  test/T_SubReadings_CG/run.pl, test/T_SubstituteNil/run.pl,
-	  test/T_Templates/run.pl, test/T_Trace/run.pl,
-	  test/T_Unification/run.pl, test/T_Variables/run.pl: No need for
-	  -b on diff any longer
-	* [r9593] src/GrammarApplicator.cpp,
-	  test/T_AnyMinusSome/expected.txt, test/T_AnyMinusSome/run.pl,
-	  test/T_Barrier/expected.txt, test/T_Barrier/run.pl,
-	  test/T_BasicContextTest/expected.txt,
-	  test/T_BasicContextTest/run.pl, test/T_BasicDelimit/expected.txt,
-	  test/T_BasicDelimit/run.pl, test/T_BasicDependency/expected.txt,
-	  test/T_BasicDependency/run.pl, test/T_BasicIff/expected.txt,
-	  test/T_BasicIff/run.pl, test/T_BasicSelect/expected.txt,
-	  test/T_BasicSelect/run.pl, test/T_CG2Compat/expected.txt,
-	  test/T_CG2Compat/run.pl, test/T_CarefulBarrier/expected.txt,
-	  test/T_CarefulBarrier/run.pl,
-	  test/T_Dependency_Loops/expected.txt,
-	  test/T_Dependency_Loops/run.pl,
-	  test/T_Dependency_OutOfRange/expected.txt,
-	  test/T_Dependency_OutOfRange/run.pl,
-	  test/T_DontMatchEmptySet/expected.txt,
-	  test/T_DontMatchEmptySet/run.pl,
-	  test/T_EndlessSelect/expected.txt, test/T_EndlessSelect/run.pl,
-	  test/T_External/expected.txt, test/T_External/run.pl,
-	  test/T_Include/expected.txt, test/T_InputCommands/expected.txt,
-	  test/T_InputCommands/run.pl, test/T_InputMarkup/expected.txt,
-	  test/T_InputMarkup/run.pl, test/T_JumpExecute/expected.txt,
-	  test/T_JumpExecute/run.pl, test/T_MapAdd_Different/expected.txt,
-	  test/T_MapAdd_Different/run.pl,
-	  test/T_MapThenRemove/expected.txt, test/T_MapThenRemove/run.pl,
-	  test/T_MapThenSelect/expected.txt, test/T_MapThenSelect/run.pl,
-	  test/T_MappingPrefix/expected.txt, test/T_MappingPrefix/run.pl,
-	  test/T_Movement/expected.txt, test/T_Movement/run.pl,
-	  test/T_MultipleSections/expected.txt,
-	  test/T_MultipleSections/run.pl,
-	  test/T_NegatedContextTest/expected.txt,
-	  test/T_NegatedContextTest/run.pl,
-	  test/T_NotContextTest/expected.txt, test/T_NotContextTest/run.pl,
-	  test/T_NumericalTags/expected.txt, test/T_NumericalTags/run.pl,
-	  test/T_OmniWithBarrier/expected.txt,
-	  test/T_OmniWithBarrier/run.pl, test/T_Omniscan/expected.txt,
-	  test/T_Omniscan/run.pl, test/T_OriginPassing/expected.txt,
-	  test/T_OriginPassing/run.pl, test/T_Parentheses/expected.txt,
-	  test/T_Parentheses/run.pl, test/T_RegExp/expected.txt,
-	  test/T_RegExp/run.pl, test/T_Relations/expected.txt,
-	  test/T_Relations/run.pl, test/T_RemoveSingleTag/expected.txt,
-	  test/T_RemoveSingleTag/run.pl, test/T_ScanningTests/expected.txt,
-	  test/T_ScanningTests/run.pl, test/T_SectionRanges/expected.txt,
-	  test/T_SectionRanges/run.pl, test/T_Sections/expected.txt,
-	  test/T_Sections/run.pl, test/T_SetOp_FailFast/expected.txt,
-	  test/T_SetOp_FailFast/run.pl, test/T_SetOps/expected.txt,
-	  test/T_SetOps/run.pl, test/T_SetParentChild/expected.txt,
-	  test/T_SetParentChild/run.pl, test/T_SoftDelimiters/expected.txt,
-	  test/T_SoftDelimiters/run.pl, test/T_SpaceInForms/expected.txt,
-	  test/T_SpaceInForms/run.pl, test/T_SubReadings_Apertium/run.pl,
-	  test/T_SubReadings_CG/expected.txt, test/T_SubReadings_CG/run.pl,
-	  test/T_SubstituteNil/expected.txt, test/T_SubstituteNil/run.pl,
-	  test/T_Templates/expected.txt, test/T_Templates/run.pl,
-	  test/T_Trace/expected.txt, test/T_Trace/run.pl,
-	  test/T_Variables/expected.txt, test/T_Variables/run.pl: Flip
-	  output order so there are no trailing spaces in readings
-	* [r9592] manual/cmdreference.xml, scripts/cg3-autobin.pl,
-	  src/GrammarApplicator.cpp, src/GrammarApplicator.hpp,
-	  src/main.cpp, src/options.hpp: Eliminate --dep-humanize
-
-2014-01-08  tino
-
-	* [r9591] TODO, manual/cmdreference.xml, src/all_cg_conv.cpp,
-	  src/all_cg_proc.cpp, src/cg_proc.cpp: Remove more references to
-	  Matxin
-	* [r9590] ChangeLog, scripts/cg3-autobin.pl, src/CMakeLists.txt,
-	  src/FormatConverter.cpp, src/FormatConverter.hpp,
-	  src/MatxinApplicator.cpp, src/MatxinApplicator.hpp,
-	  src/NicelineApplicator.cpp, src/NicelineApplicator.hpp,
-	  src/PlaintextApplicator.cpp, src/PlaintextApplicator.hpp,
-	  src/cg_conv.cpp, src/cg_proc.cpp, src/main.cpp,
-	  src/options_conv.hpp, src/version.hpp: Removed Matxin; cg-conv
-	  can now output to Niceline and Plain Text formats (-N, -P)
-
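
Several of the added ChangeLog entries above (r12236, r12237, r12242) describe one mechanical C++11 modernisation applied across the sources: BOOST_FOREACH and the foreach() macro become range-for, and BOOST_AUTO becomes auto. A short sketch of that pattern, using made-up types rather than cg3's real ones:

    #include <cstdint>
    #include <vector>

    struct Reading { uint32_t hash = 0; };

    // Old style the entries describe replacing:
    //   BOOST_FOREACH (Reading* r, readings) { BOOST_AUTO(h, r->hash); ... }
    void touch_all(std::vector<Reading*>& readings) {
        for (auto* r : readings) { // BOOST_FOREACH / foreach() -> range-for
            auto h = r->hash;      // BOOST_AUTO -> auto
            (void)h;               // stand-in for the real per-reading work
        }
    }
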
diff --git a/README.md b/README.md
index 1dbfc9a..e4df98d 100644
--- a/README.md
+++ b/README.md
@@ -1,14 +1,14 @@
 [![Build Status](https://travis-ci.org/TinoDidriksen/cg3.svg?branch=master)](https://travis-ci.org/TinoDidriksen/cg3)
 
 See instead:
-- http://visl.sdu.dk/constraint_grammar.html
-- http://visl.sdu.dk/cg3.html
-- http://visl.sdu.dk/cg3/chunked/
+- https://visl.sdu.dk/constraint_grammar.html
+- https://visl.sdu.dk/cg3.html
+- https://visl.sdu.dk/cg3/chunked/
 - manual/
 - http://groups.google.com/group/constraint-grammar
 
 Other links:
-- http://visl.sdu.dk/svn/visl/tools/vislcg3/trunk/
+- https://visl.sdu.dk/svn/visl/tools/vislcg3/trunk/
 - https://en.wikipedia.org/wiki/Constraint_Grammar
 - http://wiki.apertium.org/wiki/Constraint_Grammar
 - http://kevindonnelly.org.uk/2010/05/constraint-grammar-tutorial/
diff --git a/TODO b/TODO
index fd30def..1e0ce28 100644
--- a/TODO
+++ b/TODO
@@ -61,3 +61,7 @@ ToDo: Basque correct parse
 ToDo: Dep on readings
 ToDo: CLINK to require that all paths satisfy the linked tests
 ToDo: Include only certain sections.
+ToDo: Defer resolving sets to allow defining after use.
+ToDo: Turn cg3-autobin.pl into C++ cg-autobin and symlink old name
+ToDo: REMOVE (v) "<\\p{Lu}+>"r ; should not parse.
+ToDo: Warn on probably-wrong tags like <wordform>" and "<wordform">
diff --git a/clang-format.pl b/clang-format.pl
index 40d1014..15dd904 100755
--- a/clang-format.pl
+++ b/clang-format.pl
@@ -32,7 +32,7 @@ foreach my $file (@files) {
   $data =~ s@PRAGMA_ONCE_IFNDEF@#pragma once\n#ifndef@g;
    file_write($file, $data);
 
-   `clang-format-3.9 -style=file -i '$file'`;
+   `clang-format-4.0 -style=file -i '$file'`;
 
    my $data = file_read($file);
   $data =~ s@\n[^\n]*//[^\n]+clang-format (off|on)\n@\n@g; # Remove preprocessor protection
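
For reference, the PRAGMA_ONCE_IFNDEF placeholder above expands back into the "#pragma once" plus "#ifndef" prologue that the substitution restores in the headers; the guard name and the #define/#endif in this rough illustration are the standard companions, not copied from cg3.

    #pragma once
    #ifndef EXAMPLE_GUARD_HPP
    #define EXAMPLE_GUARD_HPP

    // ... declarations ...

    #endif
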
diff --git a/cmake.sh b/cmake.sh
index 8dc8d9b..101ef9b 100755
--- a/cmake.sh
+++ b/cmake.sh
@@ -1,4 +1,5 @@
-#!/bin/bash -e
+#!/usr/bin/env bash
+set -e
 args=()
 
 while [[ $# > 0 ]];
diff --git a/emacs/cg.el b/emacs/cg.el
index cef1076..ebe79f5 100644
--- a/emacs/cg.el
+++ b/emacs/cg.el
@@ -1,9 +1,9 @@
-;;; cg.el --- major mode for editing Constraint Grammar files
+;;; cg.el --- major mode for editing Constraint Grammar files  -*- lexical-binding: t; coding: utf-8 -*-
 
-;; Copyright (C) 2010-2016 Kevin Brubeck Unhammer
+;; Copyright (C) 2010-2017 Kevin Brubeck Unhammer
 
 ;; Author: Kevin Brubeck Unhammer <unhammer at fsfe.org>
-;; Version: 0.2.0
+;; Version: 0.3.0
 ;; Url: http://beta.visl.sdu.dk/constraint_grammar.html
 ;; Keywords: languages
 
@@ -24,6 +24,13 @@
 
 ;;; Commentary:
 
+;; This package provides a major mode for editing Constraint Grammar
+;; source files, including syntax highlighting and interactive grammar
+;; development from within Emacs.  Use `C-c C-i' to edit the text you
+;; want to edit, then just `C-c C-c' whenever you want to run the
+;; grammar over that text.  Clicking on a line number in the trace
+;; output will take you to the definition of that rule.
+
 ;; Usage:
 ;;
 ;; (autoload 'cg-mode "/path/to/cg.el"
@@ -32,18 +39,19 @@
 ;; ; Or if you use a non-standard file suffix, e.g. .rlx:
 ;; (add-to-list 'auto-mode-alist '("\\.rlx\\'" . cg-mode))
 
-;; I recommend using company-mode for tab-completion, and
-;; smartparens-mode if you're used to it (paredit-mode does not work
-;; well if you have set names with the # character in them). Both are
-;; available from MELPA (see http://melpa.milkbox.net/).
+;; I recommend using `company-mode' for tab-completion, and
+;; `smartparens-mode' if you're used to it (`paredit-mode' does not
+;; work well if you have set names with the # character in them). Both
+;; are available from MELPA (see http://melpa.milkbox.net/).
 ;;
 ;; You can lazy-load company-mode for cg-mode like this:
 ;;
 ;; (eval-after-load 'company-autoloads
-;;     (add-hook 'cg-mode-hook #'company-mode))
+;;     '(add-hook 'cg-mode-hook #'company-mode))
 
 
 ;; TODO:
+;; - investigate bug in `show-smartparens-mode' causing slowness
 ;; - different syntax highlighting for sets and tags (difficult)
 ;; - use something like prolog-clause-start to define M-a/e etc.
 ;; - run vislcg3 --show-unused-sets and buttonise with line numbers (like Occur does)
@@ -51,17 +59,14 @@
 ;; - the rest of the keywords
 ;; - http://beta.visl.sdu.dk/cg3/single/#regex-icase
 ;; - keyword tab-completion
-;; - the quotes-within-quotes thing plays merry hell with
-;;   paredit-doublequote, write a new doublequote function?
-;; - font-lock-syntactic-keywords is obsolete since 24.1
-;; - derive cg-mode from prog-mode?
+;; - `font-lock-syntactic-keywords' is obsolete since 24.1
 ;; - goto-set/list
 ;; - show definition of set/list-at-point in modeline
 ;; - show section name/number in modeline
 
 ;;; Code:
 
-(defconst cg-version "0.2.0" "Version of cg-mode.")
+(defconst cg-version "0.3.0" "Version of cg-mode.")
 
 (eval-when-compile (require 'cl))
 (require 'cl-lib)
@@ -258,8 +263,9 @@ Don't change without re-evaluating the file.")
     ;; using syntactic keywords for "
     (modify-syntax-entry ?\" "." table)
     (modify-syntax-entry ?» "." table)
-  (modify-syntax-entry ?« "." table)
-                       table))
+    (modify-syntax-entry ?« "." table)
+    table)
+  "Syntax table for CG mode.")
 
 (defun cg-beginning-of-defun ()
   (re-search-backward defun-prompt-regexp nil 'noerror)
@@ -352,40 +358,35 @@ With a prefix argument N, (un)comment that many rules."
 
 
 ;;;###autoload
-(defun cg-mode ()
+(define-derived-mode cg-mode prog-mode "CG"
   "Major mode for editing Constraint Grammar files.
 
 CG-mode provides the following specific keyboard key bindings:
 
 \\{cg-mode-map}"
-  (interactive)
-  (kill-all-local-variables)
-  (setq major-mode 'cg-mode
-        mode-name "CG")
-  (use-local-map cg-mode-map)
-  (make-local-variable 'comment-start)
-  (make-local-variable 'comment-start-skip)
-  (make-local-variable 'font-lock-defaults)
-  (make-local-variable 'indent-line-function)
-  (setq comment-start "#"
-        comment-start-skip "#+[\t ]*"
-        font-lock-defaults
-        `((cg-font-lock-keywords cg-font-lock-keywords-1 cg-font-lock-keywords-2)
-          nil				; KEYWORDS-ONLY
-          'case-fold ; some keywords (e.g. x vs X) are case-sensitive,
+  :group 'cg
+  ;; Font lock
+  (set (make-local-variable 'font-lock-defaults)
+       `((cg-font-lock-keywords cg-font-lock-keywords-1 cg-font-lock-keywords-2)
+         nil				; KEYWORDS-ONLY
+         'case-fold ; some keywords (e.g. x vs X) are case-sensitive,
                                         ; but that doesn't matter for highlighting
-          ((?/ . "w") (?~ . "w") (?. . "w") (?- . "w") (?_ . "w"))
-          nil ;	  beginning-of-line		; SYNTAX-BEGIN
-          (font-lock-syntactic-keywords . cg-font-lock-syntactic-keywords)
-          (font-lock-syntactic-face-function . cg-font-lock-syntactic-face-function)))
-  (make-local-variable 'cg-mode-syntax-table)
-  (set-syntax-table cg-mode-syntax-table)
+         ((?/ . "w") (?~ . "w") (?. . "w") (?- . "w") (?_ . "w"))
+         nil ;	  beginning-of-line		; SYNTAX-BEGIN
+         (font-lock-syntactic-keywords . cg-font-lock-syntactic-keywords)
+         (font-lock-syntactic-face-function . cg-font-lock-syntactic-face-function)))
+  ;; Indentation
+  (set (make-local-variable 'indent-line-function) #'cg-indent-line)
+  ;; Comments and blocks
+  (set (make-local-variable 'comment-start) "#")
+  (set (make-local-variable 'comment-start-skip) "#+[\t ]*")
+  (set (make-local-variable 'comment-use-syntax) t)
   (set (make-local-variable 'parse-sexp-ignore-comments) t)
   (set (make-local-variable 'parse-sexp-lookup-properties) t)
   (set (make-local-variable 'defun-prompt-regexp) (concat cg-kw-re "\\(?::[^\n\t ]+\\)[\t ]"))
   (set (make-local-variable 'beginning-of-defun-function) #'cg-beginning-of-defun)
   (set (make-local-variable 'end-of-defun-function) #'cg-end-of-defun)
-  (setq indent-line-function #'cg-indent-line)
+
   (when font-lock-mode
     (setq font-lock-set-defaults nil)
     (font-lock-set-defaults)
@@ -393,8 +394,7 @@ CG-mode provides the following specific keyboard key bindings:
     (font-lock-fontify-buffer))
   (add-hook 'after-change-functions #'cg-after-change nil 'buffer-local)
   (let ((buf (current-buffer)))
-    (run-with-idle-timer 1 'repeat 'cg-output-hl buf))
-  (run-mode-hooks 'cg-mode-hook))
+    (run-with-idle-timer 1 'repeat 'cg-output-hl buf)))
 
 
 (defconst cg-font-lock-syntactic-keywords
@@ -622,11 +622,13 @@ to.")
 (defcustom cg-check-do-cache t
   "If non-nil, `cg-check' caches the output of `cg-pre-pipe' (the
 cache is emptied whenever you make a change in the input buffer,
-or call `cg-check' from another CG file).")
+or call `cg-check' from another CG file)."
+  :group 'cg
+  :type 'bool)
 
 (defvar cg--check-cache-buffer nil "See `cg-check-do-cache'.")
 
-(defun cg-input-mode-bork-cache (from to len)
+(defun cg-input-mode-bork-cache (_from _to _len)
   "Since `cg-check' will not reuse a cache unless `cg--file' and
 `cg--cache-in' match."
   (when cg--check-cache-buffer
@@ -819,7 +821,7 @@ Call `cg-output-set-unhide' to set a regex which will be exempt
 from hiding.  Call `cg-output-show-all' to turn off all hiding."
   (interactive)
   (setq cg--output-hiding-analyses t)
-  (lexical-let (prev)
+  (let (prev)
     (save-excursion
       (goto-char (point-min))
       (while (re-search-forward "^\"<.*>\"" nil 'noerror)
@@ -878,17 +880,18 @@ See `cg-output-hide-analyses'."
 
 ;;;###autoload
 (defcustom cg-check-after-change nil
-  "If non-nil, run `cg-check' on grammar after each change to the
-buffer.")
+  "If non-nil, run `cg-check' on grammar after each change to the buffer."
+  :group 'cg
+  :type 'bool)
 
 ;;;###autoload
 (defcustom cg-check-after-change-secs 1
-  "Minimum seconds between each `cg-check' after a change to a CG
-buffer (so 0 is after each change)."
+  "Minimum seconds between each `cg-check' after a change to a CG buffer.
+Use 0 to check immediately after each change."
   :type 'integer)
 
 (defvar cg--after-change-timer nil)
-(defun cg-after-change (from to len)
+(defun cg-after-change (_from _to _len)
   (when (and cg-check-after-change
              (not (member cg--after-change-timer timer-list)))
     (setq
@@ -955,7 +958,7 @@ something like
 
 Similarly, `cg-post-pipe' is run on output."
   (interactive)
-  (lexical-let*
+  (let*
       ((file (buffer-file-name))
        (tmp (make-temp-file "cg."))
        ;; Run in a separate process buffer from cmd and post-pipe:
@@ -991,16 +994,16 @@ Similarly, `cg-post-pipe' is run on output."
         (with-current-buffer cg--check-cache-buffer
           (cg-end-process (get-buffer-process out) (buffer-string)))
 
-      (lexical-let ((cg-proc (get-buffer-process out))
-                    (pre-proc (start-process "cg-pre-pipe" "*cg-pre-pipe-output*"
-                                             "/bin/bash" "-c" pre-pipe))
-                    (cache-buffer (cg-pristine-cache-buffer file in pre-pipe)))
-        (set-process-filter pre-proc (lambda (pre-proc string)
+      (let ((cg-proc (get-buffer-process out))
+            (pre-proc (start-process "cg-pre-pipe" "*cg-pre-pipe-output*"
+                                     "/bin/bash" "-c" pre-pipe))
+            (cache-buffer (cg-pristine-cache-buffer file in pre-pipe)))
+        (set-process-filter pre-proc (lambda (_pre-proc string)
                                        (with-current-buffer cache-buffer
                                          (insert string))
                                        (when (eq (process-status cg-proc) 'run)
                                          (process-send-string cg-proc string))))
-        (set-process-sentinel pre-proc (lambda (pre-proc string)
+        (set-process-sentinel pre-proc (lambda (_pre-proc _string)
                                          (when (eq (process-status cg-proc) 'run)
                                            (cg-end-process cg-proc))))
         (with-current-buffer in
@@ -1008,7 +1011,7 @@ Similarly, `cg-post-pipe' is run on output."
 
     (display-buffer out)))
 
-(defun cg-check-finish-function (buffer change)
+(defun cg-check-finish-function (buffer _change)
   ;; Note: this makes `recompile' not work, which is why `g' is
   ;; rebound in `cg-output-mode'
   (let ((w (get-buffer-window buffer)))
@@ -1052,10 +1055,10 @@ Similarly, `cg-post-pipe' is run on output."
 
 ;;; Keybindings ---------------------------------------------------------------
 (define-key cg-mode-map (kbd "C-c C-o") #'cg-occur-list)
-(define-key cg-mode-map (kbd "C-c g") #'cg-goto-rule)
+(define-key cg-mode-map (kbd "C-c C-r") #'cg-goto-rule)
 (define-key cg-mode-map (kbd "C-c C-c") #'cg-check)
 (define-key cg-mode-map (kbd "C-c C-i") #'cg-edit-input)
-(define-key cg-mode-map (kbd "C-c c") #'cg-toggle-check-after-change)
+(define-key cg-mode-map (kbd "C-c M-c") #'cg-toggle-check-after-change)
 (define-key cg-mode-map (kbd "C-;") #'cg-comment-or-uncomment-rule)
 (define-key cg-mode-map (kbd "M-#") #'cg-comment-or-uncomment-rule)
 
diff --git a/manual/cmdreference.xml b/manual/cmdreference.xml
index 90485be..3e92d47 100644
--- a/manual/cmdreference.xml
+++ b/manual/cmdreference.xml
@@ -103,7 +103,7 @@ Options:
  -A, --out-apertium  sets output format to Apertium
  -N, --out-niceline  sets output format to Niceline CG
  -P, --out-plain     sets output format to plain text
- -W, --wfactor       FST weight factor (defaults to 100.0)
+ -W, --wfactor       FST weight factor (defaults to 1.0)
      --wtag          FST weight tag prefix (defaults to W)
  -S, --sub-delim     FST sub-reading delimiters (defaults to #)
  -r, --rtl           sets sub-reading direction to RTL (default)
@@ -146,6 +146,29 @@ Options:
     </screen>
   </section>
 
+  <section id="cg-strictify">
+    <title>cg-strictify</title>
+    <para>
+      cg-strictify will parse a grammar and output a candidate STRICT-TAGS line that you can edit and then put
+      into your grammar. Optionally, it can also output the whole grammar and strip superfluous LISTs along the way.
+    </para>
+    <screen>
+Usage: cg-strictify [OPTIONS] <grammar>
+
+Options:
+ -?, --help       outputs this help
+ -g, --grammar    the grammar to parse; defaults to first non-option argument
+ -o, --output     outputs the whole grammar with STRICT-TAGS
+     --strip      removes superfluous LISTs from the output grammar; implies -o
+     --secondary  adds secondary tags (<...>) to strict list
+     --regex      adds regular expression tags (/../r, <..>r, etc) to strict list
+     --icase      adds case-insensitive tags to strict list
+     --baseforms  adds baseform tags ("...") to strict list
+     --wordforms  adds wordform tags ("<...>") to strict list
+     --all        same as --strip --secondary --regex --icase --baseforms --wordforms
+    </screen>
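+    <para>
+      A typical invocation (illustrative only; the grammar filename here is hypothetical) prints a candidate
+      STRICT-TAGS block that you can edit and paste at the top of the grammar:
+    </para>
+    <screen>
+cg-strictify grammar.cg3
+    </screen>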
+  </section>
+
   <section id="cg3-autobin.pl">
     <title>cg3-autobin.pl</title>
     <para>
diff --git a/manual/compatibility.xml b/manual/compatibility.xml
index b740ca1..1cf97e7 100644
--- a/manual/compatibility.xml
+++ b/manual/compatibility.xml
@@ -112,8 +112,10 @@
     <section id="gotcha-setop-minus">
       <title>Set Operator -</title>
       <para>
-        In CG-2 the - operator means set difference; in VISLCG it means set fail-fast; in CG-3 it means
-        set difference again, but the new operator ^ takes place of VISLCG's behavior.
+        In CG-2 the - operator meant set difference; in VISLCG it meant set fail-fast; in CG-3
+        <link linkend="set-operator-except">operator -</link> means something in between.
+        The new <link linkend="set-operator-failfast">operator ^</link> takes the place of VISLCG's behavior,
+        and <link linkend="set-operator-difference">operator \</link> takes the place of CG-2's behavior.
       </para>
     </section>
 
diff --git a/manual/grammar.xml b/manual/grammar.xml
index 6b7d13c..1fa2df5 100644
--- a/manual/grammar.xml
+++ b/manual/grammar.xml
@@ -76,6 +76,26 @@
         Instructs STRICT-TAGS to forbid all secondary tags (<code><…></code>) by default.
       </para>
     </section>
+
+    <section id="grammar-options-strict-regex">
+      <title>strict-regex</title>
+      <indexterm>
+        <primary>strict-regex</primary>
+      </indexterm>
+      <para>
+        Instructs STRICT-TAGS to forbid all regular expression tags (<code>/…/r</code> and others) by default.
+      </para>
+    </section>
+
+    <section id="grammar-options-strict-icase">
+      <title>strict-icase</title>
+      <indexterm>
+        <primary>strict-icase</primary>
+      </indexterm>
+      <para>
+        Instructs STRICT-TAGS to forbid all case-insensitive tags by default.
+      </para>
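+      <para>
+        For example (an illustrative sketch, using the same OPTIONS syntax that cg-strictify emits), a grammar
+        that also wants regular expression and case-insensitive tags restricted would declare both options:
+      </para>
+      <screen>
+        OPTIONS += strict-regex strict-icase ;
+      </screen>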
+    </section>
   </section>
 
   <section id="grammar-include">
diff --git a/manual/installation.xml b/manual/installation.xml
index b20e505..f36b10e 100644
--- a/manual/installation.xml
+++ b/manual/installation.xml
@@ -5,7 +5,7 @@
   <title>Installation & Updating</title>
   <para>
     These guidelines are primarily for Linux, although I develop on Windows
-    using <ulink url="http://www.visualstudio.com/downloads/download-visual-studio-vs#d-express-windows-desktop">Visual Studio 2013 Express for Windows Desktop</ulink>.
+    using <ulink url="https://www.visualstudio.com/vs/community/">Visual Studio 2017</ulink>.
   </para>
 
   <section id="cmake">
@@ -26,10 +26,10 @@
     <title>Ubuntu / Debian</title>
     <para>
       For any currently supported version of Debian/Ubuntu or compatible derivatives thereof (such as Linux Mint),
-      there is a <ulink url="http://apertium.projectjj.com/apt/">ready-made nightly repository</ulink>,
+      there is a <ulink url="https://apertium.projectjj.com/apt/">ready-made nightly repository</ulink>,
       easily installable via
       <screen>
-        wget http://apertium.projectjj.com/apt/install-nightly.sh -O - | sudo bash
+        wget https://apertium.projectjj.com/apt/install-nightly.sh -O - | sudo bash
         sudo apt-get install cg3
       </screen>
       Rest of this page can be skipped.
@@ -45,7 +45,7 @@
         # For TCMalloc, optional
         sudo apt-get install libgoogle-perftools-dev
         cd /tmp/
-        svn co http://visl.sdu.dk/svn/visl/tools/vislcg3/trunk vislcg3
+        svn co https://visl.sdu.dk/svn/visl/tools/vislcg3/trunk vislcg3
         cd vislcg3/
         ./cmake.sh
         make -j3
@@ -85,7 +85,7 @@
         # For TCMalloc, optional
         yum install google-perftools-devel
         cd /tmp/
-        svn co http://visl.sdu.dk/svn/visl/tools/vislcg3/trunk vislcg3
+        svn co https://visl.sdu.dk/svn/visl/tools/vislcg3/trunk vislcg3
         cd vislcg3/
         ./cmake.sh
         make -j3
@@ -117,7 +117,7 @@
           brew install icu4c
           brew link icu4c
           cd /tmp
-          svn co http://visl.sdu.dk/svn/visl/tools/vislcg3/trunk vislcg3
+          svn co https://visl.sdu.dk/svn/visl/tools/vislcg3/trunk vislcg3
           cd vislcg3/
           ./cmake.sh
           make -j3
@@ -140,7 +140,7 @@
           sudo port install boost
           sudo port install icu
           cd /tmp
-          svn co http://visl.sdu.dk/svn/visl/tools/vislcg3/trunk vislcg3
+          svn co https://visl.sdu.dk/svn/visl/tools/vislcg3/trunk vislcg3
           cd vislcg3/
           ./cmake.sh --prefix=/opt/local
           make -j3
@@ -155,9 +155,9 @@
       <title>Other</title>
       <para>
         Installing from source is very similar to Linux, but since the developer tools for OS X are so large,
-        we provide binaries from <ulink url="http://visl.sdu.dk/download/vislcg3/">the download folder</ulink>.
-        Look for files named *-osx.tar.gz. The archive contains the vislcg3, cg-comp, cg-proc, and cg-conv tools,
-        the ICU library binaries, and wrapper scripts to enable running CG-3 from a self-contained folder.
+        we provide binaries from <ulink url="https://apertium.projectjj.com/osx/nightly/">the download folder</ulink>.
+        Look for the file named cg3-latest.tar.bz2. The archive contains the vislcg3, cg-comp, cg-proc, and cg-conv tools
+        and the ICU library binaries.
       </para>
     </section>
 
@@ -167,8 +167,8 @@
     <title>Windows</title>
     <para>
       Installing from source is rather complicated due to lack of standard search paths for libraries, so
-      we provide binaries from <ulink url="http://visl.sdu.dk/download/vislcg3/">the download folder</ulink>.
-      Look for files named *-win32.zip. The archive contains the vislcg3, cg-comp, cg-proc, and cg-conv tools and
+      we provide binaries from <ulink url="https://apertium.projectjj.com/win32/nightly/">the download folder</ulink>.
+      Look for files named cg3-latest.zip and/or cg3ide-latest.zip. The archive contains the vislcg3, cg-comp, cg-proc, and cg-conv tools and
       the ICU library DLLs. May also require installing the
       <ulink url="http://www.microsoft.com/download/en/details.aspx?id=5555">VC++ 2010 redist</ulink>
       or <ulink url="http://www.microsoft.com/download/en/details.aspx?id=5582">VC++ 2008 redist</ulink>.
@@ -223,7 +223,7 @@
       As any user in any folder where you can find it again:
       <screen>
         svn co \
-          <ulink url="http://visl.sdu.dk/svn/visl/tools/vislcg3/trunk/">http://visl.sdu.dk/svn/visl/tools/vislcg3/trunk</ulink> vislcg3
+          <ulink url="https://visl.sdu.dk/svn/visl/tools/vislcg3/trunk/">https://visl.sdu.dk/svn/visl/tools/vislcg3/trunk</ulink> vislcg3
         cd vislcg3/
         ./cmake.sh
         make
diff --git a/manual/man/cg-comp.1 b/manual/man/cg-comp.1
index 7d0ddec..d69b13c 100644
--- a/manual/man/cg-comp.1
+++ b/manual/man/cg-comp.1
@@ -5,16 +5,16 @@ cg-comp \- This application is part of (
 )
 .PP
 This tool is part of the CG-3
-constraint grammar system: \fBhttp://visl.sdu.dk/cg3.html\fR.
+constraint grammar system: \fBhttps://visl.sdu.dk/cg3.html\fR.
 .SH SYNOPSIS
 .B cg-comp
 grammar_file [input_file [output_file]]
 .SH DESCRIPTION
-.BR cg-comp 
+.BR cg-comp
 is the program used to compile constraint grammars from a text format
-to a binary format that results in faster load times. It reads the 
+to a binary format that results in faster load times. It reads the
 grammar file from \fBinput_file\fR and writes output
-to \fBoutput_file\fR. The binary grammar files generated can be used 
+to \fBoutput_file\fR. The binary grammar files generated can be used
 in the \fBcg\-proc(1)\fR and \fBvislcg3(1)\fR disambiguators.
 .RE
 .RS
diff --git a/manual/man/cg-conv.1 b/manual/man/cg-conv.1
index ede9d0d..cf069af 100644
--- a/manual/man/cg-conv.1
+++ b/manual/man/cg-conv.1
@@ -5,12 +5,12 @@ cg-conv \- This application is part of (
 )
 .PP
 This tool is part of the CG-3
-constraint grammar system: \fBhttp://visl.sdu.dk/cg3.html\fR.
+constraint grammar system: \fBhttps://visl.sdu.dk/cg3.html\fR.
 .SH SYNOPSIS
 .B cg-conv
 [OPTIONS]
 .SH DESCRIPTION
-.BR cg-conv 
+.BR cg-conv
 is the program used to convert a text stream between various formats.
 By default it tries to auto-detect the input format and convert that to the
 CG-3 stream format.
@@ -65,7 +65,7 @@ sets sub\-reading direction to LTR
 .RS
 .SH SEE ALSO
 .I vislcg3\fR(1),
-http://visl.sdu.dk/cg3/chunked/streamformats.html
+https://visl.sdu.dk/cg3/chunked/streamformats.html
 .SH BUGS
 Email to: Tino Didriksen <mail at tinodidriksen.com>
 .SH AUTHOR
diff --git a/manual/man/cg-proc.1 b/manual/man/cg-proc.1
index 8b43f78..52b9acb 100644
--- a/manual/man/cg-proc.1
+++ b/manual/man/cg-proc.1
@@ -5,12 +5,12 @@ cg-proc \- This application is part of (
 )
 .PP
 This tool is part of the CG-3
-constraint grammar system: \fBhttp://visl.sdu.dk/cg3.html\fR.
+constraint grammar system: \fBhttps://visl.sdu.dk/cg3.html\fR.
 .SH SYNOPSIS
 .B cg-proc
 [OPTIONS] grammar_file [input_file [output_file]]
 .SH DESCRIPTION
-.BR cg-proc 
+.BR cg-proc
 is the grammar parser and disambiguator in the VISL
 constraint grammar system. It chooses between different analyses
 (or "readings") of a word delivered by a morphological analyser
diff --git a/manual/man/cg3-autobin.pl.1 b/manual/man/cg3-autobin.pl.1
index ea26269..a82278e 100644
--- a/manual/man/cg3-autobin.pl.1
+++ b/manual/man/cg3-autobin.pl.1
@@ -5,12 +5,12 @@ cg-autobin.pl \- This application is part of (
 )
 .PP
 This tool is part of the CG-3
-constraint grammar system: \fBhttp://visl.sdu.dk/cg3.html\fR.
+constraint grammar system: \fBhttps://visl.sdu.dk/cg3.html\fR.
 .SH SYNOPSIS
 .B cg-autobin.pl
 [OPTIONS]
 .SH DESCRIPTION
-.BR cg-autobin.pl 
+.BR cg-autobin.pl
 is a thin wrapper around \fBvislcg3(1)\fR to automatically compile
 textual grammars to binary form for faster loading, if the textual grammar
 was changed since last run.
diff --git a/manual/man/vislcg3.1 b/manual/man/vislcg3.1
index 81a3c99..4cbd02e 100644
--- a/manual/man/vislcg3.1
+++ b/manual/man/vislcg3.1
@@ -5,18 +5,18 @@ vislcg3 \- This application is part of (
 )
 .PP
 This tool is part of the CG-3
-constraint grammar system: \fBhttp://visl.sdu.dk/cg3.html\fR.
+constraint grammar system: \fBhttps://visl.sdu.dk/cg3.html\fR.
 .SH SYNOPSIS
 .B vislcg3
-[OPTIONS] 
+[OPTIONS]
 .SH DESCRIPTION
-.BR vislcg3 
-is the grammar parser and disambiguator in the VISL 
-constraint grammar system. It chooses between different analyses 
+.BR vislcg3
+is the grammar parser and disambiguator in the VISL
+constraint grammar system. It chooses between different analyses
 (or "readings") of a word delivered by a morphological analyser
 based on a set of grammatical rules.
 .PP
-By default, vislcg3 reads input from stdin and writes output 
+By default, vislcg3 reads input from stdin and writes output
 to stdout.
 .RE
 .SH OPTIONS
diff --git a/manual/manual.xml b/manual/manual.xml
index c170495..cb01621 100644
--- a/manual/manual.xml
+++ b/manual/manual.xml
@@ -26,7 +26,7 @@
     </authorgroup>
 
     <copyright>
-      <year>2007-2016</year>
+      <year>2007-2017</year>
       <holder>GrammarSoft ApS</holder>
     </copyright>
 
diff --git a/manual/probabilistic.xml b/manual/probabilistic.xml
index 167bcfa..bbdcd80 100644
--- a/manual/probabilistic.xml
+++ b/manual/probabilistic.xml
@@ -42,8 +42,8 @@
   <para>
     These are just examples of what <link linkend="numerical-matches">numeric tags</link> could be used for.
     There is no reason Confidence values are in % and there is no requirement that they must add up to 100%.
-    The only requirement of a numerical tag is an alphanumeric identifier and an integer value that fits
-    in a 32bit signed integer (usually -2147483648 to 2147483647).
+    The only requirement of a numerical tag is an alphanumeric identifier and a double-precision floating-point value that fits
+    in the range -281474976710656.0 to +281474976710655.0.
   </para>
 
 </chapter>
diff --git a/manual/rules.xml b/manual/rules.xml
index 9014772..84429ba 100644
--- a/manual/rules.xml
+++ b/manual/rules.xml
@@ -20,14 +20,14 @@
     </para>
     <screen>
   Reading & Tag manipulations:
-      ADD <tags> <target> [contextual_tests] ;
-      MAP <tags> <target> [contextual_tests] ;
+      ADD <tags> [BEFORE|AFTER <tags>] <target> [contextual_tests] ;
+      MAP <tags> [BEFORE|AFTER <tags>] <target> [contextual_tests] ;
       SUBSTITUTE <locate tags> <replacement tags> <target> [contextual_tests] ;
       UNMAP <target> [contextual_tests] ;
 
       REPLACE <tags> <target> [contextual_tests] ;
       APPEND <tags> <target> [contextual_tests] ;
-      COPY <extra tags> [EXCEPT <except tags>] <target> [contextual_tests] ;
+      COPY <extra tags> [EXCEPT <except tags>] [BEFORE|AFTER <tags>] <target> [contextual_tests] ;
 
       SELECT <target> [contextual_tests] ;
       REMOVE <target> [contextual_tests] ;
@@ -54,7 +54,8 @@
           TO|FROM <contextual_target> [contextual_tests] ;
 
   Cohort manipulation:
-      ADDCOHORT <cohort tags> BEFORE|AFTER <target> [contextual_tests] ;
+      ADDCOHORT <cohort tags> BEFORE|AFTER [WITHCHILD <child_set>|NOCHILD]
+          <target> [contextual_tests] ;
       REMCOHORT <target> [contextual_tests] ;
       SPLITCOHORT <cohort recipe> <target> [contextual_tests] ;
 
@@ -161,11 +162,18 @@
       <primary>ADDCOHORT</primary>
     </indexterm>
     <screen>
-      [wordform] ADDCOHORT <cohort tags> BEFORE|AFTER <target> [contextual_tests] ;
+      [wordform] ADDCOHORT <cohort tags> BEFORE|AFTER [WITHCHILD <child_set>|NOCHILD]
+          <target> [contextual_tests] ;
     </screen>
     <para>
       Inserts a new cohort before or after the target.
     </para>
+    <para>
+      WITHCHILD uses the children of the cohort you're targeting as edges so you can avoid creating cohorts
+      in the middle of another dependency group.
+      If you specify WITHCHILD, you must provide a set that the children to be considered must match.
+      The (*) set will match all children.
+    </para>
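+    <para>
+      For example (an illustrative sketch; the sets are hypothetical), this variant of the plain examples below
+      inserts the new cohort after the target and all of its matching children, rather than in the middle of the
+      target's subtree:
+    </para>
+    <screen>
+      ADDCOHORT ("<wordform>" "baseform" tags) AFTER WITHCHILD (*) (@waffles) ;
+    </screen>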
     <screen>
       ADDCOHORT ("<wordform>" "baseform" tags) BEFORE (@waffles) ;
       ADDCOHORT ("<wordform>" "baseform" tags) AFTER (@waffles) ;
@@ -250,9 +258,14 @@
       The (*) set will match all children.
     </para>
     <para>
-      The first WITHCHILD specifies which children you want moved.
+      The first WITHCHILD specifies which children you want moved (the target cohorts).
       The second WITHCHILD uses the children of the cohort you're moving to as edges so you can avoid moving into
-      another dependency group.
+      another dependency group (the anchor cohorts).
+    </para>
+    <para>
+      WITHCHILD will match direct children only, and then gobble up all descendants of the matched children.
+      If the target cohort is a descendant of the anchor, all target cohorts are removed from the anchor cohorts.
+      Otherwise, the inverse is done. This allows maximal movement while retaining the integrity of the chosen subtree.
     </para>
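+    <para>
+      For example (an illustrative sketch; the sets @X and @Y are hypothetical), the following moves the @X
+      cohort together with all of its descendants to a position after the @Y cohort and its children:
+    </para>
+    <screen>
+      MOVE WITHCHILD (*) (@X) AFTER WITHCHILD (*) (@Y) ;
+    </screen>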
   </section>
 
@@ -851,6 +864,21 @@
         Causes a <link linkend="cmd-setvar">STREAMCMD</link> to be output for the given action.
       </para>
     </section>
+
+    <section id="rule-options-repeat">
+      <title>REPEAT</title>
+      <indexterm>
+        <primary>REPEAT</primary>
+      </indexterm>
+      <para>
+        Applicable for all rule types.
+      </para>
+      <para>
+        If the rule does anything that changes the state of the window, REPEAT forces the rule to run through the
+        window again after the current pass. Does not by itself cause a full section reiteration.
+        Useful for making <link linkend="substitute">SUBSTITUTE</link> remove all matching tags instead of just one.
+      </para>
+    </section>
   </section>
 
 </chapter>
diff --git a/manual/sets.xml b/manual/sets.xml
index ead7603..fe8613c 100644
--- a/manual/sets.xml
+++ b/manual/sets.xml
@@ -23,10 +23,10 @@
       </screen>
     </section>
 
-    <section id="set-operator-difference">
-      <title>Difference: -</title>
+    <section id="set-operator-except">
+      <title>Except: -</title>
       <para>
-        Equivalent to the mathematical set complement &#x2216; operator.
+        Equivalent to the SQL Except operator.
       </para>
       <screen>
         LIST a = a b c d ;
@@ -38,6 +38,21 @@
       </screen>
     </section>
 
+    <section id="set-operator-difference">
+      <title>Difference: \</title>
+      <para>
+        Equivalent to the mathematical set complement &#x2216; operator.
+        The symbol is a normal backslash.
+      </para>
+      <screen>
+        LIST a = a b c d ;
+        LIST b = c d e f ;
+
+        # Logically yields a set containing tags: a b
+        SET r = a \ b ;
+      </screen>
+    </section>
+
     <section id="set-operator-symdiff">
       <title>Symmetric Difference: &#x2206;</title>
       <para>
diff --git a/manual/tags.xml b/manual/tags.xml
index 5c4be7b..573fec6 100644
--- a/manual/tags.xml
+++ b/manual/tags.xml
@@ -55,7 +55,7 @@
     </para>
     <para>
       Due to tags themselves needing the occasional escaping, regular expressions need double-escaping of
-      some special symbols. E.g. grouping with () needs to be written as "a\(b|c\)d"r, while literal non-grouping ()
+      symbols that have special meaning to CG-3. E.g. literal non-grouping ()
       need to be written as "a\\(b\\)c"r. Metacharacters also need double-escaping, so \w needs to be written as \\w.
     </para>
     <para>
@@ -138,8 +138,8 @@
     <para>
       The two special values MIN and MAX (both case-sensitive) will scan the cohort for their respective minimum
       or maximum value, and use that for the comparison.
-      Internally the value is stored in a 32bit signed integer, so usually MIN is equal to -2147483648 and MAX is 2147483647,
-      and using those values will also act such.
+      Internally the value is stored in a double, and it is capped to the range -281474976710656.0 to +281474976710655.0;
+      values beyond that range act as those limits.
       <screen>
         # Select the maximum value of W. Readings with no W will also be removed.
         SELECT (<W=MAX>) ;
@@ -369,21 +369,19 @@
       STRICT-TAGS += N V ADJ etc ... ;
     </screen>
     <para>
-      By default, STRICT-TAGS always allows wordforms, baseforms, and VISL-style secondary tags
+      By default, STRICT-TAGS always allows wordform, baseform, regular expression, case-insensitive, and VISL-style secondary tags
       (<code>"<…>"</code>, <code>"…"</code>, <code><…></code>), since those are too prolific to list
       individually. If you are extra paranoid, you can change that with <link linkend="grammar-options">OPTIONS</link>.
     </para>
     <para>
       To get a list of unique used tags, pass --show-tags to CG-3. To filter this list to the default set of interesting tags,
-      something like this can be used:
+      <link linkend="cg-strictify">cg-strictify</link> can be used:
       <screen>
-        vislcg3 --show-tags -g grammar-goes-here | LC_ALL=C sort | egrep -v '^"' | egrep -v '^(/)?<.*>(/r|v)?$' | \
-        egrep -v '^\^' | egrep -v '^VSTR:' | egrep -v '^VAR:' | egrep -v '^_.*_$' | grep -v 'dummy string' | \
-        grep -v '^\*$' | grep -v '^<<<$' | grep -v '^>>>$' > tags.txt
+        cg-strictify grammar-goes-here
       </screen>
       <emphasis>For comparison, this yields 285 tags for VISL's 10000-rule Danish grammar.</emphasis>
       Edit the resulting list to remove any tags you can see are typos or should otherwise not be allowed,
-      collapse the list to a line, stuff it at the top of the grammar with STRICT-TAGS, and recompile the grammar.
+      stuff it at the top of the grammar, and recompile.
       Any errors you get will be lines where forbidden tags are used, which can be whole sets if those sets aren't used in any rules.
     </para>
     <para>
diff --git a/newsletters/2017-05-20.txt b/newsletters/2017-05-20.txt
new file mode 100644
index 0000000..0886237
--- /dev/null
+++ b/newsletters/2017-05-20.txt
@@ -0,0 +1,49 @@
+A new release of CG-3 has been tagged v1.0.0.12200, in preparation for the NoDaLiDa 2017 Constraint Grammar Workshop ( https://visl.sdu.dk/nodalida2017.html ) which takes place on Monday.
+
+Finally calling it version 1.0. Doesn't change anything formally, but CG-3 is stable enough and widely enough used that it's about time for the 1.0 tag.
+
+New features:
+- Cmdline flag --dump-ast to output an XML dump of the parser representation. Useful for 3rd party transformation tools and highlighters
+- Rule SPLITCOHORT that splits a cohort into multiple, while allowing the creation of an internal dependency tree. See https://visl.sdu.dk/cg3/chunked/rules.html#splitcohort
+- Added a Bag of Tags feature. See https://visl.sdu.dk/cg3/chunked/contexts.html#test-bag-of-tags
+- Added rule flag REPEAT. See https://visl.sdu.dk/cg3/chunked/rules.html#rule-options-repeat
+- Added mathematical set difference set operator as \. See https://visl.sdu.dk/cg3/chunked/sets.html#set-operator-difference
+- Renamed the old set difference operator (-) to the except operator
+- Cmdline flag --trace now optionally takes a range of rules to break execution at
+- ADDCOHORT now takes WITHCHILD to further narrow down where to insert the cohort
+- New tool cg-strictify to aid in making your existing grammars STRICT-TAGS compliant. See https://visl.sdu.dk/cg3/chunked/cmdreference.html#cg-strictify
+
+Changes:
+- Now requires C++11 to build, but will test for and use C++14 and C++17 where available
+- Regex captures are now done on a per-reading basis, which eliminates weird gotchas about matches
+- Linking from a position override template will now go from the min/max edges of cohorts the template reached, rather than just the final cohort it touched
+- OR'ed templates will now backtrack instead of trying only first match
+- Input stream initial SETVAR commands are now honored immediately, so that variables can be used in DELIMITERS
+- Context modifier A now works for almost all rule types
+- KEEPORDER will now automatically be added for easily detectable cases where it is needed
+- SUBSTITUTE will add the replacement tags for each found contiguous tag to be removed
+- Better detection of endless loops
+- Non-scanning contexts with modifiers O and o will now throw an error, as the writer probably meant 0
+- Binary grammars can now be dumped as text, but in the internal mangled structure
+- MOVE WITHCHILD changed to more logically interact with moving into and out of trees
+- cg-conv no longer creates readings for plain text input. Cmdline flag --add-tags added to get these back where desired
+- Numeric tags are now double-precision floating point
+- cg-conv no longer multiplies weights in FST input by default
+
+Fixed Bugs:
+- Fixed removing a cohort that owned temporarily enclosed cohorts
+- Fixed bug where e.g. <C:NN> was considered numerical with value 0
+- Fixed a lot of bugs related to cohort moving, adding, and removing
+- Fixed segfault when parsing tags longer than 256 bytes
+- Fixed modifier pS acting as if it was p*
+
+Main site is https://visl.sdu.dk/cg3.html
+Google Group is https://groups.google.com/group/constraint-grammar
+Source snapshots available at https://visl.sdu.dk/download/cg3/
+Windows binary is at https://apertium.projectjj.com/win32/nightly/
+OS X binary is at https://apertium.projectjj.com/osx/nightly/
+RHEL/Fedora/CentOS/OpenSUSE packages are at https://apertium.projectjj.com/rpm/howto.txt
+Debian/Ubuntu packages are at https://apertium.projectjj.com/apt/howto.txt
+
+-- Tino Didriksen
+CG-3 Developer
diff --git a/scripts/CG3_External.pm b/scripts/CG3_External.pm
index 4ea5145..f0b6799 100644
--- a/scripts/CG3_External.pm
+++ b/scripts/CG3_External.pm
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
 # -*- mode: cperl; indent-tabs-mode: nil; tab-width: 3; cperl-indent-level: 3; -*-
 package CG3_External;
 use strict;
@@ -92,7 +92,7 @@ sub check_protocol {
    if ($protocol == $VERSION) {
       return $VERSION;
    }
-   
+
    return undef;
 }
 
@@ -118,7 +118,7 @@ sub read_window {
          $cohort{'parent'} = read_uint32_t($fh, 'cohort parent');
       }
       $cohort{'wordform'} = read_utf8_string($fh, 'cohort wordform');
-      
+
       my @readings;
       my $rlen = read_uint32_t($fh, 'num readings');
       for (my $r=0 ; $r<$rlen ; $r++) {
@@ -129,7 +129,7 @@ sub read_window {
          if ($reading{'flags'} & (1 << 3)) {
             $reading{'baseform'} = read_utf8_string($fh, 'reading baseform');
          }
-         
+
          my @tags;
          my $tlen = read_uint32_t($fh, 'num tags');
          for (my $t=0 ; $t<$tlen ; $t++) {
@@ -149,7 +149,7 @@ sub read_window {
       push @cohorts, \%cohort;
    }
    $window{'cohorts'} = \@cohorts;
-   
+
    return \%window;
 }
 
@@ -166,7 +166,7 @@ sub write_window {
 
    foreach my $c (@{$w->{'cohorts'}}) {
       open(my $fc, '>', \my $co);
-      
+
       write_uint32_t($fc, $c->{'num'}, 'cohort number');
       if ($c->{'text'}) {
          $c->{'flags'} |= (1 << 0);
@@ -179,7 +179,7 @@ sub write_window {
          write_uint32_t($fc, $c->{'parent'}, 'cohort parent');
       }
       write_utf8_string($fc, $c->{'wordform'}, 'cohort wordform');
-      
+
       my $rlen = @{$c->{'readings'}};
       write_uint32_t($fc, $rlen, 'num readings');
 
diff --git a/scripts/cg-strictify b/scripts/cg-strictify
new file mode 100755
index 0000000..aa2bfdb
--- /dev/null
+++ b/scripts/cg-strictify
@@ -0,0 +1,202 @@
+#!/usr/bin/env perl
+use warnings;
+use strict;
+use utf8;
+BEGIN {
+	$| = 1;
+	binmode(STDIN, ':encoding(UTF-8)');
+	binmode(STDOUT, ':encoding(UTF-8)');
+}
+use open qw( :encoding(UTF-8) :std );
+
+use Getopt::Long;
+Getopt::Long::Configure('no_ignore_case');
+
+my %opts = ();
+GetOptions (\%opts,
+   'help|?',
+   'grammar|g=s',
+   'output|o',
+   'strip',
+   'secondary',
+   'regex',
+   'icase',
+   'baseforms',
+   'wordforms',
+   'all',
+   );
+
+sub print_help {
+   print <<'XOUT';
+Usage: cg-strictify [OPTIONS] <grammar>
+
+Options:
+ -?, --help       outputs this help
+ -g, --grammar    the grammar to parse; defaults to first non-option argument
+ -o, --output     outputs the whole grammar with STRICT-TAGS
+     --strip      removes superfluous LISTs from the output grammar; implies -o
+     --secondary  adds secondary tags (<...>) to strict list
+     --regex      adds regular expression tags (/../r, <..>r, etc) to strict list
+     --icase      adds case-insensitive tags to strict list
+     --baseforms  adds baseform tags ("...") to strict list
+     --wordforms  adds wordform tags ("<...>") to strict list
+     --all        same as --strip --secondary --regex --icase --baseforms --wordforms
+
+XOUT
+}
+
+if (defined $opts{'help'}) {
+   print_help();
+   exit(0);
+}
+if (!defined $opts{'grammar'}) {
+   if (!$ARGV[0]) {
+      print "Missing input grammar argument!\n\n";
+      print_help();
+      exit(-1);
+   }
+   $opts{'grammar'} = $ARGV[0];
+}
+if (! -s $opts{'grammar'}) {
+   print "Grammar is either missing or empty!\n";
+   exit(-1);
+}
+
+if (defined $opts{'strip'}) {
+   $opts{'output'} = 1;
+}
+
+if (defined $opts{'all'}) {
+   $opts{'secondary'} = 1;
+   $opts{'regex'} = 1;
+   $opts{'icase'} = 1;
+   $opts{'baseforms'} = 1;
+   $opts{'wordforms'} = 1;
+}
+
+my $tags = `vislcg3 --show-tags -g '$opts{grammar}' | LC_ALL=C sort`;
+$tags =~ s@[\r\n]+@\n@g; # Normalize newlines
+chomp($tags);
+my @tags = split /\n/, $tags;
+
+my $options = '';
+if (defined $opts{'secondary'} || defined $opts{'regex'} || defined $opts{'icase'} || defined $opts{'baseforms'} || defined $opts{'wordforms'}) {
+   $options .= 'OPTIONS +=';
+   if (defined $opts{'secondary'}) {
+      $options .= ' strict-secondary';
+   }
+   if (defined $opts{'regex'}) {
+      $options .= ' strict-regex';
+   }
+   if (defined $opts{'icase'}) {
+      $options .= ' strict-icase';
+   }
+   if (defined $opts{'baseforms'}) {
+      $options .= ' strict-baseforms';
+   }
+   if (defined $opts{'wordforms'}) {
+      $options .= ' strict-wordforms';
+   }
+   $options .= " ;\n";
+}
+
+my $strict_tags = 'STRICT-TAGS +=';
+my @strict = ();
+my $lf = '';
+foreach my $tag (@tags) {
+   if ($tag eq '*') {
+      next; # Marker for erasure, any, etc.
+   }
+   if ($tag eq '>>>') {
+      next; # Start of window marker
+   }
+   if ($tag eq '<<<') {
+      next; # End of window marker
+   }
+   if ($tag =~ m@^\^@) {
+      next; # Fail-fast tags
+   }
+   if ($tag =~ m@^VSTR:@) {
+      next; # Varstrings
+   }
+   if ($tag =~ m@^VAR:@) {
+      next; # Global variables
+   }
+   if ($tag =~ m@^_.+_$@) {
+      next; # Magic placeholders such as _TARGET_
+   }
+   if ($tag =~ m@dummy string@) {
+      next; # Internal placeholder for tag #0
+   }
+
+   if ($tag =~ m@[>"/]v$@) {
+      next;
+   }
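+   # Unless the matching option was given, skip tag types that STRICT-TAGS already allows by default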
+   if (! defined $opts{'secondary'} && $tag =~ m@^<.+>[riv]*$@) {
+      next;
+   }
+   if (! defined $opts{'regex'} && $tag =~ m@^/.+/[riv]*$@) {
+      next;
+   }
+   if (! defined $opts{'icase'} && $tag =~ m@[>"/]i$@) {
+      next;
+   }
+   if (! defined $opts{'baseforms'} && $tag =~ m@^"[^<].*"[riv]*$@) {
+      next;
+   }
+   if (! defined $opts{'wordforms'} && $tag =~ m@^"<.*>"[riv]*$@) {
+      next;
+   }
+
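+   # Group the output: start a new STRICT-TAGS line whenever the leading-character class changes
+   # (uppercase, lowercase, wordform "<...>", or baseform "..." tags)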
+   my $nlf = substr($tag, 0, 1);
+   if ($nlf =~ m@^\p{Lu}@) {
+      $nlf = 'A';
+   }
+   elsif ($nlf =~ m@^\p{Ll}@) {
+      $nlf = 'a';
+   }
+   elsif ($tag =~ m@^"<.+>"[riv]*$@) {
+      $nlf = 'W';
+   }
+   elsif ($tag =~ m@^".+"[riv]*$@) {
+      $nlf = 'B';
+   }
+   if ($lf && $lf ne $nlf) {
+      $strict_tags .= "\n";
+   }
+   $lf = $nlf;
+
+   $strict_tags .= " $tag";
+   push(@strict, $tag);
+}
+$strict_tags .= " ;\n";
+
+if (! defined $opts{'output'}) {
+   print "Put these lines at the top of your grammar, and edit the STRICT-TAGS list by removing invalid tags:\n\n";
+   print $options;
+   print $strict_tags;
+   exit(0);
+}
+
+my $g = '';
+{
+   local $/ = undef;
+   open(FH, '<'.$opts{'grammar'});
+   $g = <FH>;
+   close(FH);
+}
+
+if (defined $opts{'strip'}) {
+   print STDERR "Deleting unnecessary LISTs...\n";
+   foreach my $tag (@strict) {
+      if ($g =~ m@[\b\n][Ll][Ii][Ss][Tt]\s+\Q$tag\E\s*=\s*\Q$tag\E\s*;@) {
+         print STDERR "Deleting LIST $tag\n";
+         $g =~ s@[\b\n][Ll][Ii][Ss][Tt]\s+\Q$tag\E\s*=\s*\Q$tag\E\s*;@@;
+      }
+   }
+}
+
+print $options;
+print $strict_tags;
+print "\n";
+print $g;
diff --git a/scripts/cg3-autobin.pl.in b/scripts/cg3-autobin.pl.in
index 927caa7..9719b70 100755
--- a/scripts/cg3-autobin.pl.in
+++ b/scripts/cg3-autobin.pl.in
@@ -1,10 +1,11 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
 BEGIN { $| = 1; }
 use warnings;
 use strict;
 use File::Spec;
 use Getopt::Long;
-use Digest::SHA1 qw(sha1_hex);
+use Digest::SHA qw(sha1_hex);
+Getopt::Long::Configure('no_ignore_case');
 
 # This is updated by the update-revision.pl script.
 my $revision = @REVISION@;
@@ -42,7 +43,7 @@ GetOptions (\%h,
 "no-before-sections",
 "no-sections",
 "no-after-sections",
-"trace|t",
+"trace|t:s",
 "trace-name-only",
 "trace-no-removed",
 "trace-encl",
@@ -65,10 +66,13 @@ GetOptions (\%h,
 "dep-no-crossing",
 "no-magic-readings",
 "no-pass-origin|o",
+"split-mappings",
 "show-end-tags|e",
 "show-unused-sets",
+"show-tags",
 "show-tag-hashes",
-"show-set-hashes"
+"show-set-hashes",
+"dump-ast"
 );
 
 if (defined $h{'grammar-bin'}) {
diff --git a/scripts/external.pl b/scripts/external.pl
index b4f32e3..8fbb542 100755
--- a/scripts/external.pl
+++ b/scripts/external.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
 # -*- mode: cperl; indent-tabs-mode: nil; tab-width: 3; cperl-indent-level: 3; -*-
 BEGIN {
    $| = 1;
diff --git a/scripts/external_text.pl b/scripts/external_text.pl
index d65aa29..d398614 100755
--- a/scripts/external_text.pl
+++ b/scripts/external_text.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
 # -*- mode: cperl; indent-tabs-mode: nil; tab-width: 3; cperl-indent-level: 3; -*-
 BEGIN {
    $| = 1;
@@ -28,7 +28,7 @@ my $af_started = 0;
 
 sub initSubChain {
 	if ($ENV{'DEBUG'}) { print STDERR "$0 initSubChain enter\n"; }
-	$af_h = start \@af_cmd, \$af_in, \$af_out, \$af_err;	
+	$af_h = start \@af_cmd, \$af_in, \$af_out, \$af_err;
 	$af_started = 1;
 	if ($ENV{'DEBUG'}) { print STDERR "$0 initSubChain exit\n"; }
 }
@@ -76,9 +76,9 @@ while (my $w = read_window(*STDIN)) {
          $out .= "\n";
       }
    }
-   
+
    my $in = callSubChain($out);
-   
+
 	$out =~ s@^\s+@@g;
 	$out =~ s@\s+$@@g;
 
@@ -86,7 +86,7 @@ while (my $w = read_window(*STDIN)) {
       write_null_response(*STDOUT);
       next;
    }
-   
+
    my @out = split /\n/, $out;
 
    my @in = split /\n/, $in;
@@ -98,7 +98,7 @@ while (my $w = read_window(*STDIN)) {
       write_null_response(*STDOUT);
       next;
    }
-   
+
    my $cc = 0;
    for (my $i = 0 ; $i<$lin ; $i++) {
       if ($in[$i] !~ /\t/) { # Found a cohort line, start looking for readings
@@ -114,7 +114,7 @@ while (my $w = read_window(*STDIN)) {
          my $rc = 0;
          my $j;
          for ($j = $i+1 ; $j<$lin ; $j++) {
-            if ($in[$j] !~ /\t/) { # Found a cohort line, so stop looking for readings 
+            if ($in[$j] !~ /\t/) { # Found a cohort line, so stop looking for readings
                last;
             }
             $rc++;
@@ -133,6 +133,6 @@ while (my $w = read_window(*STDIN)) {
          $i = $j-1;
       }
    }
-   
+
    write_window(*STDOUT, $w);
 }
diff --git a/scripts/profile-revisions.php b/scripts/profile-revisions.php
index 9b51d0f..9fe7cf7 100755
--- a/scripts/profile-revisions.php
+++ b/scripts/profile-revisions.php
@@ -14,13 +14,13 @@ function profile_revision($rev) {
 
 	if (file_exists('./src/all_vislcg3.cpp')) {
 		echo "Using all_vislcg3.cpp and Boost...\n";
-		echo shell_exec('g++ -std=c++1y -DHAVE_BOOST -DNDEBUG -pthread -pipe -Wall -Wextra -Wno-deprecated -fPIC -O3 -Iinclude -Iinclude/exec-stream -Iinclude/posix ./src/all_vislcg3.cpp -o vislcg3 -L/usr/lib/x86_64-linux-gnu -licui18n -licudata -licuio -licuuc 2>&1');
-		echo shell_exec('g++ -std=c++1y -DHAVE_BOOST -DNDEBUG -pthread -pipe -Wall -Wextra -Wno-deprecated -fPIC -O3 -Iinclude -Iinclude/exec-stream -Iinclude/posix ./src/all_vislcg3.cpp -o vislcg3-tc -L/usr/lib/x86_64-linux-gnu -licui18n -licudata -licuio -licuuc -ltcmalloc 2>&1');
+		echo shell_exec('g++ -std=c++17 -DHAVE_BOOST -DNDEBUG -pthread -pipe -Wall -Wextra -Wno-deprecated -fPIC -flto -O3 -Iinclude -Iinclude/exec-stream -Iinclude/posix ./src/all_vislcg3.cpp -o vislcg3 -L/usr/lib/x86_64-linux-gnu -licui18n -licudata -licuio -licuuc 2>&1');
+		echo shell_exec('g++ -std=c++17 -DHAVE_BOOST -DNDEBUG -pthread -pipe -Wall -Wextra -Wno-deprecated -fPIC -flto -O3 -Iinclude -Iinclude/exec-stream -Iinclude/posix ./src/all_vislcg3.cpp -o vislcg3-tc -L/usr/lib/x86_64-linux-gnu -licui18n -licudata -licuio -licuuc -ltcmalloc 2>&1');
 	}
 	else {
 		echo "Using old-style without Boost...\n";
-		echo shell_exec('g++ -std=c++1y -DHAVE_BOOST -pthread -pipe -Wall -Wextra -Wno-deprecated -fPIC -O3 -Iinclude -Iinclude/exec-stream $(ls -1 ./src/*.cpp | egrep -v "/test_" | egrep -v "/cg_" | egrep -v "/all_" | grep -v Apertium | grep -v Matxin | grep -v FormatConverter) -o vislcg3 -L/usr/lib/x86_64-linux-gnu -licui18n -licudata -licuio -licuuc -ltcmalloc 2>&1');
-		echo shell_exec('g++ -std=c++1y -DHAVE_BOOST -pthread -pipe -Wall -Wextra -Wno-deprecated -fPIC -O3 -ltcmalloc -Iinclude -Iinclude/exec-stream $(ls -1 ./src/*.cpp | egrep -v "/test_" | egrep -v "/cg_" | egrep -v "/all_" | grep -v Apertium | grep -v Matxin | grep -v FormatConverter) -o vislcg3-tc -L/usr/lib/x86_64-linux-gnu -licui18n -licudata -licuio -licuuc -ltcmalloc 2>&1');
+		echo shell_exec('g++ -std=c++17 -DHAVE_BOOST -pthread -pipe -Wall -Wextra -Wno-deprecated -fPIC -flto -O3 -Iinclude -Iinclude/exec-stream $(ls -1 ./src/*.cpp | egrep -v "/test_" | egrep -v "/cg_" | egrep -v "/all_" | grep -v Apertium | grep -v Matxin | grep -v FormatConverter) -o vislcg3 -L/usr/lib/x86_64-linux-gnu -licui18n -licudata -licuio -licuuc -ltcmalloc 2>&1');
+		echo shell_exec('g++ -std=c++17 -DHAVE_BOOST -pthread -pipe -Wall -Wextra -Wno-deprecated -fPIC -flto -O3 -ltcmalloc -Iinclude -Iinclude/exec-stream $(ls -1 ./src/*.cpp | egrep -v "/test_" | egrep -v "/cg_" | egrep -v "/all_" | grep -v Apertium | grep -v Matxin | grep -v FormatConverter) -o vislcg3-tc -L/usr/lib/x86_64-linux-gnu -licui18n -licudata -licuio -licuuc -ltcmalloc 2>&1');
 	}
 
 	if (!file_exists('vislcg3') || !file_exists('vislcg3-tc')) {
@@ -73,8 +73,8 @@ function profile_revision($rev) {
 	shell_exec('rm -rf '.$dir.' 2>&1 >/dev/null');
 }
 
-$revs = array(10809, 10800, 10373, 10044);
-$revs = array(10824);
+$revs = array(10824, 10809, 10800, 10373, 10044);
+$revs = array(11682);
 foreach ($revs as $rev) {
 	profile_revision($rev);
 }
diff --git a/src/AST.hpp b/src/AST.hpp
index 9c61986..a15ccd9 100644
--- a/src/AST.hpp
+++ b/src/AST.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -75,6 +75,7 @@ enum ASTType {
 	AST_SoftDelimiters,
 	AST_StaticSets,
 	AST_StrictTags,
+	AST_ListTags,
 	AST_SubReadings,
 	AST_SubReadingsDirection,
 	AST_Tag,
@@ -166,12 +167,12 @@ void print_ast(UFILE *out, const UChar *b, size_t n, const ASTNode& node) {
 		return;
 	}
 	u_fprintf(out, ">\n");
-	foreach (it, node.cs) {
-		if (it->type == AST_Grammar) {
-			print_ast(out, it->b, n + 1, *it);
+	for (auto& it : node.cs) {
+		if (it.type == AST_Grammar) {
+			print_ast(out, it.b, n + 1, it);
 		}
 		else {
-			print_ast(out, b, n + 1, *it);
+			print_ast(out, b, n + 1, it);
 		}
 	}
 	u_fprintf(out, "%s</%s>\n", indent.c_str(), ASTType_str[node.type]);
diff --git a/src/ApertiumApplicator.cpp b/src/ApertiumApplicator.cpp
index 06f1d61..849f44a 100644
--- a/src/ApertiumApplicator.cpp
+++ b/src/ApertiumApplicator.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -42,16 +42,13 @@ ApertiumApplicator::ApertiumApplicator(UFILE *ux_err)
 	fgetc_error = U_ZERO_ERROR;
 }
 
-
-bool ApertiumApplicator::getNullFlush() {
-	return nullFlush;
-}
-
 void ApertiumApplicator::setNullFlush(bool pNullFlush) {
 	nullFlush = pNullFlush;
 }
 
 UChar ApertiumApplicator::u_fgetc_wrapper(istream& input) {
+	UChar rv = U_EOF;
+
 	if (runningWithNullFlush) {
 		if (!fgetc_converter) {
 			fgetc_error = U_ZERO_ERROR;
@@ -83,11 +80,17 @@ UChar ApertiumApplicator::u_fgetc_wrapper(istream& input) {
 		if (fgetc_outputbuf[0] == 0xFFFD && input.eof()) {
 			return U_EOF;
 		}
-		return fgetc_outputbuf[0];
+		rv = fgetc_outputbuf[0];
 	}
 	else {
-		return input.getc();
+		rv = input.getc();
 	}
+
+	if (ISNL(rv)) {
+		++numLines;
+	}
+
+	return rv;
 }
 
 
@@ -107,7 +110,7 @@ void ApertiumApplicator::runGrammarOnTextWrapperNullFlush(istream& input, UFILE
  */
 
 void ApertiumApplicator::runGrammarOnText(istream& input, UFILE *output) {
-	if (getNullFlush()) {
+	if (nullFlush) {
 		runGrammarOnTextWrapperNullFlush(input, output);
 		return;
 	}
@@ -217,8 +220,8 @@ void ApertiumApplicator::runGrammarOnText(istream& input, UFILE *output) {
 			}
 			if (cCohort && cSWindow->cohorts.size() >= soft_limit && grammar->soft_delimiters && doesSetMatchCohortNormal(*cCohort, grammar->soft_delimiters->number)) {
 				// ie. we've read some cohorts
-				foreach (iter, cCohort->readings) {
-					addTagToReading(**iter, endtag);
+				for (auto iter : cCohort->readings) {
+					addTagToReading(*iter, endtag);
 				}
 
 				cSWindow->appendCohort(cCohort);
@@ -229,11 +232,11 @@ void ApertiumApplicator::runGrammarOnText(istream& input, UFILE *output) {
 			} // end >= soft_limit
 			if (cCohort && (cSWindow->cohorts.size() >= hard_limit || (grammar->delimiters && doesSetMatchCohortNormal(*cCohort, grammar->delimiters->number)))) {
 				if (!is_conv && cSWindow->cohorts.size() >= hard_limit) {
-					u_fprintf(ux_stderr, "Warning: Hard limit of %u cohorts reached at line %u - forcing break.\n", hard_limit, numLines);
+					u_fprintf(ux_stderr, "Warning: Hard limit of %u cohorts reached at cohort %u on line %u - forcing break.\n", hard_limit, numCohorts, numLines);
 					u_fflush(ux_stderr);
 				}
-				foreach (iter, cCohort->readings) {
-					addTagToReading(**iter, endtag);
+				for (auto iter : cCohort->readings) {
+					addTagToReading(*iter, endtag);
 				}
 
 				cSWindow->appendCohort(cCohort);
@@ -405,11 +408,10 @@ void ApertiumApplicator::runGrammarOnText(istream& input, UFILE *output) {
 			} // end while not $
 
 			if (!cReading->baseform) {
-				u_fprintf(ux_stderr, "Warning: Line %u had no valid baseform.\n", numLines);
+				u_fprintf(ux_stderr, "Warning: Cohort %u on line %u had no valid baseform.\n", numCohorts, numLines);
 				u_fflush(ux_stderr);
 			}
 		} // end reading
-		numLines++;
 	} // end input loop
 
 	if (!firstblank.empty()) {
@@ -423,8 +425,8 @@ void ApertiumApplicator::runGrammarOnText(istream& input, UFILE *output) {
 		if (cCohort->readings.empty()) {
 			initEmptyCohort(*cCohort);
 		}
-		foreach (iter, cCohort->readings) {
-			addTagToReading(**iter, endtag);
+		for (auto iter : cCohort->readings) {
+			addTagToReading(*iter, endtag);
 		}
 		cReading = 0;
 		cCohort = 0;
@@ -692,7 +694,7 @@ void ApertiumApplicator::printReading(Reading *reading, UFILE *output) {
 
 	if (reading->baseform) {
 		// Lop off the initial and final '"' characters
-		UnicodeString bf(single_tags[reading->baseform]->tag.c_str() + 1, single_tags[reading->baseform]->tag.length() - 2);
+		UnicodeString bf(single_tags[reading->baseform]->tag.c_str() + 1, single_tags[reading->baseform]->tag.size() - 2);
 
 		if (wordform_case && !reading->next) {
 			// Use surface/wordform case, eg. if lt-proc
@@ -700,7 +702,7 @@ void ApertiumApplicator::printReading(Reading *reading, UFILE *output) {
 			// dictionary case on lemma/basefrom)
 			// Lop off the initial and final '"<>"' characters
 			// ToDo: A copy does not need to be made here - use pointer offsets
-			UnicodeString wf(reading->parent->wordform->tag.c_str() + 2, reading->parent->wordform->tag.length() - 4);
+			UnicodeString wf(reading->parent->wordform->tag.c_str() + 2, reading->parent->wordform->tag.size() - 4);
 
 			int first = 0; // first occurrence of a lowercase character in baseform
 			for (; first < bf.length(); ++first) {
@@ -787,9 +789,9 @@ void ApertiumApplicator::printReading(Reading *reading, UFILE *output) {
 	}
 
 	if (trace) {
-		foreach (iter_hb, reading->hit_by) {
+		for (auto iter_hb : reading->hit_by) {
 			u_fputc('<', output);
-			printTrace(output, *iter_hb);
+			printTrace(output, iter_hb);
 			u_fputc('>', output);
 		}
 	}
@@ -818,7 +820,7 @@ void ApertiumApplicator::printSingleWindow(SingleWindow *window, UFILE *output)
 		if (print_word_forms == true) {
 			// Lop off the initial and final '"' characters
 			// ToDo: A copy does not need to be made here - use pointer offsets
-			UnicodeString wf(cohort->wordform->tag.c_str() + 2, cohort->wordform->tag.length() - 4);
+			UnicodeString wf(cohort->wordform->tag.c_str() + 2, cohort->wordform->tag.size() - 4);
 			UString wf_escaped;
 			for (int i = 0; i < wf.length(); ++i) {
 				if (wf[i] == '^' || wf[i] == '\\' || wf[i] == '/' || wf[i] == '$' || wf[i] == '[' || wf[i] == ']' || wf[i] == '{' || wf[i] == '}' || wf[i] == '<' || wf[i] == '>') {
@@ -830,11 +832,11 @@ void ApertiumApplicator::printSingleWindow(SingleWindow *window, UFILE *output)
 
 			// Print the static reading tags
 			if (cohort->wread) {
-				foreach (tter, cohort->wread->tags_list) {
-					if (*tter == cohort->wordform->hash) {
+				for (auto tter : cohort->wread->tags_list) {
+					if (tter == cohort->wordform->hash) {
 						continue;
 					}
-					const Tag *tag = single_tags[*tter];
+					const Tag *tag = single_tags[tter];
 					u_fprintf(output, "<%S>", tag->tag.c_str());
 				}
 			}
@@ -843,7 +845,7 @@ void ApertiumApplicator::printSingleWindow(SingleWindow *window, UFILE *output)
 		bool need_slash = print_word_forms;
 
 		//Tag::printTagRaw(output, single_tags[cohort->wordform]);
-		boost_foreach (Reading *reading, cohort->readings) {
+		for (auto reading : cohort->readings) {
 			if (need_slash) {
 				u_fprintf(output, "/");
 			}
@@ -859,7 +861,7 @@ void ApertiumApplicator::printSingleWindow(SingleWindow *window, UFILE *output)
 
 		if (trace) {
 			const UChar not_sign = L'\u00AC';
-			boost_foreach (Reading *reading, cohort->delayed) {
+			for (auto reading : cohort->delayed) {
 				if (need_slash) {
 					u_fprintf(output, "/%C", not_sign);
 				}
@@ -869,7 +871,7 @@ void ApertiumApplicator::printSingleWindow(SingleWindow *window, UFILE *output)
 				}
 				printReading(reading, output);
 			}
-			boost_foreach (Reading *reading, cohort->deleted) {
+			for (auto reading : cohort->deleted) {
 				if (need_slash) {
 					u_fprintf(output, "/%C", not_sign);
 				}
@@ -899,20 +901,20 @@ void ApertiumApplicator::mergeMappings(Cohort& cohort) {
 	// foo<N><Sg><Acc><@←SUBJ>/foo<N><Sg><Acc><@←OBJ>
 	// => foo<N><Sg><Acc><@←SUBJ>/foo<N><Sg><Acc><@←OBJ>
 	std::map<uint32_t, ReadingList> mlist;
-	foreach (iter, cohort.readings) {
-		Reading *r = *iter;
+	for (auto iter : cohort.readings) {
+		Reading *r = iter;
 		uint32_t hp = r->hash; // instead of hash_plain, which doesn't include mapping tags
 		if (trace) {
-			foreach (iter_hb, r->hit_by) {
-				hp = hash_value(*iter_hb, hp);
+			for (auto iter_hb : r->hit_by) {
+				hp = hash_value(iter_hb, hp);
 			}
 		}
 		Reading *sub = r->next;
 		while (sub) {
 			hp = hash_value(sub->hash, hp);
 			if (trace) {
-				foreach (iter_hb, sub->hit_by) {
-					hp = hash_value(*iter_hb, hp);
+				for (auto iter_hb : sub->hit_by) {
+					hp = hash_value(iter_hb, hp);
 				}
 			}
 			sub = sub->next;
@@ -934,8 +936,8 @@ void ApertiumApplicator::mergeMappings(Cohort& cohort) {
 		order.push_back(clist.front());
 
 		clist.erase(clist.begin());
-		foreach (cit, clist) {
-			free_reading(*cit);
+		for (auto cit : clist) {
+			free_reading(cit);
 		}
 	}
 
diff --git a/src/ApertiumApplicator.hpp b/src/ApertiumApplicator.hpp
index cb0b6d1..d670968 100644
--- a/src/ApertiumApplicator.hpp
+++ b/src/ApertiumApplicator.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -32,7 +32,6 @@ public:
 
 	void runGrammarOnText(istream& input, UFILE *output);
 
-	bool getNullFlush();
 	bool wordform_case;
 	bool print_word_forms;
 	bool print_only_first;
diff --git a/src/BinaryGrammar.cpp b/src/BinaryGrammar.cpp
index 5a9e03c..3a88797 100644
--- a/src/BinaryGrammar.cpp
+++ b/src/BinaryGrammar.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
diff --git a/src/BinaryGrammar.hpp b/src/BinaryGrammar.hpp
index 19f6949..4bce73b 100644
--- a/src/BinaryGrammar.hpp
+++ b/src/BinaryGrammar.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -44,9 +44,9 @@ private:
 	void writeContextualTest(ContextualTest *t, FILE *output);
 	ContextualTest *readContextualTest(FILE *input);
 
-	typedef stdext::hash_map<ContextualTest*, uint32_t> deferred_t;
+	typedef std::unordered_map<ContextualTest*, uint32_t> deferred_t;
 	deferred_t deferred_tmpls;
-	typedef stdext::hash_map<ContextualTest*, std::vector<uint32_t> > deferred_ors_t;
+	typedef std::unordered_map<ContextualTest*, std::vector<uint32_t> > deferred_ors_t;
 	deferred_ors_t deferred_ors;
 
 	uint32FlatHashSet seen_uint32;
diff --git a/src/BinaryGrammar_read.cpp b/src/BinaryGrammar_read.cpp
index fdde347..571c772 100644
--- a/src/BinaryGrammar_read.cpp
+++ b/src/BinaryGrammar_read.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -42,6 +42,7 @@ int BinaryGrammar::readBinaryGrammar(FILE *input) {
 	uint8_t u8tmp = 0;
 	UErrorCode err = U_ZERO_ERROR;
 	UConverter *conv = ucnv_open("UTF-8", &err);
+	std::stringstream buffer;
 
 	if (fread_throw(&cbuffers[0][0], 1, 4, input) != 4) {
 		std::cerr << "Error: Error reading first 4 bytes from grammar!" << std::endl;
@@ -140,6 +141,20 @@ int BinaryGrammar::readBinaryGrammar(FILE *input) {
 		if (fields & (1 << 7)) {
 			fread_throw(&i32tmp, sizeof(int32_t), 1, input);
 			t->comparison_val = (int32_t)ntohl(i32tmp);
+			if (t->comparison_val <= std::numeric_limits<int32_t>::min()) {
+				t->comparison_val = NUMERIC_MIN;
+			}
+			if (t->comparison_val >= std::numeric_limits<int32_t>::max()) {
+				t->comparison_val = NUMERIC_MAX;
+			}
+		}
+		if (fields & (1 << 12)) {
+			char buf[sizeof(uint64_t)+ sizeof(int32_t)] = {};
+			fread_throw(&buf[0], sizeof(buf), 1, input);
+			buffer.str("");
+			buffer.clear();
+			buffer.write(buf, sizeof(buf));
+			t->comparison_val = readSwapped<double>(buffer);
 		}
 
 		if (fields & (1 << 8)) {
@@ -205,6 +220,7 @@ int BinaryGrammar::readBinaryGrammar(FILE *input) {
 				}
 			}
 		}
+		// 1 << 12 used earlier
 
 		grammar->single_tags[t->hash] = t;
 		grammar->single_tags_list[t->number] = t;
@@ -336,10 +352,10 @@ int BinaryGrammar::readBinaryGrammar(FILE *input) {
 	}
 
 	// Actually assign sets to the varstring tags now that sets are loaded
-	foreach (iter, tag_varsets) {
-		Tag *t = grammar->single_tags_list[iter->first];
-		foreach (uit, iter->second) {
-			Set *s = grammar->sets_list[*uit];
+	for (auto iter : tag_varsets) {
+		Tag *t = grammar->single_tags_list[iter.first];
+		for (auto uit : iter.second) {
+			Set *s = grammar->sets_list[uit];
 			t->vs_sets->push_back(s);
 		}
 	}
@@ -484,15 +500,15 @@ int BinaryGrammar::readBinaryGrammar(FILE *input) {
 	}
 
 	// Bind the templates to where they are used
-	foreach (it, deferred_tmpls) {
-		it->first->tmpl = grammar->contexts.find(it->second)->second;
+	for (auto it : deferred_tmpls) {
+		it.first->tmpl = grammar->contexts.find(it.second)->second;
 	}
 
 	// Bind the OR'ed contexts to where they are used
-	foreach (it, deferred_ors) {
-		it->first->ors.reserve(it->second.size());
-		foreach (orit, it->second) {
-			it->first->ors.push_back(grammar->contexts.find(*orit)->second);
+	for (auto it : deferred_ors) {
+		it.first->ors.reserve(it.second.size());
+		for (auto orit : it.second) {
+			it.first->ors.push_back(grammar->contexts.find(orit)->second);
 		}
 	}
 
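Note on the hunks above: Tag::comparison_val moves from a 32-bit integer to a double. The legacy 1 << 7 field is still read (and clamped onto the NUMERIC_MIN/NUMERIC_MAX sentinels), while the new 1 << 12 field carries a byte-swapped double via readSwapped<double>(). As a hedged sketch only — readSwapped's real implementation is not part of this diff — reading a big-endian IEEE-754 double could look roughly like this:

    #include <cstdint>
    #include <cstring>
    #include <istream>

    // Illustrative stand-in for a byte-swapped double read; assumes IEEE-754 doubles.
    static double read_be_double(std::istream& in) {
        unsigned char bytes[sizeof(double)];
        in.read(reinterpret_cast<char*>(bytes), sizeof(bytes));
        uint64_t bits = 0;
        for (unsigned char b : bytes) {   // most significant byte first
            bits = (bits << 8) | b;
        }
        double value;
        std::memcpy(&value, &bits, sizeof(value)); // reinterpret the bit pattern
        return value;
    }
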
diff --git a/src/BinaryGrammar_read_10043.cpp b/src/BinaryGrammar_read_10043.cpp
index 652437d..bec91bf 100644
--- a/src/BinaryGrammar_read_10043.cpp
+++ b/src/BinaryGrammar_read_10043.cpp
@@ -130,6 +130,12 @@ int BinaryGrammar::readBinaryGrammar_10043(FILE *input) {
 		if (fields & (1 << 7)) {
 			fread_throw(&i32tmp, sizeof(int32_t), 1, input);
 			t->comparison_val = (int32_t)ntohl(i32tmp);
+			if (t->comparison_val <= std::numeric_limits<int32_t>::min()) {
+				t->comparison_val = NUMERIC_MIN;
+			}
+			if (t->comparison_val >= std::numeric_limits<int32_t>::max()) {
+				t->comparison_val = NUMERIC_MAX;
+			}
 		}
 
 		if (fields & (1 << 8)) {
@@ -318,10 +324,10 @@ int BinaryGrammar::readBinaryGrammar_10043(FILE *input) {
 	}
 
 	// Actually assign sets to the varstring tags now that sets are loaded
-	foreach (iter, tag_varsets) {
-		Tag *t = grammar->single_tags_list[iter->first];
-		foreach (uit, iter->second) {
-			Set *s = grammar->sets_list[*uit];
+	for (auto iter : tag_varsets) {
+		Tag *t = grammar->single_tags_list[iter.first];
+		for (auto uit : iter.second) {
+			Set *s = grammar->sets_list[uit];
 			t->vs_sets->push_back(s);
 		}
 	}
@@ -468,9 +474,9 @@ int BinaryGrammar::readBinaryGrammar_10043(FILE *input) {
 	}
 
 	// Bind the named templates to where they are used
-	foreach (it, deferred_tmpls) {
-		BOOST_AUTO(tmt, templates.find(it->second));
-		it->first->tmpl = tmt->second;
+	for (auto it : deferred_tmpls) {
+		auto tmt = templates.find(it.second);
+		it.first->tmpl = tmt->second;
 	}
 
 	ucnv_close(conv);
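
Context for the 10043-format reader above: it gains the same clamping as the current reader, so grammars written before the double-valued comparisons map the old int32 extremes onto the new sentinels. A minimal sketch of that mapping, with assumed sentinel values (CG3's actual NUMERIC_MIN/NUMERIC_MAX definitions are not shown in this diff):

    #include <cstdint>
    #include <limits>

    constexpr double kNumericMin = -std::numeric_limits<double>::max(); // assumption
    constexpr double kNumericMax =  std::numeric_limits<double>::max(); // assumption

    // Widen a legacy 32-bit comparison value, treating the int32 extremes as "unbounded".
    static double widen_comparison(int32_t v) {
        if (v <= std::numeric_limits<int32_t>::min()) return kNumericMin;
        if (v >= std::numeric_limits<int32_t>::max()) return kNumericMax;
        return static_cast<double>(v);
    }
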
diff --git a/src/BinaryGrammar_write.cpp b/src/BinaryGrammar_write.cpp
index 0272dab..ee69d9e 100644
--- a/src/BinaryGrammar_write.cpp
+++ b/src/BinaryGrammar_write.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -147,15 +147,16 @@ int BinaryGrammar::writeBinaryGrammar(FILE *output) {
 			fields |= (1 << 6);
 			writeSwapped<uint32_t>(buffer, t->comparison_op);
 		}
+		// ToDo: Field 1 << 7 cannot be reused until hard format break
 		if (t->comparison_val) {
-			fields |= (1 << 7);
+			fields |= (1 << 12);
 			writeSwapped(buffer, t->comparison_val);
 		}
 
 		if (!t->tag.empty()) {
 			fields |= (1 << 8);
 			ucnv_reset(conv);
-			i32tmp = ucnv_fromUChars(conv, &cbuffers[0][0], CG3_BUFFER_SIZE - 1, t->tag.c_str(), t->tag.length(), &err);
+			i32tmp = ucnv_fromUChars(conv, &cbuffers[0][0], CG3_BUFFER_SIZE - 1, t->tag.c_str(), t->tag.size(), &err);
 			writeSwapped(buffer, i32tmp);
 			buffer.write(&cbuffers[0][0], i32tmp);
 		}
@@ -173,20 +174,21 @@ int BinaryGrammar::writeBinaryGrammar(FILE *output) {
 		if (t->vs_sets) {
 			fields |= (1 << 10);
 			writeSwapped<uint32_t>(buffer, t->vs_sets->size());
-			foreach (iter, *t->vs_sets) {
-				writeSwapped(buffer, (*iter)->number);
+			for (auto iter : *t->vs_sets) {
+				writeSwapped(buffer, iter->number);
 			}
 		}
 		if (t->vs_names) {
 			fields |= (1 << 11);
 			writeSwapped<uint32_t>(buffer, t->vs_names->size());
-			foreach (iter, *t->vs_names) {
+			for (auto iter : *t->vs_names) {
 				ucnv_reset(conv);
-				i32tmp = ucnv_fromUChars(conv, &cbuffers[0][0], CG3_BUFFER_SIZE - 1, (*iter).c_str(), (*iter).length(), &err);
+				i32tmp = ucnv_fromUChars(conv, &cbuffers[0][0], CG3_BUFFER_SIZE - 1, iter.c_str(), iter.size(), &err);
 				writeSwapped(buffer, i32tmp);
 				buffer.write(&cbuffers[0][0], i32tmp);
 			}
 		}
+		// 1 << 12 used above
 
 		u32tmp = (uint32_t)htonl(fields);
 		fwrite_throw(&u32tmp, sizeof(uint32_t), 1, output);
@@ -197,7 +199,7 @@ int BinaryGrammar::writeBinaryGrammar(FILE *output) {
 		u32tmp = (uint32_t)htonl((uint32_t)grammar->reopen_mappings.size());
 		fwrite_throw(&u32tmp, sizeof(uint32_t), 1, output);
 	}
-	for (BOOST_AUTO(iter, grammar->reopen_mappings.begin()); iter != grammar->reopen_mappings.end(); ++iter) {
+	for (auto iter = grammar->reopen_mappings.begin(); iter != grammar->reopen_mappings.end(); ++iter) {
 		u32tmp = (uint32_t)htonl((uint32_t)*iter);
 		fwrite_throw(&u32tmp, sizeof(uint32_t), 1, output);
 	}
@@ -206,7 +208,7 @@ int BinaryGrammar::writeBinaryGrammar(FILE *output) {
 		u32tmp = (uint32_t)htonl((uint32_t)grammar->preferred_targets.size());
 		fwrite_throw(&u32tmp, sizeof(uint32_t), 1, output);
 	}
-	for (BOOST_AUTO(iter, grammar->preferred_targets.begin()); iter != grammar->preferred_targets.end(); ++iter) {
+	for (auto iter = grammar->preferred_targets.begin(); iter != grammar->preferred_targets.end(); ++iter) {
 		u32tmp = (uint32_t)htonl((uint32_t)*iter);
 		fwrite_throw(&u32tmp, sizeof(uint32_t), 1, output);
 	}
@@ -215,7 +217,7 @@ int BinaryGrammar::writeBinaryGrammar(FILE *output) {
 		u32tmp = (uint32_t)htonl((uint32_t)grammar->parentheses.size());
 		fwrite_throw(&u32tmp, sizeof(uint32_t), 1, output);
 	}
-	boost_foreach (const Grammar::parentheses_t::value_type& iter_par, grammar->parentheses) {
+	for (auto iter_par : grammar->parentheses) {
 		u32tmp = (uint32_t)htonl(iter_par.first);
 		fwrite_throw(&u32tmp, sizeof(uint32_t), 1, output);
 		u32tmp = (uint32_t)htonl(iter_par.second);
@@ -226,10 +228,10 @@ int BinaryGrammar::writeBinaryGrammar(FILE *output) {
 		u32tmp = (uint32_t)htonl((uint32_t)grammar->anchors.size());
 		fwrite_throw(&u32tmp, sizeof(uint32_t), 1, output);
 	}
-	foreach (iter_anchor, grammar->anchors) {
-		u32tmp = (uint32_t)htonl((uint32_t)iter_anchor->first);
+	for (auto iter_anchor : grammar->anchors) {
+		u32tmp = (uint32_t)htonl((uint32_t)iter_anchor.first);
 		fwrite_throw(&u32tmp, sizeof(uint32_t), 1, output);
-		u32tmp = (uint32_t)htonl((uint32_t)iter_anchor->second);
+		u32tmp = (uint32_t)htonl((uint32_t)iter_anchor.second);
 		fwrite_throw(&u32tmp, sizeof(uint32_t), 1, output);
 	}
 
@@ -264,15 +266,15 @@ int BinaryGrammar::writeBinaryGrammar(FILE *output) {
 		if (!s->set_ops.empty()) {
 			fields |= (1 << 4);
 			writeSwapped<uint32_t>(buffer, s->set_ops.size());
-			foreach (iter, s->set_ops) {
-				writeSwapped(buffer, *iter);
+			for (auto iter : s->set_ops) {
+				writeSwapped(buffer, iter);
 			}
 		}
 		if (!s->sets.empty()) {
 			fields |= (1 << 5);
 			writeSwapped<uint32_t>(buffer, s->sets.size());
-			foreach (iter, s->sets) {
-				writeSwapped(buffer, *iter);
+			for (auto iter : s->sets) {
+				writeSwapped(buffer, iter);
 			}
 		}
 		if (s->type & ST_STATIC) {
@@ -303,7 +305,7 @@ int BinaryGrammar::writeBinaryGrammar(FILE *output) {
 		u32tmp = (uint32_t)htonl((uint32_t)grammar->contexts.size());
 		fwrite_throw(&u32tmp, sizeof(uint32_t), 1, output);
 	}
-	for (BOOST_AUTO(cntx, grammar->contexts.begin()); cntx != grammar->contexts.end(); ++cntx) {
+	for (auto cntx = grammar->contexts.begin(); cntx != grammar->contexts.end(); ++cntx) {
 		writeContextualTest(cntx->second, output);
 	}
 
@@ -311,9 +313,7 @@ int BinaryGrammar::writeBinaryGrammar(FILE *output) {
 		u32tmp = (uint32_t)htonl((uint32_t)grammar->rule_by_number.size());
 		fwrite_throw(&u32tmp, sizeof(uint32_t), 1, output);
 	}
-	foreach (rule_iter, grammar->rule_by_number) {
-		Rule *r = *rule_iter;
-
+	for (auto r : grammar->rule_by_number) {
 		uint32_t fields = 0;
 		buffer.str("");
 		buffer.clear();
@@ -334,10 +334,10 @@ int BinaryGrammar::writeBinaryGrammar(FILE *output) {
 			fields |= (1 << 3);
 			writeSwapped(buffer, r->flags);
 		}
-		if (r->name) {
+		if (!r->name.empty()) {
 			fields |= (1 << 4);
 			ucnv_reset(conv);
-			i32tmp = ucnv_fromUChars(conv, &cbuffers[0][0], CG3_BUFFER_SIZE - 1, r->name, u_strlen(r->name), &err);
+			i32tmp = ucnv_fromUChars(conv, &cbuffers[0][0], CG3_BUFFER_SIZE - 1, r->name.c_str(), r->name.size(), &err);
 			writeSwapped(buffer, i32tmp);
 			buffer.write(&cbuffers[0][0], i32tmp);
 		}
@@ -399,15 +399,15 @@ int BinaryGrammar::writeBinaryGrammar(FILE *output) {
 		r->reverseContextualTests();
 		u32tmp = (uint32_t)htonl(r->dep_tests.size());
 		fwrite_throw(&u32tmp, sizeof(uint32_t), 1, output);
-		foreach (it, r->dep_tests) {
-			u32tmp = (uint32_t)htonl((*it)->hash);
+		for (auto it : r->dep_tests) {
+			u32tmp = (uint32_t)htonl(it->hash);
 			fwrite_throw(&u32tmp, sizeof(uint32_t), 1, output);
 		}
 
 		u32tmp = (uint32_t)htonl(r->tests.size());
 		fwrite_throw(&u32tmp, sizeof(uint32_t), 1, output);
-		foreach (it, r->tests) {
-			u32tmp = (uint32_t)htonl((*it)->hash);
+		for (auto it : r->tests) {
+			u32tmp = (uint32_t)htonl(it->hash);
 			fwrite_throw(&u32tmp, sizeof(uint32_t), 1, output);
 		}
 	}
@@ -425,7 +425,7 @@ void BinaryGrammar::writeContextualTest(ContextualTest *t, FILE *output) {
 	if (t->tmpl) {
 		writeContextualTest(t->tmpl, output);
 	}
-	boost_foreach (ContextualTest *iter, t->ors) {
+	for (auto iter : t->ors) {
 		writeContextualTest(iter, output);
 	}
 	if (t->linked) {
@@ -498,7 +498,7 @@ void BinaryGrammar::writeContextualTest(ContextualTest *t, FILE *output) {
 		u32tmp = (uint32_t)htonl((uint32_t)t->ors.size());
 		fwrite_throw(&u32tmp, sizeof(uint32_t), 1, output);
 
-		boost_foreach (ContextualTest *iter, t->ors) {
+		for (auto iter : t->ors) {
 			u32tmp = (uint32_t)htonl(iter->hash);
 			fwrite_throw(&u32tmp, sizeof(uint32_t), 1, output);
 		}
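
The writer side of the same change: when comparison_val is set, bit 12 of the per-tag field mask is written instead of bit 7, and the ToDo above reserves bit 7 until a hard format break so older readers are not silently fed doubles. A compressed sketch of the flag scheme (enum names are illustrative, not CG3's):

    #include <cstdint>

    enum : uint32_t {
        F_COMPARISON_OP  = 1u << 6,
        F_COMPARISON_I32 = 1u << 7,   // legacy int32 value; kept reserved
        F_COMPARISON_DBL = 1u << 12,  // new double value
    };

    // Each optional member sets a bit before its payload is appended; the reader
    // switches on the same bits.
    static uint32_t encode_fields(bool has_op, bool has_val) {
        uint32_t fields = 0;
        if (has_op)  fields |= F_COMPARISON_OP;
        if (has_val) fields |= F_COMPARISON_DBL; // never F_COMPARISON_I32 any more
        return fields;
    }
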
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 72819ab..15cd3ce 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -129,14 +129,16 @@ endif()
 
 add_library(libcg3-objs OBJECT ${LIBCG3_SOURCES})
 
-add_library(libcg3 STATIC libcg3.cpp $<TARGET_OBJECTS:libcg3-objs>)
-set_target_properties(libcg3 PROPERTIES OUTPUT_NAME "cg3")
-set_target_properties(libcg3 PROPERTIES PREFIX "lib")
-if(MSVC AND NOT CMAKE_BUILD_TYPE STREQUAL Debug)
-	set_target_properties(libcg3 PROPERTIES STATIC_LIBRARY_FLAGS "/LTCG")
+if(MSVC OR INSTALL_STATIC)
+	add_library(libcg3 STATIC libcg3.cpp $<TARGET_OBJECTS:libcg3-objs>)
+	set_target_properties(libcg3 PROPERTIES OUTPUT_NAME "cg3")
+	set_target_properties(libcg3 PROPERTIES PREFIX "lib")
+	if(MSVC AND NOT CMAKE_BUILD_TYPE STREQUAL Debug)
+		set_target_properties(libcg3 PROPERTIES STATIC_LIBRARY_FLAGS "/LTCG")
+	endif()
+	cg3_link(libcg3)
+	set(LINKLIB libcg3)
 endif()
-cg3_link(libcg3)
-set(LINKLIB libcg3)
 
 if(NOT MSVC)
 	add_library(libcg3-shared SHARED libcg3.cpp $<TARGET_OBJECTS:libcg3-objs>)
diff --git a/src/Cohort.cpp b/src/Cohort.cpp
index 6c4f509..bb88f00 100644
--- a/src/Cohort.cpp
+++ b/src/Cohort.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -75,19 +75,19 @@ Cohort::~Cohort() {
 	std::cerr << "OBJECT: " << __PRETTY_FUNCTION__ << ": " << readings.size() << ", " << deleted.size() << ", " << delayed.size() << std::endl;
 	#endif
 
-	foreach (iter1, readings) {
-		delete (*iter1);
+	for (auto iter1 : readings) {
+		delete (iter1);
 	}
-	foreach (iter2, deleted) {
-		delete (*iter2);
+	for (auto iter2 : deleted) {
+		delete (iter2);
 	}
-	foreach (iter3, delayed) {
-		delete (*iter3);
+	for (auto iter3 : delayed) {
+		delete (iter3);
 	}
 	delete wread;
 
-	foreach (iter, removed) {
-		delete (*iter);
+	for (auto iter : removed) {
+		delete (iter);
 	}
 	if (parent) {
 		parent->parent->cohort_map.erase(global_number);
@@ -121,14 +121,14 @@ void Cohort::clear() {
 	relations.clear();
 	relations_input.clear();
 
-	foreach (iter1, readings) {
-		free_reading(*iter1);
+	for (auto iter1 : readings) {
+		free_reading(iter1);
 	}
-	foreach (iter2, deleted) {
-		free_reading(*iter2);
+	for (auto iter2 : deleted) {
+		free_reading(iter2);
 	}
-	foreach (iter3, delayed) {
-		free_reading(*iter3);
+	for (auto iter3 : delayed) {
+		free_reading(iter3);
 	}
 	free_reading(wread);
 
@@ -137,8 +137,8 @@ void Cohort::clear() {
 	delayed.clear();
 	wread = 0;
 
-	foreach (iter, removed) {
-		free_cohort(*iter);
+	for (auto iter : removed) {
+		free_cohort(iter);
 	}
 	removed.clear();
 	assert(enclosed.empty() && "Enclosed was not empty!");
@@ -186,8 +186,8 @@ void Cohort::updateMinMax() {
 	}
 	num_min.clear();
 	num_max.clear();
-	foreach (rter, readings) {
-		boost_foreach (Reading::tags_numerical_t::value_type& nter, (*rter)->tags_numerical) {
+	for (auto rter : readings) {
+		for (auto nter : rter->tags_numerical) {
 			const Tag *tag = nter.second;
 			if (num_min.find(tag->comparison_hash) == num_min.end() || tag->comparison_val < num_min[tag->comparison_hash]) {
 				num_min[tag->comparison_hash] = tag->comparison_val;
@@ -200,31 +200,31 @@ void Cohort::updateMinMax() {
 	type |= CT_NUM_CURRENT;
 }
 
-int32_t Cohort::getMin(uint32_t key) {
+double Cohort::getMin(uint32_t key) {
 	updateMinMax();
 	if (num_min.find(key) != num_min.end()) {
 		return num_min[key];
 	}
-	return std::numeric_limits<int32_t>::min();
+	return NUMERIC_MIN;
 }
 
-int32_t Cohort::getMax(uint32_t key) {
+double Cohort::getMax(uint32_t key) {
 	updateMinMax();
 	if (num_max.find(key) != num_max.end()) {
 		return num_max[key];
 	}
-	return std::numeric_limits<int32_t>::max();
+	return NUMERIC_MAX;
 }
 
 bool Cohort::addRelation(uint32_t rel, uint32_t cohort) {
-	BOOST_AUTO(&cohorts, relations[rel]);
+	auto& cohorts = relations[rel];
 	const size_t sz = cohorts.size();
 	cohorts.insert(cohort);
 	return (sz != cohorts.size());
 }
 
 bool Cohort::setRelation(uint32_t rel, uint32_t cohort) {
-	BOOST_AUTO(&cohorts, relations[rel]);
+	auto& cohorts = relations[rel];
 	if (cohorts.size() == 1 && cohorts.find(cohort) != cohorts.end()) {
 		return false;
 	}
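
Cohort's numeric cache follows suit: num_min/num_max and getMin()/getMax() now hold doubles, with NUMERIC_MIN/NUMERIC_MAX as the "no such key" sentinels instead of the int32 limits. The caching pattern, sketched with a plain std::map standing in for bc::flat_map (types and sentinel values here are simplified assumptions):

    #include <cstdint>
    #include <limits>
    #include <map>

    constexpr double kNumMin = -std::numeric_limits<double>::max(); // assumed sentinel
    constexpr double kNumMax =  std::numeric_limits<double>::max(); // assumed sentinel

    struct NumCache {
        std::map<uint32_t, double> num_min, num_max;

        // Record one numeric tag value, keeping the per-key minimum and maximum.
        void update(uint32_t key, double val) {
            auto lo = num_min.find(key);
            if (lo == num_min.end() || val < lo->second) num_min[key] = val;
            auto hi = num_max.find(key);
            if (hi == num_max.end() || val > hi->second) num_max[key] = val;
        }
        double getMin(uint32_t key) const {
            auto it = num_min.find(key);
            return it != num_min.end() ? it->second : kNumMin;
        }
        double getMax(uint32_t key) const {
            auto it = num_max.find(key);
            return it != num_max.end() ? it->second : kNumMax;
        }
    };
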
diff --git a/src/Cohort.hpp b/src/Cohort.hpp
index 8a3e3ff..f3376e5 100644
--- a/src/Cohort.hpp
+++ b/src/Cohort.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -63,7 +63,7 @@ public:
 	ReadingList readings;
 	ReadingList deleted;
 	ReadingList delayed;
-	typedef bc::flat_map<uint32_t, int32_t> num_t;
+	typedef bc::flat_map<uint32_t, double> num_t;
 	num_t num_max, num_min;
 	uint32SortedVector dep_children;
 	boost::dynamic_bitset<> possible_sets;
@@ -72,8 +72,8 @@ public:
 	RelationCtn relations;
 	RelationCtn relations_input;
 
-	int32_t getMin(uint32_t key);
-	int32_t getMax(uint32_t key);
+	double getMin(uint32_t key);
+	double getMax(uint32_t key);
 
 	void detach();
 
@@ -96,7 +96,7 @@ private:
 struct compare_Cohort;
 
 typedef sorted_vector<Cohort*, compare_Cohort> CohortSet;
-typedef stdext::hash_map<uint32_t, CohortSet> uint32ToCohortsMap;
+typedef std::unordered_map<uint32_t, CohortSet> uint32ToCohortsMap;
 
 Cohort *alloc_cohort(SingleWindow *p);
 void free_cohort(Cohort *c);
diff --git a/src/CohortIterator.cpp b/src/CohortIterator.cpp
index 8d81900..b78b363 100644
--- a/src/CohortIterator.cpp
+++ b/src/CohortIterator.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -164,11 +164,11 @@ void DepDescendentIter::reset(Cohort *cohort, const ContextualTest *test, bool s
 	m_cohort = 0;
 
 	if (cohort && test) {
-		foreach (dter, cohort->dep_children) {
-			if (cohort->parent->parent->cohort_map.find(*dter) == cohort->parent->parent->cohort_map.end()) {
+		for (auto dter : cohort->dep_children) {
+			if (cohort->parent->parent->cohort_map.find(dter) == cohort->parent->parent->cohort_map.end()) {
 				continue;
 			}
-			Cohort *current = cohort->parent->parent->cohort_map.find(*dter)->second;
+			Cohort *current = cohort->parent->parent->cohort_map.find(dter)->second;
 			bool good = true;
 			if (current->parent != cohort->parent) {
 				if ((!(test->pos & (POS_SPAN_BOTH | POS_SPAN_LEFT))) && current->parent->number < cohort->parent->number) {
@@ -191,18 +191,17 @@ void DepDescendentIter::reset(Cohort *cohort, const ContextualTest *test, bool s
 			added = false;
 			CohortSet to_add;
 
-			foreach (iter, m_descendents) {
-				Cohort *cohort_inner = *iter;
+			for (auto cohort_inner : m_descendents) {
 				if (m_seen.find(cohort_inner) != m_seen.end()) {
 					continue;
 				}
 				m_seen.insert(cohort_inner);
 
-				foreach (dter, cohort_inner->dep_children) {
-					if (cohort_inner->parent->parent->cohort_map.find(*dter) == cohort_inner->parent->parent->cohort_map.end()) {
+				for (auto dter : cohort_inner->dep_children) {
+					if (cohort_inner->parent->parent->cohort_map.find(dter) == cohort_inner->parent->parent->cohort_map.end()) {
 						continue;
 					}
-					Cohort *current = cohort_inner->parent->parent->cohort_map.find(*dter)->second;
+					Cohort *current = cohort_inner->parent->parent->cohort_map.find(dter)->second;
 					bool good = true;
 					if (current->parent != cohort->parent) {
 						if ((!(test->pos & (POS_SPAN_BOTH | POS_SPAN_LEFT))) && current->parent->number < cohort->parent->number) {
@@ -219,8 +218,8 @@ void DepDescendentIter::reset(Cohort *cohort, const ContextualTest *test, bool s
 				}
 			}
 
-			foreach (iter, to_add) {
-				m_descendents.insert(*iter);
+			for (auto iter : to_add) {
+				m_descendents.insert(iter);
 			}
 		} while (added);
 
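Most hunks in this file, and throughout the commit, are the same mechanical modernisation: the foreach/boost_foreach macros and BOOST_AUTO give way to C++11 range-for and auto, and the extra iterator dereference disappears. In outline (the use() call is hypothetical):

    #include <vector>

    struct Reading;

    static void visit(std::vector<Reading*>& readings) {
        // before (macro-based, roughly):  foreach (iter, readings) { use(**iter); }
        for (auto r : readings) {   // r is Reading*
            (void)r;                // use(*r);
        }
    }
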
diff --git a/src/CohortIterator.hpp b/src/CohortIterator.hpp
index ac366b8..fd32432 100644
--- a/src/CohortIterator.hpp
+++ b/src/CohortIterator.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
diff --git a/src/ContextualTest.cpp b/src/ContextualTest.cpp
index a9aebc6..324263d 100644
--- a/src/ContextualTest.cpp
+++ b/src/ContextualTest.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -109,7 +109,7 @@ uint32_t ContextualTest::rehash() {
 	if (tmpl) {
 		hash = hash_value(hash, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(tmpl)));
 	}
-	boost_foreach (ContextualTest *iter, ors) {
+	for (auto iter : ors) {
 		hash = hash_value(hash, iter->rehash());
 	}
 
@@ -125,7 +125,7 @@ void ContextualTest::resetStatistics() {
 	if (tmpl) {
 		tmpl->resetStatistics();
 	}
-	boost_foreach (ContextualTest *idts, ors) {
+	for (auto idts : ors) {
 		idts->resetStatistics();
 	}
 	if (linked) {
@@ -155,7 +155,7 @@ void ContextualTest::markUsed(Grammar& grammar) {
 	if (tmpl) {
 		tmpl->markUsed(grammar);
 	}
-	boost_foreach (ContextualTest *idts, ors) {
+	for (auto idts : ors) {
 		idts->markUsed(grammar);
 	}
 	if (linked) {
diff --git a/src/ContextualTest.hpp b/src/ContextualTest.hpp
index 9d93d9e..bf7bc39 100644
--- a/src/ContextualTest.hpp
+++ b/src/ContextualTest.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -26,7 +26,7 @@
 #include "stdafx.hpp"
 #include <vector>
 #include <list>
-#include <stdint.h>
+#include <cstdint>
 
 namespace CG3 {
 class Grammar;
diff --git a/src/FSTApplicator.cpp b/src/FSTApplicator.cpp
index 2b59b34..cfa504b 100644
--- a/src/FSTApplicator.cpp
+++ b/src/FSTApplicator.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -31,7 +31,7 @@ namespace CG3 {
 
 FSTApplicator::FSTApplicator(UFILE *ux_err)
   : GrammarApplicator(ux_err)
-  , wfactor(100.0)
+  , wfactor(1.0)
 {
 	wtag += 'W';
 	sub_delims += '#';
@@ -171,6 +171,7 @@ void FSTApplicator::runGrammarOnText(istream& input, UFILE *output) {
 				insert_if_exists(cReading->parent->possible_sets, grammar->sets_any);
 				addTagToReading(*cReading, cCohort->wordform);
 
+				constexpr UChar notag[] = { '_', 0 };
 				const UChar *base = space;
 				TagList mappings;
 
@@ -191,12 +192,12 @@ void FSTApplicator::runGrammarOnText(istream& input, UFILE *output) {
 					}
 					buf[i] = 0;
 					if (strcmp(buf, "inf") == 0) {
-						i = sprintf(buf, "%d", std::numeric_limits<int32_t>::max());
+						i = sprintf(buf, "%f", NUMERIC_MAX);
 					}
 					else {
 						weight = strtof(buf, 0);
 						weight *= wfactor;
-						i = sprintf(buf, "%.0f", weight);
+						i = sprintf(buf, "%f", weight);
 					}
 					wtag_buf.clear();
 					wtag_buf.reserve(wtag.size() + i + 3);
@@ -212,7 +213,7 @@ void FSTApplicator::runGrammarOnText(istream& input, UFILE *output) {
 				UChar *plus = u_strchr(space, '+');
 				if (plus) {
 					++plus;
-					const UChar cplus[] = { '+', 0 };
+					constexpr UChar cplus[] = { '+', 0 };
 					int32_t p = u_strspn(plus, cplus);
 					space = plus + p;
 					--space;
@@ -241,6 +242,11 @@ void FSTApplicator::runGrammarOnText(istream& input, UFILE *output) {
 							tag += '"';
 							base = tag.c_str();
 						}
+						if (base[0] == 0) {
+							base = notag;
+							u_fprintf(ux_stderr, "Warning: Line %u had empty tag.\n", numLines);
+							u_fflush(ux_stderr);
+						}
 						Tag *tag = addTag(base);
 						if (tag->type & T_MAPPING || tag->tag[0] == grammar->mapping_prefix) {
 							mappings.push_back(tag);
@@ -326,8 +332,8 @@ void FSTApplicator::runGrammarOnText(istream& input, UFILE *output) {
 					u_fprintf(ux_stderr, "Warning: Soft limit of %u cohorts reached at line %u but found suitable soft delimiter.\n", soft_limit, numLines);
 					u_fflush(ux_stderr);
 				}
-				foreach (iter, cCohort->readings) {
-					addTagToReading(**iter, endtag);
+				for (auto iter : cCohort->readings) {
+					addTagToReading(*iter, endtag);
 				}
 
 				cSWindow->appendCohort(cCohort);
@@ -341,8 +347,8 @@ void FSTApplicator::runGrammarOnText(istream& input, UFILE *output) {
 					u_fprintf(ux_stderr, "Warning: Hard limit of %u cohorts reached at line %u - forcing break.\n", hard_limit, numLines);
 					u_fflush(ux_stderr);
 				}
-				foreach (iter, cCohort->readings) {
-					addTagToReading(**iter, endtag);
+				for (auto iter : cCohort->readings) {
+					addTagToReading(*iter, endtag);
 				}
 
 				cSWindow->appendCohort(cCohort);
@@ -407,8 +413,8 @@ void FSTApplicator::runGrammarOnText(istream& input, UFILE *output) {
 		if (cCohort->readings.empty()) {
 			initEmptyCohort(*cCohort);
 		}
-		foreach (iter, cCohort->readings) {
-			addTagToReading(**iter, endtag);
+		for (auto iter : cCohort->readings) {
+			addTagToReading(*iter, endtag);
 		}
 		cReading = 0;
 		cCohort = 0;
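
FSTApplicator now keeps weights in floating point: wfactor defaults to 1.0 instead of 100.0, "inf" maps to the NUMERIC_MAX sentinel, the weight tag is printed with %f rather than rounded, and empty analysis tags are replaced by "_" with a warning. A hedged sketch of the weight formatting (helper name, buffer handling, and sentinel value are assumptions):

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include <limits>

    constexpr double kNumericMax = std::numeric_limits<double>::max(); // stand-in for NUMERIC_MAX

    static int format_weight(char *buf, size_t n, const char *field, double wfactor) {
        if (std::strcmp(field, "inf") == 0) {
            return std::snprintf(buf, n, "%f", kNumericMax); // unbounded weight
        }
        double weight = std::strtof(field, nullptr) * wfactor; // scale, keep fractional part
        return std::snprintf(buf, n, "%f", weight);
    }
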
diff --git a/src/FSTApplicator.hpp b/src/FSTApplicator.hpp
index e966e6e..a44319e 100644
--- a/src/FSTApplicator.hpp
+++ b/src/FSTApplicator.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
diff --git a/src/FormatConverter.cpp b/src/FormatConverter.cpp
index 8c491b7..09c27ce 100644
--- a/src/FormatConverter.cpp
+++ b/src/FormatConverter.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
diff --git a/src/FormatConverter.hpp b/src/FormatConverter.hpp
index 0a2dfe2..fa93685 100644
--- a/src/FormatConverter.hpp
+++ b/src/FormatConverter.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
diff --git a/src/Grammar.cpp b/src/Grammar.cpp
index 5be5fa6..c809d00 100644
--- a/src/Grammar.cpp
+++ b/src/Grammar.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -49,12 +49,12 @@ Grammar::Grammar()
 }
 
 Grammar::~Grammar() {
-	foreach (iter_set, sets_list) {
-		destroySet(*iter_set);
+	for (auto iter_set : sets_list) {
+		destroySet(iter_set);
 	}
 
-	foreach (rsets, sets_all) {
-		delete *rsets;
+	for (auto rsets : sets_all) {
+		delete rsets;
 	}
 
 	Taguint32HashMap::iterator iter_stag;
@@ -64,12 +64,12 @@ Grammar::~Grammar() {
 		}
 	}
 
-	foreach (iter_rules, rule_by_number) {
-		delete *iter_rules;
+	for (auto iter_rules : rule_by_number) {
+		delete iter_rules;
 	}
 
-	for (BOOST_AUTO(cntx, contexts.begin()); cntx != contexts.end(); ++cntx) {
-		delete cntx->second;
+	for (auto cntx : contexts) {
+		delete cntx.second;
 	}
 }
 
@@ -116,7 +116,7 @@ void Grammar::addSet(Set *& to) {
 				}
 				else {
 					bool special = false;
-					boost_foreach (Tag *tag, tv) {
+					for (auto tag : tv) {
 						if (tag->type & T_SPECIAL) {
 							special = true;
 							break;
@@ -161,8 +161,8 @@ void Grammar::addSet(Set *& to) {
 		positive->trie.swap(to->trie);
 		positive->trie_special.swap(to->trie_special);
 
-		boost_foreach (Tag *iter, to->ff_tags) {
-			BOOST_AUTO(pit, positive->trie_special.find(iter));
+		for (auto iter : to->ff_tags) {
+			auto pit = positive->trie_special.find(iter);
 			if (pit != positive->trie_special.end()) {
 				if (pit->second.terminal) {
 					if (pit->second.trie) {
@@ -289,8 +289,8 @@ void Grammar::addSetToList(Set *s) {
 	if (s->number == 0) {
 		if (sets_list.empty() || sets_list[0] != s) {
 			if (!s->sets.empty()) {
-				foreach (sit, s->sets) {
-					addSetToList(getSet(*sit));
+				for (auto sit : s->sets) {
+					addSetToList(getSet(sit));
 				}
 			}
 			sets_list.push_back(s);
@@ -314,7 +314,7 @@ uint32_t Grammar::removeNumericTags(uint32_t s) {
 	Set *set = getSet(s);
 	if (!set->sets.empty()) {
 		bool did = false;
-		BOOST_AUTO(sets, set->sets);
+		auto sets = set->sets;
 		for (size_t i = 0; i < sets.size(); ++i) {
 			uint32_t ns = removeNumericTags(sets[i]);
 			if (ns == 0) {
@@ -351,8 +351,8 @@ uint32_t Grammar::removeNumericTags(uint32_t s) {
 			if (tries[i]->empty()) {
 				continue;
 			}
-			BOOST_AUTO(ctags, trie_getTags(*tries[i]));
-			for (BOOST_AUTO(it, ctags.begin()); it != ctags.end(); ++it) {
+			auto ctags = trie_getTags(*tries[i]);
+			for (auto it = ctags.begin(); it != ctags.end(); ++it) {
 				bool special = false;
 				tags.clear();
 				fill_tagvector(*it, tags, did, special);
@@ -390,7 +390,7 @@ uint32_t Grammar::removeNumericTags(uint32_t s) {
 			ns->name += 'B';
 			ns->name += '_';
 
-			for (BOOST_AUTO(it, ntags.begin()); it != ntags.end(); ++it) {
+			for (auto it = ntags.begin(); it != ntags.end(); ++it) {
 				if (it->second) {
 					if (it->first.size() == 1 && (it->first[0]->type & T_FAILFAST)) {
 						ns->ff_tags.insert(it->first[0]);
@@ -410,6 +410,17 @@ uint32_t Grammar::removeNumericTags(uint32_t s) {
 	return set->hash;
 }
 
+void Grammar::getTags(const Set& set, std::set<TagVector>& rv) {
+	// ToDo: getTags() ought to account for other operators than OR
+	for (auto s : set.sets) {
+		getTags(*getSet(s), rv);
+	}
+	TagVector tv;
+	trie_getTags(set.trie, rv, tv);
+	tv.clear();
+	trie_getTags(set.trie_special, rv, tv);
+}
+
 Rule *Grammar::allocateRule() {
 	return new Rule;
 }
@@ -511,7 +522,7 @@ ContextualTest *Grammar::addContextualTest(ContextualTest *t) {
 	t->rehash();
 
 	t->linked = addContextualTest(t->linked);
-	boost_foreach (ContextualTest *& it, t->ors) {
+	for (auto& it : t->ors) {
 		it = addContextualTest(it);
 	}
 
@@ -572,8 +583,7 @@ void Grammar::resetStatistics() {
 }
 
 void Grammar::renameAllRules() {
-	foreach (iter_rule, rule_by_number) {
-		Rule *r = *iter_rule;
+	for (auto r : rule_by_number) {
 		gbuffers[0][0] = 0;
 		u_sprintf(&gbuffers[0][0], "L%u", r->line);
 		r->setName(&gbuffers[0][0]);
@@ -581,32 +591,32 @@ void Grammar::renameAllRules() {
 }
 
 void Grammar::reindex(bool unused_sets, bool used_tags) {
-	foreach (dset, sets_by_contents) {
-		if (dset->second->number == std::numeric_limits<uint32_t>::max()) {
-			dset->second->type |= ST_USED;
+	for (auto dset : sets_by_contents) {
+		if (dset.second->number == std::numeric_limits<uint32_t>::max()) {
+			dset.second->type |= ST_USED;
 			continue;
 		}
-		if (!(dset->second->type & ST_STATIC)) {
-			dset->second->type &= ~ST_USED;
+		if (!(dset.second->type & ST_STATIC)) {
+			dset.second->type &= ~ST_USED;
 		}
-		dset->second->number = 0;
+		dset.second->number = 0;
 	}
 
-	foreach (sset, static_sets) {
-		uint32_t sh = hash_value(*sset);
+	for (auto sset : static_sets) {
+		uint32_t sh = hash_value(sset);
 		if (set_alias.find(sh) != set_alias.end()) {
-			u_fprintf(ux_stderr, "Error: Static set %S is an alias; only real sets may be made static!\n", (*sset).c_str());
+			u_fprintf(ux_stderr, "Error: Static set %S is an alias; only real sets may be made static!\n", sset.c_str());
 			CG3Quit(1);
 		}
 		Set *s = getSet(sh);
 		if (!s) {
 			if (verbosity_level > 0) {
-				u_fprintf(ux_stderr, "Warning: Set %S was not defined, so cannot make it static.\n", (*sset).c_str());
+				u_fprintf(ux_stderr, "Warning: Set %S was not defined, so cannot make it static.\n", sset.c_str());
 			}
 			continue;
 		}
-		if (s->name != *sset) {
-			s->setName(*sset);
+		if (s->name != sset) {
+			s->setName(sset);
 		}
 		s->markUsed(*this);
 		s->type |= ST_STATIC;
@@ -627,41 +637,41 @@ void Grammar::reindex(bool unused_sets, bool used_tags) {
 	sets_any = 0;
 	rules_any = 0;
 
-	foreach (iter, single_tags_list) {
-		if ((*iter)->regexp && (*iter)->tag[0] != '"' && (*iter)->tag[0] != '<') {
-			regex_tags.insert((*iter)->regexp);
+	for (auto iter : single_tags_list) {
+		if (iter->regexp && iter->tag[0] != '"' && iter->tag[0] != '<') {
+			regex_tags.insert(iter->regexp);
 		}
-		if (((*iter)->type & T_CASE_INSENSITIVE) && (*iter)->tag[0] != '"' && (*iter)->tag[0] != '<') {
-			icase_tags.insert((*iter));
+		if ((iter->type & T_CASE_INSENSITIVE) && iter->tag[0] != '"' && iter->tag[0] != '<') {
+			icase_tags.insert(iter);
 		}
 		if (is_binary) {
 			continue;
 		}
-		if (!(*iter)->vs_sets) {
+		if (!iter->vs_sets) {
 			continue;
 		}
-		foreach (sit, *(*iter)->vs_sets) {
-			(*sit)->markUsed(*this);
+		for (auto sit : *iter->vs_sets) {
+			sit->markUsed(*this);
 		}
 	}
 
-	foreach (titer, single_tags_list) {
-		if ((*titer)->type & T_TEXTUAL) {
+	for (auto titer : single_tags_list) {
+		if (titer->type & T_TEXTUAL) {
 			continue;
 		}
-		foreach (iter, regex_tags) {
+		for (auto iter : regex_tags) {
 			UErrorCode status = U_ZERO_ERROR;
-			uregex_setText(*iter, (*titer)->tag.c_str(), (*titer)->tag.length(), &status);
+			uregex_setText(iter, titer->tag.c_str(), titer->tag.size(), &status);
 			if (status == U_ZERO_ERROR) {
-				if (uregex_find(*iter, -1, &status)) {
-					(*titer)->type |= T_TEXTUAL;
+				if (uregex_find(iter, -1, &status)) {
+					titer->type |= T_TEXTUAL;
 				}
 			}
 		}
-		foreach (iter, icase_tags) {
+		for (auto iter : icase_tags) {
 			UErrorCode status = U_ZERO_ERROR;
-			if (u_strCaseCompare((*titer)->tag.c_str(), (*titer)->tag.length(), (*iter)->tag.c_str(), (*iter)->tag.length(), U_FOLD_CASE_DEFAULT, &status) == 0) {
-				(*titer)->type |= T_TEXTUAL;
+			if (u_strCaseCompare(titer->tag.c_str(), titer->tag.size(), iter->tag.c_str(), iter->tag.size(), U_FOLD_CASE_DEFAULT, &status) == 0) {
+				titer->type |= T_TEXTUAL;
 			}
 			if (status != U_ZERO_ERROR) {
 				u_fprintf(ux_stderr, "Error: u_strCaseCompare() returned %s - cannot continue!\n", u_errorName(status));
@@ -670,47 +680,47 @@ void Grammar::reindex(bool unused_sets, bool used_tags) {
 		}
 	}
 
-	for (BOOST_AUTO(it, parentheses.begin()); it != parentheses.end(); ++it) {
+	for (auto it = parentheses.begin(); it != parentheses.end(); ++it) {
 		single_tags[it->first]->markUsed();
 		single_tags[it->second]->markUsed();
 	}
 
-	for (BOOST_AUTO(it, preferred_targets.begin()); it != preferred_targets.end(); ++it) {
+	for (auto it = preferred_targets.begin(); it != preferred_targets.end(); ++it) {
 		single_tags[*it]->markUsed();
 	}
 
-	foreach (iter_rule, rule_by_number) {
-		if ((*iter_rule)->wordform) {
-			wf_rules.push_back(*iter_rule);
+	for (auto rule : rule_by_number) {
+		if (rule->wordform) {
+			wf_rules.push_back(rule);
 		}
 		if (is_binary) {
 			continue;
 		}
 		Set *s = 0;
-		s = getSet((*iter_rule)->target);
+		s = getSet(rule->target);
 		s->markUsed(*this);
-		if ((*iter_rule)->childset1) {
-			s = getSet((*iter_rule)->childset1);
+		if (rule->childset1) {
+			s = getSet(rule->childset1);
 			s->markUsed(*this);
 		}
-		if ((*iter_rule)->childset2) {
-			s = getSet((*iter_rule)->childset2);
+		if (rule->childset2) {
+			s = getSet(rule->childset2);
 			s->markUsed(*this);
 		}
-		if ((*iter_rule)->maplist) {
-			(*iter_rule)->maplist->markUsed(*this);
+		if (rule->maplist) {
+			rule->maplist->markUsed(*this);
 		}
-		if ((*iter_rule)->sublist) {
-			(*iter_rule)->sublist->markUsed(*this);
+		if (rule->sublist) {
+			rule->sublist->markUsed(*this);
 		}
-		if ((*iter_rule)->dep_target) {
-			(*iter_rule)->dep_target->markUsed(*this);
+		if (rule->dep_target) {
+			rule->dep_target->markUsed(*this);
 		}
-		foreach (it, (*iter_rule)->tests) {
-			(*it)->markUsed(*this);
+		for (auto it : rule->tests) {
+			it->markUsed(*this);
 		}
-		foreach (it, (*iter_rule)->dep_tests) {
-			(*it)->markUsed(*this);
+		for (auto it : rule->dep_tests) {
+			it->markUsed(*this);
 		}
 	}
 
@@ -723,7 +733,7 @@ void Grammar::reindex(bool unused_sets, bool used_tags) {
 		}
 
 		contexts_t tosave;
-		for (BOOST_AUTO(cntx, contexts.begin()); cntx != contexts.end(); ++cntx) {
+		for (auto cntx = contexts.begin(); cntx != contexts.end(); ++cntx) {
 			if (cntx->second->is_used) {
 				tosave[cntx->first] = cntx->second;
 				continue;
@@ -735,10 +745,10 @@ void Grammar::reindex(bool unused_sets, bool used_tags) {
 
 	if (unused_sets) {
 		u_fprintf(ux_stdout, "Unused sets:\n");
-		foreach (rset, sets_by_contents) {
-			if (!(rset->second->type & ST_USED) && !rset->second->name.empty() && maybe_used_sets.count(rset->second) == 0) {
-				if (rset->second->name[0] != '_' || rset->second->name[1] != 'G' || rset->second->name[2] != '_') {
-					u_fprintf(ux_stdout, "Line %u set %S\n", rset->second->line, rset->second->name.c_str());
+		for (auto rset : sets_by_contents) {
+			if (!(rset.second->type & ST_USED) && !rset.second->name.empty() && maybe_used_sets.count(rset.second) == 0) {
+				if (rset.second->name[0] != '_' || rset.second->name[1] != 'G' || rset.second->name[2] != '_') {
+					u_fprintf(ux_stdout, "Line %u set %S\n", rset.second->line, rset.second->name.c_str());
 				}
 			}
 		}
@@ -748,14 +758,14 @@ void Grammar::reindex(bool unused_sets, bool used_tags) {
 
 	// Stuff below this line is not optional...
 
-	foreach (tset, sets_by_contents) {
-		if (tset->second->type & ST_USED) {
-			addSetToList(tset->second);
+	for (auto tset : sets_by_contents) {
+		if (tset.second->type & ST_USED) {
+			addSetToList(tset.second);
 		}
 	}
 
-	for (BOOST_AUTO(iter_tags, single_tags.begin()); iter_tags != single_tags.end(); ++iter_tags) {
-		Tag *tag = iter_tags->second;
+	for (auto iter_tags : single_tags) {
+		Tag *tag = iter_tags.second;
 		if (tag->tag[0] == mapping_prefix) {
 			tag->type |= T_MAPPING;
 		}
@@ -765,70 +775,70 @@ void Grammar::reindex(bool unused_sets, bool used_tags) {
 	}
 
 	if (!is_binary) {
-		boost_foreach (Set *set, sets_list) {
+		for (auto set : sets_list) {
 			set->reindex(*this);
 		}
-		boost_foreach (Set *set, sets_list) {
+		for (auto set : sets_list) {
 			setAdjustSets(set);
 		}
 	}
-	boost_foreach (Set *set, sets_list) {
+	for (auto set : sets_list) {
 		indexSets(set->number, set);
 	}
 
 	uint32SortedVector sects;
 
-	foreach (iter_rule, rule_by_number) {
-		if ((*iter_rule)->section == -1) {
-			before_sections.push_back(*iter_rule);
+	for (auto rule : rule_by_number) {
+		if (rule->section == -1) {
+			before_sections.push_back(rule);
 		}
-		else if ((*iter_rule)->section == -2) {
-			after_sections.push_back(*iter_rule);
+		else if (rule->section == -2) {
+			after_sections.push_back(rule);
 		}
-		else if ((*iter_rule)->section == -3) {
-			null_section.push_back(*iter_rule);
+		else if (rule->section == -3) {
+			null_section.push_back(rule);
 		}
 		else {
-			sects.insert((*iter_rule)->section);
-			rules.push_back(*iter_rule);
+			sects.insert(rule->section);
+			rules.push_back(rule);
 		}
-		if ((*iter_rule)->target) {
+		if (rule->target) {
 			Set *set = 0;
 			if (is_binary) {
-				set = sets_list[(*iter_rule)->target];
+				set = sets_list[rule->target];
 			}
 			else {
-				set = sets_by_contents.find((*iter_rule)->target)->second;
-				(*iter_rule)->target = set->number;
+				set = sets_by_contents.find(rule->target)->second;
+				rule->target = set->number;
 			}
-			indexSetToRule((*iter_rule)->number, set);
-			rules_by_set[(*iter_rule)->target].insert((*iter_rule)->number);
+			indexSetToRule(rule->number, set);
+			rules_by_set[rule->target].insert(rule->number);
 		}
 		else {
-			u_fprintf(ux_stderr, "Warning: Rule on line %u had no target.\n", (*iter_rule)->line);
+			u_fprintf(ux_stderr, "Warning: Rule on line %u had no target.\n", rule->line);
 			u_fflush(ux_stderr);
 		}
-		if (((*iter_rule)->maplist && ((*iter_rule)->maplist->type & ST_CHILD_UNIFY)) || ((*iter_rule)->sublist && ((*iter_rule)->sublist->type & ST_CHILD_UNIFY))) {
-			(*iter_rule)->flags |= FL_CAPTURE_UNIF;
+		if ((rule->maplist && (rule->maplist->type & ST_CHILD_UNIFY)) || (rule->sublist && (rule->sublist->type & ST_CHILD_UNIFY))) {
+			rule->flags |= FL_CAPTURE_UNIF;
 		}
 		if (is_binary) {
 			continue;
 		}
-		if ((*iter_rule)->childset1) {
-			Set *set = sets_by_contents.find((*iter_rule)->childset1)->second;
-			(*iter_rule)->childset1 = set->number;
+		if (rule->childset1) {
+			Set *set = sets_by_contents.find(rule->childset1)->second;
+			rule->childset1 = set->number;
 		}
-		if ((*iter_rule)->childset2) {
-			Set *set = sets_by_contents.find((*iter_rule)->childset2)->second;
-			(*iter_rule)->childset2 = set->number;
+		if (rule->childset2) {
+			Set *set = sets_by_contents.find(rule->childset2)->second;
+			rule->childset2 = set->number;
 		}
-		if ((*iter_rule)->dep_target) {
-			contextAdjustTarget((*iter_rule)->dep_target);
+		if (rule->dep_target) {
+			contextAdjustTarget(rule->dep_target);
 		}
-		boost_foreach (ContextualTest *test, (*iter_rule)->tests) {
+		for (auto test : rule->tests) {
 			contextAdjustTarget(test);
 		}
-		boost_foreach (ContextualTest *test, (*iter_rule)->dep_tests) {
+		for (auto test : rule->dep_tests) {
 			contextAdjustTarget(test);
 		}
 	}
@@ -848,7 +858,7 @@ void Grammar::reindex(bool unused_sets, bool used_tags) {
 
 	sets_by_contents.clear();
 
-	boost_foreach (Set *to, sets_list) {
+	for (auto to : sets_list) {
 		if (to->type & ST_STATIC) {
 			uint32_t nhash = hash_value(to->name);
 			const uint32_t cnum = to->number;
@@ -884,19 +894,19 @@ void Grammar::reindex(bool unused_sets, bool used_tags) {
 	bool did = true;
 	while (did) {
 		did = false;
-		foreach (set, sets_list) {
-			if (sets_vstr.test((*set)->number)) {
+		for (auto set : sets_list) {
+			if (sets_vstr.test(set->number)) {
 				continue;
 			}
-			foreach (iset, (*set)->sets) {
-				if (sets_vstr.test(*iset)) {
-					sets_vstr.set((*set)->number);
+			for (auto iset : set->sets) {
+				if (sets_vstr.test(iset)) {
+					sets_vstr.set(set->number);
 					did = true;
 					break;
 				}
 			}
-			if (trie_hasType((*set)->trie, T_VARSTRING) || trie_hasType((*set)->trie_special, T_VARSTRING)) {
-				sets_vstr.set((*set)->number);
+			if (trie_hasType(set->trie, T_VARSTRING) || trie_hasType(set->trie_special, T_VARSTRING)) {
+				sets_vstr.set(set->number);
 				did = true;
 			}
 		}
@@ -908,8 +918,8 @@ void Grammar::reindex(bool unused_sets, bool used_tags) {
 	while (did) {
 		did = false;
 
-		foreach (cntx, contexts) {
-			ContextualTest *t = cntx->second;
+		for (auto cntx : contexts) {
+			ContextualTest *t = cntx.second;
 
 			if (nk.count(t)) {
 				continue;
@@ -958,9 +968,7 @@ void Grammar::reindex(bool unused_sets, bool used_tags) {
 		}
 	}
 
-	foreach (it, rule_by_number) {
-		Rule *r = *it;
-
+	for (auto r : rule_by_number) {
 		// Determine whether this rule probably needs KEEPORDER
 		if (r->flags & RF_KEEPORDER) {
 			continue;
@@ -974,13 +982,13 @@ void Grammar::reindex(bool unused_sets, bool used_tags) {
 		if (r->dep_target && nk.count(r->dep_target)) {
 			needs = true;
 		}
-		foreach (cntx, r->tests) {
-			if (nk.count(*cntx)) {
+		for (auto cntx : r->tests) {
+			if (nk.count(cntx)) {
 				needs = true;
 			}
 		}
-		foreach (cntx, r->dep_tests) {
-			if (nk.count(*cntx)) {
+		for (auto cntx : r->dep_tests) {
+			if (nk.count(cntx)) {
 				needs = true;
 			}
 		}
@@ -996,7 +1004,7 @@ void Grammar::reindex(bool unused_sets, bool used_tags) {
 	}
 
 	if (used_tags) {
-		for (BOOST_AUTO(iter_tags, single_tags.begin()); iter_tags != single_tags.end(); ++iter_tags) {
+		for (auto iter_tags = single_tags.begin(); iter_tags != single_tags.end(); ++iter_tags) {
 			Tag *tag = iter_tags->second;
 			if (tag->type & T_USED) {
 				UString tmp(tag->toUString(true));
@@ -1008,7 +1016,7 @@ void Grammar::reindex(bool unused_sets, bool used_tags) {
 }
 
 inline void trie_indexToRule(const trie_t& trie, Grammar& grammar, uint32_t r) {
-	boost_foreach (const trie_t::value_type& kv, trie) {
+	for (auto& kv : trie) {
 		grammar.indexTagToRule(kv.first->hash, r);
 		if (kv.second.trie) {
 			trie_indexToRule(*kv.second.trie, grammar, r);
@@ -1036,7 +1044,7 @@ void Grammar::indexTagToRule(uint32_t t, uint32_t r) {
 }
 
 inline void trie_indexToSet(const trie_t& trie, Grammar& grammar, uint32_t r) {
-	boost_foreach (const trie_t::value_type& kv, trie) {
+	for (auto& kv : trie) {
 		grammar.indexTagToSet(kv.first->hash, r);
 		if (kv.second.trie) {
 			trie_indexToSet(*kv.second.trie, grammar, r);
@@ -1098,7 +1106,7 @@ void Grammar::contextAdjustTarget(ContextualTest *test) {
 		test->cbarrier = set->number;
 	}
 
-	boost_foreach (ContextualTest *tor, test->ors) {
+	for (auto tor : test->ors) {
 		contextAdjustTarget(tor);
 	}
 	if (test->tmpl) {
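
Grammar gains a getTags() helper that flattens a set — and, recursively, its member sets — into a std::set of tag vectors; the ToDo notes it currently treats every set operator as OR. The recursion pattern, with simplified stand-in types rather than CG3's Set/TagVector/trie:

    #include <set>
    #include <string>
    #include <vector>

    struct MiniSet {
        std::vector<std::vector<std::string>> own_tags; // stands in for the set's tries
        std::vector<const MiniSet*> members;            // stands in for set.sets
    };

    static void collect_tags(const MiniSet& s, std::set<std::vector<std::string>>& rv) {
        for (const MiniSet* m : s.members) {  // recurse into member sets first
            collect_tags(*m, rv);
        }
        for (const auto& tv : s.own_tags) {   // then add this set's own tag vectors
            rv.insert(tv);
        }
    }
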
diff --git a/src/Grammar.hpp b/src/Grammar.hpp
index 6769ef0..0039aa1 100644
--- a/src/Grammar.hpp
+++ b/src/Grammar.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -56,7 +56,7 @@ public:
 	std::vector<Set*> sets_list;
 	SetSet sets_all;
 	uint32FlatHashMap sets_by_name;
-	typedef stdext::hash_map<UString, uint32_t> set_name_seeds_t;
+	typedef std::unordered_map<UString, uint32_t, hash_ustring> set_name_seeds_t;
 	set_name_seeds_t set_name_seeds;
 	Setuint32HashMap sets_by_contents;
 	uint32FlatHashMap set_alias;
@@ -70,15 +70,15 @@ public:
 	typedef TagSortedVector icase_tags_t;
 	icase_tags_t icase_tags;
 
-	typedef stdext::hash_map<uint32_t, ContextualTest*> contexts_t;
+	typedef std::unordered_map<uint32_t, ContextualTest*> contexts_t;
 	contexts_t templates;
 	contexts_t contexts;
 
-	typedef stdext::hash_map<uint32_t, uint32IntervalVector> rules_by_set_t;
+	typedef std::unordered_map<uint32_t, uint32IntervalVector> rules_by_set_t;
 	rules_by_set_t rules_by_set;
-	typedef stdext::hash_map<uint32_t, uint32IntervalVector> rules_by_tag_t;
+	typedef std::unordered_map<uint32_t, uint32IntervalVector> rules_by_tag_t;
 	rules_by_tag_t rules_by_tag;
-	typedef stdext::hash_map<uint32_t, boost::dynamic_bitset<> > sets_by_tag_t;
+	typedef std::unordered_map<uint32_t, boost::dynamic_bitset<> > sets_by_tag_t;
 	sets_by_tag_t sets_by_tag;
 
 	uint32IntervalVector *rules_any;
@@ -113,6 +113,7 @@ public:
 	void addSetToList(Set *s);
 	void allocateDummySet();
 	uint32_t removeNumericTags(uint32_t s);
+	void getTags(const Set& set, std::set<TagVector>& rv);
 
 	void addAnchor(const UChar *to, uint32_t at, bool primary = false);
 
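The header switches the remaining stdext::hash_map typedefs to std::unordered_map; the UString-keyed map gets an explicit hasher (hash_ustring), presumably because there is no std::hash specialization for the project's UString type. A minimal functor of that shape, as an assumption rather than CG3's actual implementation:

    #include <cstddef>
    #include <cstdint>
    #include <string>
    #include <unordered_map>

    using UString = std::u16string; // stand-in for CG3's ICU-based UString

    struct hash_ustring {
        size_t operator()(const UString& s) const {
            size_t h = 0;
            for (char16_t c : s) {
                h = h * 31 + c; // simple polynomial hash; the real one likely differs
            }
            return h;
        }
    };

    using set_name_seeds_t = std::unordered_map<UString, uint32_t, hash_ustring>;
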
diff --git a/src/GrammarApplicator.cpp b/src/GrammarApplicator.cpp
index 9efcf4b..85254fb 100644
--- a/src/GrammarApplicator.cpp
+++ b/src/GrammarApplicator.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -94,7 +94,6 @@ GrammarApplicator::GrammarApplicator(UFILE *ux_err)
   , par_left_pos(0)
   , par_right_pos(0)
   , did_final_enclosure(false)
-  , tmpl_cntx_pos(0)
   , same_basic(0)
   , target(0)
   , mark(0)
@@ -129,10 +128,10 @@ GrammarApplicator::~GrammarApplicator() {
 }
 
 void GrammarApplicator::resetIndexes() {
-	boost_foreach (uint32FlatHashSet& sv, index_readingSet_yes) {
+	for (auto& sv : index_readingSet_yes) {
 		sv.clear();
 	}
-	boost_foreach (uint32FlatHashSet& sv, index_readingSet_no) {
+	for (auto& sv : index_readingSet_no) {
 		sv.clear();
 	}
 	index_regexp_yes.clear();
@@ -162,26 +161,30 @@ void GrammarApplicator::index() {
 		return;
 	}
 
+	// ToDo: Remove for real ordered mode
+	for (auto iter : single_tags) {
+		if (iter.second->type & T_REGEXP_LINE) {
+			ordered = true;
+		}
+	}
+
 	if (!grammar->before_sections.empty()) {
 		uint32IntervalVector& m = runsections[-1];
-		foreach (iter_rules, grammar->before_sections) {
-			const Rule *r = *iter_rules;
+		for (auto r : grammar->before_sections) {
 			m.insert(r->number);
 		}
 	}
 
 	if (!grammar->after_sections.empty()) {
 		uint32IntervalVector& m = runsections[-2];
-		foreach (iter_rules, grammar->after_sections) {
-			const Rule *r = *iter_rules;
+		for (auto r : grammar->after_sections) {
 			m.insert(r->number);
 		}
 	}
 
 	if (!grammar->null_section.empty()) {
 		uint32IntervalVector& m = runsections[-3];
-		foreach (iter_rules, grammar->null_section) {
-			const Rule *r = *iter_rules;
+		for (auto r : grammar->null_section) {
 			m.insert(r->number);
 		}
 	}
@@ -189,8 +192,7 @@ void GrammarApplicator::index() {
 	if (sections.empty()) {
 		int32_t smax = (int32_t)grammar->sections.size();
 		for (int32_t i = 0; i < smax; i++) {
-			foreach (iter_rules, grammar->rules) {
-				const Rule *r = *iter_rules;
+			for (auto r : grammar->rules) {
 				if (r->section < 0 || r->section > i) {
 					continue;
 				}
@@ -203,8 +205,7 @@ void GrammarApplicator::index() {
 		numsections = sections.size();
 		for (uint32_t n = 0; n < numsections; n++) {
 			for (uint32_t e = 0; e <= n; e++) {
-				foreach (iter_rules, grammar->rules) {
-					const Rule *r = *iter_rules;
+				for (auto r : grammar->rules) {
 					if (r->section != (int32_t)sections[e] - 1) {
 						continue;
 					}
@@ -217,16 +218,16 @@ void GrammarApplicator::index() {
 
 	if (!valid_rules.empty()) {
 		uint32IntervalVector vr;
-		foreach (iter, grammar->rule_by_number) {
-			if (valid_rules.contains((*iter)->line)) {
-				vr.push_back((*iter)->number);
+		for (auto iter : grammar->rule_by_number) {
+			if (valid_rules.contains(iter->line)) {
+				vr.push_back(iter->number);
 			}
 		}
 		valid_rules = vr;
 	}
 
-	const UChar local_utf_pattern[] = { ' ', '#', '%', 'u', '%', '0', '?', 'u', L'\u2192', '%', 'u', '%', '0', '?', 'u', 0 };
-	const UChar local_latin_pattern[] = { ' ', '#', '%', 'u', '%', '0', '?', 'u', '-', '>', '%', 'u', '%', '0', '?', 'u', 0 };
+	constexpr UChar local_utf_pattern[] = { ' ', '#', '%', 'u', '%', '0', '?', 'u', L'\u2192', '%', 'u', '%', '0', '?', 'u', 0 };
+	constexpr UChar local_latin_pattern[] = { ' ', '#', '%', 'u', '%', '0', '?', 'u', '-', '>', '%', 'u', '%', '0', '?', 'u', 0 };
 
 	span_pattern_utf = local_utf_pattern;
 	span_pattern_latin = local_latin_pattern;
@@ -297,16 +298,16 @@ Tag *GrammarApplicator::addTag(const UChar *txt, bool vstr) {
 	bool reflow = false;
 	if ((tag->type & T_REGEXP) && tag->tag[0] != '"' && tag->tag[0] != '<') {
 		if (grammar->regex_tags.insert(tag->regexp).second) {
-			foreach (titer, single_tags) {
-				if (titer->second->type & T_TEXTUAL) {
+			for (auto titer : single_tags) {
+				if (titer.second->type & T_TEXTUAL) {
 					continue;
 				}
-				foreach (iter, grammar->regex_tags) {
+				for (auto iter : grammar->regex_tags) {
 					UErrorCode status = U_ZERO_ERROR;
-					uregex_setText(*iter, titer->second->tag.c_str(), titer->second->tag.length(), &status);
+					uregex_setText(iter, titer.second->tag.c_str(), titer.second->tag.size(), &status);
 					if (status == U_ZERO_ERROR) {
-						if (uregex_find(*iter, -1, &status)) {
-							titer->second->type |= T_TEXTUAL;
+						if (uregex_find(iter, -1, &status)) {
+							titer.second->type |= T_TEXTUAL;
 							reflow = true;
 						}
 					}
@@ -316,14 +317,14 @@ Tag *GrammarApplicator::addTag(const UChar *txt, bool vstr) {
 	}
 	if ((tag->type & T_CASE_INSENSITIVE) && tag->tag[0] != '"' && tag->tag[0] != '<') {
 		if (grammar->icase_tags.insert(tag).second) {
-			foreach (titer, single_tags) {
-				if (titer->second->type & T_TEXTUAL) {
+			for (auto titer : single_tags) {
+				if (titer.second->type & T_TEXTUAL) {
 					continue;
 				}
-				foreach (iter, grammar->icase_tags) {
+				for (auto iter : grammar->icase_tags) {
 					UErrorCode status = U_ZERO_ERROR;
-					if (u_strCaseCompare(titer->second->tag.c_str(), titer->second->tag.length(), (*iter)->tag.c_str(), (*iter)->tag.length(), U_FOLD_CASE_DEFAULT, &status) == 0) {
-						titer->second->type |= T_TEXTUAL;
+					if (u_strCaseCompare(titer.second->tag.c_str(), titer.second->tag.size(), iter->tag.c_str(), iter->tag.size(), U_FOLD_CASE_DEFAULT, &status) == 0) {
+						titer.second->type |= T_TEXTUAL;
 						reflow = true;
 					}
 					if (status != U_ZERO_ERROR) {
@@ -357,12 +358,12 @@ void GrammarApplicator::printTrace(UFILE *output, uint32_t hit_by) {
 			}
 			u_fprintf(output, ")");
 		}
-		if (!trace_name_only || !r->name) {
+		if (!trace_name_only || r->name.empty()) {
 			u_fprintf(output, ":%u", r->line);
 		}
-		if (r->name) {
+		if (!r->name.empty()) {
 			u_fputc(':', output);
-			u_fprintf(output, "%S", r->name);
+			u_fprintf(output, "%S", r->name.c_str());
 		}
 	}
 	else {
@@ -392,20 +393,20 @@ void GrammarApplicator::printReading(const Reading *reading, UFILE *output, size
 	}
 
 	uint32SortedVector unique;
-	foreach (tter, reading->tags_list) {
-		if ((!show_end_tags && *tter == endtag) || *tter == begintag) {
+	for (auto tter : reading->tags_list) {
+		if ((!show_end_tags && tter == endtag) || tter == begintag) {
 			continue;
 		}
-		if (*tter == reading->baseform || *tter == reading->parent->wordform->hash) {
+		if (tter == reading->baseform || tter == reading->parent->wordform->hash) {
 			continue;
 		}
 		if (unique_tags) {
-			if (unique.find(*tter) != unique.end()) {
+			if (unique.find(tter) != unique.end()) {
 				continue;
 			}
-			unique.insert(*tter);
+			unique.insert(tter);
 		}
-		const Tag *tag = single_tags[*tter];
+		const Tag *tag = single_tags[tter];
 		if (tag->type & T_DEPENDENCY && has_dep && !dep_original) {
 			continue;
 		}
@@ -430,8 +431,8 @@ void GrammarApplicator::printReading(const Reading *reading, UFILE *output, size
 			}
 		}
 
-		const UChar local_utf_pattern[] = { ' ', '#', '%', 'u', L'\u2192', '%', 'u', 0 };
-		const UChar local_latin_pattern[] = { ' ', '#', '%', 'u', '-', '>', '%', 'u', 0 };
+		constexpr UChar local_utf_pattern[] = { ' ', '#', '%', 'u', L'\u2192', '%', 'u', 0 };
+		constexpr UChar local_latin_pattern[] = { ' ', '#', '%', 'u', '-', '>', '%', 'u', 0 };
 		const UChar *pattern = local_latin_pattern;
 		if (unicode_tags) {
 			pattern = local_utf_pattern;
@@ -466,18 +467,18 @@ void GrammarApplicator::printReading(const Reading *reading, UFILE *output, size
 	if (reading->parent->type & CT_RELATED) {
 		u_fprintf(output, " ID:%u", reading->parent->global_number);
 		if (!reading->parent->relations.empty()) {
-			foreach (miter, reading->parent->relations) {
-				boost_foreach (uint32_t siter, miter->second) {
-					u_fprintf(output, " R:%S:%u", grammar->single_tags.find(miter->first)->second->tag.c_str(), siter);
+			for (auto miter : reading->parent->relations) {
+				for (auto siter : miter.second) {
+					u_fprintf(output, " R:%S:%u", grammar->single_tags.find(miter.first)->second->tag.c_str(), siter);
 				}
 			}
 		}
 	}
 
 	if (trace) {
-		foreach (iter_hb, reading->hit_by) {
+		for (auto iter_hb : reading->hit_by) {
 			u_fputc(' ', output);
-			printTrace(output, *iter_hb);
+			printTrace(output, iter_hb);
 		}
 	}
 
@@ -490,7 +491,7 @@ void GrammarApplicator::printReading(const Reading *reading, UFILE *output, size
 }
 
 void GrammarApplicator::printCohort(Cohort *cohort, UFILE *output) {
-	const UChar ws[] = { ' ', '\t', 0 };
+	constexpr UChar ws[] = { ' ', '\t', 0 };
 
 	if (cohort->local_number == 0) {
 		goto removed;
@@ -505,11 +506,11 @@ void GrammarApplicator::printCohort(Cohort *cohort, UFILE *output) {
 	}
 	u_fprintf(output, "%S", cohort->wordform->tag.c_str());
 	if (cohort->wread) {
-		foreach (tter, cohort->wread->tags_list) {
-			if (*tter == cohort->wordform->hash) {
+		for (auto tter : cohort->wread->tags_list) {
+			if (tter == cohort->wordform->hash) {
 				continue;
 			}
-			const Tag *tag = single_tags[*tter];
+			const Tag *tag = single_tags[tter];
 			u_fprintf(output, " %S", tag->tag.c_str());
 		}
 	}
@@ -519,15 +520,15 @@ void GrammarApplicator::printCohort(Cohort *cohort, UFILE *output) {
 		mergeMappings(*cohort);
 	}
 
-	foreach (rter1, cohort->readings) {
-		printReading(*rter1, output);
+	for (auto rter1 : cohort->readings) {
+		printReading(rter1, output);
 	}
 	if (trace && !trace_no_removed) {
-		foreach (rter3, cohort->delayed) {
-			printReading(*rter3, output);
+		for (auto rter3 : cohort->delayed) {
+			printReading(rter3, output);
 		}
-		foreach (rter2, cohort->deleted) {
-			printReading(*rter2, output);
+		for (auto rter2 : cohort->deleted) {
+			printReading(rter2, output);
 		}
 	}
 
@@ -539,15 +540,15 @@ removed:
 		}
 	}
 
-	foreach (iter, cohort->removed) {
-		printCohort(*iter, output);
+	for (auto iter : cohort->removed) {
+		printCohort(iter, output);
 	}
 }
 
 void GrammarApplicator::printSingleWindow(SingleWindow *window, UFILE *output) {
-	boost_foreach (uint32_t var, window->variables_output) {
+	for (auto var : window->variables_output) {
 		Tag *key = single_tags[var];
-		BOOST_AUTO(iter, window->variables_set.find(var));
+		auto iter = window->variables_set.find(var);
 		if (iter != window->variables_set.end()) {
 			if (iter->second != grammar->tag_any) {
 				Tag *value = single_tags[iter->second];
@@ -600,11 +601,11 @@ void GrammarApplicator::pipeOutReading(const Reading *reading, std::ostream& out
 	}
 
 	uint32_t cs = 0;
-	foreach (tter, reading->tags_list) {
-		if (*tter == reading->baseform || *tter == reading->parent->wordform->hash) {
+	for (auto tter : reading->tags_list) {
+		if (tter == reading->baseform || tter == reading->parent->wordform->hash) {
 			continue;
 		}
-		const Tag *tag = single_tags.find(*tter)->second;
+		const Tag *tag = single_tags.find(tter)->second;
 		if (tag->type & T_DEPENDENCY && has_dep) {
 			continue;
 		}
@@ -612,11 +613,11 @@ void GrammarApplicator::pipeOutReading(const Reading *reading, std::ostream& out
 	}
 
 	writeRaw(ss, cs);
-	foreach (tter, reading->tags_list) {
-		if (*tter == reading->baseform || *tter == reading->parent->wordform->hash) {
+	for (auto tter : reading->tags_list) {
+		if (tter == reading->baseform || tter == reading->parent->wordform->hash) {
 			continue;
 		}
-		const Tag *tag = single_tags.find(*tter)->second;
+		const Tag *tag = single_tags.find(tter)->second;
 		if (tag->type & T_DEPENDENCY && has_dep) {
 			continue;
 		}
@@ -651,8 +652,8 @@ void GrammarApplicator::pipeOutCohort(const Cohort *cohort, std::ostream& output
 
 	uint32_t cs = cohort->readings.size();
 	writeRaw(ss, cs);
-	foreach (rter1, cohort->readings) {
-		pipeOutReading(*rter1, ss);
+	for (auto rter1 : cohort->readings) {
+		pipeOutReading(rter1, ss);
 	}
 	if (!cohort->text.empty()) {
 		writeUTF8String(ss, cohort->text);
@@ -831,11 +832,11 @@ void GrammarApplicator::pipeInSingleWindow(SingleWindow& window, Process& input)
 void GrammarApplicator::error(const char *str, const UChar *p) {
 	(void)p;
 	if (current_rule && current_rule->line) {
-		const UChar buf[] = { 'R', 'T', ' ', 'R', 'U', 'L', 'E', 0 };
+		constexpr UChar buf[] = { 'R', 'T', ' ', 'R', 'U', 'L', 'E', 0 };
 		u_fprintf(ux_stderr, str, buf, current_rule->line, buf);
 	}
 	else {
-		const UChar buf[] = { 'R', 'T', ' ', 'I', 'N', 'P', 'U', 'T', 0 };
+		constexpr UChar buf[] = { 'R', 'T', ' ', 'I', 'N', 'P', 'U', 'T', 0 };
 		u_fprintf(ux_stderr, str, buf, numLines, buf);
 	}
 }
@@ -843,11 +844,11 @@ void GrammarApplicator::error(const char *str, const UChar *p) {
 void GrammarApplicator::error(const char *str, const char *s, const UChar *p) {
 	(void)p;
 	if (current_rule && current_rule->line) {
-		const UChar buf[] = { 'R', 'T', ' ', 'R', 'U', 'L', 'E', 0 };
+		constexpr UChar buf[] = { 'R', 'T', ' ', 'R', 'U', 'L', 'E', 0 };
 		u_fprintf(ux_stderr, str, buf, s, current_rule->line, buf);
 	}
 	else {
-		const UChar buf[] = { 'R', 'T', ' ', 'I', 'N', 'P', 'U', 'T', 0 };
+		constexpr UChar buf[] = { 'R', 'T', ' ', 'I', 'N', 'P', 'U', 'T', 0 };
 		u_fprintf(ux_stderr, str, buf, s, numLines, buf);
 	}
 }
@@ -855,11 +856,11 @@ void GrammarApplicator::error(const char *str, const char *s, const UChar *p) {
 void GrammarApplicator::error(const char *str, const UChar *s, const UChar *p) {
 	(void)p;
 	if (current_rule && current_rule->line) {
-		const UChar buf[] = { 'R', 'T', ' ', 'R', 'U', 'L', 'E', 0 };
+		constexpr UChar buf[] = { 'R', 'T', ' ', 'R', 'U', 'L', 'E', 0 };
 		u_fprintf(ux_stderr, str, buf, s, current_rule->line, buf);
 	}
 	else {
-		const UChar buf[] = { 'R', 'T', ' ', 'I', 'N', 'P', 'U', 'T', 0 };
+		constexpr UChar buf[] = { 'R', 'T', ' ', 'I', 'N', 'P', 'U', 'T', 0 };
 		u_fprintf(ux_stderr, str, buf, s, numLines, buf);
 	}
 }
@@ -867,11 +868,11 @@ void GrammarApplicator::error(const char *str, const UChar *s, const UChar *p) {
 void GrammarApplicator::error(const char *str, const char *s, const UChar *S, const UChar *p) {
 	(void)p;
 	if (current_rule && current_rule->line) {
-		const UChar buf[] = { 'R', 'T', ' ', 'R', 'U', 'L', 'E', 0 };
+		constexpr UChar buf[] = { 'R', 'T', ' ', 'R', 'U', 'L', 'E', 0 };
 		u_fprintf(ux_stderr, str, buf, s, S, current_rule->line, buf);
 	}
 	else {
-		const UChar buf[] = { 'R', 'T', ' ', 'I', 'N', 'P', 'U', 'T', 0 };
+		constexpr UChar buf[] = { 'R', 'T', ' ', 'I', 'N', 'P', 'U', 'T', 0 };
 		u_fprintf(ux_stderr, str, buf, s, S, numLines, buf);
 	}
 }
diff --git a/src/GrammarApplicator.hpp b/src/GrammarApplicator.hpp
index faf4b5f..a7c0a35 100644
--- a/src/GrammarApplicator.hpp
+++ b/src/GrammarApplicator.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -48,6 +48,20 @@ class Rule;
 
 typedef std::vector<UnicodeString> regexgrps_t;
 
+struct tmpl_context_t {
+	Cohort *min = 0;
+	Cohort *max = 0;
+	std::vector<const ContextualTest*> linked;
+	bool in_template = false;
+
+	void clear() {
+		min = 0;
+		max = 0;
+		linked.clear();
+		in_template = false;
+	}
+};
+
 struct dSMC_Context {
 	const ContextualTest *test;
 	Cohort **deep;
@@ -96,6 +110,7 @@ public:
 	uint32_t hard_limit;
 	uint32Vector sections;
 	uint32IntervalVector valid_rules;
+	uint32IntervalVector trace_rules;
 	uint32FlatHashMap variables;
 	uint32_t verbosity_level;
 	uint32_t debug_level;
@@ -167,7 +182,7 @@ protected:
 	uint32_t numReadings;
 
 	bool did_index;
-	uint32SortedVector dep_deep_seen;
+	sorted_vector<std::pair<uint32_t,uint32_t>> dep_deep_seen;
 
 	uint32_t numsections;
 	typedef std::map<int32_t, uint32IntervalVector> RSType;
@@ -191,20 +206,7 @@ protected:
 	uint32_t par_left_pos, par_right_pos;
 	bool did_final_enclosure;
 
-	struct tmpl_context_t {
-		Cohort *min;
-		Cohort *max;
-		const ContextualTest *test;
-
-		tmpl_context_t(const ContextualTest *test)
-		  : min(0)
-		  , max(0)
-		  , test(test)
-		{
-		}
-	};
-	std::vector<tmpl_context_t> tmpl_cntxs;
-	size_t tmpl_cntx_pos;
+	tmpl_context_t tmpl_cntx;
 
 	std::vector<regexgrps_t> regexgrps_store;
 	std::pair<uint8_t, regexgrps_t*> regexgrps;
@@ -256,6 +258,7 @@ protected:
 	enum ST_RETVALS {
 		TRV_BREAK   = (1 <<  0),
 		TRV_BARRIER = (1 <<  1),
+		TRV_BREAK_DEFAULT = (1 <<  2),
 	};
 	Cohort *runSingleTest(Cohort *cohort, const ContextualTest *test, uint8_t& rvs, bool *retval, Cohort **deep = 0, Cohort *origin = 0);
 	Cohort *runSingleTest(SingleWindow *sWindow, size_t i, const ContextualTest *test, uint8_t& rvs, bool *retval, Cohort **deep = 0, Cohort *origin = 0);
@@ -269,6 +272,7 @@ protected:
 	bool doesWordformsMatch(const Tag *cword, const Tag *rword);
 	uint32_t doesTagMatchRegexp(uint32_t test, const Tag& tag, bool bypass_index = false);
 	uint32_t doesTagMatchIcase(uint32_t test, const Tag& tag, bool bypass_index = false);
+	uint32_t doesRegexpMatchLine(const Reading& reading, const Tag& tag, bool bypass_index = false);
 	uint32_t doesRegexpMatchReading(const Reading& reading, const Tag& tag, bool bypass_index = false);
 	uint32_t doesTagMatchReading(const Reading& reading, const Tag& tag, bool unif_mode = false, bool bypass_index = false);
 	bool doesSetMatchReading_trie(const Reading& reading, const Set& theset, const trie_t& trie, bool unif_mode = false);
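
The header change replaces the old per-template vector of tmpl_context_t objects with a single flattened struct (min/max cohort, a stack of linked tests, and an in_template flag) that runContextualTest_tmpl() below saves and restores around each attempt. A compilable sketch of that save/restore discipline; the wrapper function and the test callback are invented for illustration and are not CG-3 API:

#include <vector>

struct Cohort;
struct ContextualTest;

// Mirrors the struct added to GrammarApplicator.hpp above.
struct tmpl_context_t {
	Cohort* min = nullptr;
	Cohort* max = nullptr;
	std::vector<const ContextualTest*> linked;
	bool in_template = false;
};

// Snapshot the context, mark that we are inside a template, run the test,
// and roll the snapshot back if the test failed.
bool with_template(tmpl_context_t& ctx, const ContextualTest* link, bool (*test)()) {
	Cohort* min = ctx.min;
	Cohort* max = ctx.max;
	bool in_template = ctx.in_template;
	ctx.in_template = true;
	if (link) {
		ctx.linked.push_back(link);
	}

	bool ok = test();

	if (link) {
		ctx.linked.pop_back();
	}
	if (!ok) {
		ctx.min = min;
		ctx.max = max;
		ctx.in_template = in_template;
	}
	return ok;
}

int main() {
	tmpl_context_t ctx;
	with_template(ctx, nullptr, [] { return false; }); // a failing test leaves ctx untouched
	return ctx.in_template ? 1 : 0;
}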
diff --git a/src/GrammarApplicator_matchSet.cpp b/src/GrammarApplicator_matchSet.cpp
index 2905fe9..4be9c6b 100644
--- a/src/GrammarApplicator_matchSet.cpp
+++ b/src/GrammarApplicator_matchSet.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -47,7 +47,7 @@ inline bool TagSet_SubsetOf_TSet(const TagSortedVector& a, const T& b) {
 	}
 	//*/
 	typename T::const_iterator bi = b.lower_bound((*a.begin())->hash);
-	boost_foreach (Tag *ai, a) {
+	for (auto ai : a) {
 		while (bi != b.end() && *bi < ai->hash) {
 			++bi;
 		}
@@ -76,7 +76,7 @@ uint32_t GrammarApplicator::doesTagMatchRegexp(uint32_t test, const Tag& tag, bo
 	else {
 		const Tag& itag = *(single_tags.find(test)->second);
 		UErrorCode status = U_ZERO_ERROR;
-		uregex_setText(tag.regexp, itag.tag.c_str(), itag.tag.length(), &status);
+		uregex_setText(tag.regexp, itag.tag.c_str(), itag.tag.size(), &status);
 		if (status != U_ZERO_ERROR) {
 			u_fprintf(ux_stderr, "Error: uregex_setText(MatchSet) returned %s for tag %S before input line %u - cannot continue!\n", u_errorName(status), tag.tag.c_str(), numLines);
 			CG3Quit(1);
@@ -91,7 +91,8 @@ uint32_t GrammarApplicator::doesTagMatchRegexp(uint32_t test, const Tag& tag, bo
 		}
 		if (match) {
 			int32_t gc = uregex_groupCount(tag.regexp, &status);
-			if (gc > 0) {
+			// ToDo: Allow regex captures from dependency target contexts without any captures in normal target contexts
+			if (gc > 0 && regexgrps.second != 0) {
 				UChar tmp[1024];
 				for (int i = 1; i <= gc; ++i) {
 					tmp[0] = 0;
@@ -126,7 +127,7 @@ uint32_t GrammarApplicator::doesTagMatchIcase(uint32_t test, const Tag& tag, boo
 	else {
 		const Tag& itag = *(single_tags.find(test)->second);
 		UErrorCode status = U_ZERO_ERROR;
-		if (u_strCaseCompare(tag.tag.c_str(), tag.tag.length(), itag.tag.c_str(), itag.tag.length(), U_FOLD_CASE_DEFAULT, &status) == 0) {
+		if (u_strCaseCompare(tag.tag.c_str(), tag.tag.size(), itag.tag.c_str(), itag.tag.size(), U_FOLD_CASE_DEFAULT, &status) == 0) {
 			match = itag.hash;
 		}
 		if (status != U_ZERO_ERROR) {
@@ -143,6 +144,57 @@ uint32_t GrammarApplicator::doesTagMatchIcase(uint32_t test, const Tag& tag, boo
 	return match;
 }
 
+// ToDo: Remove for real ordered mode
+uint32_t GrammarApplicator::doesRegexpMatchLine(const Reading& reading, const Tag& tag, bool bypass_index) {
+	uint32_t match = 0;
+	uint32_t ih = reading.tags_string_hash;
+	if (!bypass_index && index_matches(index_regexp_no, ih)) {
+		match = 0;
+	}
+	else if (!bypass_index && index_matches(index_regexp_yes, ih)) {
+		match = reading.tags_string_hash;
+	}
+	else {
+		UErrorCode status = U_ZERO_ERROR;
+		uregex_setText(tag.regexp, reading.tags_string.c_str(), reading.tags_string.size(), &status);
+		if (status != U_ZERO_ERROR) {
+			u_fprintf(ux_stderr, "Error: uregex_setText(MatchSet) returned %s for tag %S before input line %u - cannot continue!\n", u_errorName(status), tag.tag.c_str(), numLines);
+			CG3Quit(1);
+		}
+		status = U_ZERO_ERROR;
+		if (uregex_find(tag.regexp, -1, &status)) {
+			match = reading.tags_string_hash;
+		}
+		if (status != U_ZERO_ERROR) {
+			u_fprintf(ux_stderr, "Error: uregex_find(MatchSet) returned %s for tag %S before input line %u - cannot continue!\n", u_errorName(status), tag.tag.c_str(), numLines);
+			CG3Quit(1);
+		}
+		if (match) {
+			int32_t gc = uregex_groupCount(tag.regexp, &status);
+			// ToDo: Allow regex captures from dependency target contexts without any captures in normal target contexts
+			if (gc > 0 && regexgrps.second != 0) {
+				UChar tmp[1024];
+				for (int i = 1; i <= gc; ++i) {
+					tmp[0] = 0;
+					int32_t len = uregex_group(tag.regexp, i, tmp, 1024, &status);
+					regexgrps.second->resize(std::max(static_cast<size_t>(regexgrps.first) + 1, regexgrps.second->size()));
+					UnicodeString& ucstr = (*regexgrps.second)[regexgrps.first];
+					ucstr.remove();
+					ucstr.append(tmp, len);
+					++regexgrps.first;
+				}
+			}
+			else {
+				index_regexp_yes.insert(ih);
+			}
+		}
+		else {
+			index_regexp_no.insert(ih);
+		}
+	}
+	return match;
+}
+
 /**
  * Tests whether a given reading matches a given tag's stored regular expression.
  *
@@ -152,9 +204,14 @@ uint32_t GrammarApplicator::doesTagMatchIcase(uint32_t test, const Tag& tag, boo
 uint32_t GrammarApplicator::doesRegexpMatchReading(const Reading& reading, const Tag& tag, bool bypass_index) {
 	uint32_t match = 0;
 
+	// ToDo: Remove for real ordered mode
+	if (tag.type & T_REGEXP_LINE) {
+		return doesRegexpMatchLine(reading, tag, bypass_index);
+	}
+
 	// Grammar::reindex() will do a one-time pass to mark any potential matching tag as T_TEXTUAL
-	foreach (mter, reading.tags_textual) {
-		match = doesTagMatchRegexp(*mter, tag, bypass_index);
+	for (auto mter : reading.tags_textual) {
+		match = doesTagMatchRegexp(mter, tag, bypass_index);
 		if (match) {
 			break;
 		}
@@ -205,8 +262,8 @@ uint32_t GrammarApplicator::doesTagMatchReading(const Reading& reading, const Ta
 		match = doesRegexpMatchReading(reading, tag, bypass_index);
 	}
 	else if (tag.type & T_CASE_INSENSITIVE) {
-		foreach (mter, reading.tags_textual) {
-			match = doesTagMatchIcase(*mter, tag, bypass_index);
+		for (auto mter : reading.tags_textual) {
+			match = doesTagMatchIcase(mter, tag, bypass_index);
 			if (match) {
 				break;
 			}
@@ -240,18 +297,18 @@ uint32_t GrammarApplicator::doesTagMatchReading(const Reading& reading, const Ta
 			}
 		}
 		else {
-			foreach (mter, reading.tags_textual) {
-				const Tag& itag = *(single_tags.find(*mter)->second);
+			for (auto mter : reading.tags_textual) {
+				const Tag& itag = *(single_tags.find(mter)->second);
 				if (!(itag.type & (T_BASEFORM | T_WORDFORM))) {
 					match = itag.hash;
 					if (unif_mode) {
 						if (unif_last_textual) {
-							if (unif_last_textual != *mter) {
+							if (unif_last_textual != mter) {
 								match = 0;
 							}
 						}
 						else {
-							unif_last_textual = *mter;
+							unif_last_textual = mter;
 						}
 					}
 				}
@@ -262,13 +319,13 @@ uint32_t GrammarApplicator::doesTagMatchReading(const Reading& reading, const Ta
 		}
 	}
 	else if (tag.type & T_NUMERICAL) {
-		boost_foreach (const Reading::tags_numerical_t::value_type& mter, reading.tags_numerical) {
+		for (auto mter : reading.tags_numerical) {
 			const Tag& itag = *(mter.second);
-			int32_t compval = tag.comparison_val;
-			if (compval == INT_MIN) {
+			double compval = tag.comparison_val;
+			if (compval <= NUMERIC_MIN) {
 				compval = reading.parent->getMin(tag.comparison_hash);
 			}
-			else if (compval == INT_MAX) {
+			else if (compval >= NUMERIC_MAX) {
 				compval = reading.parent->getMax(tag.comparison_hash);
 			}
 			if (tag.comparison_hash == itag.comparison_hash) {
@@ -441,7 +498,7 @@ uint32_t GrammarApplicator::doesTagMatchReading(const Reading& reading, const Ta
 }
 
 bool GrammarApplicator::doesSetMatchReading_trie(const Reading& reading, const Set& theset, const trie_t& trie, bool unif_mode) {
-	boost_foreach (const trie_t::value_type& kv, trie) {
+	for (auto& kv : trie) {
 		bool match = (doesTagMatchReading(reading, *kv.first, unif_mode) != 0);
 		if (match) {
 			if (kv.first->type & T_FAILFAST) {
@@ -449,7 +506,7 @@ bool GrammarApplicator::doesSetMatchReading_trie(const Reading& reading, const S
 			}
 			if (kv.second.terminal) {
 				if (unif_mode) {
-					BOOST_AUTO(it, unif_tags->find(theset.number));
+					auto it = unif_tags->find(theset.number);
 					if (it != unif_tags->end() && it->second != &kv) {
 						continue;
 					}
@@ -478,7 +535,7 @@ bool GrammarApplicator::doesSetMatchReading_tags(const Reading& reading, const S
 	bool retval = false;
 
 	if (!theset.ff_tags.empty()) {
-		boost_foreach (const Tag *tag, theset.ff_tags) {
+		for (auto tag : theset.ff_tags) {
 			if (doesTagMatchReading(reading, *tag, unif_mode)) {
 				return false;
 			}
@@ -494,7 +551,7 @@ bool GrammarApplicator::doesSetMatchReading_tags(const Reading& reading, const S
 			if (*oiter == iiter->first->hash) {
 				if (iiter->second.terminal) {
 					if (unif_mode) {
-						BOOST_AUTO(it, unif_tags->find(theset.number));
+						auto it = unif_tags->find(theset.number);
 						if (it != unif_tags->end() && it->second != &*iiter) {
 							++iiter;
 							continue;
@@ -587,10 +644,10 @@ bool GrammarApplicator::doesSetMatchReading(const Reading& reading, const uint32
 		}
 		// Subsequent times, test whether any of the previously stored sets match the reading
 		else {
-			BOOST_AUTO(sets, ss_u32sv.get());
-			foreach (usi, *unif_sets) {
-				if (doesSetMatchReading(reading, *usi, bypass_index, unif_mode)) {
-					sets->insert(*usi);
+			auto sets = ss_u32sv.get();
+			for (auto usi : *unif_sets) {
+				if (doesSetMatchReading(reading, usi, bypass_index, unif_mode)) {
+					sets->insert(usi);
 				}
 			}
 			retval = !sets->empty();
@@ -653,7 +710,7 @@ bool GrammarApplicator::doesSetMatchReading(const Reading& reading, const uint32
 		if (unif_mode || (theset.type & ST_TAG_UNIFY)) {
 			const void *tag = 0;
 			for (size_t i = 0; i < size; ++i) {
-				BOOST_AUTO(it, unif_tags->find(theset.sets[i]));
+				auto it = unif_tags->find(theset.sets[i]);
 				if (it != unif_tags->end()) {
 					tag = it->second;
 					break;
@@ -702,15 +759,20 @@ inline bool _check_options(std::vector<Reading*>& rv, uint32_t options, size_t n
 
 inline bool GrammarApplicator::doesSetMatchCohort_testLinked(Cohort& cohort, const Set& theset, dSMC_Context *context) {
 	bool retval = true;
+	bool reset = false;
 	const ContextualTest *linked = 0;
-	inc_dec<size_t> ic;
+	Cohort *min = 0;
+	Cohort *max = 0;
 
 	if (context->test && context->test->linked) {
 		linked = context->test->linked;
 	}
-	else if (!tmpl_cntxs.empty() && tmpl_cntx_pos < tmpl_cntxs.size()) {
-		ic.inc(tmpl_cntx_pos);
-		linked = tmpl_cntxs[tmpl_cntxs.size() - tmpl_cntx_pos].test;
+	else if (!tmpl_cntx.linked.empty()) {
+		min = tmpl_cntx.min;
+		max = tmpl_cntx.max;
+		linked = tmpl_cntx.linked.back();
+		tmpl_cntx.linked.pop_back();
+		reset = true;
 	}
 	if (linked) {
 		if (!context->did_test) {
@@ -726,13 +788,20 @@ inline bool GrammarApplicator::doesSetMatchCohort_testLinked(Cohort& cohort, con
 		}
 		retval = context->matched_tests;
 	}
+	if (reset) {
+		tmpl_cntx.linked.push_back(linked);
+	}
+	if (!retval) {
+		tmpl_cntx.min = min;
+		tmpl_cntx.max = max;
+	}
 	return retval;
 }
 
 inline bool GrammarApplicator::doesSetMatchCohort_helper(Cohort& cohort, Reading& reading, const Set& theset, dSMC_Context *context) {
 	bool retval = false;
-	BOOST_AUTO(utags, ss_utags.get());
-	BOOST_AUTO(usets, ss_u32sv.get());
+	auto utags = ss_utags.get();
+	auto usets = ss_u32sv.get();
 	uint8_t orz = regexgrps.first;
 
 	if (context && !(current_rule->flags & FL_CAPTURE_UNIF) && (theset.type & ST_CHILD_UNIFY)) {
@@ -799,8 +868,7 @@ bool GrammarApplicator::doesSetMatchCohortNormal(Cohort& cohort, const uint32_t
 		if (lists[i] == 0) {
 			continue;
 		}
-		foreach (iter, *lists[i]) {
-			Reading *reading = *iter;
+		for (auto reading : *lists[i]) {
 			if (context && context->test) {
 				// ToDo: Barriers need some way to escape sub-readings
 				reading = get_sub_reading(reading, context->test->offset_sub);
@@ -853,8 +921,7 @@ bool GrammarApplicator::doesSetMatchCohortCareful(Cohort& cohort, const uint32_t
 		if (lists[i] == 0) {
 			continue;
 		}
-		foreach (iter, *lists[i]) {
-			Reading *reading = *iter;
+		for (auto reading : *lists[i]) {
 			if (context && context->test) {
 				// ToDo: Barriers need some way to escape sub-readings
 				reading = get_sub_reading(reading, context->test->offset_sub);
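
The new doesRegexpMatchLine() reuses the same caching shape as doesTagMatchRegexp(): consult a negative index and a positive index keyed by a hash of the text, and only fall through to the regex engine (recording the outcome) on a miss. A rough stand-alone sketch of that shape, with std::regex and std::hash standing in for ICU's uregex_* API and CG-3's own hashing, and with capture-group propagation left out:

#include <cstddef>
#include <regex>
#include <string>
#include <unordered_set>

bool cached_match(const std::string& text, const std::regex& re,
                  std::unordered_set<std::size_t>& index_yes,
                  std::unordered_set<std::size_t>& index_no) {
	const std::size_t h = std::hash<std::string>{}(text);
	if (index_no.count(h)) {
		return false; // previously seen, known not to match
	}
	if (index_yes.count(h)) {
		return true; // previously seen, known to match
	}
	const bool match = std::regex_search(text, re);
	(match ? index_yes : index_no).insert(h);
	return match;
}

int main() {
	std::unordered_set<std::size_t> yes, no;
	std::regex re("\"<.*>\"");
	return cached_match("\"<word>\" N Sg", re, yes, no) ? 0 : 1;
}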
diff --git a/src/GrammarApplicator_reflow.cpp b/src/GrammarApplicator_reflow.cpp
index 0ffcf90..bbcc159 100644
--- a/src/GrammarApplicator_reflow.cpp
+++ b/src/GrammarApplicator_reflow.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -34,7 +34,7 @@ Tag *GrammarApplicator::makeBaseFromWord(uint32_t tag) {
 }
 
 Tag *GrammarApplicator::makeBaseFromWord(Tag *tag) {
-	const size_t len = tag->tag.length();
+	const size_t len = tag->tag.size();
 	if (len < 5) {
 		return tag;
 	}
@@ -223,13 +223,13 @@ void GrammarApplicator::reflowDependencyWindow(uint32_t max) {
 		gWindow->cohort_map[0] = tmp;
 	}
 
-	for (BOOST_AUTO(begin, gWindow->dep_window.begin()); begin != gWindow->dep_window.end();) {
+	for (auto begin = gWindow->dep_window.begin(); begin != gWindow->dep_window.end();) {
 		while (begin != gWindow->dep_window.end() && (begin->second->type & CT_DEP_DONE || !begin->second->dep_self)) {
 			++begin;
 		}
 		gWindow->dep_map.clear();
 
-		BOOST_AUTO(end, begin);
+		auto end = begin;
 		for (; end != gWindow->dep_window.end(); ++end) {
 			Cohort *cohort = end->second;
 			if (cohort->type & CT_DEP_DONE) {
@@ -304,9 +304,9 @@ void GrammarApplicator::reflowRelationWindow(uint32_t max) {
 		}
 
 		for (RelationCtn::iterator rel = cohort->relations_input.begin(); rel != cohort->relations_input.end();) {
-			BOOST_AUTO(newrel, ss_u32sv.get());
+			auto newrel = ss_u32sv.get();
 
-			boost_foreach (uint32_t target, rel->second) {
+			for (auto target : rel->second) {
 				uint32FlatHashMap::iterator it = gWindow->relation_map.find(target);
 				if (it != gWindow->relation_map.end()) {
 					cohort->relations[rel->first].insert(it->second);
@@ -337,14 +337,15 @@ void GrammarApplicator::reflowReading(Reading& reading) {
 	reading.tags_textual_bloom.clear();
 	reading.tags_plain_bloom.clear();
 	reading.mapping = 0;
+	reading.tags_string.clear();
 
 	insert_if_exists(reading.parent->possible_sets, grammar->sets_any);
 
 	Reading::tags_list_t tlist;
 	tlist.swap(reading.tags_list);
 
-	foreach (tter, tlist) {
-		addTagToReading(reading, *tter, false);
+	for (auto tter : tlist) {
+		addTagToReading(reading, tter, false);
 	}
 
 	reading.rehash();
@@ -353,13 +354,13 @@ void GrammarApplicator::reflowReading(Reading& reading) {
 Tag *GrammarApplicator::generateVarstringTag(const Tag *tag) {
 	static UnicodeString tmp;
 	tmp.remove();
-	tmp.append(tag->tag.c_str(), tag->tag.length());
+	tmp.append(tag->tag.c_str(), tag->tag.size());
 	bool did_something = false;
 
 	// Replace unified sets with their matching tags
 	if (tag->vs_sets) {
 		for (size_t i = 0; i < tag->vs_sets->size(); ++i) {
-			BOOST_AUTO(tags, ss_taglist.get());
+			auto tags = ss_taglist.get();
 			getTagList(*(*tag->vs_sets)[i], tags);
 			static UString rpl;
 			rpl.clear();
@@ -465,6 +466,14 @@ uint32_t GrammarApplicator::addTagToReading(Reading& reading, Tag *tag, bool reh
 	reading.tags.insert(tag->hash);
 	reading.tags_list.push_back(tag->hash);
 	reading.tags_bloom.insert(tag->hash);
+	// ToDo: Remove for real ordered mode
+	if (ordered) {
+		if (!reading.tags_string.empty()) {
+			reading.tags_string += ' ';
+		}
+		reading.tags_string += tag->tag;
+		reading.tags_string_hash = hash_value(reading.tags_string);
+	}
 	if (grammar->parentheses.find(tag->hash) != grammar->parentheses.end()) {
 		reading.parent->is_pleft = tag->hash;
 	}
@@ -604,11 +613,11 @@ void GrammarApplicator::splitMappings(TagList& mappings, Cohort& cohort, Reading
 	Tag *tag = mappings.back();
 	mappings.pop_back();
 	size_t i = mappings.size();
-	foreach (ttag, mappings) {
+	for (auto ttag : mappings) {
 		// To avoid duplicating needlessly many times, check for a similar reading in the cohort that's already got this mapping
 		bool found = false;
-		foreach (itr, cohort.readings) {
-			if ((*itr)->hash_plain == reading.hash_plain && (*itr)->mapping && (*itr)->mapping->hash == (*ttag)->hash) {
+		for (auto itr : cohort.readings) {
+			if (itr->hash_plain == reading.hash_plain && itr->mapping && itr->mapping->hash == ttag->hash) {
 				found = true;
 				break;
 			}
@@ -619,12 +628,12 @@ void GrammarApplicator::splitMappings(TagList& mappings, Cohort& cohort, Reading
 		Reading *nr = alloc_reading(reading);
 		nr->mapped = mapped;
 		nr->number = reading.number - i--;
-		uint32_t mp = addTagToReading(*nr, *ttag);
-		if (mp != (*ttag)->hash) {
+		uint32_t mp = addTagToReading(*nr, ttag);
+		if (mp != ttag->hash) {
 			nr->mapping = single_tags.find(mp)->second;
 		}
 		else {
-			nr->mapping = *ttag;
+			nr->mapping = ttag;
 		}
 		cohort.appendReading(nr);
 		numReadings++;
@@ -646,8 +655,8 @@ void GrammarApplicator::splitAllMappings(all_mappings_t& all_mappings, Cohort& c
 	}
 	static ReadingList readings;
 	readings = cohort.readings;
-	boost_foreach (Reading *reading, readings) {
-		BOOST_AUTO(iter, all_mappings.find(reading));
+	for (auto reading : readings) {
+		auto iter = all_mappings.find(reading);
 		if (iter == all_mappings.end()) {
 			continue;
 		}
@@ -655,7 +664,7 @@ void GrammarApplicator::splitAllMappings(all_mappings_t& all_mappings, Cohort& c
 	}
 	std::sort(cohort.readings.begin(), cohort.readings.end(), CG3::Reading::cmp_number);
 	if (!grammar->reopen_mappings.empty()) {
-		boost_foreach (Reading *reading, cohort.readings) {
+		for (auto reading : cohort.readings) {
 			if (reading->mapping && grammar->reopen_mappings.count(reading->mapping->hash)) {
 				reading->mapped = false;
 			}
@@ -672,13 +681,12 @@ void GrammarApplicator::mergeReadings(ReadingList& readings) {
 	mlist.clear();
 	mlist.reserve(readings.size());
 
-	foreach (iter, readings) {
-		Reading *r = *iter;
+	for (auto r : readings) {
 		uint32_t hp = r->hash_plain, hplain = r->hash_plain;
 		uint32_t nm = 0;
 		if (trace) {
-			foreach (iter_hb, r->hit_by) {
-				hp = hash_value(*iter_hb, hp);
+			for (auto iter_hb : r->hit_by) {
+				hp = hash_value(iter_hb, hp);
 			}
 		}
 		if (r->mapping) {
@@ -689,8 +697,8 @@ void GrammarApplicator::mergeReadings(ReadingList& readings) {
 			hp = hash_value(sub->hash_plain, hp);
 			hplain = hash_value(sub->hash_plain, hplain);
 			if (trace) {
-				foreach (iter_hb, sub->hit_by) {
-					hp = hash_value(*iter_hb, hp);
+				for (auto iter_hb : sub->hit_by) {
+					hp = hash_value(iter_hb, hp);
 				}
 			}
 			if (sub->mapping) {
@@ -718,17 +726,17 @@ void GrammarApplicator::mergeReadings(ReadingList& readings) {
 	static std::vector<Reading*> order;
 	order.clear();
 
-	for (BOOST_AUTO(miter, mlist.begin()); miter != mlist.end(); miter++) {
+	for (auto miter = mlist.begin(); miter != mlist.end(); miter++) {
 		const ReadingList& clist = miter->second;
 		Reading *nr = alloc_reading(*(clist.front()));
 		if (nr->mapping) {
 			erase(nr->tags_list, nr->mapping->hash);
 		}
-		foreach (iter1, clist) {
-			if ((*iter1)->mapping && std::find(nr->tags_list.begin(), nr->tags_list.end(), (*iter1)->mapping->hash) == nr->tags_list.end()) {
-				nr->tags_list.push_back((*iter1)->mapping->hash);
+		for (auto iter1 : clist) {
+			if (iter1->mapping && std::find(nr->tags_list.begin(), nr->tags_list.end(), iter1->mapping->hash) == nr->tags_list.end()) {
+				nr->tags_list.push_back(iter1->mapping->hash);
 			}
-			free_reading(*iter1);
+			free_reading(iter1);
 		}
 		order.push_back(nr);
 	}
@@ -800,8 +808,7 @@ Cohort *GrammarApplicator::delimitAt(SingleWindow& current, Cohort *cohort) {
 	}
 
 	cohort = current.cohorts.back();
-	foreach (rter3, cohort->readings) {
-		Reading *reading = *rter3;
+	for (auto reading : cohort->readings) {
 		addTagToReading(*reading, endtag);
 	}
 	gWindow->rebuildCohortLinks();
@@ -813,46 +820,46 @@ void GrammarApplicator::reflowTextuals_Reading(Reading& r) {
 	if (r.next) {
 		reflowTextuals_Reading(*r.next);
 	}
-	foreach (it, r.tags) {
-		Tag *tag = single_tags.find(*it)->second;
+	for (auto it : r.tags) {
+		Tag *tag = single_tags.find(it)->second;
 		if (tag->type & T_TEXTUAL) {
-			r.tags_textual.insert(*it);
-			r.tags_textual_bloom.insert(*it);
+			r.tags_textual.insert(it);
+			r.tags_textual_bloom.insert(it);
 		}
 	}
 }
 
 void GrammarApplicator::reflowTextuals_Cohort(Cohort& c) {
-	foreach (it, c.enclosed) {
-		reflowTextuals_Cohort(**it);
+	for (auto it : c.enclosed) {
+		reflowTextuals_Cohort(*it);
 	}
-	foreach (it, c.removed) {
-		reflowTextuals_Cohort(**it);
+	for (auto it : c.removed) {
+		reflowTextuals_Cohort(*it);
 	}
-	foreach (it, c.readings) {
-		reflowTextuals_Reading(**it);
+	for (auto it : c.readings) {
+		reflowTextuals_Reading(*it);
 	}
-	foreach (it, c.deleted) {
-		reflowTextuals_Reading(**it);
+	for (auto it : c.deleted) {
+		reflowTextuals_Reading(*it);
 	}
-	foreach (it, c.delayed) {
-		reflowTextuals_Reading(**it);
+	for (auto it : c.delayed) {
+		reflowTextuals_Reading(*it);
 	}
 }
 
 void GrammarApplicator::reflowTextuals_SingleWindow(SingleWindow& sw) {
-	foreach (it, sw.cohorts) {
-		reflowTextuals_Cohort(**it);
+	for (auto it : sw.cohorts) {
+		reflowTextuals_Cohort(*it);
 	}
 }
 
 void GrammarApplicator::reflowTextuals() {
-	foreach (swit, gWindow->previous) {
-		reflowTextuals_SingleWindow(**swit);
+	for (auto swit : gWindow->previous) {
+		reflowTextuals_SingleWindow(*swit);
 	}
 	reflowTextuals_SingleWindow(*gWindow->current);
-	foreach (swit, gWindow->next) {
-		reflowTextuals_SingleWindow(**swit);
+	for (auto swit : gWindow->next) {
+		reflowTextuals_SingleWindow(*swit);
 	}
 }
 }
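
Besides the loop cleanups, the reflow changes make reflowReading() clear reading.tags_string, and addTagToReading() now rebuilds it as a space-joined copy of the tag list together with a refreshed hash whenever ordered mode is active, which is what the line-wide regex matching above runs against. A tiny sketch of that bookkeeping, with a stripped-down reading type and std::hash standing in for CG-3's hash_value():

#include <cstddef>
#include <functional>
#include <string>
#include <vector>

struct MiniReading {
	std::vector<std::string> tags_list;
	std::string tags_string;
	std::size_t tags_string_hash = 0;
};

void add_tag(MiniReading& r, const std::string& tag) {
	r.tags_list.push_back(tag);
	if (!r.tags_string.empty()) {
		r.tags_string += ' ';
	}
	r.tags_string += tag;
	r.tags_string_hash = std::hash<std::string>{}(r.tags_string);
}

int main() {
	MiniReading r;
	add_tag(r, "\"word\"");
	add_tag(r, "N");
	add_tag(r, "Sg");
	return r.tags_string == "\"word\" N Sg" ? 0 : 1;
}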
diff --git a/src/GrammarApplicator_runContextualTest.cpp b/src/GrammarApplicator_runContextualTest.cpp
index 7e30e2a..c97a924 100644
--- a/src/GrammarApplicator_runContextualTest.cpp
+++ b/src/GrammarApplicator_runContextualTest.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -50,8 +50,7 @@ Cohort *GrammarApplicator::runSingleTest(Cohort *cohort, const ContextualTest *t
 				if (lists[i] == 0) {
 					continue;
 				}
-				foreach (iter, *lists[i]) {
-					Reading *reading = *iter;
+				for (auto reading : *lists[i]) {
 					reading->matched_target = false;
 					reading->matched_tests = false;
 				}
@@ -87,8 +86,8 @@ Cohort *GrammarApplicator::runSingleTest(Cohort *cohort, const ContextualTest *t
 	if (context.matched_target && (test->pos & POS_SCANFIRST)) {
 		rvs |= TRV_BREAK;
 	}
-	else if (!(test->pos & (POS_SCANALL | POS_SCANFIRST | POS_SELF))) {
-		rvs |= TRV_BREAK;
+	else if (!(test->pos & (POS_SCANALL | POS_SCANFIRST))) {
+		rvs |= TRV_BREAK | TRV_BREAK_DEFAULT;
 	}
 
 	context.test = 0;
@@ -101,6 +100,7 @@ Cohort *GrammarApplicator::runSingleTest(Cohort *cohort, const ContextualTest *t
 		if (barrier) {
 			seen_barrier = true;
 			rvs |= TRV_BREAK | TRV_BARRIER;
+			rvs &= ~TRV_BREAK_DEFAULT;
 		}
 	}
 	if (test->cbarrier) {
@@ -109,6 +109,7 @@ Cohort *GrammarApplicator::runSingleTest(Cohort *cohort, const ContextualTest *t
 		if (cbarrier) {
 			seen_barrier = true;
 			rvs |= TRV_BREAK | TRV_BARRIER;
+			rvs &= ~TRV_BREAK_DEFAULT;
 		}
 	}
 	if (context.matched_target && *retval) {
@@ -169,9 +170,11 @@ bool GrammarApplicator::posOutputHelper(const SingleWindow *sWindow, uint32_t po
 		cohort,
 		cdeep,
 	};
-	if (!tmpl_cntxs.empty()) {
-		cs[2] = tmpl_cntxs.back().min;
-		cs[3] = tmpl_cntxs.back().max;
+	if (tmpl_cntx.min) {
+		cs[2] = tmpl_cntx.min;
+	}
+	if (tmpl_cntx.max) {
+		cs[3] = tmpl_cntx.max;
 	}
 
 	std::sort(cs, cs + 4, compare_Cohort());
@@ -205,10 +208,14 @@ bool GrammarApplicator::posOutputHelper(const SingleWindow *sWindow, uint32_t po
 }
 
 Cohort *GrammarApplicator::runContextualTest_tmpl(SingleWindow *sWindow, size_t position, const ContextualTest *test, ContextualTest *tmpl, Cohort *& cdeep, Cohort *origin) {
+	Cohort *min = tmpl_cntx.min;
+	Cohort *max = tmpl_cntx.max;
+	bool in_template = tmpl_cntx.in_template;
+	tmpl_cntx.in_template = true;
 	if (test->linked) {
-		tmpl_cntxs.push_back(test->linked);
+		tmpl_cntx.linked.push_back(test->linked);
 	}
-
+	
 	uint64_t orgpos = tmpl->pos;
 	int32_t orgoffset = tmpl->offset;
 	uint32_t orgcbar = tmpl->cbarrier;
@@ -239,7 +246,12 @@ Cohort *GrammarApplicator::runContextualTest_tmpl(SingleWindow *sWindow, size_t
 	}
 
 	if (test->linked) {
-		tmpl_cntxs.pop_back();
+		tmpl_cntx.linked.pop_back();
+	}
+	if (!cohort) {
+		tmpl_cntx.min = min;
+		tmpl_cntx.max = max;
+		tmpl_cntx.in_template = in_template;
 	}
 
 	return cohort;
@@ -274,7 +286,7 @@ Cohort *GrammarApplicator::runContextualTest(SingleWindow *sWindow, size_t posit
 	}
 	else if (!test->ors.empty()) {
 		Cohort *cdeep = 0;
-		boost_foreach (ContextualTest *iter, test->ors) {
+		for (auto iter : test->ors) {
 			dep_deep_seen.clear();
 			cohort = runContextualTest_tmpl(sWindow, position, test, iter, cdeep, origin);
 			if (cohort) {
@@ -302,22 +314,20 @@ Cohort *GrammarApplicator::runContextualTest(SingleWindow *sWindow, size_t posit
 		if (deep) {
 			*deep = cohort;
 		}
-		if (!tmpl_cntxs.empty()) {
-			tmpl_context_t& tmpl_cntx = tmpl_cntxs.back();
-			uint64_t gpos = (static_cast<uint64_t>(cohort->parent->number) << 32) | cohort->local_number;
-			if (tmpl_cntx.min == 0 || gpos < (static_cast<uint64_t>(tmpl_cntx.min->parent->number) << 32 | tmpl_cntx.min->local_number)) {
+		if (tmpl_cntx.in_template) {
+			auto gpos = make_64(cohort->parent->number, cohort->local_number);
+			if (tmpl_cntx.min == 0 || gpos < make_64(tmpl_cntx.min->parent->number, tmpl_cntx.min->local_number)) {
 				tmpl_cntx.min = cohort;
 			}
-			if (tmpl_cntx.max == 0 || gpos > (static_cast<uint64_t>(tmpl_cntx.max->parent->number) << 32 | tmpl_cntx.max->local_number)) {
+			if (tmpl_cntx.max == 0 || gpos > make_64(tmpl_cntx.max->parent->number, tmpl_cntx.max->local_number)) {
 				tmpl_cntx.max = cohort;
 			}
 			if (deep) {
-				tmpl_context_t& tmpl_cntx = tmpl_cntxs.back();
-				uint64_t gpos = (static_cast<uint64_t>((*deep)->parent->number) << 32) | (*deep)->local_number;
-				if (tmpl_cntx.min == 0 || gpos < (static_cast<uint64_t>(tmpl_cntx.min->parent->number) << 32 | tmpl_cntx.min->local_number)) {
+				auto gpos = make_64((*deep)->parent->number, (*deep)->local_number);
+				if (tmpl_cntx.min == 0 || gpos < make_64(tmpl_cntx.min->parent->number, tmpl_cntx.min->local_number)) {
 					tmpl_cntx.min = *deep;
 				}
-				if (tmpl_cntx.max == 0 || gpos > (static_cast<uint64_t>(tmpl_cntx.max->parent->number) << 32 | tmpl_cntx.max->local_number)) {
+				if (tmpl_cntx.max == 0 || gpos > make_64(tmpl_cntx.max->parent->number, tmpl_cntx.max->local_number)) {
 					tmpl_cntx.max = *deep;
 				}
 			}
@@ -334,7 +344,7 @@ Cohort *GrammarApplicator::runContextualTest(SingleWindow *sWindow, size_t posit
 			it = &depDescendentIters[ci_depths[4]++];
 		}
 		else if (test->pos & (POS_DEP_CHILD | POS_DEP_SIBLING)) {
-			Cohort *nc = runDependencyTest(sWindow, cohort, test, deep, origin);
+			Cohort *nc = runDependencyTest(sWindow, cohort, test, deep, origin, 0);
 			if (nc) {
 				cohort = nc;
 				retval = true;
@@ -416,6 +426,9 @@ Cohort *GrammarApplicator::runContextualTest(SingleWindow *sWindow, size_t posit
 			uint8_t rvs = 0;
 			if (test->pos & POS_SELF) {
 				cohort = runSingleTest(cohort, test, rvs, &retval, deep, origin);
+				if (!retval && (rvs & TRV_BREAK_DEFAULT)) {
+					rvs &= ~(TRV_BREAK | TRV_BREAK_DEFAULT);
+				}
 			}
 			if ((rvs & TRV_BREAK) && retval) {
 				goto label_gotACohort;
@@ -490,6 +503,9 @@ Cohort *GrammarApplicator::runContextualTest(SingleWindow *sWindow, size_t posit
 				assert(pos >= 0 && pos < static_cast<int32_t>(sWindow->cohorts.size()) && "Somehow, the input position wasn't inside the current window.");
 				Cohort *self = sWindow->cohorts[position];
 				nc = runSingleTest(self, test, rvs, &retval, deep, origin);
+				if (!retval && (rvs & TRV_BREAK_DEFAULT)) {
+					rvs &= ~(TRV_BREAK | TRV_BREAK_DEFAULT);
+				}
 			}
 			if (!(rvs & TRV_BREAK)) {
 				Cohort *current = cohort;
@@ -575,12 +591,12 @@ Cohort *GrammarApplicator::runDependencyTest(SingleWindow *sWindow, Cohort *curr
 		self = current;
 	}
 
-	// ToDo: Make the dep_deep_seen key a composite of cohort number and test hash so we don't have to clear as often
+	// ToDo: Now that dep_deep_seen is a composite, investigate all .clear() to see if they're needed
 	if (test->pos & POS_DEP_DEEP) {
-		if (index_matches(dep_deep_seen, current->global_number)) {
+		if (index_matches(dep_deep_seen, std::make_pair(test->hash, current->global_number))) {
 			return 0;
 		}
-		dep_deep_seen.insert(current->global_number);
+		dep_deep_seen.insert(std::make_pair(test->hash, current->global_number));
 	}
 
 	if ((test->pos & POS_SELF) && !(test->pos & MASK_POS_LORR)) {
@@ -620,15 +636,27 @@ Cohort *GrammarApplicator::runDependencyTest(SingleWindow *sWindow, Cohort *curr
 		}
 	}
 
+	// ToDo: This whole function could resolve cohorts earlier and skip doing it twice
 	if (test->pos & MASK_POS_LORR) {
-		tmp_deps = *deps;
-
-		if (test->pos & POS_LEFT) {
-			tmp_deps.assign(deps->begin(), deps->lower_bound(current->global_number));
-		}
-		if (test->pos & POS_RIGHT) {
-			tmp_deps.assign(deps->lower_bound(current->global_number), deps->end());
+		// Looping over the container that is slower to look up in seems the better trade-off here; profiling will tell.
+		for (auto iter : sWindow->parent->cohort_map) {
+			if (deps->count(iter.second->global_number)) {
+				if (test->pos & POS_LEFT) {
+					if (less_Cohort(iter.second, current)) {
+						tmp_deps.insert(iter.second->global_number);
+					}
+				}
+				else if ((test->pos & POS_RIGHT)) {
+					if (less_Cohort(current, iter.second)) {
+						tmp_deps.insert(iter.second->global_number);
+					}
+				}
+				else {
+					tmp_deps.insert(iter.second->global_number);
+				}
+			}
 		}
+
 		if (test->pos & POS_SELF) {
 			tmp_deps.insert(current->global_number);
 		}
@@ -640,23 +668,23 @@ Cohort *GrammarApplicator::runDependencyTest(SingleWindow *sWindow, Cohort *curr
 		deps = &tmp_deps;
 	}
 
-	foreach (dter, *deps) {
-		if (*dter == current->global_number && !(test->pos & POS_SELF)) {
+	for (auto dter : *deps) {
+		if (dter == current->global_number && !(test->pos & POS_SELF)) {
 			continue;
 		}
-		if (sWindow->parent->cohort_map.find(*dter) == sWindow->parent->cohort_map.end()) {
+		if (sWindow->parent->cohort_map.find(dter) == sWindow->parent->cohort_map.end()) {
 			if (verbosity_level > 0) {
 				if (test->pos & POS_DEP_CHILD) {
-					u_fprintf(ux_stderr, "Warning: Child dependency %u -> %u does not exist - ignoring.\n", current->dep_self, *dter);
+					u_fprintf(ux_stderr, "Warning: Child dependency %u -> %u does not exist - ignoring.\n", current->dep_self, dter);
 				}
 				else {
-					u_fprintf(ux_stderr, "Warning: Sibling dependency %u -> %u does not exist - ignoring.\n", current->dep_self, *dter);
+					u_fprintf(ux_stderr, "Warning: Sibling dependency %u -> %u does not exist - ignoring.\n", current->dep_self, dter);
 				}
 				u_fflush(ux_stderr);
 			}
 			continue;
 		}
-		Cohort *cohort = sWindow->parent->cohort_map.find(*dter)->second;
+		Cohort *cohort = sWindow->parent->cohort_map.find(dter)->second;
 		if (cohort->type & CT_REMOVED) {
 			continue;
 		}
@@ -688,7 +716,7 @@ Cohort *GrammarApplicator::runDependencyTest(SingleWindow *sWindow, Cohort *curr
 			break;
 		}
 		else if (rvs & TRV_BARRIER) {
-			break;
+			continue;
 		}
 		else if (test->pos & POS_DEP_DEEP) {
 			Cohort *tmc = runDependencyTest(cohort->parent, cohort, test, deep, origin, self);
@@ -734,8 +762,8 @@ Cohort *GrammarApplicator::runRelationTest(SingleWindow *sWindow, Cohort *curren
 	CohortSet rels;
 
 	if (test->relation == grammar->tag_any) {
-		foreach (riter, current->relations) {
-			boost_foreach (uint32_t citer, riter->second) {
+		for (auto riter : current->relations) {
+			for (auto citer : riter.second) {
 				std::map<uint32_t, Cohort*>::iterator it = sWindow->parent->cohort_map.find(citer);
 				if (it != sWindow->parent->cohort_map.end()) {
 					rels.insert(it->second);
@@ -746,7 +774,7 @@ Cohort *GrammarApplicator::runRelationTest(SingleWindow *sWindow, Cohort *curren
 	else {
 		RelationCtn::const_iterator riter = current->relations.find(test->relation);
 		if (riter != current->relations.end()) {
-			boost_foreach (uint32_t citer, riter->second) {
+			for (auto citer : riter->second) {
 				std::map<uint32_t, Cohort*>::iterator it = sWindow->parent->cohort_map.find(citer);
 				if (it != sWindow->parent->cohort_map.end()) {
 					rels.insert(it->second);
@@ -780,22 +808,22 @@ Cohort *GrammarApplicator::runRelationTest(SingleWindow *sWindow, Cohort *curren
 	}
 
 	Cohort *rv = 0;
-	foreach (iter, rels) {
+	for (auto iter : rels) {
 		uint8_t rvs = 0;
 		bool retval = false;
 
-		runSingleTest(*iter, test, rvs, &retval, deep, origin);
+		runSingleTest(iter, test, rvs, &retval, deep, origin);
 		if (test->pos & POS_ALL) {
 			if (!retval) {
 				rv = 0;
 				break;
 			}
 			else {
-				rv = *iter;
+				rv = iter;
 			}
 		}
 		else if (retval) {
-			rv = *iter;
+			rv = iter;
 			break;
 		}
 	}
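
runDependencyTest() now keys dep_deep_seen by a (test hash, cohort number) pair instead of the cohort number alone, so loop detection is scoped per test and the container needs clearing far less often. A small sketch of that guard, with std::set standing in for CG-3's sorted_vector:

#include <cstdint>
#include <set>
#include <utility>

using seen_t = std::set<std::pair<uint32_t, uint32_t>>;

// Returns false if this (test, cohort) pair was already visited; otherwise
// records it and returns true, mirroring the early return in the hunk above.
bool mark_seen(seen_t& dep_deep_seen, uint32_t test_hash, uint32_t cohort_number) {
	return dep_deep_seen.insert(std::make_pair(test_hash, cohort_number)).second;
}

int main() {
	seen_t seen;
	bool first = mark_seen(seen, 42, 7);  // true: not visited before
	bool again = mark_seen(seen, 42, 7);  // false: loop detected for this test
	bool other = mark_seen(seen, 43, 7);  // true: a different test may still visit cohort 7
	return (first && !again && other) ? 0 : 1;
}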
diff --git a/src/GrammarApplicator_runGrammar.cpp b/src/GrammarApplicator_runGrammar.cpp
index ee07da3..6e44d24 100644
--- a/src/GrammarApplicator_runGrammar.cpp
+++ b/src/GrammarApplicator_runGrammar.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -141,10 +141,14 @@ void GrammarApplicator::runGrammarOnText(istream& input, UFILE *output) {
 				}
 				cleaned[packoff++] = line[i];
 			}
-			// If we reached this, buffer wasn't big enough. Double the size of the buffer and try again.
-			offset = line.size() - 2;
-			line.resize(line.size() * 2, 0);
-			cleaned.resize(line.size() + 1, 0);
+			// Either the buffer wasn't big enough, or someone fed us malformed data that treats U+0085 as an ellipsis when it is in fact Next Line (NEL)
+			line = cleaned;
+			offset = packoff;
+			if (packoff > line.size() / 2) {
+				// If we reached this, buffer wasn't big enough. Double the size of the buffer and try again.
+				line.resize(line.size() * 2, 0);
+				cleaned.resize(line.size() + 1, 0);
+			}
 		}
 
 	gotaline:
@@ -198,8 +202,8 @@ void GrammarApplicator::runGrammarOnText(istream& input, UFILE *output) {
 					u_fprintf(ux_stderr, "Warning: Soft limit of %u cohorts reached at line %u but found suitable soft delimiter.\n", soft_limit, numLines);
 					u_fflush(ux_stderr);
 				}
-				foreach (iter, cCohort->readings) {
-					addTagToReading(**iter, endtag);
+				for (auto iter : cCohort->readings) {
+					addTagToReading(*iter, endtag);
 				}
 
 				splitAllMappings(all_mappings, *cCohort, true);
@@ -215,8 +219,8 @@ void GrammarApplicator::runGrammarOnText(istream& input, UFILE *output) {
 					u_fprintf(ux_stderr, "Warning: Hard limit of %u cohorts reached at line %u - forcing break.\n", hard_limit, numLines);
 					u_fflush(ux_stderr);
 				}
-				foreach (iter, cCohort->readings) {
-					addTagToReading(**iter, endtag);
+				for (auto iter : cCohort->readings) {
+					addTagToReading(*iter, endtag);
 				}
 
 				splitAllMappings(all_mappings, *cCohort, true);
@@ -373,7 +377,7 @@ void GrammarApplicator::runGrammarOnText(istream& input, UFILE *output) {
 				cCohort->appendReading(cReading);
 			}
 			else {
-				BOOST_AUTO(iter, all_mappings.find(cReading));
+				auto iter = all_mappings.find(cReading);
 				if (iter != all_mappings.end()) {
 					while (iter->second.size() > 1) {
 						u_fprintf(ux_stderr, "Warning: Sub-reading mapping %S on line %u will be discarded.\n", iter->second.back()->tag.c_str(), numLines);
@@ -394,8 +398,8 @@ void GrammarApplicator::runGrammarOnText(istream& input, UFILE *output) {
 				gWindow->dep_map.clear();
 				gWindow->dep_window.clear();
 
-				foreach (iter, cSWindow->cohorts.back()->readings) {
-					addTagToReading(**iter, endtag);
+				for (auto iter : cSWindow->cohorts.back()->readings) {
+					addTagToReading(*iter, endtag);
 				}
 
 				cSWindow = gWindow->allocAppendSingleWindow();
@@ -415,8 +419,8 @@ void GrammarApplicator::runGrammarOnText(istream& input, UFILE *output) {
 				if (grammar->has_bag_of_tags) {
 					// This is slow and not 100% correct as it doesn't remove the tags from the previous window
 					cCohort->parent = cSWindow;
-					foreach (rit, cCohort->readings) {
-						reflowReading(**rit);
+					for (auto rit : cCohort->readings) {
+						reflowReading(*rit);
 					}
 				}
 			}
@@ -431,15 +435,17 @@ void GrammarApplicator::runGrammarOnText(istream& input, UFILE *output) {
 		istext:
 			if (cleaned[0]) {
 				if (u_strcmp(&cleaned[0], stringbits[S_CMD_FLUSH].getTerminatedBuffer()) == 0) {
-					u_fprintf(ux_stderr, "Info: FLUSH encountered on line %u. Flushing...\n", numLines);
+					if (verbosity_level > 0) {
+						u_fprintf(ux_stderr, "Info: FLUSH encountered on line %u. Flushing...\n", numLines);
+					}
 					if (cCohort && cSWindow) {
 						splitAllMappings(all_mappings, *cCohort, true);
 						cSWindow->appendCohort(cCohort);
 						if (cCohort->readings.empty()) {
 							initEmptyCohort(*cCohort);
 						}
-						foreach (iter, cCohort->readings) {
-							addTagToReading(**iter, endtag);
+						for (auto iter : cCohort->readings) {
+							addTagToReading(*iter, endtag);
 						}
 						cReading = lReading = 0;
 						cCohort = lCohort = 0;
@@ -478,15 +484,21 @@ void GrammarApplicator::runGrammarOnText(istream& input, UFILE *output) {
 					fflush(stderr);
 				}
 				else if (u_strcmp(&cleaned[0], stringbits[S_CMD_IGNORE].getTerminatedBuffer()) == 0) {
-					u_fprintf(ux_stderr, "Info: IGNORE encountered on line %u. Passing through all input...\n", numLines);
+					if (verbosity_level > 0) {
+						u_fprintf(ux_stderr, "Info: IGNORE encountered on line %u. Passing through all input...\n", numLines);
+					}
 					ignoreinput = true;
 				}
 				else if (u_strcmp(&cleaned[0], stringbits[S_CMD_RESUME].getTerminatedBuffer()) == 0) {
-					u_fprintf(ux_stderr, "Info: RESUME encountered on line %u. Resuming CG...\n", numLines);
+					if (verbosity_level > 0) {
+						u_fprintf(ux_stderr, "Info: RESUME encountered on line %u. Resuming CG...\n", numLines);
+					}
 					ignoreinput = false;
 				}
 				else if (u_strcmp(&cleaned[0], stringbits[S_CMD_EXIT].getTerminatedBuffer()) == 0) {
-					u_fprintf(ux_stderr, "Info: EXIT encountered on line %u. Exiting...\n", numLines);
+					if (verbosity_level > 0) {
+						u_fprintf(ux_stderr, "Info: EXIT encountered on line %u. Exiting...\n", numLines);
+					}
 					u_fprintf(output, "%S", &line[0]);
 					goto CGCMD_EXIT;
 				}
@@ -618,8 +630,8 @@ void GrammarApplicator::runGrammarOnText(istream& input, UFILE *output) {
 		if (cCohort->readings.empty()) {
 			initEmptyCohort(*cCohort);
 		}
-		foreach (iter, cCohort->readings) {
-			addTagToReading(**iter, endtag);
+		for (auto iter : cCohort->readings) {
+			addTagToReading(*iter, endtag);
 		}
 		cReading = 0;
 		cCohort = 0;
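
This file's changes also gate the informational notices for the FLUSH, IGNORE, RESUME and EXIT stream commands behind verbosity_level > 0, while still acting on the commands themselves. A rough sketch of that gating; the command spellings, the state struct and the handler are placeholders rather than CG-3's actual stream protocol:

#include <cstdio>
#include <string>

struct StreamState {
	unsigned verbosity_level = 0;
	bool ignoreinput = false;
};

void handle_command(StreamState& s, const std::string& cmd, unsigned line_no) {
	if (cmd == "IGNORE") {
		if (s.verbosity_level > 0) {
			std::fprintf(stderr, "Info: IGNORE encountered on line %u. Passing through all input...\n", line_no);
		}
		s.ignoreinput = true;
	}
	else if (cmd == "RESUME") {
		if (s.verbosity_level > 0) {
			std::fprintf(stderr, "Info: RESUME encountered on line %u. Resuming CG...\n", line_no);
		}
		s.ignoreinput = false;
	}
}

int main() {
	StreamState s; // default verbosity: silent
	handle_command(s, "IGNORE", 12);
	handle_command(s, "RESUME", 20);
	return s.ignoreinput ? 1 : 0;
}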
diff --git a/src/GrammarApplicator_runRules.cpp b/src/GrammarApplicator_runRules.cpp
index bba0d5f..c6a8e56 100644
--- a/src/GrammarApplicator_runRules.cpp
+++ b/src/GrammarApplicator_runRules.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -36,6 +36,7 @@ enum {
 	RV_NOTHING   = 1,
 	RV_SOMETHING = 2,
 	RV_DELIMITED = 4,
+	RV_TRACERULE = 8,
 };
 
 bool GrammarApplicator::doesWordformsMatch(const Tag *cword, const Tag *rword) {
@@ -77,9 +78,9 @@ bool GrammarApplicator::updateValidRules(const uint32IntervalVector& rules, uint
 	Grammar::rules_by_tag_t::const_iterator it = grammar->rules_by_tag.find(hash);
 	if (it != grammar->rules_by_tag.end()) {
 		Cohort& c = *(reading.parent);
-		foreach (rsit, (it->second)) {
-			if (updateRuleToCohorts(c, *rsit) && rules.contains(*rsit)) {
-				intersects.insert(*rsit);
+		for (auto rsit : (it->second)) {
+			if (updateRuleToCohorts(c, rsit) && rules.contains(rsit)) {
+				intersects.insert(rsit);
 			}
 		}
 	}
@@ -89,21 +90,20 @@ bool GrammarApplicator::updateValidRules(const uint32IntervalVector& rules, uint
 void GrammarApplicator::indexSingleWindow(SingleWindow& current) {
 	current.valid_rules.clear();
 	current.rule_to_cohorts.resize(grammar->rule_by_number.size());
-	boost_foreach (CohortSet& cs, current.rule_to_cohorts) {
+	for (auto& cs : current.rule_to_cohorts) {
 		cs.clear();
 	}
 
-	foreach (iter, current.cohorts) {
-		Cohort *c = *iter;
+	for (auto c : current.cohorts) {
 		for (uint32_t psit = 0; psit < c->possible_sets.size(); ++psit) {
 			if (c->possible_sets.test(psit) == false) {
 				continue;
 			}
-			BOOST_AUTO(rules_it, grammar->rules_by_set.find(psit));
+			auto rules_it = grammar->rules_by_set.find(psit);
 			if (rules_it == grammar->rules_by_set.end()) {
 				continue;
 			}
-			boost_foreach (uint32_t rsit, rules_it->second) {
+			for (auto rsit : rules_it->second) {
 				updateRuleToCohorts(*c, rsit);
 			}
 		}
@@ -119,24 +119,24 @@ TagList GrammarApplicator::getTagList(const Set& theSet, bool unif_mode) const {
 void GrammarApplicator::getTagList(const Set& theSet, TagList& theTags, bool unif_mode) const {
 	if (theSet.type & ST_SET_UNIFY) {
 		const Set& pSet = *(grammar->sets_list[theSet.sets[0]]);
-		foreach (iter, pSet.sets) {
-			if (unif_sets->count(*iter)) {
-				getTagList(*(grammar->sets_list[*iter]), theTags);
+		for (auto iter : pSet.sets) {
+			if (unif_sets->count(iter)) {
+				getTagList(*(grammar->sets_list[iter]), theTags);
 			}
 		}
 	}
 	else if (theSet.type & ST_TAG_UNIFY) {
-		foreach (iter, theSet.sets) {
-			getTagList(*(grammar->sets_list[*iter]), theTags, true);
+		for (auto iter : theSet.sets) {
+			getTagList(*(grammar->sets_list[iter]), theTags, true);
 		}
 	}
 	else if (!theSet.sets.empty()) {
-		foreach (iter, theSet.sets) {
-			getTagList(*(grammar->sets_list[*iter]), theTags, unif_mode);
+		for (auto iter : theSet.sets) {
+			getTagList(*(grammar->sets_list[iter]), theTags, unif_mode);
 		}
 	}
 	else if (unif_mode) {
-		BOOST_AUTO(iter, unif_tags->find(theSet.number));
+		auto iter = unif_tags->find(theSet.number);
 		if (iter != unif_tags->end()) {
 			trie_getTagList(theSet.trie, theTags, iter->second);
 			trie_getTagList(theSet.trie_special, theTags, iter->second);
@@ -174,15 +174,15 @@ Reading *GrammarApplicator::get_sub_reading(Reading *tr, int sub_reading) {
 			tr = tr->next;
 			reading->tags_list.push_back(0);
 			reading->tags_list.insert(reading->tags_list.end(), tr->tags_list.begin(), tr->tags_list.end());
-			boost_foreach (uint32_t tag, tr->tags) {
+			for (auto tag : tr->tags) {
 				reading->tags.insert(tag);
 				reading->tags_bloom.insert(tag);
 			}
-			boost_foreach (uint32_t tag, tr->tags_plain) {
+			for (auto tag : tr->tags_plain) {
 				reading->tags_plain.insert(tag);
 				reading->tags_plain_bloom.insert(tag);
 			}
-			boost_foreach (uint32_t tag, tr->tags_textual) {
+			for (auto tag : tr->tags_textual) {
 				reading->tags_textual.insert(tag);
 				reading->tags_textual_bloom.insert(tag);
 			}
@@ -215,6 +215,9 @@ Reading *GrammarApplicator::get_sub_reading(Reading *tr, int sub_reading) {
 			ttr = ttr->next;
 			--ntr;
 		}
+		if (!tr->next) {
+			tr = 0;
+		}
 		for (int i = ntr; i < sub_reading && tr; ++i) {
 			tr = tr->next;
 		}
@@ -222,6 +225,79 @@ Reading *GrammarApplicator::get_sub_reading(Reading *tr, int sub_reading) {
 	return tr;
 }
 
+#define TRACE \
+	do { \
+		reading.hit_by.push_back(rule.number); \
+		if (rule.sub_reading == 32767) { \
+			reading_head.hit_by.push_back(rule.number); \
+		} \
+	} while(0)
+
+#define FILL_TAG_LIST(taglist) \
+	do { \
+		for (TagList::iterator it = (taglist)->begin(); it != (taglist)->end();) { \
+			if (reading.tags.find((*it)->hash) == reading.tags.end()) { \
+				const Tag *tt = *it; \
+				it = (taglist)->erase(it); \
+				if (tt->type & T_SPECIAL) { \
+					if (regexgrps.second == 0) { \
+						regexgrps.second = &regexgrps_store[used_regex]; \
+					} \
+					uint32_t stag = doesTagMatchReading(reading, *tt, false, true); \
+					if (stag) { \
+						(taglist)->insert(it, single_tags.find(stag)->second); \
+					} \
+				} \
+				continue; \
+			} \
+			++it; \
+		} \
+	} while(0)
+
+#define APPEND_TAGLIST_TO_READING(taglist, reading) \
+	do { \
+		for (auto tter : (taglist)) { \
+			uint32_t hash = tter->hash; \
+			while (tter->type & T_VARSTRING) { \
+				tter = generateVarstringTag(tter); \
+			} \
+			if (tter->type & T_MAPPING || tter->tag[0] == grammar->mapping_prefix) { \
+				mappings->push_back(tter); \
+			} \
+			else { \
+				hash = addTagToReading((reading), tter); \
+			} \
+			if (updateValidRules(rules, intersects, hash, (reading))) { \
+				iter_rules = intersects.find(rule.number); \
+				iter_rules_end = intersects.end(); \
+			} \
+		} \
+	} while (0)
+
+#define INSERT_TAGLIST_TO_READING(iter, taglist, reading) \
+	do { \
+		for (auto tag : (taglist)) { \
+			if (tag->type & T_VARSTRING) { \
+				tag = generateVarstringTag(tag); \
+			} \
+			if (tag->hash == grammar->tag_any) { \
+				break; \
+			} \
+			if (tag->type & T_MAPPING || tag->tag[0] == grammar->mapping_prefix) { \
+				mappings->push_back(tag); \
+			} \
+			else { \
+				(iter) = (reading).tags_list.insert((iter), tag->hash); \
+				++(iter); \
+			} \
+			if (updateValidRules(rules, intersects, tag->hash, (reading))) { \
+				iter_rules = intersects.find(rule.number); \
+				iter_rules_end = intersects.end(); \
+			} \
+		} \
+		reflowReading(reading); \
+	} while(0)
+
 /**
  * Applies the passed rules to the passed SingleWindow.
  *
@@ -257,6 +333,8 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 	current.parent->cohort_map[0] = current.cohorts.front();
 
 	foreach (iter_rules, intersects) {
+	repeat_rule:
+		bool rule_did_something = false;
 		uint32_t j = (*iter_rules);
 
 		// Check whether this rule is in the allowed rule list from cmdline flag --rule(s)
@@ -512,9 +590,8 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 							// Keeps track of where we have been, to prevent infinite recursion in trees with loops
 							dep_deep_seen.clear();
 							// Reset the counters for which types of CohortIterator we have in play
-							std::fill(ci_depths.begin(), ci_depths.end(), 0);
-							tmpl_cntxs.clear();
-							tmpl_cntx_pos = 0;
+							std::fill(ci_depths.begin(), ci_depths.end(), static_cast<uint32_t>(0));
+							tmpl_cntx.clear();
 							// Run the contextual test...
 							if (!(test->pos & POS_PASS_ORIGIN) && (no_pass_origin || (test->pos & POS_NO_PASS_ORIGIN))) {
 								test_good = (runContextualTest(&current, c, test, 0, cohort) != 0);
@@ -548,7 +625,7 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 						++rule.num_match;
 
 						if (regex_prop && i && regexgrps_c.size()) {
-							BOOST_AUTO(it, regexgrps_c.find(cohort->readings[i - 1]->number));
+							auto it = regexgrps_c.find(cohort->readings[i - 1]->number);
 							if (it != regexgrps_c.end()) {
 								regexgrps_c.insert(std::make_pair(reading->number, it->second));
 								regexgrps_z.insert(std::make_pair(reading->number, regexgrps_z.find(cohort->readings[i - 1]->number)->second));
@@ -644,7 +721,7 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 				}
 				if (dry_run) {
 					if (good) {
-						reading.hit_by.push_back(rule.number);
+						TRACE;
 					}
 					continue;
 				}
@@ -664,12 +741,12 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 					if (good) {
 						selected.push_back(&reading_head);
 						index_ruleCohort_no.clear();
-						reading.hit_by.push_back(rule.number);
+						TRACE;
 					}
 					else {
 						removed.push_back(&reading_head);
 						index_ruleCohort_no.clear();
-						reading.hit_by.push_back(rule.number);
+						TRACE;
 					}
 					if (good) {
 						if (debug_level > 0) {
@@ -687,7 +764,7 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 						}
 						else {
 							removed.push_back(&reading_head);
-							reading.hit_by.push_back(rule.number);
+							TRACE;
 						}
 						index_ruleCohort_no.clear();
 						if (debug_level > 0) {
@@ -695,7 +772,7 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 						}
 					}
 					else if (type == K_JUMP) {
-						reading.hit_by.push_back(rule.number);
+						TRACE;
 						const Tag *to = getTagList(*rule.maplist).front();
 						uint32FlatHashMap::const_iterator it = grammar->anchors.find(to->hash);
 						if (it == grammar->anchors.end()) {
@@ -705,13 +782,12 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 							iter_rules = intersects.lower_bound(it->second);
 							--iter_rules;
 						}
-						break;
+						goto repeat_rule;
 					}
 					else if (type == K_REMVARIABLE) {
-						reading.hit_by.push_back(rule.number);
+						TRACE;
 						const TagList names = getTagList(*rule.maplist);
-						foreach (tter, names) {
-							const Tag *tag = *tter;
+						for (auto tag : names) {
 							variables.erase(tag->hash);
 							if (rule.flags & RF_OUTPUT) {
 								current.variables_output.insert(tag->hash);
@@ -721,7 +797,7 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 						break;
 					}
 					else if (type == K_SETVARIABLE) {
-						reading.hit_by.push_back(rule.number);
+						TRACE;
 						const TagList names = getTagList(*rule.maplist);
 						const TagList values = getTagList(*rule.sublist);
 						variables[names.front()->hash] = values.front()->hash;
@@ -746,7 +822,7 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 						if (ei == externals.end()) {
 							Tag *ext = single_tags.find(rule.varname)->second;
 							UErrorCode err = U_ZERO_ERROR;
-							u_strToUTF8(&cbuffers[0][0], CG3_BUFFER_SIZE - 1, 0, ext->tag.c_str(), ext->tag.length(), &err);
+							u_strToUTF8(&cbuffers[0][0], CG3_BUFFER_SIZE - 1, 0, ext->tag.c_str(), ext->tag.size(), &err);
 
 							Process& es = externals[rule.varname];
 							try {
@@ -775,9 +851,9 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 						break;
 					}
 					else if (type == K_REMCOHORT) {
-						foreach (iter, cohort->readings) {
-							(*iter)->hit_by.push_back(rule.number);
-							(*iter)->deleted = true;
+						for (auto iter : cohort->readings) {
+							iter->hit_by.push_back(rule.number);
+							iter->deleted = true;
 						}
 						// Move any enclosed parentheses to the previous cohort
 						if (!cohort->enclosed.empty()) {
@@ -785,8 +861,8 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 							cohort->enclosed.clear();
 						}
 						// Remove the cohort from all rules
-						foreach (cs, current.rule_to_cohorts) {
-							cs->erase(cohort);
+						for (auto cs : current.rule_to_cohorts) {
+							cs.erase(cohort);
 						}
 						// Forward all children of this cohort to the parent of this cohort
 						// ToDo: Named relations must be erased
@@ -798,8 +874,8 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 						cohort->type |= CT_REMOVED;
 						cohort->prev->removed.push_back(cohort);
 						cohort->detach();
-						foreach (cm, current.parent->cohort_map) {
-							cm->second->dep_children.erase(cohort->dep_self);
+						for (auto cm : current.parent->cohort_map) {
+							cm.second->dep_children.erase(cohort->dep_self);
 						}
 						current.parent->cohort_map.erase(cohort->global_number);
 						current.cohorts.erase(current.cohorts.begin() + cohort->local_number);
@@ -809,7 +885,7 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 						gWindow->rebuildCohortLinks();
 						// If we just removed the last cohort, add <<< to the new last cohort
 						if (cohort->readings.front()->tags.count(endtag)) {
-							boost_foreach (Reading *r, current.cohorts.back()->readings) {
+							for (auto r : current.cohorts.back()->readings) {
 								addTagToReading(*r, endtag);
 								if (updateValidRules(rules, intersects, endtag, *r)) {
 									iter_rules = intersects.find(rule.number);
@@ -824,16 +900,13 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 							rocit = cohortset->end();
 						}
 						else {
-							rocit = cohortset->find(current.cohorts[cohort->local_number]);
-							if (rocit != cohortset->end()) {
-								++rocit;
-							}
+							rocit = cohortset->lower_bound(current.cohorts[cohort->local_number]);
 						}
 						readings_changed = true;
 						break;
 					}
 					else if (type == K_ADDCOHORT_AFTER || type == K_ADDCOHORT_BEFORE) {
-						reading.hit_by.push_back(rule.number);
+						TRACE;
 						index_ruleCohort_no.clear();
 
 						Cohort *cCohort = alloc_cohort(&current);
@@ -841,23 +914,23 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 
 						Tag *wf = 0;
 						std::vector<TagList> readings;
-						BOOST_AUTO(theTags, ss_taglist.get());
+						auto theTags = ss_taglist.get();
 						getTagList(*rule.maplist, theTags);
 
-						foreach (tter, *theTags) {
-							while ((*tter)->type & T_VARSTRING) {
-								*tter = generateVarstringTag(*tter);
+						for (auto tter : *theTags) {
+							while (tter->type & T_VARSTRING) {
+								tter = generateVarstringTag(tter);
 							}
-							if ((*tter)->type & T_WORDFORM) {
-								cCohort->wordform = *tter;
-								wf = *tter;
+							if (tter->type & T_WORDFORM) {
+								cCohort->wordform = tter;
+								wf = tter;
 								continue;
 							}
 							if (!wf) {
 								u_fprintf(ux_stderr, "Error: There must be a wordform before any other tags in ADDCOHORT on line %u before input line %u.\n", rule.line, numLines);
 								CG3Quit(1);
 							}
-							if ((*tter)->type & T_BASEFORM) {
+							if (tter->type & T_BASEFORM) {
 								readings.resize(readings.size() + 1);
 								readings.back().push_back(wf);
 							}
@@ -865,23 +938,23 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 								u_fprintf(ux_stderr, "Error: There must be a baseform after the wordform in ADDCOHORT on line %u before input line %u.\n", rule.line, numLines);
 								CG3Quit(1);
 							}
-							readings.back().push_back(*tter);
+							readings.back().push_back(tter);
 						}
 
-						foreach (rit, readings) {
+						for (auto rit : readings) {
 							Reading *cReading = alloc_reading(cCohort);
 							++numReadings;
 							insert_if_exists(cReading->parent->possible_sets, grammar->sets_any);
 							cReading->hit_by.push_back(rule.number);
 							cReading->noprint = false;
 							TagList mappings;
-							foreach (tter, *rit) {
-								uint32_t hash = (*tter)->hash;
-								while ((*tter)->type & T_VARSTRING) {
-									*tter = generateVarstringTag(*tter);
+							for (auto tter : rit) {
+								uint32_t hash = tter->hash;
+								while (tter->type & T_VARSTRING) {
+									tter = generateVarstringTag(tter);
 								}
-								if ((*tter)->type & T_MAPPING || (*tter)->tag[0] == grammar->mapping_prefix) {
-									mappings.push_back(*tter);
+								if (tter->type & T_MAPPING || tter->tag[0] == grammar->mapping_prefix) {
+									mappings.push_back(tter);
 								}
 								else {
 									hash = addTagToReading(*cReading, hash);
@@ -904,11 +977,40 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 							initEmptyCohort(*cCohort);
 						}
 
+						CohortSet cohorts;
+						if (rule.childset1) {
+							for (auto iter : current.cohorts) {
+								// Always consider the target cohort a match
+								if (iter->global_number == cohort->global_number) {
+									cohorts.insert(iter);
+								}
+								else if (iter->dep_parent == cohort->global_number && doesSetMatchCohortNormal(*iter, rule.childset1)) {
+									cohorts.insert(iter);
+								}
+							}
+							CohortSet more;
+							for (auto iter : current.cohorts) {
+								for (auto cht : cohorts) {
+									// Do not grab the whole tree from the root, in case WithChild is not (*)
+									if (cht->global_number == cohort->global_number) {
+										continue;
+									}
+									if (isChildOf(iter, cht)) {
+										more.insert(iter);
+									}
+								}
+							}
+							cohorts.insert(more.begin(), more.end());
+						}
+						else {
+							cohorts.insert(cohort);
+						}
+
 						if (type == K_ADDCOHORT_BEFORE) {
-							current.cohorts.insert(current.cohorts.begin() + cohort->local_number, cCohort);
+							current.cohorts.insert(current.cohorts.begin() + cohorts.front()->local_number, cCohort);
 						}
 						else {
-							current.cohorts.insert(current.cohorts.begin() + cohort->local_number + 1, cCohort);
+							current.cohorts.insert(current.cohorts.begin() + cohorts.back()->local_number + 1, cCohort);
 						}
 
 						foreach (iter, current.cohorts) {
@@ -916,10 +1018,10 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 						}
 						// If the new cohort is now the last cohort, add <<< to it and remove <<< from previous last cohort
 						if (current.cohorts.back() == cCohort) {
-							boost_foreach (Reading *r, current.cohorts[current.cohorts.size() - 2]->readings) {
+							for (auto r : current.cohorts[current.cohorts.size() - 2]->readings) {
 								delTagFromReading(*r, endtag);
 							}
-							boost_foreach (Reading *r, current.cohorts.back()->readings) {
+							for (auto r : current.cohorts.back()->readings) {
 								addTagToReading(*r, endtag);
 								if (updateValidRules(rules, intersects, endtag, *r)) {
 									iter_rules = intersects.find(rule.number);
@@ -941,16 +1043,16 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 
 						std::vector<std::pair<Cohort*, std::vector<TagList> > > cohorts;
 
-						BOOST_AUTO(theTags, ss_taglist.get());
+						auto theTags = ss_taglist.get();
 						getTagList(*rule.maplist, theTags);
 
 						Tag *wf = 0;
-						foreach (tter, *theTags) {
-							if ((*tter)->type & T_WORDFORM) {
+						for (auto tter : *theTags) {
+							if (tter->type & T_WORDFORM) {
 								cohorts.resize(cohorts.size() + 1);
 								cohorts.back().first = alloc_cohort(&current);
 								cohorts.back().first->global_number = gWindow->cohort_counter++;
-								wf = *tter;
+								wf = tter;
 								while (wf->type & T_VARSTRING) {
 									wf = generateVarstringTag(wf);
 								}
@@ -975,17 +1077,17 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 						size_t i = 0;
 						std::vector<TagList> *readings = &cohorts.front().second;
 						Tag *bf = 0;
-						foreach (tter, *theTags) {
-							if ((*tter)->type & T_WORDFORM) {
+						for (auto tter : *theTags) {
+							if (tter->type & T_WORDFORM) {
 								++i;
 								bf = 0;
 								continue;
 							}
-							if ((*tter)->type & T_BASEFORM) {
+							if (tter->type & T_BASEFORM) {
 								readings = &cohorts[i - 1].second;
 								readings->resize(readings->size() + 1);
 								readings->back().push_back(cohorts[i - 1].first->wordform);
-								bf = *tter;
+								bf = tter;
 							}
 							if (!bf) {
 								u_fprintf(ux_stderr, "Error: There must be a baseform after the wordform in SPLITCOHORT on line %u before input line %u.\n", rule.line, numLines);
@@ -994,7 +1096,7 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 
 							UChar dep_self[12] = {};
 							UChar dep_parent[12] = {};
-							if (u_sscanf((*tter)->tag.c_str(), "%[0-9cd]->%[0-9pm]", &dep_self, &dep_parent) == 2) {
+							if (u_sscanf(tter->tag.c_str(), "%[0-9cd]->%[0-9pm]", &dep_self, &dep_parent) == 2) {
 								if (dep_self[0] == 'c' || dep_self[0] == 'd') {
 									cohort_dep[i - 1].first = DEP_NO_PARENT;
 									if (rel_trg == DEP_NO_PARENT) {
@@ -1014,11 +1116,11 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 								}
 								continue;
 							}
-							if ((*tter)->tag.size() == 3 && (*tter)->tag[0] == 'R' && (*tter)->tag[1] == ':' && (*tter)->tag[2] == '*') {
+							if (tter->tag.size() == 3 && tter->tag[0] == 'R' && tter->tag[1] == ':' && tter->tag[2] == '*') {
 								rel_trg = i - 1;
 								continue;
 							}
-							readings->back().push_back(*tter);
+							readings->back().push_back(tter);
 						}
 
 						if (rel_trg == DEP_NO_PARENT) {
@@ -1029,8 +1131,7 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 							Cohort *cCohort = cohorts[i].first;
 							readings = &cohorts[i].second;
 
-							foreach (rit, *readings) {
-								TagList& tags = *rit;
+							for (auto tags : *readings) {
 								Reading *cReading = alloc_reading(cCohort);
 								++numReadings;
 								insert_if_exists(cReading->parent->possible_sets, grammar->sets_any);
@@ -1056,13 +1157,13 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 									}
 								}
 
-								foreach (tter, tags) {
-									uint32_t hash = (*tter)->hash;
-									while ((*tter)->type & T_VARSTRING) {
-										*tter = generateVarstringTag(*tter);
+								for (auto tter : tags) {
+									uint32_t hash = tter->hash;
+									while (tter->type & T_VARSTRING) {
+										tter = generateVarstringTag(tter);
 									}
-									if ((*tter)->type & T_MAPPING || (*tter)->tag[0] == grammar->mapping_prefix) {
-										mappings.push_back(*tter);
+									if (tter->type & T_MAPPING || tter->tag[0] == grammar->mapping_prefix) {
+										mappings.push_back(tter);
 									}
 									else {
 										hash = addTagToReading(*cReading, hash);
@@ -1100,7 +1201,9 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 							}
 
 							if (cohort_dep[i].second == DEP_NO_PARENT) {
-								attachParentChild(*current.parent->cohort_map[cohort->dep_parent], *cCohort, true, true);
+								if (current.parent->cohort_map.count(cohort->dep_parent)) {
+									attachParentChild(*current.parent->cohort_map[cohort->dep_parent], *cCohort, true, true);
+								}
 							}
 							else {
 								attachParentChild(*current.parent->cohort_map[cohorts.front().first->global_number + cohort_dep[i].second - 1], *cCohort, true, true);
@@ -1118,11 +1221,11 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 								};
 								for (size_t w = 0; w < 3; ++w) {
 									for (size_t sw = 0; sw < swss[w].second; ++sw) {
-										foreach (ch, swss[w].first[sw]->cohorts) {
-											foreach (rel, (*ch)->relations) {
-												if (rel->second.count(cohort->global_number)) {
-													rel->second.erase(cohort->global_number);
-													rel->second.insert(cCohort->global_number);
+										for (auto ch : swss[w].first[sw]->cohorts) {
+											for (auto& rel : ch->relations) {
+												if (rel.second.count(cohort->global_number)) {
+													rel.second.erase(cohort->global_number);
+													rel.second.insert(cCohort->global_number);
 												}
 											}
 										}
@@ -1132,9 +1235,9 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 						}
 
 						// Remove the source cohort
-						foreach (iter, cohort->readings) {
-							(*iter)->hit_by.push_back(rule.number);
-							(*iter)->deleted = true;
+						for (auto iter : cohort->readings) {
+							iter->hit_by.push_back(rule.number);
+							iter->deleted = true;
 						}
 						// Move any enclosed parentheses to the previous cohort
 						if (!cohort->enclosed.empty()) {
@@ -1144,8 +1247,8 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 						cohort->type |= CT_REMOVED;
 						cohort->prev->removed.push_back(cohort);
 						cohort->detach();
-						foreach (cm, current.parent->cohort_map) {
-							cm->second->dep_children.erase(cohort->dep_self);
+						for (auto cm : current.parent->cohort_map) {
+							cm.second->dep_children.erase(cohort->dep_self);
 						}
 						current.parent->cohort_map.erase(cohort->global_number);
 						current.cohorts.erase(current.cohorts.begin() + cohort->local_number);
@@ -1165,28 +1268,44 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 					}
 					else if (rule.type == K_ADD || rule.type == K_MAP) {
 						index_ruleCohort_no.clear();
-						reading.hit_by.push_back(rule.number);
+						TRACE;
 						reading.noprint = false;
-						BOOST_AUTO(mappings, ss_taglist.get());
-						BOOST_AUTO(theTags, ss_taglist.get());
+						auto mappings = ss_taglist.get();
+						auto theTags = ss_taglist.get();
 						getTagList(*rule.maplist, theTags);
 
-						foreach (tter, *theTags) {
-							uint32_t hash = (*tter)->hash;
-							while ((*tter)->type & T_VARSTRING) {
-								*tter = generateVarstringTag(*tter);
-							}
-							if ((*tter)->type & T_MAPPING || (*tter)->tag[0] == grammar->mapping_prefix) {
-								mappings->push_back(*tter);
+						bool did_insert = false;
+						if (rule.childset1) {
+							auto spot_tags = ss_taglist.get();
+							getTagList(*grammar->sets_list[rule.childset1], spot_tags);
+							FILL_TAG_LIST(spot_tags);
+							auto it = reading.tags_list.begin();
+							for (; it != reading.tags_list.end(); ++it) {
+								bool found = true;
+								auto tmp = it;
+								for (auto tag : *spot_tags) {
+									if (*tmp != tag->hash) {
+										found = false;
+										break;
+									}
+									++tmp;
+								}
+								if (found) {
+									break;
+								}
 							}
-							else {
-								hash = addTagToReading(reading, *tter);
+							if (rule.flags & RF_AFTER) {
+								std::advance(it, spot_tags->size());
 							}
-							if (updateValidRules(rules, intersects, hash, reading)) {
-								iter_rules = intersects.find(rule.number);
-								iter_rules_end = intersects.end();
+							if (it != reading.tags_list.end()) {
+								INSERT_TAGLIST_TO_READING(it, *theTags, reading);
+								did_insert = true;
 							}
 						}
+
+						if (!did_insert) {
+							APPEND_TAGLIST_TO_READING(*theTags, reading);
+						}
 						if (!mappings->empty()) {
 							splitMappings(mappings, *cohort, reading, rule.type == K_MAP);
 						}
@@ -1205,32 +1324,18 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 					}
 					else if (rule.type == K_REPLACE) {
 						index_ruleCohort_no.clear();
-						reading.hit_by.push_back(rule.number);
+						TRACE;
 						reading.noprint = false;
 						reading.tags_list.clear();
 						reading.tags_list.push_back(cohort->wordform->hash);
 						reading.tags_list.push_back(reading.baseform);
 						reflowReading(reading);
-						BOOST_AUTO(mappings, ss_taglist.get());
-						BOOST_AUTO(theTags, ss_taglist.get());
+						auto mappings = ss_taglist.get();
+						auto theTags = ss_taglist.get();
 						getTagList(*rule.maplist, theTags);
 
-						foreach (tter, *theTags) {
-							uint32_t hash = (*tter)->hash;
-							while ((*tter)->type & T_VARSTRING) {
-								*tter = generateVarstringTag(*tter);
-							}
-							if ((*tter)->type & T_MAPPING || (*tter)->tag[0] == grammar->mapping_prefix) {
-								mappings->push_back(*tter);
-							}
-							else {
-								hash = addTagToReading(reading, *tter);
-							}
-							if (updateValidRules(rules, intersects, hash, reading)) {
-								iter_rules = intersects.find(rule.number);
-								iter_rules_end = intersects.end();
-							}
-						}
+						APPEND_TAGLIST_TO_READING(*theTags, reading);
+
 						if (!mappings->empty()) {
 							splitMappings(mappings, *cohort, reading, true);
 						}
@@ -1242,33 +1347,17 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 						// ToDo: Check whether this substitution will do nothing at all to the end result
 						// ToDo: Not actually...instead, test whether any reading in the cohort already is the end result
 
-						BOOST_AUTO(theTags, ss_taglist.get());
+						auto theTags = ss_taglist.get();
 						getTagList(*rule.sublist, theTags);
 
 						// Modify the list of tags to remove to be the actual list of tags present, including matching regex and icase tags
-						for (TagList::iterator it = theTags->begin(); it != theTags->end();) {
-							if (reading.tags.find((*it)->hash) == reading.tags.end()) {
-								const Tag *tt = *it;
-								it = theTags->erase(it);
-								if (tt->type & T_SPECIAL) {
-									if (regexgrps.second == 0) {
-										regexgrps.second = &regexgrps_store[used_regex];
-									}
-									uint32_t stag = doesTagMatchReading(reading, *tt, false, true);
-									if (stag) {
-										theTags->insert(it, single_tags.find(stag)->second);
-									}
-								}
-								continue;
-							}
-							++it;
-						}
+						FILL_TAG_LIST(theTags);
 
 						// Perform the tag removal, remembering the position of the final removed tag for use as insertion spot
 						size_t tpos = std::numeric_limits<size_t>::max();
 						bool plain = true;
 						for (size_t i = 0; i < reading.tags_list.size();) {
-							BOOST_AUTO(&remter, reading.tags_list[i]);
+							auto& remter = reading.tags_list[i];
 
 							if (plain && remter == (*theTags->begin())->hash) {
 								if (reading.baseform == remter) {
@@ -1277,8 +1366,8 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 								remter = substtag;
 								tpos = i;
 								for (size_t j = 1; j < theTags->size() && i < reading.tags_list.size(); ++j, ++i) {
-									BOOST_AUTO(&remter, reading.tags_list[i]);
-									BOOST_AUTO(tter, (*theTags)[j]->hash);
+									auto& remter = reading.tags_list[i];
+									auto tter = (*theTags)[j]->hash;
 									if (remter != tter) {
 										plain = false;
 										break;
@@ -1292,14 +1381,14 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 								continue;
 							}
 
-							foreach (tter, *theTags) {
-								if (remter != (*tter)->hash) {
+							for (auto tter : *theTags) {
+								if (remter != tter->hash) {
 									continue;
 								}
 								tpos = i;
 								remter = substtag;
-								reading.tags.erase((*tter)->hash);
-								if (reading.baseform == (*tter)->hash) {
+								reading.tags.erase(tter->hash);
+								if (reading.baseform == tter->hash) {
 									reading.baseform = 0;
 								}
 							}
@@ -1323,14 +1412,14 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 
 							Tag *wf = 0;
 							index_ruleCohort_no.clear();
-							reading.hit_by.push_back(rule.number);
+							TRACE;
 							reading.noprint = false;
 							if (tpos >= reading.tags_list.size()) {
 								tpos = reading.tags_list.size() - 1;
 							}
 							++tpos;
-							BOOST_AUTO(mappings, ss_taglist.get());
-							BOOST_AUTO(theTags, ss_taglist.get());
+							auto mappings = ss_taglist.get();
+							auto theTags = ss_taglist.get();
 							getTagList(*rule.maplist, theTags);
 
 							for (size_t i = 0; i < reading.tags_list.size();) {
@@ -1338,8 +1427,7 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 									reading.tags_list.erase(reading.tags_list.begin() + i);
 									tpos = i;
 
-									foreach (tter, *theTags) {
-										Tag *tag = *tter;
+									for (auto tag : *theTags) {
 										if (tag->type & T_VARSTRING) {
 											tag = generateVarstringTag(tag);
 										}
@@ -1372,20 +1460,20 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 								splitMappings(mappings, *cohort, reading, true);
 							}
 							if (wf && wf != reading.parent->wordform) {
-								boost_foreach (Reading *r, reading.parent->readings) {
+								for (auto r : reading.parent->readings) {
 									delTagFromReading(*r, reading.parent->wordform);
 									addTagToReading(*r, wf);
 								}
-								boost_foreach (Reading *r, reading.parent->deleted) {
+								for (auto r : reading.parent->deleted) {
 									delTagFromReading(*r, reading.parent->wordform);
 									addTagToReading(*r, wf);
 								}
-								boost_foreach (Reading *r, reading.parent->delayed) {
+								for (auto r : reading.parent->delayed) {
 									delTagFromReading(*r, reading.parent->wordform);
 									addTagToReading(*r, wf);
 								}
 								reading.parent->wordform = wf;
-								boost_foreach (Rule *r, grammar->wf_rules) {
+								for (auto r : grammar->wf_rules) {
 									if (doesWordformsMatch(wf, r->wordform)) {
 										current.rule_to_cohorts[r->number].insert(cohort);
 										intersects.insert(r->number);
@@ -1408,25 +1496,25 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 
 						Tag *bf = 0;
 						std::vector<TagList> readings;
-						BOOST_AUTO(theTags, ss_taglist.get());
+						auto theTags = ss_taglist.get();
 						getTagList(*rule.maplist, theTags);
 
-						foreach (tter, *theTags) {
-							while ((*tter)->type & T_VARSTRING) {
-								*tter = generateVarstringTag(*tter);
+						for (auto tter : *theTags) {
+							while (tter->type & T_VARSTRING) {
+								tter = generateVarstringTag(tter);
 							}
-							if ((*tter)->type & T_BASEFORM) {
-								bf = *tter;
+							if (tter->type & T_BASEFORM) {
+								bf = tter;
 								readings.resize(readings.size() + 1);
 							}
 							if (bf == 0) {
 								u_fprintf(ux_stderr, "Error: There must be a baseform before any other tags in APPEND on line %u.\n", rule.line);
 								CG3Quit(1);
 							}
-							readings.back().push_back(*tter);
+							readings.back().push_back(tter);
 						}
 
-						foreach (rit, readings) {
+						for (auto rit : readings) {
 							Reading *cReading = alloc_reading(cohort);
 							++numReadings;
 							insert_if_exists(cReading->parent->possible_sets, grammar->sets_any);
@@ -1434,16 +1522,16 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 							cReading->hit_by.push_back(rule.number);
 							cReading->noprint = false;
 							TagList mappings;
-							foreach (tter, *rit) {
-								uint32_t hash = (*tter)->hash;
-								while ((*tter)->type & T_VARSTRING) {
-									*tter = generateVarstringTag(*tter);
+							for (auto tter : rit) {
+								uint32_t hash = tter->hash;
+								while (tter->type & T_VARSTRING) {
+									tter = generateVarstringTag(tter);
 								}
-								if ((*tter)->type & T_MAPPING || (*tter)->tag[0] == grammar->mapping_prefix) {
-									mappings.push_back(*tter);
+								if (tter->type & T_MAPPING || tter->tag[0] == grammar->mapping_prefix) {
+									mappings.push_back(tter);
 								}
 								else {
-									hash = addTagToReading(*cReading, *tter);
+									hash = addTagToReading(*cReading, tter);
 								}
 								if (updateValidRules(rules, intersects, hash, *cReading)) {
 									iter_rules = intersects.find(rule.number);
@@ -1470,44 +1558,62 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 						break;
 					}
 					else if (rule.type == K_COPY) {
+						// ToDo: Also copy sub-readings
+						// ToDo: Maybe just goto Substitute directly?
 						Reading *cReading = cohort->allocateAppendReading();
 						++numReadings;
 						index_ruleCohort_no.clear();
 						cReading->hit_by.push_back(rule.number);
 						cReading->noprint = false;
-						foreach (iter, reading.tags_list) {
-							addTagToReading(*cReading, *iter);
+						for (auto iter : reading.tags_list) {
+							addTagToReading(*cReading, iter);
 						}
 
 						if (rule.sublist) {
-							// ToDo: Use the code from Substitute to make this match and remove special tags
-							BOOST_AUTO(excepts, ss_taglist.get());
+							auto excepts = ss_taglist.get();
 							getTagList(*rule.sublist, excepts);
-							foreach (tter, *excepts) {
-								delTagFromReading(*cReading, *tter);
+							FILL_TAG_LIST(excepts);
+							for (auto tter : *excepts) {
+								delTagFromReading(*cReading, tter);
 							}
 						}
 
-						BOOST_AUTO(mappings, ss_taglist.get());
-						BOOST_AUTO(theTags, ss_taglist.get());
+						auto mappings = ss_taglist.get();
+						auto theTags = ss_taglist.get();
 						getTagList(*rule.maplist, theTags);
 
-						foreach (tter, *theTags) {
-							uint32_t hash = (*tter)->hash;
-							while ((*tter)->type & T_VARSTRING) {
-								*tter = generateVarstringTag(*tter);
-							}
-							if ((*tter)->type & T_MAPPING || (*tter)->tag[0] == grammar->mapping_prefix) {
-								mappings->push_back(*tter);
+						bool did_insert = false;
+						if (rule.childset1) {
+							auto spot_tags = ss_taglist.get();
+							getTagList(*grammar->sets_list[rule.childset1], spot_tags);
+							FILL_TAG_LIST(spot_tags);
+							auto it = cReading->tags_list.begin();
+							for (; it != cReading->tags_list.end(); ++it) {
+								bool found = true;
+								auto tmp = it;
+								for (auto tag : *spot_tags) {
+									if (*tmp != tag->hash) {
+										found = false;
+										break;
+									}
+									++tmp;
+								}
+								if (found) {
+									break;
+								}
 							}
-							else {
-								hash = addTagToReading(*cReading, *tter);
+							if (rule.flags & RF_AFTER) {
+								std::advance(it, spot_tags->size());
 							}
-							if (updateValidRules(rules, intersects, hash, reading)) {
-								iter_rules = intersects.find(rule.number);
-								iter_rules_end = intersects.end();
+							if (it != cReading->tags_list.end()) {
+								INSERT_TAGLIST_TO_READING(it, *theTags, *cReading);
+								did_insert = true;
 							}
 						}
+
+						if (!did_insert) {
+							APPEND_TAGLIST_TO_READING(*theTags, *cReading);
+						}
 						if (!mappings->empty()) {
 							splitMappings(mappings, *cohort, *cReading, true);
 						}
@@ -1515,34 +1621,33 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 					}
 					else if (type == K_SETPARENT || type == K_SETCHILD) {
 						int32_t orgoffset = rule.dep_target->offset;
-						BOOST_AUTO(seen_targets, ss_u32sv.get());
+						auto seen_targets = ss_u32sv.get();
 
-						seen_barrier = false;
 						bool attached = false;
 						Cohort *target = cohort;
 						while (!attached) {
-							BOOST_AUTO(utags, ss_utags.get());
-							BOOST_AUTO(usets, ss_u32sv.get());
+							auto utags = ss_utags.get();
+							auto usets = ss_u32sv.get();
 							*utags = *unif_tags;
 							*usets = *unif_sets;
 
 							Cohort *attach = 0;
 							seen_targets->insert(target->global_number);
 							dep_deep_seen.clear();
-							tmpl_cntxs.clear();
-							tmpl_cntx_pos = 0;
+							tmpl_cntx.clear();
 							attach_to = 0;
+							seen_barrier = false;
 							if (runContextualTest(target->parent, target->local_number, rule.dep_target, &attach) && attach) {
+								bool break_after = seen_barrier || (rule.flags & RF_NEAREST);
 								if (attach_to) {
 									attach = attach_to;
 								}
 								bool good = true;
-								foreach (it, rule.dep_tests) {
+								for (auto it : rule.dep_tests) {
 									mark = attach;
 									dep_deep_seen.clear();
-									tmpl_cntxs.clear();
-									tmpl_cntx_pos = 0;
-									test_good = (runContextualTest(attach->parent, attach->local_number, *it) != 0);
+									tmpl_cntx.clear();
+									test_good = (runContextualTest(attach->parent, attach->local_number, it) != 0);
 									if (!test_good) {
 										good = test_good;
 										break;
@@ -1558,14 +1663,14 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 									}
 									if (attached) {
 										index_ruleCohort_no.clear();
-										reading.hit_by.push_back(rule.number);
+										TRACE;
 										reading.noprint = false;
 										has_dep = true;
 										readings_changed = true;
 										break;
 									}
 								}
-								if (seen_barrier || (rule.flags & RF_NEAREST)) {
+								if (break_after) {
 									break;
 								}
 								if (seen_targets->count(attach->global_number)) {
@@ -1595,20 +1700,18 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 						// ToDo: ** tests will not correctly work for MOVE/SWITCH; cannot move cohorts between windows
 						Cohort *attach = 0;
 						dep_deep_seen.clear();
-						tmpl_cntxs.clear();
-						tmpl_cntx_pos = 0;
+						tmpl_cntx.clear();
 						attach_to = 0;
 						if (runContextualTest(&current, c, rule.dep_target, &attach) && attach && cohort->parent == attach->parent) {
 							if (attach_to) {
 								attach = attach_to;
 							}
 							bool good = true;
-							foreach (it, rule.dep_tests) {
+							for (auto it : rule.dep_tests) {
 								mark = attach;
 								dep_deep_seen.clear();
-								tmpl_cntxs.clear();
-								tmpl_cntx_pos = 0;
-								test_good = (runContextualTest(attach->parent, attach->local_number, *it) != 0);
+								tmpl_cntx.clear();
+								test_good = (runContextualTest(attach->parent, attach->local_number, it) != 0);
 								if (!test_good) {
 									good = test_good;
 									break;
@@ -1627,46 +1730,113 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 								}
 								current.cohorts[cohort->local_number] = attach;
 								current.cohorts[attach->local_number] = cohort;
-								foreach (iter, cohort->readings) {
-									(*iter)->hit_by.push_back(rule.number);
+								for (auto iter : cohort->readings) {
+									iter->hit_by.push_back(rule.number);
 								}
-								foreach (iter, attach->readings) {
-									(*iter)->hit_by.push_back(rule.number);
+								for (auto iter : attach->readings) {
+									iter->hit_by.push_back(rule.number);
 								}
 							}
 							else {
-								CohortVector cohorts;
+								CohortSet edges;
+								if (rule.childset2) {
+									for (auto iter : current.cohorts) {
+										// Always consider the anchor cohort a match
+										if (iter->global_number == attach->global_number) {
+											edges.insert(iter);
+										}
+										else if (iter->dep_parent == attach->global_number && doesSetMatchCohortNormal(*iter, rule.childset2)) {
+											edges.insert(iter);
+										}
+									}
+									CohortSet more;
+									for (auto iter : current.cohorts) {
+										for (auto edge : edges) {
+											// Do not grab the whole tree from the root, in case WithChild is not (*)
+											if (edge->global_number == attach->global_number) {
+												continue;
+											}
+											if (isChildOf(iter, edge)) {
+												more.insert(iter);
+											}
+										}
+									}
+									edges.insert(more.begin(), more.end());
+								}
+								else {
+									edges.insert(attach);
+								}
+
+								CohortSet cohorts;
 								if (rule.childset1) {
-									for (CohortVector::iterator iter = current.cohorts.begin(); iter != current.cohorts.end();) {
-										if (isChildOf(*iter, cohort) && doesSetMatchCohortNormal(**iter, rule.childset1)) {
-											cohorts.push_back(*iter);
-											iter = current.cohorts.erase(iter);
+									for (auto iter : current.cohorts) {
+										// Always consider the target cohort a match
+										if (iter->global_number == cohort->global_number) {
+											cohorts.insert(iter);
 										}
-										else {
-											++iter;
+										else if (iter->dep_parent == cohort->global_number && doesSetMatchCohortNormal(*iter, rule.childset1)) {
+											cohorts.insert(iter);
+										}
+									}
+									CohortSet more;
+									for (auto iter : current.cohorts) {
+										for (auto cht : cohorts) {
+											// Do not grab the whole tree from the root, in case WithChild is not (*)
+											if (cht->global_number == cohort->global_number) {
+												continue;
+											}
+											if (isChildOf(iter, cht)) {
+												more.insert(iter);
+											}
 										}
 									}
+									cohorts.insert(more.begin(), more.end());
 								}
 								else {
-									cohorts.push_back(cohort);
-									current.cohorts.erase(current.cohorts.begin() + cohort->local_number);
+									cohorts.insert(cohort);
+								}
+
+								bool need_clean = false;
+								for (auto iter : cohorts) {
+									if (edges.count(iter)) {
+										need_clean = true;
+										break;
+									}
+								}
+
+								if (need_clean) {
+									if (isChildOf(cohort, attach)) {
+										edges.erase(cohorts.rbegin(), cohorts.rend());
+									}
+									else /* if (isChildOf(attach, cohort)) */ {
+										cohorts.erase(edges.rbegin(), edges.rend());
+									}
+								}
+								if (cohorts.empty() || edges.empty()) {
+									break;
+								}
+
+								reverse_foreach (iter, cohorts) {
+									current.cohorts.erase(current.cohorts.begin() + (*iter)->local_number);
 								}
 
 								foreach (iter, current.cohorts) {
 									(*iter)->local_number = std::distance(current.cohorts.begin(), iter);
 								}
 
-								CohortVector edges;
-								if (rule.childset2) {
-									foreach (iter, current.cohorts) {
-										if (isChildOf(*iter, attach) && doesSetMatchCohortNormal(**iter, rule.childset2)) {
-											edges.push_back(*iter);
+								for (auto iter : edges) {
+									if (iter->parent != cohort->parent) {
+										u_fprintf(ux_stderr, "Error: Move/Switch on line %u tried to move across window boundaries.\n", rule.line);
+										CG3Quit(1);
+									}
+									for (auto cohort : cohorts) {
+										if (iter == cohort) {
+											u_fprintf(ux_stderr, "Error: Move/Switch on line %u tried to move to a removed position.\n", rule.line);
+											CG3Quit(1);
 										}
 									}
 								}
-								else {
-									edges.push_back(attach);
-								}
+
 								uint32_t spot = 0;
 								if (type == K_MOVE_BEFORE) {
 									spot = edges.front()->local_number;
@@ -1678,9 +1848,14 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 									spot = edges.back()->local_number + 1;
 								}
 
+								if (spot > current.cohorts.size()) {
+									u_fprintf(ux_stderr, "Error: Move/Switch on line %u tried to move out of bounds.\n", rule.line);
+									CG3Quit(1);
+								}
+
 								while (!cohorts.empty()) {
-									foreach (iter, cohorts.back()->readings) {
-										(*iter)->hit_by.push_back(rule.number);
+									for (auto iter : cohorts.back()->readings) {
+										iter->hit_by.push_back(rule.number);
 									}
 									current.cohorts.insert(current.cohorts.begin() + spot, cohorts.back());
 									cohorts.pop_back();
@@ -1704,10 +1879,10 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 								attach = attach_to;
 							}
 							bool good = true;
-							foreach (it, rule.dep_tests) {
+							for (auto it : rule.dep_tests) {
 								mark = attach;
 								dep_deep_seen.clear();
-								test_good = (runContextualTest(attach->parent, attach->local_number, *it) != 0);
+								test_good = (runContextualTest(attach->parent, attach->local_number, it) != 0);
 								if (!test_good) {
 									good = test_good;
 									break;
@@ -1716,27 +1891,27 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 							if (good) {
 								swapper<Cohort*> sw((rule.flags & RF_REVERSE) != 0, attach, cohort);
 								bool rel_did_anything = false;
-								BOOST_AUTO(theTags, ss_taglist.get());
+								auto theTags = ss_taglist.get();
 								getTagList(*rule.maplist, theTags);
 
-								foreach (tter, *theTags) {
+								for (auto tter : *theTags) {
 									if (type == K_ADDRELATION) {
 										attach->type |= CT_RELATED;
 										cohort->type |= CT_RELATED;
-										rel_did_anything |= cohort->addRelation((*tter)->hash, attach->global_number);
+										rel_did_anything |= cohort->addRelation(tter->hash, attach->global_number);
 									}
 									else if (type == K_SETRELATION) {
 										attach->type |= CT_RELATED;
 										cohort->type |= CT_RELATED;
-										rel_did_anything |= cohort->setRelation((*tter)->hash, attach->global_number);
+										rel_did_anything |= cohort->setRelation(tter->hash, attach->global_number);
 									}
 									else {
-										rel_did_anything |= cohort->remRelation((*tter)->hash, attach->global_number);
+										rel_did_anything |= cohort->remRelation(tter->hash, attach->global_number);
 									}
 								}
 								if (rel_did_anything) {
 									index_ruleCohort_no.clear();
-									reading.hit_by.push_back(rule.number);
+									TRACE;
 									reading.noprint = false;
 									readings_changed = true;
 								}
@@ -1747,20 +1922,18 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 					else if (type == K_ADDRELATIONS || type == K_SETRELATIONS || type == K_REMRELATIONS) {
 						Cohort *attach = 0;
 						dep_deep_seen.clear();
-						tmpl_cntxs.clear();
-						tmpl_cntx_pos = 0;
+						tmpl_cntx.clear();
 						attach_to = 0;
 						if (runContextualTest(&current, c, rule.dep_target, &attach) && attach) {
 							if (attach_to) {
 								attach = attach_to;
 							}
 							bool good = true;
-							foreach (it, rule.dep_tests) {
+							for (auto it : rule.dep_tests) {
 								mark = attach;
 								dep_deep_seen.clear();
-								tmpl_cntxs.clear();
-								tmpl_cntx_pos = 0;
-								test_good = (runContextualTest(attach->parent, attach->local_number, *it) != 0);
+								tmpl_cntx.clear();
+								test_good = (runContextualTest(attach->parent, attach->local_number, it) != 0);
 								if (!test_good) {
 									good = test_good;
 									break;
@@ -1770,41 +1943,41 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 								swapper<Cohort*> sw((rule.flags & RF_REVERSE) != 0, attach, cohort);
 								bool rel_did_anything = false;
 
-								BOOST_AUTO(sublist, ss_taglist.get());
+								auto sublist = ss_taglist.get();
 								getTagList(*rule.sublist, sublist);
 
-								BOOST_AUTO(maplist, ss_taglist.get());
+								auto maplist = ss_taglist.get();
 								getTagList(*rule.maplist, maplist);
 
-								foreach (tter, *maplist) {
+								for (auto tter : *maplist) {
 									if (type == K_ADDRELATIONS) {
 										cohort->type |= CT_RELATED;
-										rel_did_anything |= cohort->addRelation((*tter)->hash, attach->global_number);
+										rel_did_anything |= cohort->addRelation(tter->hash, attach->global_number);
 									}
 									else if (type == K_SETRELATIONS) {
 										cohort->type |= CT_RELATED;
-										rel_did_anything |= cohort->setRelation((*tter)->hash, attach->global_number);
+										rel_did_anything |= cohort->setRelation(tter->hash, attach->global_number);
 									}
 									else {
-										rel_did_anything |= cohort->remRelation((*tter)->hash, attach->global_number);
+										rel_did_anything |= cohort->remRelation(tter->hash, attach->global_number);
 									}
 								}
-								foreach (tter, *sublist) {
+								for (auto tter : *sublist) {
 									if (type == K_ADDRELATIONS) {
 										attach->type |= CT_RELATED;
-										rel_did_anything |= attach->addRelation((*tter)->hash, cohort->global_number);
+										rel_did_anything |= attach->addRelation(tter->hash, cohort->global_number);
 									}
 									else if (type == K_SETRELATIONS) {
 										attach->type |= CT_RELATED;
-										rel_did_anything |= attach->setRelation((*tter)->hash, cohort->global_number);
+										rel_did_anything |= attach->setRelation(tter->hash, cohort->global_number);
 									}
 									else {
-										rel_did_anything |= attach->remRelation((*tter)->hash, cohort->global_number);
+										rel_did_anything |= attach->remRelation(tter->hash, cohort->global_number);
 									}
 								}
 								if (rel_did_anything) {
 									index_ruleCohort_no.clear();
-									reading.hit_by.push_back(rule.number);
+									TRACE;
 									reading.noprint = false;
 									readings_changed = true;
 								}
@@ -1858,6 +2031,7 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 				if (!(rule.flags & RF_NOITERATE) && section_max_count != 1) {
 					section_did_something = true;
 				}
+				rule_did_something = true;
 				cohort->type &= ~CT_NUM_CURRENT;
 			}
 
@@ -1884,9 +2058,21 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 			rule.total_time += elapsed(tmp, tstamp);
 		}
 
+		if (rule_did_something) {
+			if (trace_rules.contains(rule.line)) {
+				retval |= RV_TRACERULE;
+			}
+		}
 		if (delimited) {
 			break;
 		}
+		if (rule_did_something && (rule.flags & RF_REPEAT)) {
+			index_ruleCohort_no.clear();
+			goto repeat_rule;
+		}
+		if (retval & RV_TRACERULE) {
+			break;
+		}
 	}
 
 	if (section_did_something) {
@@ -1901,7 +2087,7 @@ uint32_t GrammarApplicator::runRulesOnSingleWindow(SingleWindow& current, const
 uint32_t GrammarApplicator::runGrammarOnSingleWindow(SingleWindow& current) {
 	if (!grammar->before_sections.empty() && !no_before_sections) {
 		uint32_t rv = runRulesOnSingleWindow(current, runsections[-1]);
-		if (rv & RV_DELIMITED) {
+		if (rv & (RV_DELIMITED | RV_TRACERULE)) {
 			return rv;
 		}
 	}
@@ -1911,7 +2097,7 @@ uint32_t GrammarApplicator::runGrammarOnSingleWindow(SingleWindow& current) {
 		// Caveat: This may look as if it is not recursing previous sections, but those rules are preprocessed into the successive sections so they are actually run.
 		RSType::iterator iter = runsections.begin();
 		RSType::iterator iter_end = runsections.end();
-		for (; iter != iter_end;) {
+		for (size_t pass = 0; iter != iter_end; ++pass) {
 			if (iter->first < 0 || (section_max_count && counter[iter->first] >= section_max_count)) {
 				++iter;
 				continue;
@@ -1922,18 +2108,31 @@ uint32_t GrammarApplicator::runGrammarOnSingleWindow(SingleWindow& current) {
 			}
 			rv = runRulesOnSingleWindow(current, iter->second);
 			++counter[iter->first];
-			if (rv & RV_DELIMITED) {
+			if (rv & (RV_DELIMITED | RV_TRACERULE)) {
 				return rv;
 			}
 			if (!(rv & RV_SOMETHING)) {
 				++iter;
+				pass = 0;
+			}
+			if (pass >= 1000) {
+				u_fprintf(ux_stderr, "Warning: Endless loop detected before input line %u. Window contents was:", numLines);
+				UString tag;
+				for (size_t i = 1; i < current.cohorts.size(); ++i) {
+					Tag *t = current.cohorts[i]->wordform;
+					tag.assign(t->tag.begin() + 2, t->tag.begin() + t->tag.size() - 2);
+					u_fprintf(ux_stderr, " %S", tag.c_str());
+				}
+				u_fprintf(ux_stderr, "\n");
+				u_fflush(ux_stderr);
+				break;
 			}
 		}
 	}
 
 	if (!grammar->after_sections.empty() && !no_after_sections) {
 		uint32_t rv = runRulesOnSingleWindow(current, runsections[-2]);
-		if (rv & RV_DELIMITED) {
+		if (rv & (RV_DELIMITED | RV_TRACERULE)) {
 			return rv;
 		}
 	}
@@ -1945,11 +2144,11 @@ void GrammarApplicator::runGrammarOnWindow() {
 	SingleWindow *current = gWindow->current;
 	did_final_enclosure = false;
 
-	foreach (vit, current->variables_set) {
-		variables[vit->first] = vit->second;
+	for (auto vit : current->variables_set) {
+		variables[vit.first] = vit.second;
 	}
-	foreach (vit, current->variables_rem) {
-		variables.erase(*vit);
+	for (auto vit : current->variables_rem) {
+		variables.erase(vit);
 	}
 
 	if (has_dep) {
@@ -1957,8 +2156,7 @@ void GrammarApplicator::runGrammarOnWindow() {
 		gWindow->dep_map.clear();
 		gWindow->dep_window.clear();
 		if (!input_eof && !gWindow->next.empty() && gWindow->next.back()->cohorts.size() > 1) {
-			foreach (iter, gWindow->next.back()->cohorts) {
-				Cohort *cohort = *iter;
+			for (auto cohort : gWindow->next.back()->cohorts) {
 				gWindow->dep_window[cohort->global_number] = cohort;
 			}
 		}
@@ -2006,11 +2204,11 @@ void GrammarApplicator::runGrammarOnWindow() {
 						++left;
 					}
 					current->cohorts.resize(current->cohorts.size() - encs.size());
-					foreach (eiter, encs) {
-						(*eiter)->type |= CT_ENCLOSED;
+					for (auto eiter : encs) {
+						eiter->type |= CT_ENCLOSED;
 					}
-					foreach (eiter2, c->enclosed) {
-						encs.push_back(*eiter2);
+					for (auto eiter2 : c->enclosed) {
+						encs.push_back(eiter2);
 					}
 					c->enclosed = encs;
 					current->has_enclosures = true;
@@ -2035,7 +2233,15 @@ label_runGrammarOnWindow_begin:
 
 	++pass;
 	if (pass > 1000) {
-		u_fprintf(ux_stderr, "Warning: Endless loop detected before input line %u - will try to break it.\n", numLines);
+		u_fprintf(ux_stderr, "Warning: Endless loop detected before input line %u. Window contents was:", numLines);
+		UString tag;
+		for (size_t i = 1; i < current->cohorts.size(); ++i) {
+			Tag *t = current->cohorts[i]->wordform;
+			tag.assign(t->tag.begin() + 2, t->tag.begin() + t->tag.size() - 2);
+			u_fprintf(ux_stderr, " %S", tag.c_str());
+		}
+		u_fprintf(ux_stderr, "\n");
+		u_fflush(ux_stderr);
 		return;
 	}
 
@@ -2044,8 +2250,8 @@ label_runGrammarOnWindow_begin:
 		size_t nc = current->cohorts.size();
 		for (size_t i = 0; i < nc; ++i) {
 			Cohort *c = current->cohorts[i];
-			foreach (rit, c->readings) {
-				(*rit)->hit_by.push_back(hitpass);
+			for (auto rit : c->readings) {
+				rit->hit_by.push_back(hitpass);
 			}
 		}
 	}
@@ -2055,6 +2261,7 @@ label_runGrammarOnWindow_begin:
 		goto label_runGrammarOnWindow_begin;
 	}
 
+label_unpackEnclosures:
 	if (!grammar->parentheses.empty() && current->has_enclosures) {
 		size_t nc = current->cohorts.size();
 		for (size_t i = 0; i < nc; ++i) {
@@ -2077,6 +2284,9 @@ label_runGrammarOnWindow_begin:
 				par_left_pos = i + 1;
 				par_right_pos = i + ne;
 				c->enclosed.clear();
+				if (rv & RV_TRACERULE) {
+					goto label_unpackEnclosures;
+				}
 				goto label_runGrammarOnWindow_begin;
 			}
 		}
@@ -2086,8 +2296,14 @@ label_runGrammarOnWindow_begin:
 			par_left_pos = 0;
 			par_right_pos = 0;
 			did_final_enclosure = true;
+			if (rv & RV_TRACERULE) {
+				goto label_unpackEnclosures;
+			}
 			goto label_runGrammarOnWindow_begin;
 		}
 	}
 }
 }
+
+// This helps the all_vislcg3.cpp profiling builds
+#undef TRACE
diff --git a/src/GrammarWriter.cpp b/src/GrammarWriter.cpp
index e7dcdc2..ac8d889 100644
--- a/src/GrammarWriter.cpp
+++ b/src/GrammarWriter.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -53,12 +53,12 @@ void GrammarWriter::printSet(UFILE *output, const Set& curset) {
 		used_sets.insert(curset.number);
 		u_fprintf(output, "LIST %S = ", curset.name.c_str());
 		std::set<TagVector> tagsets[] = { trie_getTagsOrdered(curset.trie), trie_getTagsOrdered(curset.trie_special) };
-		boost_foreach (const std::set<TagVector>& tvs, tagsets) {
-			boost_foreach (const TagVector& tags, tvs) {
+		for (auto& tvs : tagsets) {
+			for (auto& tags : tvs) {
 				if (tags.size() > 1) {
 					u_fprintf(output, "(");
 				}
-				boost_foreach (const Tag *tag, tags) {
+				for (auto tag : tags) {
 					printTag(output, *tag);
 					u_fprintf(output, " ");
 				}
@@ -104,10 +104,6 @@ int GrammarWriter::writeGrammar(UFILE *output) {
 		u_fprintf(ux_stderr, "Error: No grammar provided - cannot continue!\n");
 		CG3Quit(1);
 	}
-	if (grammar->is_binary) {
-		u_fprintf(ux_stderr, "Error: Grammar is binary and cannot be output in textual form!\n");
-		CG3Quit(1);
-	}
 
 	if (statistics) {
 		if (ceil(grammar->total_time) == floor(grammar->total_time)) {
@@ -130,7 +126,7 @@ int GrammarWriter::writeGrammar(UFILE *output) {
 
 	if (!grammar->static_sets.empty()) {
 		u_fprintf(output, "STATIC-SETS =");
-		boost_foreach (const UString& str, grammar->static_sets) {
+		for (auto& str : grammar->static_sets) {
 			u_fprintf(output, " %S", str.c_str());
 		}
 		u_fprintf(output, " ;\n");
@@ -149,14 +145,26 @@ int GrammarWriter::writeGrammar(UFILE *output) {
 	u_fprintf(output, "\n");
 
 	used_sets.clear();
-	boost_foreach (Set *s, grammar->sets_list) {
+	for (auto s : grammar->sets_list) {
+		if (s->name.empty()) {
+			if (s == grammar->delimiters) {
+				s->name.assign(stringbits[S_DELIMITSET].getTerminatedBuffer());
+			}
+			else if (s == grammar->soft_delimiters) {
+				s->name.assign(stringbits[S_SOFTDELIMITSET].getTerminatedBuffer());
+			}
+			else {
+				s->name.resize(12);
+				s->name.resize(u_sprintf(&s->name[0], "S%u", s->number));
+			}
+		}
 		if (s->name[0] == '_' && s->name[1] == 'G' && s->name[2] == '_') {
 			s->name.insert(s->name.begin(), '3');
 			s->name.insert(s->name.begin(), 'G');
 			s->name.insert(s->name.begin(), 'C');
 		}
 	}
-	boost_foreach (Set *s, grammar->sets_list) {
+	for (auto s : grammar->sets_list) {
 		printSet(output, *s);
 	}
 	u_fprintf(output, "\n");
@@ -171,8 +179,8 @@ int GrammarWriter::writeGrammar(UFILE *output) {
 	//*/
 
 	bool found = false;
-	foreach (rule_iter, grammar->rule_by_number) {
-		const Rule& r = **rule_iter;
+	for (auto rule_iter : grammar->rule_by_number) {
+		const Rule& r = *rule_iter;
 		if (r.section == -1) {
 			if (!found) {
 				u_fprintf(output, "\nBEFORE-SECTIONS\n");
@@ -182,11 +190,11 @@ int GrammarWriter::writeGrammar(UFILE *output) {
 			u_fprintf(output, " ;\n");
 		}
 	}
-	foreach (isec, grammar->sections) {
+	for (auto isec : grammar->sections) {
 		found = false;
-		foreach (rule_iter, grammar->rule_by_number) {
-			const Rule& r = **rule_iter;
-			if (r.section == (int32_t)*isec) {
+		for (auto rule_iter : grammar->rule_by_number) {
+			const Rule& r = *rule_iter;
+			if (r.section == static_cast<int32_t>(isec)) {
 				if (!found) {
 					u_fprintf(output, "\nSECTION\n");
 					found = true;
@@ -197,8 +205,8 @@ int GrammarWriter::writeGrammar(UFILE *output) {
 		}
 	}
 	found = false;
-	foreach (rule_iter, grammar->rule_by_number) {
-		const Rule& r = **rule_iter;
+	for (auto rule_iter : grammar->rule_by_number) {
+		const Rule& r = *rule_iter;
 		if (r.section == -2) {
 			if (!found) {
 				u_fprintf(output, "\nAFTER-SECTIONS\n");
@@ -209,8 +217,8 @@ int GrammarWriter::writeGrammar(UFILE *output) {
 		}
 	}
 	found = false;
-	foreach (rule_iter, grammar->rule_by_number) {
-		const Rule& r = **rule_iter;
+	for (auto rule_iter : grammar->rule_by_number) {
+		const Rule& r = *rule_iter;
 		if (r.section == -3) {
 			if (!found) {
 				u_fprintf(output, "\nNULL-SECTION\n");
@@ -241,14 +249,19 @@ void GrammarWriter::printRule(UFILE *to, const Rule& rule) {
 
 	u_fprintf(to, "%S", keywords[rule.type].getTerminatedBuffer());
 
-	if (rule.name && !(rule.name[0] == '_' && rule.name[1] == 'R' && rule.name[2] == '_')) {
-		u_fprintf(to, ":%S", rule.name);
+	if (!rule.name.empty() && !(rule.name[0] == '_' && rule.name[1] == 'R' && rule.name[2] == '_')) {
+		u_fprintf(to, ":%S", rule.name.c_str());
 	}
 	u_fprintf(to, " ");
 
 	for (uint32_t i = 0; i < FLAGS_COUNT; i++) {
 		if (rule.flags & (1 << i)) {
-			u_fprintf(to, "%S ", g_flags[i].getTerminatedBuffer());
+			if (i == FL_SUB) {
+				u_fprintf(to, "%S:%d ", g_flags[i].getTerminatedBuffer(), rule.sub_reading);
+			}
+			else {
+				u_fprintf(to, "%S ", g_flags[i].getTerminatedBuffer());
+			}
 		}
 	}
 
@@ -264,9 +277,9 @@ void GrammarWriter::printRule(UFILE *to, const Rule& rule) {
 		u_fprintf(to, "%S ", grammar->sets_list[rule.target]->name.c_str());
 	}
 
-	foreach (it, rule.tests) {
+	for (auto it : rule.tests) {
 		u_fprintf(to, "(");
-		printContextualTest(to, **it);
+		printContextualTest(to, *it);
 		u_fprintf(to, ") ");
 	}
 
@@ -274,9 +287,9 @@ void GrammarWriter::printRule(UFILE *to, const Rule& rule) {
 		u_fprintf(to, "TO (");
 		printContextualTest(to, *(rule.dep_target));
 		u_fprintf(to, ") ");
-		foreach (it, rule.dep_tests) {
+		for (auto it : rule.dep_tests) {
 			u_fprintf(to, "(");
-			printContextualTest(to, **it);
+			printContextualTest(to, *it);
 			u_fprintf(to, ") ");
 		}
 	}
@@ -295,7 +308,7 @@ void GrammarWriter::printContextualTest(UFILE *to, const ContextualTest& test) {
 		u_fprintf(to, "T:%u ", test.tmpl->hash);
 	}
 	else if (!test.ors.empty()) {
-		for (BOOST_AUTO(iter, test.ors.begin()); iter != test.ors.end();) {
+		for (auto iter = test.ors.begin(); iter != test.ors.end();) {
 			u_fprintf(to, "(");
 			printContextualTest(to, **iter);
 			u_fprintf(to, ")");
diff --git a/src/GrammarWriter.hpp b/src/GrammarWriter.hpp
index 30942f2..0bea97f 100644
--- a/src/GrammarWriter.hpp
+++ b/src/GrammarWriter.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
diff --git a/src/IGrammarParser.hpp b/src/IGrammarParser.hpp
index 6e4d935..1f74679 100644
--- a/src/IGrammarParser.hpp
+++ b/src/IGrammarParser.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
diff --git a/src/MatxinApplicator.cpp b/src/MatxinApplicator.cpp
index 170f093..fe67aa9 100644
--- a/src/MatxinApplicator.cpp
+++ b/src/MatxinApplicator.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -217,8 +217,8 @@ void MatxinApplicator::runGrammarOnText(istream& input, UFILE *output) {
 			}
 			if (cCohort && cSWindow->cohorts.size() >= soft_limit && grammar->soft_delimiters && doesSetMatchCohortNormal(*cCohort, grammar->soft_delimiters->number)) {
 				// ie. we've read some cohorts
-				foreach (iter, cCohort->readings) {
-					addTagToReading(**iter, endtag);
+				for (auto iter : cCohort->readings) {
+					addTagToReading(*iter, endtag);
 				}
 
 				cSWindow->appendCohort(cCohort);
@@ -232,8 +232,8 @@ void MatxinApplicator::runGrammarOnText(istream& input, UFILE *output) {
 					u_fprintf(ux_stderr, "Warning: Hard limit of %u cohorts reached at line %u - forcing break.\n", hard_limit, numLines);
 					u_fflush(ux_stderr);
 				}
-				foreach (iter, cCohort->readings) {
-					addTagToReading(**iter, endtag);
+				for (auto iter : cCohort->readings) {
+					addTagToReading(*iter, endtag);
 				}
 
 				cSWindow->appendCohort(cCohort);
@@ -423,8 +423,8 @@ void MatxinApplicator::runGrammarOnText(istream& input, UFILE *output) {
 		if (cCohort->readings.empty()) {
 			initEmptyCohort(*cCohort);
 		}
-		foreach (iter, cCohort->readings) {
-			addTagToReading(**iter, endtag);
+		for (auto iter : cCohort->readings) {
+			addTagToReading(*iter, endtag);
 		}
 		cReading = 0;
 		cCohort = 0;
@@ -684,7 +684,7 @@ void MatxinApplicator::printReading(Reading *reading, Node& node, UFILE *output)
 	}
 
 	// Lop off the initial and final '"' characters
-	UnicodeString bf(single_tags[reading->baseform]->tag.c_str() + 1, single_tags[reading->baseform]->tag.length() - 2);
+	UnicodeString bf(single_tags[reading->baseform]->tag.c_str() + 1, single_tags[reading->baseform]->tag.size() - 2);
 
 	node.lemma = bf.getTerminatedBuffer();
 
@@ -779,7 +779,7 @@ void MatxinApplicator::printSingleWindow(SingleWindow *window, UFILE *output) {
 
 		// Lop off the initial and final '"' characters
 		// ToDo: A copy does not need to be made here - use pointer offsets
-		UnicodeString wf(cohort->wordform->tag.c_str() + 2, cohort->wordform->tag.length() - 4);
+		UnicodeString wf(cohort->wordform->tag.c_str() + 2, cohort->wordform->tag.size() - 4);
 		UString wf_escaped;
 		for (int i = 0; i < wf.length(); ++i) {
 			if (wf[i] == '&') {
@@ -806,11 +806,11 @@ void MatxinApplicator::printSingleWindow(SingleWindow *window, UFILE *output) {
 		/*
 		// Print the static reading tags
 		if (cohort->wread) {
-			foreach (tter, cohort->wread->tags_list) {
-				if (*tter == cohort->wordform->hash) {
+			for (auto tter : cohort->wread->tags_list) {
+				if (tter == cohort->wordform->hash) {
 					continue;
 				}
-				const Tag *tag = single_tags[*tter];
+				const Tag *tag = single_tags[tter];
 				u_fprintf(output, "<%S>", tag->tag.c_str());
 			}
 		}
@@ -918,20 +918,20 @@ void MatxinApplicator::mergeMappings(Cohort& cohort) {
 	// foo<N><Sg><Acc><@←SUBJ>/foo<N><Sg><Acc><@←OBJ>
 	// => foo<N><Sg><Acc><@←SUBJ>/foo<N><Sg><Acc><@←OBJ>
 	std::map<uint32_t, ReadingList> mlist;
-	foreach (iter, cohort.readings) {
-		Reading *r = *iter;
+	for (auto iter : cohort.readings) {
+		Reading *r = iter;
 		uint32_t hp = r->hash; // instead of hash_plain, which doesn't include mapping tags
 		if (trace) {
-			foreach (iter_hb, r->hit_by) {
-				hp = hash_value(*iter_hb, hp);
+			for (auto iter_hb : r->hit_by) {
+				hp = hash_value(iter_hb, hp);
 			}
 		}
 		Reading *sub = r->next;
 		while (sub) {
 			hp = hash_value(sub->hash, hp);
 			if (trace) {
-				foreach (iter_hb, sub->hit_by) {
-					hp = hash_value(*iter_hb, hp);
+				for (auto iter_hb : sub->hit_by) {
+					hp = hash_value(iter_hb, hp);
 				}
 			}
 			sub = sub->next;
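
Most of the mechanical churn in this applicator (and the ones that follow) is the move from the project's old foreach/boost_foreach iterator macros to C++11 range-based for loops, which bind the element itself rather than an iterator — hence `**iter` collapsing to `*iter` and `*tter` to `tter`. A minimal stand-alone sketch of the before/after shapes, using a hypothetical Reading stand-in rather than the real cg3 type:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct Reading { uint32_t hash; };

    int main() {
        std::vector<Reading*> readings{ new Reading{ 1 }, new Reading{ 2 } };

        // Old style (approximated): the macro bound an iterator, so every
        // use site needed an extra dereference to reach the element.
        for (auto it = readings.begin(); it != readings.end(); ++it) {
            std::cout << (*it)->hash << '\n';
        }

        // New style: range-based for binds the element (here a Reading*),
        // so one level of dereference disappears at every use site.
        for (auto r : readings) {
            std::cout << r->hash << '\n';
        }

        for (auto r : readings) {
            delete r;
        }
    }
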
diff --git a/src/MatxinApplicator.hpp b/src/MatxinApplicator.hpp
index 6cb8015..f3dd4e2 100644
--- a/src/MatxinApplicator.hpp
+++ b/src/MatxinApplicator.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
diff --git a/src/MweSplitApplicator.cpp b/src/MweSplitApplicator.cpp
index 58d9433..a1757f8 100644
--- a/src/MweSplitApplicator.cpp
+++ b/src/MweSplitApplicator.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -35,14 +35,14 @@ void MweSplitApplicator::runGrammarOnText(istream& input, UFILE *output) {
 
 
 const Tag *MweSplitApplicator::maybeWfTag(const Reading *r) {
-	foreach (tter, r->tags_list) {
-		if ((!show_end_tags && *tter == endtag) || *tter == begintag) {
+	for (auto tter : r->tags_list) {
+		if ((!show_end_tags && tter == endtag) || tter == begintag) {
 			continue;
 		}
-		if (*tter == r->baseform || *tter == r->parent->wordform->hash) {
+		if (tter == r->baseform || tter == r->parent->wordform->hash) {
 			continue;
 		}
-		const Tag *tag = single_tags[*tter];
+		const Tag *tag = single_tags[tter];
 		// If we are to split, there has to be at least one wordform on a head (not-sub) reading
 		if (tag->type & T_WORDFORM) {
 			return tag;
@@ -52,13 +52,13 @@ const Tag *MweSplitApplicator::maybeWfTag(const Reading *r) {
 }
 
 std::vector<Cohort*> MweSplitApplicator::splitMwe(Cohort *cohort) {
-	const UChar rtrimblank[] = { ' ', '\n', '\r', '\t', 0 };
-	const UChar textprefix[] = { ':', 0 };
+	constexpr UChar rtrimblank[] = { ' ', '\n', '\r', '\t', 0 };
+	constexpr UChar textprefix[] = { ':', 0 };
 	std::vector<Cohort*> cos;
 	size_t n_wftags = 0;
 	size_t n_goodreadings = 0;
-	foreach (rter1, cohort->readings) {
-		if (maybeWfTag(*rter1) != NULL) {
+	for (auto rter1 : cohort->readings) {
+		if (maybeWfTag(rter1) != NULL) {
 			++n_wftags;
 		}
 		++n_goodreadings;
@@ -72,10 +72,10 @@ std::vector<Cohort*> MweSplitApplicator::splitMwe(Cohort *cohort) {
 		cos.push_back(cohort);
 		return cos;
 	}
-	foreach (r, cohort->readings) {
-		size_t pos = -1;
+	for (auto r : cohort->readings) {
+		size_t pos = std::numeric_limits<size_t>::max();
 		Reading *prev = NULL; // prev == NULL || prev->next == rNew (or a ->next of rNew)
-		for (Reading *sub = (*r); sub; sub = sub->next) {
+		for (auto sub = r; sub; sub = sub->next) {
 			const Tag *wfTag = maybeWfTag(sub);
 			if (wfTag == NULL) {
 				prev = prev->next;
@@ -107,7 +107,7 @@ std::vector<Cohort*> MweSplitApplicator::splitMwe(Cohort *cohort) {
 
 				Reading *rNew = alloc_reading(*sub);
 				for (size_t i = 0; i < rNew->tags_list.size(); ++i) {
-					BOOST_AUTO(&tter, rNew->tags_list[i]);
+					auto& tter = rNew->tags_list[i];
 					if (tter == wfTag->hash || tter == rNew->parent->wordform->hash) {
 						rNew->tags_list.erase(rNew->tags_list.begin() + i);
 						rNew->tags.erase(tter);
@@ -136,9 +136,9 @@ std::vector<Cohort*> MweSplitApplicator::splitMwe(Cohort *cohort) {
 
 
 void MweSplitApplicator::printSingleWindow(SingleWindow *window, UFILE *output) {
-	boost_foreach (uint32_t var, window->variables_output) {
+	for (auto var : window->variables_output) {
 		Tag *key = single_tags[var];
-		BOOST_AUTO(iter, window->variables_set.find(var));
+		auto iter = window->variables_set.find(var);
 		if (iter != window->variables_set.end()) {
 			if (iter->second != grammar->tag_any) {
 				Tag *value = single_tags[iter->second];
@@ -164,8 +164,8 @@ void MweSplitApplicator::printSingleWindow(SingleWindow *window, UFILE *output)
 	for (uint32_t c = 0; c < cs; c++) {
 		Cohort *cohort = window->cohorts[c];
 		std::vector<Cohort*> cs = splitMwe(cohort);
-		foreach (iter, cs) {
-			printCohort(*iter, output);
+		for (auto iter : cs) {
+			printCohort(iter, output);
 		}
 	}
 	u_fputc('\n', output);
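
One small semantics-preserving cleanup above: the sentinel `size_t pos = -1;` becomes `std::numeric_limits<size_t>::max()`. Both yield the same value, as this stand-alone check (not cg3 code) illustrates; the new spelling just states the intent and sidesteps the signed-to-unsigned conversion warning:

    #include <cstddef>
    #include <iostream>
    #include <limits>

    int main() {
        // Unsigned wrap-around is well defined, so -1 already produced the
        // maximum value; the explicit form merely says so out loud.
        std::size_t a = -1;
        std::size_t b = std::numeric_limits<std::size_t>::max();
        std::cout << (a == b) << '\n'; // prints 1
    }
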
diff --git a/src/MweSplitApplicator.hpp b/src/MweSplitApplicator.hpp
index 55469c5..3f328d1 100644
--- a/src/MweSplitApplicator.hpp
+++ b/src/MweSplitApplicator.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
diff --git a/src/NicelineApplicator.cpp b/src/NicelineApplicator.cpp
index aa63c94..6465ca2 100644
--- a/src/NicelineApplicator.cpp
+++ b/src/NicelineApplicator.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -159,8 +159,8 @@ void NicelineApplicator::runGrammarOnText(istream& input, UFILE *output) {
 					u_fprintf(ux_stderr, "Warning: Soft limit of %u cohorts reached at line %u but found suitable soft delimiter.\n", soft_limit, numLines);
 					u_fflush(ux_stderr);
 				}
-				foreach (iter, cCohort->readings) {
-					addTagToReading(**iter, endtag);
+				for (auto iter : cCohort->readings) {
+					addTagToReading(*iter, endtag);
 				}
 
 				cSWindow->appendCohort(cCohort);
@@ -175,8 +175,8 @@ void NicelineApplicator::runGrammarOnText(istream& input, UFILE *output) {
 					u_fprintf(ux_stderr, "Warning: Hard limit of %u cohorts reached at line %u - forcing break.\n", hard_limit, numLines);
 					u_fflush(ux_stderr);
 				}
-				foreach (iter, cCohort->readings) {
-					addTagToReading(**iter, endtag);
+				for (auto iter : cCohort->readings) {
+					addTagToReading(*iter, endtag);
 				}
 
 				cSWindow->appendCohort(cCohort);
@@ -328,8 +328,8 @@ void NicelineApplicator::runGrammarOnText(istream& input, UFILE *output) {
 		if (cCohort->readings.empty()) {
 			initEmptyCohort(*cCohort);
 		}
-		foreach (iter, cCohort->readings) {
-			addTagToReading(**iter, endtag);
+		for (auto iter : cCohort->readings) {
+			addTagToReading(*iter, endtag);
 		}
 		cReading = 0;
 		cCohort = 0;
@@ -370,20 +370,20 @@ void NicelineApplicator::printReading(const Reading *reading, UFILE *output) {
 	}
 
 	uint32SortedVector unique;
-	foreach (tter, reading->tags_list) {
-		if ((!show_end_tags && *tter == endtag) || *tter == begintag) {
+	for (auto tter : reading->tags_list) {
+		if ((!show_end_tags && tter == endtag) || tter == begintag) {
 			continue;
 		}
-		if (*tter == reading->baseform || *tter == reading->parent->wordform->hash) {
+		if (tter == reading->baseform || tter == reading->parent->wordform->hash) {
 			continue;
 		}
 		if (unique_tags) {
-			if (unique.find(*tter) != unique.end()) {
+			if (unique.find(tter) != unique.end()) {
 				continue;
 			}
-			unique.insert(*tter);
+			unique.insert(tter);
 		}
-		const Tag *tag = single_tags[*tter];
+		const Tag *tag = single_tags[tter];
 		if (tag->type & T_DEPENDENCY && has_dep && !dep_original) {
 			continue;
 		}
@@ -408,8 +408,8 @@ void NicelineApplicator::printReading(const Reading *reading, UFILE *output) {
 			}
 		}
 
-		const UChar local_utf_pattern[] = { ' ', '#', '%', 'u', L'\u2192', '%', 'u', 0 };
-		const UChar local_latin_pattern[] = { ' ', '#', '%', 'u', '-', '>', '%', 'u', 0 };
+		constexpr UChar local_utf_pattern[] = { ' ', '#', '%', 'u', L'\u2192', '%', 'u', 0 };
+		constexpr UChar local_latin_pattern[] = { ' ', '#', '%', 'u', '-', '>', '%', 'u', 0 };
 		const UChar *pattern = local_latin_pattern;
 		if (unicode_tags) {
 			pattern = local_utf_pattern;
@@ -436,18 +436,18 @@ void NicelineApplicator::printReading(const Reading *reading, UFILE *output) {
 	if (reading->parent->type & CT_RELATED) {
 		u_fprintf(output, " ID:%u", reading->parent->global_number);
 		if (!reading->parent->relations.empty()) {
-			foreach (miter, reading->parent->relations) {
-				boost_foreach (uint32_t siter, miter->second) {
-					u_fprintf(output, " R:%S:%u", grammar->single_tags.find(miter->first)->second->tag.c_str(), siter);
+			for (auto miter : reading->parent->relations) {
+				for (auto siter : miter.second) {
+					u_fprintf(output, " R:%S:%u", grammar->single_tags.find(miter.first)->second->tag.c_str(), siter);
 				}
 			}
 		}
 	}
 
 	if (trace) {
-		foreach (iter_hb, reading->hit_by) {
+		for (auto iter_hb : reading->hit_by) {
 			u_fputc(' ', output);
-			printTrace(output, *iter_hb);
+			printTrace(output, iter_hb);
 		}
 	}
 
@@ -459,7 +459,7 @@ void NicelineApplicator::printReading(const Reading *reading, UFILE *output) {
 }
 
 void NicelineApplicator::printCohort(Cohort *cohort, UFILE *output) {
-	const UChar ws[] = { ' ', '\t', 0 };
+	constexpr UChar ws[] = { ' ', '\t', 0 };
 
 	if (cohort->local_number == 0) {
 		goto removed;
@@ -482,7 +482,7 @@ void NicelineApplicator::printCohort(Cohort *cohort, UFILE *output) {
 	if (cohort->readings.empty()) {
 		u_fputc('\t', output);
 	}
-	boost_foreach (Reading *rter, cohort->readings) {
+	for (auto rter : cohort->readings) {
 		printReading(rter, output);
 	}
 
diff --git a/src/NicelineApplicator.hpp b/src/NicelineApplicator.hpp
index 199acba..3dd3e55 100644
--- a/src/NicelineApplicator.hpp
+++ b/src/NicelineApplicator.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
diff --git a/src/PlaintextApplicator.cpp b/src/PlaintextApplicator.cpp
index b2f40c2..a5a67be 100644
--- a/src/PlaintextApplicator.cpp
+++ b/src/PlaintextApplicator.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -145,8 +145,8 @@ void PlaintextApplicator::runGrammarOnText(istream& input, UFILE *output) {
 					u_fprintf(ux_stderr, "Warning: Soft limit of %u cohorts reached at line %u but found suitable soft delimiter.\n", soft_limit, numLines);
 					u_fflush(ux_stderr);
 				}
-				foreach (iter, cCohort->readings) {
-					addTagToReading(**iter, endtag);
+				for (auto iter : cCohort->readings) {
+					addTagToReading(*iter, endtag);
 				}
 
 				cSWindow->appendCohort(cCohort);
@@ -162,8 +162,8 @@ void PlaintextApplicator::runGrammarOnText(istream& input, UFILE *output) {
 					u_fprintf(ux_stderr, "Warning: Hard limit of %u cohorts reached at line %u - forcing break.\n", hard_limit, numLines);
 					u_fflush(ux_stderr);
 				}
-				foreach (iter, cCohort->readings) {
-					addTagToReading(**iter, endtag);
+				for (auto iter : cCohort->readings) {
+					addTagToReading(*iter, endtag);
 				}
 
 				cSWindow->appendCohort(cCohort);
@@ -264,8 +264,13 @@ void PlaintextApplicator::runGrammarOnText(istream& input, UFILE *output) {
 				lCohort = cCohort;
 				numCohorts++;
 				cReading = initEmptyCohort(*cCohort);
-				cReading->noprint = false;
-				if (first_upper || all_upper || mixed_upper) {
+				cReading->noprint = !add_tags;
+				if (add_tags) {
+					constexpr char _tag[] = "<cg-conv>";
+					tag.assign(_tag, _tag + sizeof(_tag) - 1);
+					addTagToReading(*cReading, addTag(tag));
+				}
+				if (add_tags && (first_upper || all_upper || mixed_upper)) {
 					delTagFromReading(*cReading, cReading->baseform);
 					token.toLower();
 					tag.clear();
@@ -274,17 +279,17 @@ void PlaintextApplicator::runGrammarOnText(istream& input, UFILE *output) {
 					tag += '"';
 					addTagToReading(*cReading, addTag(tag));
 					if (all_upper) {
-						static const char _tag[] = "<all-upper>";
+						constexpr char _tag[] = "<all-upper>";
 						tag.assign(_tag, _tag + sizeof(_tag) - 1);
 						addTagToReading(*cReading, addTag(tag));
 					}
 					if (first_upper) {
-						static const char _tag[] = "<first-upper>";
+						constexpr char _tag[] = "<first-upper>";
 						tag.assign(_tag, _tag + sizeof(_tag) - 1);
 						addTagToReading(*cReading, addTag(tag));
 					}
 					if (mixed_upper && !all_upper) {
-						static const char _tag[] = "<mixed-upper>";
+						constexpr char _tag[] = "<mixed-upper>";
 						tag.assign(_tag, _tag + sizeof(_tag) - 1);
 						addTagToReading(*cReading, addTag(tag));
 					}
@@ -315,8 +320,8 @@ void PlaintextApplicator::runGrammarOnText(istream& input, UFILE *output) {
 		if (cCohort->readings.empty()) {
 			initEmptyCohort(*cCohort);
 		}
-		foreach (iter, cCohort->readings) {
-			addTagToReading(**iter, endtag);
+		for (auto iter : cCohort->readings) {
+			addTagToReading(*iter, endtag);
 		}
 		cReading = 0;
 		cCohort = 0;
diff --git a/src/PlaintextApplicator.hpp b/src/PlaintextApplicator.hpp
index abc5cb5..470454a 100644
--- a/src/PlaintextApplicator.hpp
+++ b/src/PlaintextApplicator.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -29,6 +29,8 @@ namespace CG3 {
 
 class PlaintextApplicator : public virtual GrammarApplicator {
 public:
+	bool add_tags = false;
+
 	PlaintextApplicator(UFILE *ux_err);
 	void runGrammarOnText(istream& input, UFILE *output);
 
diff --git a/src/Reading.cpp b/src/Reading.cpp
index 1a043df..6fcad35 100644
--- a/src/Reading.cpp
+++ b/src/Reading.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -176,9 +176,9 @@ Reading *Reading::allocateReading(const Reading& r) {
 uint32_t Reading::rehash() {
 	hash = 0;
 	hash_plain = 0;
-	foreach (iter, tags) {
-		if (!mapping || mapping->hash != *iter) {
-			hash = hash_value(*iter, hash);
+	for (auto iter : tags) {
+		if (!mapping || mapping->hash != iter) {
+			hash = hash_value(iter, hash);
 		}
 	}
 	hash_plain = hash;
diff --git a/src/Reading.hpp b/src/Reading.hpp
index 157f98e..d84361d 100644
--- a/src/Reading.hpp
+++ b/src/Reading.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -59,6 +59,10 @@ public:
 	uint32SortedVector tags_textual;
 	typedef bc::flat_map<uint32_t, Tag*> tags_numerical_t;
 	tags_numerical_t tags_numerical;
+	
+	// ToDo: Remove for real ordered mode
+	UString tags_string;
+	uint32_t tags_string_hash = 0;
 
 	Reading(Cohort *p = 0);
 	Reading(const Reading& r);
diff --git a/src/Relabeller.cpp b/src/Relabeller.cpp
index 68f43e3..82622a7 100644
--- a/src/Relabeller.cpp
+++ b/src/Relabeller.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -34,32 +34,32 @@ Relabeller::Relabeller(Grammar& res, const Grammar& relabels, UFILE *ux_err)
 	UStringSetMap *as_list = new UStringSetMap;
 	UStringSetMap *as_set = new UStringSetMap;
 
-	boost_foreach (const RuleVector::value_type rule, relabels.rule_by_number) {
+	for (auto rule : relabels.rule_by_number) {
 		const TagVector& fromTags = trie_getTagList(rule->maplist->trie);
 		Set *target = relabels.sets_list[rule->target];
 		const TagVector& toTags = trie_getTagList(target->trie);
 		if (!(rule->maplist->trie_special.empty() && target->trie_special.empty())) {
-			u_fprintf(ux_stderr, "Warning: Relabel rule '%S' on line %d has %d special tags, skipping!\n", rule->name, rule->line);
+			u_fprintf(ux_stderr, "Warning: Relabel rule '%S' on line %d has %d special tags, skipping!\n", rule->name.c_str(), rule->line);
 			continue;
 		}
 		if (!rule->tests.empty()) {
-			u_fprintf(ux_stderr, "Warning: Relabel rule '%S' on line %d had context tests, skipping!\n", rule->name, rule->line);
+			u_fprintf(ux_stderr, "Warning: Relabel rule '%S' on line %d had context tests, skipping!\n", rule->name.c_str(), rule->line);
 			continue;
 		}
 		if (rule->wordform) {
-			u_fprintf(ux_stderr, "Warning: Relabel rule '%S' on line %d had a wordform, skipping!\n", rule->name, rule->line);
+			u_fprintf(ux_stderr, "Warning: Relabel rule '%S' on line %d had a wordform, skipping!\n", rule->name.c_str(), rule->line);
 			continue;
 		}
 		if (rule->type != K_MAP) {
-			u_fprintf(ux_stderr, "Warning: Relabel rule '%S' on line %d has unexpected keyword (expected MAP), skipping!\n", rule->name, rule->line);
+			u_fprintf(ux_stderr, "Warning: Relabel rule '%S' on line %d has unexpected keyword (expected MAP), skipping!\n", rule->name.c_str(), rule->line);
 			continue;
 		}
 		if (fromTags.size() != 1) {
-			u_fprintf(ux_stderr, "Warning: Relabel rule '%S' on line %d has %d tags in the maplist (expected 1), skipping!\n", rule->name, rule->line, fromTags.size());
+			u_fprintf(ux_stderr, "Warning: Relabel rule '%S' on line %d has %d tags in the maplist (expected 1), skipping!\n", rule->name.c_str(), rule->line, fromTags.size());
 			continue;
 		}
 		Tag *fromTag = fromTags[0];
-		boost_foreach (const TagVector::value_type toit, toTags) {
+		for (auto toit : toTags) {
 			if (toit->type & T_SPECIAL) {
 				u_fprintf(ux_stderr, "Warning: Special tags (%S) not supported yet.\n", toit->tag.c_str());
 			}
@@ -85,7 +85,7 @@ Relabeller::~Relabeller() {
 
 TagVector Relabeller::transferTags(const TagVector tv_r) {
 	TagVector tv_g;
-	boost_foreach (Tag *tag_r, tv_r) {
+	for (auto tag_r : tv_r) {
 		Tag *tag_g = new Tag(*tag_r);
 		tag_g = grammar->addTag(tag_g); // new is deleted if it exists
 		tv_g.push_back(tag_g);
@@ -121,19 +121,19 @@ void Relabeller::addTaglistsToSet(const std::set<TagVector> tvs, Set *s) {
 	bc::flat_map<Tag*, size_t> tag_freq;
 	std::set<TagVector> tvs_sort_uniq;
 
-	boost_foreach (const TagVector& tvc, tvs) {
+	for (auto& tvc : tvs) {
 		TagVector& tags = const_cast<TagVector&>(tvc);
 		// From TextualParser::parseTagList
 		std::sort(tags.begin(), tags.end());
 		tags.erase(std::unique(tags.begin(), tags.end()), tags.end());
 		if (tvs_sort_uniq.insert(tags).second) {
-			boost_foreach (Tag *t, tags) {
+			for (auto t : tags) {
 				++tag_freq[t];
 			}
 		}
 	}
 	freq_sorter fs(tag_freq);
-	boost_foreach (const TagVector& tvc, tvs_sort_uniq) {
+	for (auto& tvc : tvs_sort_uniq) {
 		if (tvc.empty()) {
 			continue;
 		}
@@ -146,7 +146,7 @@ void Relabeller::addTaglistsToSet(const std::set<TagVector> tvs, Set *s) {
 		// Doing this yields a very cheap imperfect form of trie compression, but it's good enough
 		std::sort(tv.begin(), tv.end(), fs);
 		bool special = false;
-		boost_foreach (Tag *tag, tv) {
+		for (auto tag : tv) {
 			if (tag->type & T_SPECIAL) {
 				special = true;
 				break;
@@ -167,11 +167,11 @@ void Relabeller::relabelAsList(Set *set_g, const Set *set_r, const Tag *fromTag)
 	set_g->trie.clear();
 
 	std::set<TagVector> taglists;
-	boost_foreach (const TagVector& old_tags, old_tvs) {
+	for (auto& old_tags : old_tvs) {
 		TagVector tags_except_from;
 
 		bool seen = false;
-		boost_foreach (Tag *old_tag, old_tags) {
+		for (auto old_tag : old_tags) {
 			if (old_tag->hash == fromTag->hash) {
 				seen = true;
 			}
@@ -187,7 +187,7 @@ void Relabeller::relabelAsList(Set *set_g, const Set *set_r, const Tag *fromTag)
 			TagVector dummy;
 			suffixes.insert(dummy);
 		}
-		boost_foreach (const TagVector& suf, suffixes) {
+		for (auto& suf : suffixes) {
 			TagVector tags = TagVector(tags_except_from);
 			tags.insert(tags.end(), suf.begin(), suf.end());
 			tags = transferTags(tags);
@@ -278,11 +278,11 @@ void Relabeller::relabelAsSet(Set *set_g, const Set *set_r, const Tag *fromTag)
 
 	std::set<TagVector> tvs_with_from;
 	std::set<TagVector> tvs_no_from;
-	boost_foreach (const TagVector& old_tags, old_tvs) {
+	for (auto& old_tags : old_tvs) {
 		TagVector tags_except_from;
 
 		bool seen = false;
-		boost_foreach (Tag *old_tag, old_tags) {
+		for (auto old_tag : old_tags) {
 			if (old_tag->hash == fromTag->hash) {
 				seen = true;
 			}
@@ -341,37 +341,37 @@ void Relabeller::relabelAsSet(Set *set_g, const Set *set_r, const Tag *fromTag)
 }
 
 void Relabeller::relabel() {
-	stdext::hash_map<UString, Tag*> tag_by_str;
-	boost_foreach (const std::vector<Tag*>::value_type tag_g, grammar->single_tags_list) {
+	std::unordered_map<UString, Tag*, hash_ustring> tag_by_str;
+	for (auto tag_g : grammar->single_tags_list) {
 		tag_by_str[tag_g->tag] = tag_g;
 	}
-	stdext::hash_map<UString, std::set<Set*> > sets_by_tag;
-	boost_foreach (const std::vector<Set*>::value_type it, grammar->sets_list) {
+	std::unordered_map<UString, std::set<Set*>, hash_ustring> sets_by_tag;
+	for (auto it : grammar->sets_list) {
 		const TagVector& toTags = trie_getTagList(it->trie);
-		boost_foreach (const TagVector::value_type toit, toTags) {
+		for (auto toit : toTags) {
 			sets_by_tag[toit->tag].insert(it);
 		}
 	}
 	// RELABEL AS LIST:
-	boost_foreach (const UStringSetMap::value_type& it, *relabel_as_list) {
+	for (auto& it : *relabel_as_list) {
 		const Set *set_r = relabels->sets_list[it.second->number];
 		const Tag *fromTag = tag_by_str[it.first];
 
-		BOOST_AUTO(const sets_g, sets_by_tag.find(it.first));
+		const auto sets_g = sets_by_tag.find(it.first);
 		if (sets_g != sets_by_tag.end()) {
-			boost_foreach (Set *set_g, sets_g->second) {
+			for (auto set_g : sets_g->second) {
 				relabelAsList(set_g, set_r, fromTag);
 			}
 		}
 	}
 	// RELABEL AS SET:
-	boost_foreach (const UStringSetMap::value_type& it, *relabel_as_set) {
+	for (auto& it : *relabel_as_set) {
 		const Set *set_r = relabels->sets_list[it.second->number];
 		const Tag *fromTag = tag_by_str[it.first];
 
-		BOOST_AUTO(const sets_g, sets_by_tag.find(it.first));
+		const auto sets_g = sets_by_tag.find(it.first);
 		if (sets_g != sets_by_tag.end()) {
-			boost_foreach (Set *set_g, sets_g->second) {
+			for (auto set_g : sets_g->second) {
 				relabelAsSet(set_g, set_r, fromTag);
 			}
 		}
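
relabel() also swaps `stdext::hash_map` for `std::unordered_map` keyed on UString, supplying `hash_ustring` (defined elsewhere in the sources) as the hash functor. A sketch of that pattern with `std::u16string` standing in for UString and a deliberately simple, hypothetical hasher:

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <unordered_map>

    // Hypothetical stand-in for cg3's hash_ustring: any functor providing
    // size_t operator()(const Key&) const can fill the Hash parameter.
    struct hash_u16string {
        std::size_t operator()(const std::u16string& s) const {
            std::size_t h = 0;
            for (char16_t c : s) {
                h = h * 31 + c; // simple multiplicative mix, for illustration only
            }
            return h;
        }
    };

    int main() {
        std::unordered_map<std::u16string, int, hash_u16string> freq;
        ++freq[u"noun"];
        ++freq[u"noun"];
        std::cout << freq[u"noun"] << '\n'; // prints 2
    }
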
diff --git a/src/Relabeller.hpp b/src/Relabeller.hpp
index 562e45d..fcceb71 100644
--- a/src/Relabeller.hpp
+++ b/src/Relabeller.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -44,8 +44,8 @@ private:
 	Grammar *grammar;
 	const Grammar *relabels;
 
-	typedef stdext::hash_map<UString, UString> UStringMap;
-	typedef stdext::hash_map<UString, Set*> UStringSetMap;
+	typedef std::unordered_map<UString, UString, hash_ustring> UStringMap;
+	typedef std::unordered_map<UString, Set*, hash_ustring> UStringSetMap;
 	const UStringSetMap *relabel_as_list;
 	const UStringSetMap *relabel_as_set;
 
@@ -61,7 +61,7 @@ private:
 
 inline trie_t *_trie_copy_helper(const trie_t& trie, Grammar& grammar) {
 	trie_t *nt = new trie_t;
-	boost_foreach (const trie_t::value_type& p, trie) {
+	for (auto& p : trie) {
 		Tag *t = new Tag(*p.first);
 		t = grammar.addTag(t); // new is deleted if it exists
 		(*nt)[t].terminal = p.second.terminal;
@@ -74,7 +74,7 @@ inline trie_t *_trie_copy_helper(const trie_t& trie, Grammar& grammar) {
 
 inline trie_t trie_copy(const trie_t& trie, Grammar& grammar) {
 	trie_t nt;
-	boost_foreach (const trie_t::value_type& p, trie) {
+	for (auto& p : trie) {
 		Tag *t = new Tag(*p.first);
 		t = grammar.addTag(t); // new is deleted if it exists
 		nt[t].terminal = p.second.terminal;
diff --git a/src/Rule.cpp b/src/Rule.cpp
index adcdb83..81c1c1e 100644
--- a/src/Rule.cpp
+++ b/src/Rule.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -27,8 +27,7 @@
 namespace CG3 {
 
 Rule::Rule()
-  : name(0)
-  , wordform(0)
+  : wordform(0)
   , target(0)
   , childset1(0)
   , childset2(0)
@@ -39,8 +38,6 @@ Rule::Rule()
   , flags(0)
   , section(0)
   , sub_reading(0)
-  , weight(0.0)
-  , quality(0.0)
   , type(K_IGNORE)
   , maplist(0)
   , sublist(0)
@@ -53,15 +50,12 @@ Rule::Rule()
 }
 
 Rule::~Rule() {
-	delete[] name;
 }
 
 void Rule::setName(const UChar *to) {
-	delete[] name;
-	name = 0;
+	name.clear();
 	if (to) {
-		name = new UChar[u_strlen(to) + 1];
-		u_strcpy(name, to);
+		name = to;
 	}
 }
 
@@ -75,11 +69,11 @@ void Rule::reverseContextualTests() {
 }
 
 void Rule::resetStatistics() {
-	foreach (it, tests) {
-		(*it)->resetStatistics();
+	for (auto it : tests) {
+		it->resetStatistics();
 	}
-	foreach (it, dep_tests) {
-		(*it)->resetStatistics();
+	for (auto it : dep_tests) {
+		it->resetStatistics();
 	}
 	num_fail = 0;
 	num_match = 0;
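
Rule::name changes from a manually managed `UChar*` to a `UString`, which removes the destructor body and the new[]/copy dance in setName(). A rough before/after sketch, with std::u16string standing in for UString rather than the actual cg3 declarations:

    #include <cstddef>
    #include <iostream>
    #include <string>

    // "Before": raw pointer plus manual ownership.
    struct RuleOld {
        char16_t* name = nullptr;
        ~RuleOld() { delete[] name; }
        void setName(const char16_t* to) {
            delete[] name;
            name = nullptr;
            if (to) {
                std::size_t len = std::char_traits<char16_t>::length(to);
                name = new char16_t[len + 1];
                std::char_traits<char16_t>::copy(name, to, len + 1); // includes the terminator
            }
        }
    };

    // "After": the string owns its storage, so no destructor logic is needed.
    struct RuleNew {
        std::u16string name;
        void setName(const char16_t* to) {
            name.clear();
            if (to) {
                name = to;
            }
        }
    };

    int main() {
        RuleNew r;
        r.setName(u"my-rule");
        std::cout << r.name.size() << '\n'; // prints 7
    }
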
diff --git a/src/Rule.hpp b/src/Rule.hpp
index 3287eb3..200022a 100644
--- a/src/Rule.hpp
+++ b/src/Rule.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -36,38 +36,42 @@ class Set;
 
 // This must be kept in lock-step with Strings.hpp's FLAGS
 enum {
-	RF_NEAREST     = (1 <<  0),
-	RF_ALLOWLOOP   = (1 <<  1),
-	RF_DELAYED     = (1 <<  2),
-	RF_IMMEDIATE   = (1 <<  3),
-	RF_LOOKDELETED = (1 <<  4),
-	RF_LOOKDELAYED = (1 <<  5),
-	RF_UNSAFE      = (1 <<  6),
-	RF_SAFE        = (1 <<  7),
-	RF_REMEMBERX   = (1 <<  8),
-	RF_RESETX      = (1 <<  9),
-	RF_KEEPORDER   = (1 << 10),
-	RF_VARYORDER   = (1 << 11),
-	RF_ENCL_INNER  = (1 << 12),
-	RF_ENCL_OUTER  = (1 << 13),
-	RF_ENCL_FINAL  = (1 << 14),
-	RF_ENCL_ANY    = (1 << 15),
-	RF_ALLOWCROSS  = (1 << 16),
-	RF_WITHCHILD   = (1 << 17),
-	RF_NOCHILD     = (1 << 18),
-	RF_ITERATE     = (1 << 19),
-	RF_NOITERATE   = (1 << 20),
-	RF_UNMAPLAST   = (1 << 21),
-	RF_REVERSE     = (1 << 22),
-	RF_SUB         = (1 << 23),
-	RF_OUTPUT      = (1 << 24),
+	RF_NEAREST      = (1 <<  0),
+	RF_ALLOWLOOP    = (1 <<  1),
+	RF_DELAYED      = (1 <<  2),
+	RF_IMMEDIATE    = (1 <<  3),
+	RF_LOOKDELETED  = (1 <<  4),
+	RF_LOOKDELAYED  = (1 <<  5),
+	RF_UNSAFE       = (1 <<  6),
+	RF_SAFE         = (1 <<  7),
+	RF_REMEMBERX    = (1 <<  8),
+	RF_RESETX       = (1 <<  9),
+	RF_KEEPORDER    = (1 << 10),
+	RF_VARYORDER    = (1 << 11),
+	RF_ENCL_INNER   = (1 << 12),
+	RF_ENCL_OUTER   = (1 << 13),
+	RF_ENCL_FINAL   = (1 << 14),
+	RF_ENCL_ANY     = (1 << 15),
+	RF_ALLOWCROSS   = (1 << 16),
+	RF_WITHCHILD    = (1 << 17),
+	RF_NOCHILD      = (1 << 18),
+	RF_ITERATE      = (1 << 19),
+	RF_NOITERATE    = (1 << 20),
+	RF_UNMAPLAST    = (1 << 21),
+	RF_REVERSE      = (1 << 22),
+	RF_SUB          = (1 << 23),
+	RF_OUTPUT       = (1 << 24),
+	RF_CAPTURE_UNIF = (1 << 25),
+	RF_REPEAT       = (1 << 26),
+	RF_BEFORE       = (1 << 27),
+	RF_AFTER        = (1 << 28),
 
-	MASK_ENCL      = RF_ENCL_INNER | RF_ENCL_OUTER | RF_ENCL_FINAL | RF_ENCL_ANY,
+	MASK_ENCL       = RF_ENCL_INNER | RF_ENCL_OUTER | RF_ENCL_FINAL | RF_ENCL_ANY,
 };
 
 class Rule {
 public:
-	UChar *name;
+	UString name;
 	Tag *wordform;
 	uint32_t target;
 	uint32_t childset1, childset2;
@@ -76,8 +80,6 @@ public:
 	uint32_t flags;
 	int32_t section;
 	int32_t sub_reading;
-	// ToDo: Add proper "quality" quantifier based on num_fail, num_match, total_time
-	double weight, quality;
 	KEYWORDS type;
 	Set *maplist;
 	Set *sublist;
@@ -102,7 +104,7 @@ public:
 
 typedef std::vector<Rule*> RuleVector;
 typedef std::map<uint32_t, Rule*> RuleByLineMap;
-typedef stdext::hash_map<uint32_t, Rule*> RuleByLineHashMap;
+typedef std::unordered_map<uint32_t, Rule*> RuleByLineHashMap;
 }
 
 #endif
diff --git a/src/Set.cpp b/src/Set.cpp
index 4b00153..96f6405 100644
--- a/src/Set.cpp
+++ b/src/Set.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -137,7 +137,7 @@ void Set::markUsed(Grammar& grammar) {
 	trie_markused(trie);
 	trie_markused(trie_special);
 
-	boost_foreach (Tag *tag, ff_tags) {
+	for (auto tag : ff_tags) {
 		tag->markUsed();
 	}
 
diff --git a/src/Set.hpp b/src/Set.hpp
index 0707d4f..40246b2 100644
--- a/src/Set.hpp
+++ b/src/Set.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -88,11 +88,11 @@ public:
 
 typedef sorted_vector<Set*> SetSet;
 typedef std::vector<Set*> SetVector;
-typedef stdext::hash_map<uint32_t, Set*> Setuint32HashMap;
+typedef std::unordered_map<uint32_t, Set*> Setuint32HashMap;
 
 inline uint8_t trie_reindex(const trie_t& trie) {
 	uint8_t type = 0;
-	boost_foreach (const trie_t::value_type& kv, trie) {
+	for (auto& kv : trie) {
 		if (kv.first->type & T_SPECIAL) {
 			type |= ST_SPECIAL;
 		}
diff --git a/src/SingleWindow.cpp b/src/SingleWindow.cpp
index a46e2d5..d997017 100644
--- a/src/SingleWindow.cpp
+++ b/src/SingleWindow.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -74,8 +74,8 @@ SingleWindow::~SingleWindow() {
 		}
 	}
 
-	foreach (iter, cohorts) {
-		delete *iter;
+	for (auto iter : cohorts) {
+		delete iter;
 	}
 	if (next && previous) {
 		next->previous = previous;
@@ -103,8 +103,8 @@ void SingleWindow::clear() {
 		}
 	}
 
-	foreach (iter, cohorts) {
-		free_cohort(*iter);
+	for (auto iter : cohorts) {
+		free_cohort(iter);
 	}
 	if (next && previous) {
 		next->previous = previous;
@@ -128,7 +128,7 @@ void SingleWindow::clear() {
 	cohorts.clear();
 	valid_rules.clear();
 	hit_external.clear();
-	boost_foreach (CohortSet& cs, rule_to_cohorts) {
+	for (auto& cs : rule_to_cohorts) {
 		cs.clear();
 	}
 	variables_set.clear();
diff --git a/src/SingleWindow.hpp b/src/SingleWindow.hpp
index 27f7501..0001827 100644
--- a/src/SingleWindow.hpp
+++ b/src/SingleWindow.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
diff --git a/src/Strings.cpp b/src/Strings.cpp
index 311a6d9..cf93353 100644
--- a/src/Strings.cpp
+++ b/src/Strings.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -50,6 +50,9 @@ UnicodeString g_flags[FLAGS_COUNT] = {
 	UNICODE_STRING_SIMPLE("SUB"),
 	UNICODE_STRING_SIMPLE("OUTPUT"),
 	UNICODE_STRING_SIMPLE("CAPTURE_UNIF"),
+	UNICODE_STRING_SIMPLE("REPEAT"),
+	UNICODE_STRING_SIMPLE("BEFORE"),
+	UNICODE_STRING_SIMPLE("AFTER"),
 };
 
 UnicodeString keywords[KEYWORD_COUNT] = {
@@ -115,8 +118,8 @@ UnicodeString keywords[KEYWORD_COUNT] = {
 	UNICODE_STRING_SIMPLE("SPLITCOHORT"),
 };
 
-const UChar _S_SET_ISECT_U[] = { L'\u2229', 0 };
-const UChar _S_SET_SYMDIFF_U[] = { L'\u2206', 0 };
+constexpr UChar _S_SET_ISECT_U[] = { L'\u2229', 0 };
+constexpr UChar _S_SET_SYMDIFF_U[] = { L'\u2206', 0 };
 
 UnicodeString stringbits[STRINGS_COUNT] = {
 	UNICODE_STRING_SIMPLE("__CG3_DUMMY_STRINGBIT__"),
@@ -182,6 +185,7 @@ UnicodeString stringbits[STRINGS_COUNT] = {
 	UNICODE_STRING_SIMPLE("NEGATIVE"),
 	UNICODE_STRING_SIMPLE("ONCE"),
 	UNICODE_STRING_SIMPLE("ALWAYS"),
+	UNICODE_STRING_SIMPLE("\\"),
 	_S_SET_ISECT_U,
 	_S_SET_SYMDIFF_U,
 	UNICODE_STRING_SIMPLE("FROM"),
@@ -193,6 +197,8 @@ UnicodeString stringbits[STRINGS_COUNT] = {
 	UNICODE_STRING_SIMPLE("strict-wordforms"),
 	UNICODE_STRING_SIMPLE("strict-baseforms"),
 	UNICODE_STRING_SIMPLE("strict-secondary"),
+	UNICODE_STRING_SIMPLE("strict-regex"),
+	UNICODE_STRING_SIMPLE("strict-icase"),
 	UNICODE_STRING_SIMPLE("<STREAMCMD:SETVAR:"),
 	UNICODE_STRING_SIMPLE("<STREAMCMD:REMVAR:"),
 };
diff --git a/src/Strings.hpp b/src/Strings.hpp
index 05a7257..b8a308c 100644
--- a/src/Strings.hpp
+++ b/src/Strings.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -154,6 +154,7 @@ enum {
 	S_NEGATIVE,
 	S_ONCE,
 	S_ALWAYS,
+	S_SET_DIFF,
 	S_SET_ISECT_U,
 	S_SET_SYMDIFF_U,
 	S_FROM,
@@ -165,6 +166,8 @@ enum {
 	S_STRICT_WFORMS,
 	S_STRICT_BFORMS,
 	S_STRICT_SECOND,
+	S_STRICT_REGEX,
+	S_STRICT_ICASE,
 	S_CMD_SETVAR,
 	S_CMD_REMVAR,
 	STRINGS_COUNT,
@@ -198,6 +201,9 @@ enum {
 	FL_SUB,
 	FL_OUTPUT,
 	FL_CAPTURE_UNIF,
+	FL_REPEAT,
+	FL_BEFORE,
+	FL_AFTER,
 	FLAGS_COUNT,
 };
 }
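
The new REPEAT/BEFORE/AFTER flags have to land in two places at once: the g_flags name table in Strings.cpp and the FL_* enum here (the RF_* bit flags in Rule.hpp follow the same ordering, per the lock-step comment there). A small sketch of that pattern, with made-up entries and a compile-time guard that the real code does not necessarily have:

    #include <iostream>

    // Index enum; the trailing *_COUNT entry doubles as the table size.
    enum : unsigned {
        FL_NEAREST,
        FL_REPEAT,
        FL_BEFORE,
        FL_AFTER,
        FLAGS_COUNT,
    };

    // Parallel name table; entries must stay in the same order as the enum.
    static const char* const flag_names[] = {
        "NEAREST",
        "REPEAT",
        "BEFORE",
        "AFTER",
    };

    // If the two ever drift apart, fail at compile time instead of reading
    // out of bounds at run time.
    static_assert(sizeof(flag_names) / sizeof(flag_names[0]) == FLAGS_COUNT,
                  "flag_names must be kept in lock-step with the FL_* enum");

    int main() {
        std::cout << flag_names[FL_BEFORE] << '\n'; // prints BEFORE
    }
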
diff --git a/src/Tag.cpp b/src/Tag.cpp
index 12c2833..e4415a3 100644
--- a/src/Tag.cpp
+++ b/src/Tag.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -112,18 +112,18 @@ void Tag::parseTagRaw(const UChar *to, Grammar *grammar) {
 
 	tag.assign(tmp, length);
 
-	foreach (iter, grammar->regex_tags) {
+	for (auto iter : grammar->regex_tags) {
 		UErrorCode status = U_ZERO_ERROR;
-		uregex_setText(*iter, tag.c_str(), tag.length(), &status);
+		uregex_setText(iter, tag.c_str(), tag.size(), &status);
 		if (status == U_ZERO_ERROR) {
-			if (uregex_matches(*iter, 0, &status)) {
+			if (uregex_matches(iter, 0, &status)) {
 				type |= T_TEXTUAL;
 			}
 		}
 	}
-	foreach (iter, grammar->icase_tags) {
+	for (auto iter : grammar->icase_tags) {
 		UErrorCode status = U_ZERO_ERROR;
-		if (u_strCaseCompare(tag.c_str(), tag.length(), (*iter)->tag.c_str(), (*iter)->tag.length(), U_FOLD_CASE_DEFAULT, &status) == 0) {
+		if (u_strCaseCompare(tag.c_str(), tag.size(), iter->tag.c_str(), iter->tag.size(), U_FOLD_CASE_DEFAULT, &status) == 0) {
 			type |= T_TEXTUAL;
 		}
 	}
@@ -135,7 +135,7 @@ void Tag::parseTagRaw(const UChar *to, Grammar *grammar) {
 		if (u_sscanf(tag.c_str(), "#%i->%i", &dep_self, &dep_parent) == 2 && dep_self != 0) {
 			type |= T_DEPENDENCY;
 		}
-		const UChar local_dep_unicode[] = { '#', '%', 'i', L'\u2192', '%', 'i', 0 };
+		constexpr UChar local_dep_unicode[] = { '#', '%', 'i', L'\u2192', '%', 'i', 0 };
 		if (u_sscanf_u(tag.c_str(), local_dep_unicode, &dep_self, &dep_parent) == 2 && dep_self != 0) {
 			type |= T_DEPENDENCY;
 		}
@@ -167,22 +167,28 @@ void Tag::parseNumeric() {
 	UChar tkey[256];
 	UChar top[256];
 	UChar txval[256];
-	UChar spn[] = { '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 0 };
+	UChar spn[] = { '-', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 0 };
 	tkey[0] = 0;
 	top[0] = 0;
 	txval[0] = 0;
-	if (u_sscanf(tag.c_str(), "%*[<]%[^<>=:!]%[<>=:!]%[-MAXIN0-9]%*[>]", &tkey, &top, &txval) == 3 && top[0] && txval[0]) {
-		int32_t tval = 0;
+	if (u_sscanf(tag.c_str(), "%*[<]%[^<>=:!]%[<>=:!]%[-.MAXIN0-9]%*[>]", &tkey, &top, &txval) == 3 && top[0] && txval[0]) {
+		double tval = 0;
 		int32_t r = u_strspn(txval, spn);
 		if (txval[0] == 'M' && txval[1] == 'A' && txval[2] == 'X' && txval[3] == 0) {
-			tval = std::numeric_limits<int32_t>::max();
+			tval = NUMERIC_MAX;
 		}
 		else if (txval[0] == 'M' && txval[1] == 'I' && txval[2] == 'N' && txval[3] == 0) {
-			tval = std::numeric_limits<int32_t>::min();
+			tval = NUMERIC_MIN;
 		}
-		else if (txval[r] || u_sscanf(txval, "%d", &tval) != 1) {
+		else if (txval[r] || u_sscanf(txval, "%lf", &tval) != 1) {
 			return;
 		}
+		if (tval < NUMERIC_MIN) {
+			tval = NUMERIC_MIN;
+		}
+		if (tval > NUMERIC_MAX) {
+			tval = NUMERIC_MAX;
+		}
 		if (top[0] == '<') {
 			comparison_op = OP_LESSTHAN;
 		}
@@ -299,7 +305,7 @@ void Tag::allocateVsNames() {
 
 UString Tag::toUString(bool escape) const {
 	UString str;
-	str.reserve(tag.length());
+	str.reserve(tag.size());
 
 	if (type & T_FAILFAST) {
 		str += '^';
@@ -336,7 +342,7 @@ UString Tag::toUString(bool escape) const {
 	}
 
 	if (escape) {
-		for (size_t i = 0; i < tag.length(); ++i) {
+		for (size_t i = 0; i < tag.size(); ++i) {
 			if (tag[i] == '\\' || tag[i] == '(' || tag[i] == ')' || tag[i] == ';' || tag[i] == '#') {
 				str += '\\';
 			}
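
Tag::parseNumeric() now scans a double instead of an int32_t, accepts '.' in the value, and clamps the result into [NUMERIC_MIN, NUMERIC_MAX], constants defined elsewhere in the sources. The clamping itself reduces to something like the following sketch; the bounds here are illustrative only, not cg3's actual values:

    #include <algorithm>
    #include <cstdio>

    // Illustrative bounds only; cg3 defines its own NUMERIC_MIN/NUMERIC_MAX.
    constexpr double NUMERIC_MIN = -1.0e6;
    constexpr double NUMERIC_MAX = 1.0e6;

    // Out-of-range values (and the literal MIN/MAX keywords) collapse to the
    // representable bounds, mirroring the checks added to parseNumeric().
    double clamp_numeric(double tval) {
        return std::min(std::max(tval, NUMERIC_MIN), NUMERIC_MAX);
    }

    int main() {
        std::printf("%g\n", clamp_numeric(3.5));   // 3.5 - fractional values now parse
        std::printf("%g\n", clamp_numeric(1e300)); // clamped to 1e+06
    }
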
diff --git a/src/Tag.hpp b/src/Tag.hpp
index 049ebbe..ddb2227 100644
--- a/src/Tag.hpp
+++ b/src/Tag.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -33,7 +33,7 @@ class Set;
 
 typedef std::vector<Set*> SetVector;
 
-enum C_OPS {
+enum C_OPS : uint8_t {
 	OP_NOP,
 	OP_EQUALS,
 	OP_LESSTHAN,
@@ -44,7 +44,7 @@ enum C_OPS {
 	NUM_OPS,
 };
 
-enum {
+enum : uint32_t {
 	T_ANY              = (1 <<  0),
 	T_NUMERICAL        = (1 <<  1),
 	T_MAPPING          = (1 <<  2),
@@ -73,6 +73,8 @@ enum {
 	T_ENCL             = (1 << 25),
 	T_RELATION         = (1 << 26),
 
+	T_REGEXP_LINE      = (1u << 31), // ToDo: Remove for real ordered mode
+
 	MASK_TAG_SPECIAL   = T_ANY | T_TARGET | T_MARK | T_ATTACHTO | T_PAR_LEFT | T_PAR_RIGHT | T_NUMERICAL | T_VARIABLE | T_META | T_FAILFAST | T_CASE_INSENSITIVE | T_REGEXP | T_REGEXP_ANY | T_VARSTRING | T_SET | T_ENCL | T_SAME_BASIC,
 };
 
@@ -81,7 +83,7 @@ public:
 	static UFILE *dump_hashes_out;
 
 	C_OPS comparison_op;
-	int32_t comparison_val;
+	double comparison_val;
 	uint32_t type;
 	uint32_t comparison_hash;
 	uint32_t dep_self, dep_parent;
@@ -90,8 +92,8 @@ public:
 	uint32_t number;
 	uint32_t seed;
 	UString tag;
-	boost::scoped_ptr<SetVector> vs_sets;
-	boost::scoped_ptr<UStringVector> vs_names;
+	std::unique_ptr<SetVector> vs_sets;
+	std::unique_ptr<UStringVector> vs_names;
 	mutable URegularExpression *regexp;
 
 	Tag();
@@ -120,7 +122,7 @@ typedef sorted_vector<Tag*, compare_Tag> TagSortedVector;
 
 template<typename T>
 inline void fill_tagvector(const T& in, TagVector& tags, bool& did, bool& special) {
-	boost_foreach (Tag *tag, in) {
+	for (auto tag : in) {
 		if (tag->type & T_NUMERICAL) {
 			did = true;
 		}
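
Tag's vs_sets/vs_names members move from boost::scoped_ptr to std::unique_ptr. Both delete the pointee when the holder is destroyed; unique_ptr is simply the standard (and movable) equivalent. A small usage sketch with a placeholder Set type, not the real one:

    #include <iostream>
    #include <memory>
    #include <vector>

    struct Set { int number; };

    struct TagSketch {
        // Owned lazily, exactly like scoped_ptr: deleted when TagSketch dies.
        std::unique_ptr<std::vector<Set*>> vs_sets;

        void allocateVsSets() {
            if (!vs_sets) {
                vs_sets.reset(new std::vector<Set*>);
            }
        }
    };

    int main() {
        TagSketch t;
        t.allocateVsSets();
        Set s{ 42 };
        t.vs_sets->push_back(&s);
        std::cout << t.vs_sets->front()->number << '\n'; // prints 42
    }
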
diff --git a/src/TagTrie.hpp b/src/TagTrie.hpp
index 43c5f05..97df666 100644
--- a/src/TagTrie.hpp
+++ b/src/TagTrie.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -70,7 +70,7 @@ inline bool trie_insert(trie_t& trie, const TagVector& tv, size_t w = 0) {
 
 inline trie_t *_trie_copy_helper(const trie_t& trie) {
 	trie_t *nt = new trie_t;
-	boost_foreach (const trie_t::value_type& p, trie) {
+	for (auto& p : trie) {
 		(*nt)[p.first].terminal = p.second.terminal;
 		if (p.second.trie) {
 			(*nt)[p.first].trie = _trie_copy_helper(*p.second.trie);
@@ -81,7 +81,7 @@ inline trie_t *_trie_copy_helper(const trie_t& trie) {
 
 inline trie_t trie_copy(const trie_t& trie) {
 	trie_t nt;
-	boost_foreach (const trie_t::value_type& p, trie) {
+	for (auto& p : trie) {
 		nt[p.first].terminal = p.second.terminal;
 		if (p.second.trie) {
 			nt[p.first].trie = _trie_copy_helper(*p.second.trie);
@@ -91,7 +91,7 @@ inline trie_t trie_copy(const trie_t& trie) {
 }
 
 inline void trie_delete(trie_t& trie) {
-	boost_foreach (trie_t::value_type& p, trie) {
+	for (auto& p : trie) {
 		if (p.second.trie) {
 			trie_delete(*p.second.trie);
 			delete p.second.trie;
@@ -116,7 +116,7 @@ inline bool trie_singular(const trie_t& trie) {
 
 inline uint32_t trie_rehash(const trie_t& trie) {
 	uint32_t retval = 0;
-	boost_foreach (const trie_t::value_type& kv, trie) {
+	for (auto& kv : trie) {
 		retval = hash_value(kv.first->hash, retval);
 		if (kv.second.trie) {
 			retval = hash_value(trie_rehash(*kv.second.trie), retval);
@@ -126,7 +126,7 @@ inline uint32_t trie_rehash(const trie_t& trie) {
 }
 
 inline void trie_markused(trie_t& trie) {
-	boost_foreach (trie_t::value_type& kv, trie) {
+	for (auto& kv : trie) {
 		kv.first->markUsed();
 		if (kv.second.trie) {
 			trie_markused(*kv.second.trie);
@@ -135,7 +135,7 @@ inline void trie_markused(trie_t& trie) {
 }
 
 inline bool trie_hasType(trie_t& trie, uint32_t type) {
-	boost_foreach (trie_t::value_type& kv, trie) {
+	for (auto& kv : trie) {
 		if (kv.first->type & type) {
 			return true;
 		}
@@ -147,7 +147,7 @@ inline bool trie_hasType(trie_t& trie, uint32_t type) {
 }
 
 inline void trie_getTagList(const trie_t& trie, TagList& theTags) {
-	boost_foreach (const trie_t::value_type& kv, trie) {
+	for (auto& kv : trie) {
 		theTags.push_back(kv.first);
 		if (kv.second.trie) {
 			trie_getTagList(*kv.second.trie, theTags);
@@ -156,7 +156,7 @@ inline void trie_getTagList(const trie_t& trie, TagList& theTags) {
 }
 
 inline bool trie_getTagList(const trie_t& trie, TagList& theTags, const void *node) {
-	boost_foreach (const trie_t::value_type& kv, trie) {
+	for (auto& kv : trie) {
 		theTags.push_back(kv.first);
 		if (node == &kv) {
 			return true;
@@ -171,7 +171,7 @@ inline bool trie_getTagList(const trie_t& trie, TagList& theTags, const void *no
 
 /*
 	inline void trie_getTagList(const trie_t& trie, TagVector& theTags) {
-		boost_foreach (const trie_t::value_type& kv, trie) {
+		for (auto& kv : trie) {
 			theTags.push_back(kv.first);
 			if (kv.second.trie) {
 				trie_getTagList(*kv.second.trie, theTags);
@@ -182,7 +182,7 @@ inline bool trie_getTagList(const trie_t& trie, TagList& theTags, const void *no
 
 inline TagVector trie_getTagList(const trie_t& trie) {
 	TagVector theTags;
-	boost_foreach (const trie_t::value_type& kv, trie) {
+	for (auto& kv : trie) {
 		theTags.push_back(kv.first);
 		if (kv.second.trie) {
 			trie_getTagList(*kv.second.trie, theTags);
@@ -192,7 +192,7 @@ inline TagVector trie_getTagList(const trie_t& trie) {
 }
 
 inline void trie_getTags(const trie_t& trie, std::set<TagVector>& rv, TagVector& tv) {
-	boost_foreach (const trie_t::value_type& kv, trie) {
+	for (auto& kv : trie) {
 		tv.push_back(kv.first);
 		if (kv.second.terminal) {
 			std::sort(tv.begin(), tv.end());
@@ -208,7 +208,7 @@ inline void trie_getTags(const trie_t& trie, std::set<TagVector>& rv, TagVector&
 
 inline std::set<TagVector> trie_getTags(const trie_t& trie) {
 	std::set<TagVector> rv;
-	boost_foreach (const trie_t::value_type& kv, trie) {
+	for (auto& kv : trie) {
 		TagVector tv;
 		tv.push_back(kv.first);
 		if (kv.second.terminal) {
@@ -225,7 +225,7 @@ inline std::set<TagVector> trie_getTags(const trie_t& trie) {
 }
 
 inline void trie_getTagsOrdered(const trie_t& trie, std::set<TagVector>& rv, TagVector& tv) {
-	boost_foreach (const trie_t::value_type& kv, trie) {
+	for (auto& kv : trie) {
 		tv.push_back(kv.first);
 		if (kv.second.terminal) {
 			rv.insert(tv);
@@ -240,7 +240,7 @@ inline void trie_getTagsOrdered(const trie_t& trie, std::set<TagVector>& rv, Tag
 
 inline std::set<TagVector> trie_getTagsOrdered(const trie_t& trie) {
 	std::set<TagVector> rv;
-	boost_foreach (const trie_t::value_type& kv, trie) {
+	for (auto& kv : trie) {
 		TagVector tv;
 		tv.push_back(kv.first);
 		if (kv.second.terminal) {
@@ -256,7 +256,7 @@ inline std::set<TagVector> trie_getTagsOrdered(const trie_t& trie) {
 }
 
 inline void trie_serialize(const trie_t& trie, std::ostream& out) {
-	boost_foreach (const trie_t::value_type& kv, trie) {
+	for (auto& kv : trie) {
 		writeSwapped<uint32_t>(out, kv.first->number);
 		writeSwapped<uint8_t>(out, kv.second.terminal);
 		if (kv.second.trie) {
diff --git a/src/TextualParser.cpp b/src/TextualParser.cpp
index ad304b6..25556b6 100644
--- a/src/TextualParser.cpp
+++ b/src/TextualParser.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -136,6 +136,16 @@ Tag *TextualParser::parseTag(const UChar *to, const UChar *p) {
 		else if (u_strcmp(tag->tag.c_str(), stringbits[S_BEGINTAG].getTerminatedBuffer()) == 0 || u_strcmp(tag->tag.c_str(), stringbits[S_ENDTAG].getTerminatedBuffer()) == 0) {
 			// Always allow >>> and <<<
 		}
+		else if (tag->type & (T_REGEXP | T_REGEXP_ANY)) {
+			if (strict_regex) {
+				error("%s: Error: Regex tag %S not on the strict-tags list, on line %u near `%S`!\n", tag->tag.c_str(), p);
+			}
+		}
+		else if (tag->type & T_CASE_INSENSITIVE) {
+			if (strict_icase) {
+				error("%s: Error: Case-insensitive tag %S not on the strict-tags list, on line %u near `%S`!\n", tag->tag.c_str(), p);
+			}
+		}
 		else if (tag->type & T_WORDFORM) {
 			if (strict_wforms) {
 				error("%s: Error: Wordform tag %S not on the strict-tags list, on line %u near `%S`!\n", tag->tag.c_str(), p);
@@ -227,7 +237,7 @@ void TextualParser::parseTagList(UChar *& p, Set *s) {
 			tags.erase(std::unique(tags.begin(), tags.end()), tags.end());
 			// If this particular list of tags hasn't already been seen, then increment their frequency counts
 			if (taglists.insert(tags).second) {
-				boost_foreach (Tag *t, tags) {
+				for (auto t : tags) {
 					++tag_freq[t];
 				}
 			}
@@ -236,7 +246,7 @@ void TextualParser::parseTagList(UChar *& p, Set *s) {
 	AST_CLOSE(p);
 
 	freq_sorter fs(tag_freq);
-	boost_foreach (const TagVector& tvc, taglists) {
+	for (auto& tvc : taglists) {
 		if (tvc.size() == 1) {
 			result->addTagToSet(tvc[0], s);
 			continue;
@@ -246,7 +256,7 @@ void TextualParser::parseTagList(UChar *& p, Set *s) {
 		// Doing this yields a very cheap imperfect form of trie compression, but it's good enough
 		std::sort(tv.begin(), tv.end(), fs);
 		bool special = false;
-		boost_foreach (Tag *tag, tv) {
+		for (auto tag : tv) {
 			if (tag->type & T_SPECIAL) {
 				special = true;
 				break;
@@ -325,7 +335,7 @@ Set *TextualParser::parseSetInline(UChar *& p, Set *s) {
 					}
 					else {
 						bool special = false;
-						boost_foreach (Tag *tag, tags) {
+						for (auto tag : tags) {
 							if (tag->type & T_SPECIAL) {
 								special = true;
 								break;
@@ -359,15 +369,11 @@ Set *TextualParser::parseSetInline(UChar *& p, Set *s) {
 					AST_CLOSE(p);
 				}
 
-				if (!set_ops.empty() && (set_ops.back() == S_SET_ISECT_U || set_ops.back() == S_SET_SYMDIFF_U)) {
-					TagVector tv;
+				if (!set_ops.empty() && (set_ops.back() == S_SET_DIFF || set_ops.back() == S_SET_ISECT_U || set_ops.back() == S_SET_SYMDIFF_U)) {
 					std::set<TagVector> a;
-					trie_getTags(result->getSet(sets[sets.size() - 1])->trie, a, tv);
-					trie_getTags(result->getSet(sets[sets.size() - 1])->trie_special, a, tv);
-					tv.clear();
+					result->getTags(*result->getSet(sets[sets.size() - 1]), a);
 					std::set<TagVector> b;
-					trie_getTags(result->getSet(sets[sets.size() - 2])->trie, b, tv);
-					trie_getTags(result->getSet(sets[sets.size() - 2])->trie_special, b, tv);
+					result->getTags(*result->getSet(sets[sets.size() - 2]), b);
 
 					std::vector<TagVector> r;
 					if (set_ops.back() == S_SET_ISECT_U) {
@@ -376,6 +382,10 @@ Set *TextualParser::parseSetInline(UChar *& p, Set *s) {
 					else if (set_ops.back() == S_SET_SYMDIFF_U) {
 						std::set_symmetric_difference(a.begin(), a.end(), b.begin(), b.end(), std::back_inserter(r));
 					}
+					else if (set_ops.back() == S_SET_DIFF) {
+						// (b,a) because order matters for difference
+						std::set_difference(b.begin(), b.end(), a.begin(), a.end(), std::back_inserter(r));
+					}
 
 					set_ops.pop_back();
 					sets.pop_back();
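
The new S_SET_DIFF branch hinges on the "(b,a) because order matters" comment: intersection and symmetric difference are symmetric in their operands, plain difference is not, and in this hunk `a` holds the most recently parsed operand while `b` holds the one before it. A stand-alone illustration with plain ints in place of TagVectors:

    #include <algorithm>
    #include <iostream>
    #include <iterator>
    #include <set>
    #include <vector>

    int main() {
        std::set<int> a{ 1, 2, 3 }; // most recently parsed operand
        std::set<int> b{ 2, 3, 4 }; // the operand parsed before it

        std::vector<int> r;
        // b - a: elements of b that are not in a. Swapping the arguments
        // would compute a - b instead, which is why the grammar parser
        // passes (b, a) for the difference operator.
        std::set_difference(b.begin(), b.end(), a.begin(), a.end(), std::back_inserter(r));
        for (int x : r) {
            std::cout << x << ' '; // prints: 4
        }
        std::cout << '\n';
    }
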
@@ -386,14 +396,14 @@ Set *TextualParser::parseSetInline(UChar *& p, Set *s) {
 					set_c->setName(sets_counter++);
 
 					bc::flat_map<Tag*, size_t> tag_freq;
-					boost_foreach (const TagVector& tags, r) {
-						boost_foreach (Tag *t, tags) {
+					for (auto& tags : r) {
+						for (auto t : tags) {
 							++tag_freq[t];
 						}
 					}
 
 					freq_sorter fs(tag_freq);
-					boost_foreach (TagVector& tv, r) {
+					for (auto& tv : r) {
 						if (tv.size() == 1) {
 							result->addTagToSet(tv[0], set_c);
 							continue;
@@ -403,7 +413,7 @@ Set *TextualParser::parseSetInline(UChar *& p, Set *s) {
 						// Doing this yields a very cheap imperfect form of trie compression, but it's good enough
 						std::sort(tv.begin(), tv.end(), fs);
 						bool special = false;
-						boost_foreach (Tag *tag, tv) {
+						for (auto tag : tv) {
 							if (tag->type & T_SPECIAL) {
 								special = true;
 								break;
@@ -425,7 +435,12 @@ Set *TextualParser::parseSetInline(UChar *& p, Set *s) {
 			}
 			else {
 				UChar *n = p;
-				result->lines += SKIPTOWS(n, 0, true);
+				if (n[0] == '\\' && ISSPACE(n[1])) {
+					++n;
+				}
+				else {
+					result->lines += SKIPTOWS(n, 0, true);
+				}
 				ptrdiff_t c = n - p;
 				u_strncpy(&gbuffers[0][0], p, c);
 				gbuffers[0][c] = 0;
@@ -683,6 +698,10 @@ void TextualParser::parseContextualTestPosition(UChar *& p, ContextualTest& t) {
 	if (!ISSPACE(*p)) {
 		error("%s: Error: Invalid position on line %u near `%S` - garbage data!\n", n);
 	}
+	if (p-n == 1 && (*n == 'o' || *n == 'O')) {
+		error("%s: Error: Position on line %u near `%S` - stand-alone o or O doesn't make sense - maybe you meant 0?\n", n);
+	}
+
 	if (had_digits) {
 		if (t.pos & (POS_DEP_CHILD | POS_DEP_SIBLING | POS_DEP_PARENT)) {
 			error("%s: Error: Invalid position on line %u near `%S` - cannot combine offsets with dependency!\n", n);
@@ -893,9 +912,9 @@ ContextualTest *TextualParser::parseContextualTestList(UChar *& p, Rule *rule) {
 		}
 		result->lines += SKIPWS(p);
 
-		if ((t->barrier || t->cbarrier) && !(t->pos & MASK_POS_SCAN)) {
+		if ((t->barrier || t->cbarrier) && !(t->pos & (MASK_POS_SCAN | POS_SELF))) {
 			uncond_swap<UChar> swp(*p, 0);
-			u_fprintf(ux_stderr, "%s: Warning: Barriers only make sense for scanning tests on line %u at %S.\n", filebase, result->lines, pos_p);
+			u_fprintf(ux_stderr, "%s: Warning: Barriers only make sense for scanning or self tests on line %u at %S.\n", filebase, result->lines, pos_p);
 			u_fflush(ux_stderr);
 			t->barrier = 0;
 			t->cbarrier = 0;
@@ -1136,6 +1155,9 @@ void TextualParser::parseRule(UChar *& p, KEYWORDS key) {
 	if (rule->flags & RF_ITERATE && rule->flags & RF_NOITERATE) {
 		error("%s: Error: Line %u near `%S`: ITERATE and NOITERATE are mutually exclusive!\n", lp);
 	}
+	if (rule->flags & RF_BEFORE && rule->flags & RF_AFTER) {
+		error("%s: Error: Line %u near `%S`: BEFORE and AFTER are mutually exclusive!\n", lp);
+	}
 
 	if (!(rule->flags & (RF_ITERATE | RF_NOITERATE))) {
 		if (key != K_SELECT && key != K_REMOVE && key != K_IFF && key != K_DELIMIT && key != K_REMCOHORT && key != K_MOVE && key != K_SWITCH) {
@@ -1239,12 +1261,50 @@ void TextualParser::parseRule(UChar *& p, KEYWORDS key) {
 		AST_CLOSE(p);
 	}
 
+	if (key == K_ADD || key == K_MAP || key == K_SUBSTITUTE || key == K_COPY) {
+		if (ux_simplecasecmp(p, stringbits[S_AFTER].getTerminatedBuffer(), stringbits[S_AFTER].length())) {
+			p += stringbits[S_AFTER].length();
+			rule->flags |= RF_AFTER;
+		}
+		else if (ux_simplecasecmp(p, stringbits[S_BEFORE].getTerminatedBuffer(), stringbits[S_BEFORE].length())) {
+			p += stringbits[S_BEFORE].length();
+			rule->flags |= RF_BEFORE;
+		}
+		if (rule->flags & (RF_BEFORE | RF_AFTER)) {
+			Set *s = parseSetInlineWrapper(p);
+			rule->childset1 = s->hash;
+		}
+	}
+
 	result->lines += SKIPWS(p);
 	if (ux_simplecasecmp(p, stringbits[S_TARGET].getTerminatedBuffer(), stringbits[S_TARGET].length())) {
 		p += stringbits[S_TARGET].length();
 	}
 	result->lines += SKIPWS(p);
 
+	if (ux_simplecasecmp(p, g_flags[FL_WITHCHILD].getTerminatedBuffer(), g_flags[FL_WITHCHILD].length())) {
+		AST_OPEN(RuleFlag);
+		p += g_flags[FL_WITHCHILD].length();
+		AST_CLOSE(p);
+		AST_OPEN(RuleWithChildTarget);
+		Set *s = parseSetInlineWrapper(p);
+		AST_CLOSE(p);
+		result->has_dep = true;
+		rule->flags |= RF_WITHCHILD;
+		rule->flags &= ~RF_NOCHILD;
+		rule->childset1 = s->hash;
+		result->lines += SKIPWS(p);
+	}
+	else if (ux_simplecasecmp(p, g_flags[FL_NOCHILD].getTerminatedBuffer(), g_flags[FL_NOCHILD].length())) {
+		AST_OPEN(RuleFlag);
+		p += g_flags[FL_NOCHILD].length();
+		AST_CLOSE(p);
+		rule->flags |= RF_NOCHILD;
+		rule->flags &= ~RF_WITHCHILD;
+		rule->childset1 = 0;
+		result->lines += SKIPWS(p);
+	}
+
 	AST_OPEN(RuleTarget);
 	Set *s = parseSetInlineWrapper(p);
 	rule->target = s->hash;
@@ -1361,14 +1421,14 @@ void TextualParser::parseRule(UChar *& p, KEYWORDS key) {
 			found = true;
 		}
 		else {
-			foreach (it, rule->tests) {
-				if ((*it)->pos & POS_MARK_JUMP) {
+			for (auto it : rule->tests) {
+				if (it->pos & POS_MARK_JUMP) {
 					found = true;
 					break;
 				}
 			}
-			foreach (it, rule->dep_tests) {
-				if ((*it)->pos & POS_MARK_JUMP) {
+			for (auto it : rule->dep_tests) {
+				if (it->pos & POS_MARK_JUMP) {
 					found = true;
 					break;
 				}
@@ -1685,6 +1745,50 @@ void TextualParser::parseFromUChar(UChar *input, const char *fname) {
 			else if (IS_ICASE(p, "SETS", "sets")) {
 				p += 4;
 			}
+			// LIST-TAGS
+			else if (IS_ICASE(p, "LIST-TAGS", "list-tags")) {
+				AST_OPEN(ListTags);
+				p += 9;
+				result->lines += SKIPWS(p, '+');
+				if (p[0] != '+' || p[1] != '=') {
+					error("%s: Error: Encountered a %C before the expected += on line %u near `%S`!\n", *p, p);
+				}
+				p += 2;
+				result->lines += SKIPWS(p);
+
+				uint32SortedVector tmp;
+				list_tags.swap(tmp);
+				while (*p && *p != ';') {
+					AST_OPEN(Tag);
+					UChar *n = p;
+					if (*n == '"') {
+						n++;
+						SKIPTO_NOSPAN(n, '"');
+						if (*n != '"') {
+							error("%s: Error: Expected closing \" on line %u near `%S`!\n", p);
+						}
+					}
+					result->lines += SKIPTOWS(n, ';', true);
+					ptrdiff_t c = n - p;
+					u_strncpy(&gbuffers[0][0], p, c);
+					gbuffers[0][c] = 0;
+					Tag *t = parseTag(&gbuffers[0][0], p);
+					tmp.insert(t->hash);
+					p = n;
+					AST_CLOSE(p);
+					result->lines += SKIPWS(p);
+				}
+
+				if (tmp.empty()) {
+					error("%s: Error: LIST-TAGS declared, but no definitions given, on line %u near `%S`!\n", p);
+				}
+				result->lines += SKIPWS(p, ';');
+				if (*p != ';') {
+					error("%s: Error: Expected closing ; before line %u near `%S`!\n", p);
+				}
+				list_tags.swap(tmp);
+				AST_CLOSE(p + 1);
+			}
 			// LIST
 			else if (IS_ICASE(p, "LIST", "list")) {
 				Set *s = result->allocateSet();
@@ -1957,11 +2061,13 @@ void TextualParser::parseFromUChar(UChar *input, const char *fname) {
 					std::pair<size_t, bool*>(S_STRICT_WFORMS, &strict_wforms),
 					std::pair<size_t, bool*>(S_STRICT_BFORMS, &strict_bforms),
 					std::pair<size_t, bool*>(S_STRICT_SECOND, &strict_second),
+					std::pair<size_t, bool*>(S_STRICT_REGEX, &strict_regex),
+					std::pair<size_t, bool*>(S_STRICT_ICASE, &strict_icase),
 				};
 
 				while (*p != ';') {
 					bool found = false;
-					boost_foreach (pairs_t& pair, pairs) {
+					for (auto pair : pairs) {
 						if (ux_simplecasecmp(p, stringbits[pair.first].getTerminatedBuffer(), stringbits[pair.first].length())) {
 							AST_OPEN(Option);
 							p += stringbits[pair.first].length();
@@ -2092,7 +2198,7 @@ void TextualParser::parseFromUChar(UChar *input, const char *fname) {
 					u_fungetc(bom, grammar);
 				}
 
-				boost::shared_ptr<std::vector<UChar> > gbuf(new std::vector<UChar>(grammar_size * 2, 0));
+				std::shared_ptr<std::vector<UChar> > gbuf(new std::vector<UChar>(grammar_size * 2, 0));
 				grammarbufs.push_back(gbuf);
 				std::vector<UChar>& data = *gbuf.get();
 				uint32_t read = u_file_read(&data[4], grammar_size * 2, grammar);
@@ -2360,7 +2466,7 @@ int TextualParser::parse_grammar_from_file(const char *fname, const char *loc, c
 	}
 
 	// It reads into the buffer at offset 4 because certain functions may look back, so we need some nulls in front.
-	boost::shared_ptr<std::vector<UChar> > gbuf(new std::vector<UChar>(result->grammar_size * 2, 0));
+	std::shared_ptr<std::vector<UChar> > gbuf(new std::vector<UChar>(result->grammar_size * 2, 0));
 	grammarbufs.push_back(gbuf);
 	std::vector<UChar>& data = *gbuf.get();
 	uint32_t read = u_file_read(&data[4], result->grammar_size * 2, grammar);
@@ -2460,13 +2566,13 @@ int TextualParser::parse_grammar_from_file(const char *fname, const char *loc, c
 
 	result->addAnchor(keywords[K_END].getTerminatedBuffer(), result->rule_by_number.size() - 1, true);
 
-	foreach (it, result->rule_by_number) {
-		if ((*it)->name) {
-			result->addAnchor((*it)->name, (*it)->number, false);
+	for (auto it : result->rule_by_number) {
+		if (!it->name.empty()) {
+			result->addAnchor(it->name.c_str(), it->number, false);
 		}
 	}
 
-	boost_foreach (Tag *tag, result->single_tags_list) {
+	for (auto tag : result->single_tags_list) {
 		if (!(tag->type & T_VARSTRING)) {
 			continue;
 		}
@@ -2496,18 +2602,18 @@ int TextualParser::parse_grammar_from_file(const char *fname, const char *loc, c
 		} while (*p);
 	}
 
-	foreach (it, deferred_tmpls) {
-		uint32_t cn = hash_value(it->second.second);
+	for (auto it : deferred_tmpls) {
+		uint32_t cn = hash_value(it.second.second);
 		if (result->templates.find(cn) == result->templates.end()) {
-			u_fprintf(ux_stderr, "%s: Error: Unknown template '%S' referenced on line %u!\n", filebase, it->second.second.c_str(), it->second.first);
+			u_fprintf(ux_stderr, "%s: Error: Unknown template '%S' referenced on line %u!\n", filebase, it.second.second.c_str(), it.second.first);
 			++error_counter;
 			continue;
 		}
-		it->first->tmpl = result->templates.find(cn)->second;
+		it.first->tmpl = result->templates.find(cn)->second;
 	}
 
 	bc::flat_map<uint32_t, uint32_t> sets;
-	for (BOOST_AUTO(cntx, result->contexts.begin()); cntx != result->contexts.end();) {
+	for (auto cntx = result->contexts.begin(); cntx != result->contexts.end();) {
 		if (cntx->second->pos & POS_NUMERIC_BRANCH) {
 			ContextualTest *unsafec = cntx->second;
 			result->contexts.erase(cntx);
@@ -2532,18 +2638,18 @@ int TextualParser::parse_grammar_from_file(const char *fname, const char *loc, c
 			orc->ors.push_back(unsafec);
 			orc = result->addContextualTest(orc);
 
-			for (BOOST_AUTO(cntx, result->contexts.begin()); cntx != result->contexts.end(); ++cntx) {
+			for (auto cntx = result->contexts.begin(); cntx != result->contexts.end(); ++cntx) {
 				if (cntx->second->linked == tmp) {
 					cntx->second->linked = orc;
 				}
 			}
-			for (BOOST_AUTO(it, result->rule_by_number.begin()); it != result->rule_by_number.end(); ++it) {
+			for (auto it = result->rule_by_number.begin(); it != result->rule_by_number.end(); ++it) {
 				if ((*it)->dep_target == tmp) {
 					(*it)->dep_target = orc;
 				}
 				ContextList *cntxs[2] = { &(*it)->tests, &(*it)->dep_tests };
 				for (size_t i = 0; i < 2; ++i) {
-					boost_foreach (ContextualTest *& test, *cntxs[i]) {
+					for (auto& test : *cntxs[i]) {
 						if (test == tmp) {
 							test = orc;
 						}
diff --git a/src/TextualParser.hpp b/src/TextualParser.hpp
index 2785bfa..6a77ac7 100644
--- a/src/TextualParser.hpp
+++ b/src/TextualParser.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -54,6 +54,7 @@ public:
 	Grammar *get_grammar() { return result; }
 	const char *filebase;
 	uint32SortedVector strict_tags;
+	uint32SortedVector list_tags;
 
 private:
 	UChar nearbuf[32];
@@ -62,14 +63,14 @@ private:
 	uint32_t seen_mapping_prefix;
 	bool option_vislcg_compat;
 	bool in_section, in_before_sections, in_after_sections, in_null_section;
-	bool no_isets, no_itmpls, strict_wforms, strict_bforms, strict_second;
+	bool no_isets, no_itmpls, strict_wforms, strict_bforms, strict_second, strict_regex=false, strict_icase=false;
 	const char *filename;
 	const char *locale;
 	const char *codepage;
 
-	typedef stdext::hash_map<ContextualTest*, std::pair<size_t, UString> > deferred_t;
+	typedef std::unordered_map<ContextualTest*, std::pair<size_t, UString> > deferred_t;
 	deferred_t deferred_tmpls;
-	std::vector<boost::shared_ptr<std::vector<UChar> > > grammarbufs;
+	std::vector<std::shared_ptr<std::vector<UChar> > > grammarbufs;
 
 	void parseFromUChar(UChar *input, const char *fname = 0);
 	void addRuleToGrammar(Rule *rule);
diff --git a/src/Window.cpp b/src/Window.cpp
index 396ee65..75496b1 100644
--- a/src/Window.cpp
+++ b/src/Window.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -101,12 +101,12 @@ void Window::shuffleWindowsDown() {
 void Window::rebuildSingleWindowLinks() {
 	SingleWindow *sWindow = 0;
 
-	foreach (iter, previous) {
-		(*iter)->previous = sWindow;
+	for (auto iter : previous) {
+		iter->previous = sWindow;
 		if (sWindow) {
-			sWindow->next = *iter;
+			sWindow->next = iter;
 		}
-		sWindow = *iter;
+		sWindow = iter;
 	}
 
 	if (current) {
@@ -117,12 +117,12 @@ void Window::rebuildSingleWindowLinks() {
 		sWindow = current;
 	}
 
-	foreach (iter, next) {
-		(*iter)->previous = sWindow;
+	for (auto iter : next) {
+		iter->previous = sWindow;
 		if (sWindow) {
-			sWindow->next = *iter;
+			sWindow->next = iter;
 		}
-		sWindow = *iter;
+		sWindow = iter;
 	}
 
 	if (sWindow) {
@@ -144,13 +144,13 @@ void Window::rebuildCohortLinks() {
 
 	Cohort *prev = 0;
 	while (sWindow) {
-		foreach (citer, sWindow->cohorts) {
-			(*citer)->prev = prev;
-			(*citer)->next = 0;
+		for (auto citer : sWindow->cohorts) {
+			citer->prev = prev;
+			citer->next = 0;
 			if (prev) {
-				prev->next = *citer;
+				prev->next = citer;
 			}
-			prev = *citer;
+			prev = citer;
 		}
 		sWindow = sWindow->next;
 	}
diff --git a/src/Window.hpp b/src/Window.hpp
index fec65af..a08abfb 100644
--- a/src/Window.hpp
+++ b/src/Window.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
diff --git a/src/bloomish.hpp b/src/bloomish.hpp
index c109fae..bd0228e 100644
--- a/src/bloomish.hpp
+++ b/src/bloomish.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -23,7 +23,7 @@
 #ifndef c6d28b7452ec699b_BLOOMISH_HPP
 #define c6d28b7452ec699b_BLOOMISH_HPP
 #include <algorithm>
-#include <stdint.h> // C99 or C++0x or C++ TR1 will have this header. ToDo: Change to <cstdint> when C++0x broader support gets under way.
+#include <cstdint>
 
 namespace CG3 {
 
@@ -42,7 +42,7 @@ public:
 	}
 
 	void clear() {
-		std::fill(value, value + 4, 0);
+		std::fill(value, value + 4, static_cast<Cont>(0));
 	}
 
 	void insert(const Cont& v) {
diff --git a/src/cg-mwesplit.cpp b/src/cg-mwesplit.cpp
index ee52cb6..dbef741 100644
--- a/src/cg-mwesplit.cpp
+++ b/src/cg-mwesplit.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2016, GrammarSoft ApS
+ * Copyright (C) 2007-2017, GrammarSoft ApS
  * Developed by Tino Didriksen <mail at tinodidriksen.com>
  * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
  *
@@ -112,7 +112,7 @@ int main(int argc, char **argv) {
 	CG3::MweSplitApplicator applicator(ux_stderr);
 	applicator.setGrammar(&grammar);
 
-	boost::scoped_ptr<CG3::istream> instream;
+	std::unique_ptr<CG3::istream> instream;
 
 	instream.reset(new CG3::istream(ux_stdin));
 
diff --git a/src/cg-relabel.cpp b/src/cg-relabel.cpp
index 841e3d5..520e046 100644
--- a/src/cg-relabel.cpp
+++ b/src/cg-relabel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2016, GrammarSoft ApS
+ * Copyright (C) 2007-2017, GrammarSoft ApS
  * Developed by Tino Didriksen <mail at tinodidriksen.com>
  * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
  *
@@ -64,7 +64,7 @@ CG3::Grammar *cg3_grammar_load(const char *filename, UFILE *ux_stdout, UFILE *ux
 	grammar->ux_stderr = ux_stderr;
 	grammar->ux_stdout = ux_stdout;
 
-	boost::scoped_ptr<IGrammarParser> parser;
+	std::unique_ptr<IGrammarParser> parser;
 
 	if (cbuffers[0][0] == 'C' && cbuffers[0][1] == 'G' && cbuffers[0][2] == '3' && cbuffers[0][3] == 'B') {
 		parser.reset(new BinaryGrammar(*grammar, ux_stderr));
diff --git a/src/cg3.h b/src/cg3.h
index bec2cd5..bcaa6b5 100644
--- a/src/cg3.h
+++ b/src/cg3.h
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -140,6 +140,11 @@ const uint16_t *cg3_tag_gettext_u16(cg3_tag *tag);
 const uint32_t *cg3_tag_gettext_u32(cg3_tag *tag);
 const wchar_t *cg3_tag_gettext_w(cg3_tag *tag);
 
+// These 3 from Paul Meurer <paul.meurer at uni.no>
+size_t cg3_cohort_numdelreadings(cg3_cohort *cohort);
+cg3_reading *cg3_cohort_getdelreading(cg3_cohort *cohort, size_t which);
+size_t cg3_reading_gettrace_ruletype(cg3_reading *reading_, size_t which);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/src/cg_comp.cpp b/src/cg_comp.cpp
index fa99141..0b3a223 100644
--- a/src/cg_comp.cpp
+++ b/src/cg_comp.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2016, GrammarSoft ApS
+ * Copyright (C) 2007-2017, GrammarSoft ApS
  * Developed by Tino Didriksen <mail at tinodidriksen.com>
  * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
  *
diff --git a/src/cg_conv.cpp b/src/cg_conv.cpp
index d2b6b00..e8a1828 100644
--- a/src/cg_conv.cpp
+++ b/src/cg_conv.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -108,10 +108,15 @@ int main(int argc, char *argv[]) {
 	CG3::FormatConverter applicator(ux_stderr);
 	applicator.setGrammar(&grammar);
 
-	boost::scoped_ptr<CG3::istream> instream;
+	std::unique_ptr<CG3::istream> instream;
 
 	CG3::CG_FORMATS fmt = CG3::FMT_INVALID;
 
+	if (options[ADD_TAGS].doesOccur) {
+		options[IN_PLAIN].doesOccur = true;
+		dynamic_cast<CG3::PlaintextApplicator&>(applicator).add_tags = true;
+	}
+
 	if (options[IN_CG].doesOccur) {
 		fmt = CG3::FMT_CG;
 	}
diff --git a/src/cg_proc.cpp b/src/cg_proc.cpp
index e2917eb..014f862 100644
--- a/src/cg_proc.cpp
+++ b/src/cg_proc.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -300,9 +300,8 @@ int main(int argc, char *argv[]) {
 		buf[0] = 0;
 		buf[sn] = 0;
 		u_charsToUChars(single_rule, buf, sn);
-		foreach (riter, applicator->grammar->rule_by_number) {
-			const CG3::Rule *rule = *riter;
-			if (rule->name && u_strcmp(rule->name, buf) == 0) {
+		for (auto rule : applicator->grammar->rule_by_number) {
+			if (rule->name == buf) {
 				applicator->valid_rules.push_back(rule->number);
 			}
 		}
diff --git a/src/flat_unordered_map.hpp b/src/flat_unordered_map.hpp
index f3fd042..c0726a1 100644
--- a/src/flat_unordered_map.hpp
+++ b/src/flat_unordered_map.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -28,7 +28,7 @@
 #include <algorithm>
 #include <functional>
 #include <utility>
-#include <stdint.h> // C99 or C++0x or C++ TR1 will have this header. ToDo: Change to <cstdint> when C++0x broader support gets under way.
+#include <cstdint>
 
 namespace CG3 {
 
diff --git a/src/flat_unordered_set.hpp b/src/flat_unordered_set.hpp
index 2e62681..12d4742 100644
--- a/src/flat_unordered_set.hpp
+++ b/src/flat_unordered_set.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -27,7 +27,7 @@
 #include <vector>
 #include <algorithm>
 #include <functional>
-#include <stdint.h> // C99 or C++0x or C++ TR1 will have this header. ToDo: Change to <cstdint> when C++0x broader support gets under way.
+#include <cstdint>
 
 namespace CG3 {
 
diff --git a/src/inlines.hpp b/src/inlines.hpp
index 1a174d7..476eb0c 100644
--- a/src/inlines.hpp
+++ b/src/inlines.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -25,7 +25,9 @@
 
 namespace CG3 {
 
-const uint32_t CG3_HASH_SEED = 705577479u;
+constexpr double NUMERIC_MIN = static_cast<double>(-(1ll << 48ll));
+constexpr double NUMERIC_MAX = static_cast<double>((1ll << 48ll)-1);
+constexpr uint32_t CG3_HASH_SEED = 705577479u;
 
 /*
 	Paul Hsieh's SuperFastHash from http://www.azillionmonkeys.com/qed/hash.html
@@ -197,6 +199,12 @@ inline uint32_t hash_value(uint64_t c) {
 	//*/
 }
 
+struct hash_ustring {
+	size_t operator()(const UString& str) const {
+		return hash_value(str);
+	}
+};
+
 inline bool ISSPACE(const UChar c) {
 	if (c <= 0xFF && c != 0x09 && c != 0x0A && c != 0x0D && c != 0x20 && c != 0xA0) {
 		return false;
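
The hash_ustring functor above hashes a whole UString via hash_value(), which lets UString be used directly as a key in the std::unordered_* containers this commit migrates to. A minimal usage sketch, assuming inlines.hpp is on the include path and UString is CG-3's UChar string type:

    #include <cstddef>
    #include <unordered_map>
    #include "inlines.hpp"

    // Frequency table keyed by the tag/wordform text itself.
    std::unordered_map<CG3::UString, size_t, CG3::hash_ustring> freq;

    void count(const CG3::UString& text) {
        ++freq[text];
    }
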
@@ -232,6 +240,10 @@ inline bool ISESC(const UChar *p) {
 	return (a % 2 == 0);
 }
 
+inline bool ISSPACE(const UChar *p) {
+	return ISSPACE(*p) && !ISESC(p);
+}
+
 template<typename C, size_t N>
 inline bool IS_ICASE(const UChar *p, const C (&uc)[N], const C (&lc)[N]) {
 	// N - 1 due to null terminator for string constants
@@ -281,7 +293,7 @@ inline uint32_t SKIPWS(UChar *& p, const UChar a = 0, const UChar b = 0, const b
 
 inline uint32_t SKIPTOWS(UChar *& p, const UChar a = 0, const bool allowhash = false, const bool allowscol = false) {
 	uint32_t s = 0;
-	while (*p && !ISSPACE(*p)) {
+	while (*p && !ISSPACE(p)) {
 		if (!allowhash && *p == '#' && !ISESC(p)) {
 			s += SKIPLN(p);
 			--p;
@@ -338,6 +350,15 @@ inline void CG3Quit(const int32_t c = 0, const char *file = 0, const uint32_t li
 	exit(c);
 }
 
+inline constexpr uint64_t make_64(uint32_t hi, uint32_t low) {
+	return (static_cast<uint64_t>(hi) << 32) | static_cast<uint64_t>(low);
+}
+
+template <typename T, size_t N>
+inline constexpr size_t size(T(&)[N]) {
+	return N;
+}
+
 template<typename Cont, typename VT>
 inline bool index_matches(const Cont& index, const VT& entry) {
 	return (index.find(entry) != index.end());
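
make_64() above packs a high and a low 32-bit word into one uint64_t, and size() is a constexpr array-extent helper (used by the regex-line tag rewriting in parser_helpers.hpp later in this diff). A compile-time sketch of what they compute, assuming inlines.hpp is on the include path:

    #include <cstdint>
    #include "inlines.hpp"

    // The first argument becomes the upper 32 bits, the second the lower 32 bits.
    static_assert(CG3::make_64(0x00000001u, 0x00000002u) == 0x0000000100000002ull,
                  "make_64 packs hi:lo");

    // size() yields the extent of a built-in array at compile time.
    constexpr int sample[5] = { 0 };
    static_assert(CG3::size(sample) == 5, "size() returns N");
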
@@ -415,12 +436,15 @@ inline void writeSwapped(std::ostream& stream, const T& value) {
 		uint32_t tmp = static_cast<uint32_t>(htonl(static_cast<uint32_t>(value)));
 		stream.write(reinterpret_cast<const char*>(&tmp), sizeof(T));
 	}
-	/*
 	else if (sizeof(T) == 8) {
-		uint64_t tmp = static_cast<uint64_t>(htonll(static_cast<uint64_t>(value)));
+		uint64_t tmp = value;
+#ifndef BIG_ENDIAN
+		const uint32_t high = static_cast<uint32_t>(htonl(static_cast<uint32_t>(tmp >> 32)));
+		const uint32_t low = static_cast<uint32_t>(htonl(static_cast<uint32_t>(tmp & 0xFFFFFFFFULL)));
+		tmp = (static_cast<uint64_t>(low) << 32) | high;
+#endif
 		stream.write(reinterpret_cast<const char*>(&tmp), sizeof(T));
 	}
-	//*/
 	else {
 		throw std::runtime_error("Unhandled type size in writeSwapped()");
 	}
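
A worked example of the little-endian branch above: for value 0x0102030405060708, htonl() byte-swaps each 32-bit half and the two halves then trade places, so the bytes reach the stream in big-endian order. Note the halves are recombined as low:high, the reverse of make_64() earlier in this file. A self-contained sketch of the same transform (bswap32 stands in for htonl on a little-endian host):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t bswap32(uint32_t v) {
        return (v >> 24) | ((v >> 8) & 0x0000FF00u)
             | ((v << 8) & 0x00FF0000u) | (v << 24);
    }

    int main() {
        const uint64_t value = 0x0102030405060708ull;
        const uint32_t high = bswap32(static_cast<uint32_t>(value >> 32));            // 0x04030201
        const uint32_t low  = bswap32(static_cast<uint32_t>(value & 0xFFFFFFFFull));  // 0x08070605
        const uint64_t wire = (static_cast<uint64_t>(low) << 32) | high;              // 0x0807060504030201
        // Written out by a little-endian host, those 8 bytes land in the stream as
        // 01 02 03 04 05 06 07 08, i.e. the big-endian encoding of the original value.
        assert(wire == 0x0807060504030201ull);
    }
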
@@ -429,8 +453,20 @@ inline void writeSwapped(std::ostream& stream, const T& value) {
 	}
 }
 
+template<>
+inline void writeSwapped(std::ostream& stream, const double& value) {
+	int exp = 0;
+	uint64_t mant64 = static_cast<uint64_t>(std::numeric_limits<int64_t>::max() * frexp(value, &exp));
+	uint32_t exp32 = static_cast<uint32_t>(exp);
+	writeSwapped(stream, mant64);
+	writeSwapped(stream, exp32);
+}
+
 template<typename T>
 inline T readSwapped(std::istream& stream) {
+	if (!stream) {
+		throw std::runtime_error("Stream was in bad state in readSwapped()");
+	}
 	if (sizeof(T) == 1) {
 		uint8_t tmp = 0;
 		stream.read(reinterpret_cast<char*>(&tmp), sizeof(T));
@@ -446,20 +482,27 @@ inline T readSwapped(std::istream& stream) {
 		stream.read(reinterpret_cast<char*>(&tmp), sizeof(T));
 		return static_cast<T>(ntohl(tmp));
 	}
-	/*
 	else if (sizeof(T) == 8) {
 		uint64_t tmp = 0;
 		stream.read(reinterpret_cast<char*>(&tmp), sizeof(T));
-		return static_cast<T>(ntohll(tmp));
-	}
-	//*/
-	else {
-		throw std::runtime_error("Unhandled type size in readSwapped()");
-	}
-	if (!stream) {
-		throw std::runtime_error("Stream was in bad state in readSwapped()");
+#ifndef BIG_ENDIAN
+		const uint32_t high = static_cast<uint32_t>(ntohl(static_cast<uint32_t>(tmp >> 32)));
+		const uint32_t low = static_cast<uint32_t>(ntohl(static_cast<uint32_t>(tmp & 0xFFFFFFFFULL)));
+		tmp = (static_cast<uint64_t>(low) << 32) | high;
+#endif
+		return static_cast<T>(tmp);
 	}
-	return T();
+	throw std::runtime_error("Unhandled type size in readSwapped()");
+}
+
+template<>
+inline double readSwapped(std::istream& stream) {
+	uint64_t mant64 = readSwapped<uint64_t>(stream);
+	int exp = static_cast<int>(readSwapped<int32_t>(stream));
+
+	double value = static_cast<double>(static_cast<int64_t>(mant64)) / std::numeric_limits<int64_t>::max();
+
+	return ldexp(value, exp);
 }
 
 #ifdef _MSC_VER
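
The two specializations above serialize doubles portably by storing an integer mantissa plus an exponent (frexp() on write, ldexp() on read) rather than raw IEEE-754 bytes; writeSwapped<double> streams them as a uint64_t followed by a uint32_t. A simplified round-trip sketch of that encoding, leaving out the byte-swapping the real functions layer on top:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <limits>

    int main() {
        const double value = -123.456;

        // Encode: value == frac * 2^exp with |frac| in [0.5, 1); scale frac to an integer.
        int exp = 0;
        const double frac = std::frexp(value, &exp);
        const int64_t mant =
            static_cast<int64_t>(frac * std::numeric_limits<int64_t>::max());

        // Decode: scale the integer back into [-1, 1) and reapply the exponent.
        const double back =
            std::ldexp(static_cast<double>(mant) / std::numeric_limits<int64_t>::max(), exp);

        assert(std::fabs(back - value) < 1e-9);
    }
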
@@ -468,7 +511,7 @@ inline T readSwapped(std::istream& stream) {
 #endif
 
 template<typename Cont>
-inline void GAppSetOpts_ranged(const char *value, Cont& cont) {
+inline void GAppSetOpts_ranged(const char *value, Cont& cont, bool fill = true) {
 	cont.clear();
 	bool had_range = false;
 
@@ -486,7 +529,7 @@ inline void GAppSetOpts_ranged(const char *value, Cont& cont) {
 		}
 	} while ((comma = strchr(comma, ',')) != 0 && ++comma && *comma != 0);
 
-	if (cont.size() == 1 && !had_range) {
+	if (cont.size() == 1 && !had_range && fill) {
 		uint32_t val = cont.front();
 		cont.clear();
 		for (uint32_t i = 1; i <= val; ++i) {
diff --git a/src/interval_vector.hpp b/src/interval_vector.hpp
index 2ec603e..f477d75 100644
--- a/src/interval_vector.hpp
+++ b/src/interval_vector.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -25,7 +25,7 @@
 #include <iostream>
 #include <vector>
 #include <algorithm>
-#include <stdint.h> // C99 or C++0x or C++ TR1 will have this header. ToDo: Change to <cstdint> when C++0x broader support gets under way.
+#include <cstdint>
 
 namespace CG3 {
 
diff --git a/src/istream.hpp b/src/istream.hpp
index ef7f6cd..b6b5daa 100644
--- a/src/istream.hpp
+++ b/src/istream.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -91,7 +91,7 @@ public:
 
 	UChar *gets(UChar *s, int32_t m) {
 		if (offset < buffer.size()) {
-			std::fill(s, s + m, 0);
+			std::fill(s, s + m, static_cast<UChar>(0));
 			UChar *p = &buffer[offset];
 			UChar *n = p;
 			SKIPLN(n);
diff --git a/src/libcg3.cpp b/src/libcg3.cpp
index a7b4954..ddf67f7 100644
--- a/src/libcg3.cpp
+++ b/src/libcg3.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -102,7 +102,7 @@ cg3_grammar *cg3_grammar_load(const char *filename) {
 	grammar->ux_stderr = ux_stderr;
 	grammar->ux_stdout = ux_stdout;
 
-	boost::scoped_ptr<IGrammarParser> parser;
+	std::unique_ptr<IGrammarParser> parser;
 
 	if (cbuffers[0][0] == 'C' && cbuffers[0][1] == 'G' && cbuffers[0][2] == '3' && cbuffers[0][3] == 'B') {
 		u_fprintf(ux_stderr, "CG3 Info: Binary grammar detected.\n");
@@ -202,7 +202,7 @@ inline Reading *_reading_copy(Cohort *nc, Reading *oldr, bool is_sub = false) {
 	insert_if_exists(nr->parent->possible_sets, ga->grammar->sets_any);
 	ga->addTagToReading(*nr, nc->wordform);
 	TagList mappings;
-	boost_foreach (uint32_t tag, oldr->tags_list) {
+	for (auto tag : oldr->tags_list) {
 		Tag *nt = _tag_copy(oldr->parent->parent->parent->parent, nc->parent->parent->parent, tag);
 		if (nt->type & T_MAPPING || nt->tag[0] == ga->grammar->mapping_prefix) {
 			mappings.push_back(nt);
@@ -223,7 +223,7 @@ inline Reading *_reading_copy(Cohort *nc, Reading *oldr, bool is_sub = false) {
 inline Cohort *_cohort_copy(SingleWindow *ns, Cohort *oc) {
 	Cohort *nc = alloc_cohort(ns);
 	nc->wordform = _tag_copy(ns->parent->parent, oc->wordform);
-	boost_foreach (Reading *r, oc->readings) {
+	for (auto r : oc->readings) {
 		Reading *nr = _reading_copy(nc, r);
 		nc->appendReading(nr);
 	}
@@ -241,7 +241,7 @@ cg3_sentence *cg3_sentence_copy(cg3_sentence *sentence_, cg3_applicator *applica
 	applicator->initEmptySingleWindow(current);
 	current->has_enclosures = sentence->has_enclosures;
 	current->text = sentence->text;
-	boost_foreach (Cohort *c, sentence->cohorts) {
+	for (auto c : sentence->cohorts) {
 		Cohort *nc = _cohort_copy(current, c);
 		current->appendCohort(nc);
 	}
@@ -323,10 +323,10 @@ void cg3_cohort_getrelation_u(cg3_cohort *cohort_, const UChar *rel, uint32_t *r
 	GrammarApplicator *ga = cohort->parent->parent->parent;
 
 	if ((cohort->type & CT_RELATED) && !cohort->relations.empty()) {
-		foreach (miter, cohort->relations) {
-			foreach (siter, miter->second) {
-				if (u_strcmp(ga->single_tags.find(miter->first)->second->tag.c_str(), rel) == 0) {
-					*rel_parent = *siter;
+		for (auto miter : cohort->relations) {
+			for (auto siter : miter->second) {
+				if (u_strcmp(ga->single_tags.find(miter.first)->second->tag.c_str(), rel) == 0) {
+					*rel_parent = siter;
 				}
 			}
 		}
@@ -509,7 +509,7 @@ const char *cg3_tag_gettext_u8(cg3_tag *tag_) {
 	Tag *tag = static_cast<Tag*>(tag_);
 	UErrorCode status = U_ZERO_ERROR;
 
-	u_strToUTF8(&cbuffers[0][0], CG3_BUFFER_SIZE - 1, 0, tag->tag.c_str(), tag->tag.length(), &status);
+	u_strToUTF8(&cbuffers[0][0], CG3_BUFFER_SIZE - 1, 0, tag->tag.c_str(), tag->tag.size(), &status);
 	if (U_FAILURE(status)) {
 		u_fprintf(ux_stderr, "CG3 Error: Failed to convert text from UChar to UTF-8. Status = %s\n", u_errorName(status));
 		return 0;
@@ -529,7 +529,7 @@ const uint32_t *cg3_tag_gettext_u32(cg3_tag *tag_) {
 
 	UChar32 *tmp = reinterpret_cast<UChar32*>(&cbuffers[0][0]);
 
-	u_strToUTF32(tmp, (CG3_BUFFER_SIZE / sizeof(UChar32)) - 1, 0, tag->tag.c_str(), tag->tag.length(), &status);
+	u_strToUTF32(tmp, (CG3_BUFFER_SIZE / sizeof(UChar32)) - 1, 0, tag->tag.c_str(), tag->tag.size(), &status);
 	if (U_FAILURE(status)) {
 		u_fprintf(ux_stderr, "CG3 Error: Failed to convert text from UChar to UTF-32. Status = %s\n", u_errorName(status));
 		return 0;
@@ -544,7 +544,7 @@ const wchar_t *cg3_tag_gettext_w(cg3_tag *tag_) {
 
 	wchar_t *tmp = reinterpret_cast<wchar_t*>(&cbuffers[0][0]);
 
-	u_strToWCS(tmp, (CG3_BUFFER_SIZE / sizeof(wchar_t)) - 1, 0, tag->tag.c_str(), tag->tag.length(), &status);
+	u_strToWCS(tmp, (CG3_BUFFER_SIZE / sizeof(wchar_t)) - 1, 0, tag->tag.c_str(), tag->tag.size(), &status);
 	if (U_FAILURE(status)) {
 		u_fprintf(ux_stderr, "CG3 Error: Failed to convert text from UChar to UTF-32. Status = %s\n", u_errorName(status));
 		return 0;
@@ -552,3 +552,23 @@ const wchar_t *cg3_tag_gettext_w(cg3_tag *tag_) {
 
 	return tmp;
 }
+
+// These 3 from Paul Meurer <paul.meurer at uni.no>
+size_t cg3_cohort_numdelreadings(cg3_cohort *cohort_) {
+	Cohort *cohort = static_cast<Cohort*>(cohort_);
+	return cohort->deleted.size();
+}
+
+cg3_reading *cg3_cohort_getdelreading(cg3_cohort *cohort_, size_t which) {
+	Cohort *cohort = static_cast<Cohort*>(cohort_);
+	ReadingList::iterator it = cohort->deleted.begin();
+	std::advance(it, which);
+	return *it;
+}
+
+size_t cg3_reading_gettrace_ruletype(cg3_reading *reading_, size_t which) {
+	Reading *reading = static_cast<Reading*>(reading_);
+	Grammar *grammar = reading->parent->parent->parent->parent->grammar;
+	const Rule *r = grammar->rule_by_number[reading->hit_by[which]];
+	return r->type;
+}
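
The three new entry points above (declared in cg3.h earlier in this diff) expose a cohort's deleted readings and the rule type recorded in a reading's trace. A minimal usage sketch, assuming cg3.h is on the include path, `cohort` came from the existing libcg3 sentence/cohort accessors, and the run had tracing enabled:

    #include <cstddef>
    #include "cg3.h"

    // Walk the readings that rules have removed from one cohort.
    void inspect_deleted(cg3_cohort *cohort) {
        const size_t n = cg3_cohort_numdelreadings(cohort);
        for (size_t i = 0; i < n; ++i) {
            cg3_reading *del = cg3_cohort_getdelreading(cohort, i);
            // Index 0 is illustrative only; the API assumes the caller passes a
            // valid index into the reading's hit_by trace.
            const size_t rule_type = cg3_reading_gettrace_ruletype(del, 0);
            (void)rule_type;
        }
    }
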
diff --git a/src/main.cpp b/src/main.cpp
index a819d74..5a5b775 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -311,21 +311,6 @@ int main(int argc, char *argv[]) {
 		}
 	}
 
-	if (grammar.is_binary) {
-		if (options[GRAMMAR_BIN].doesOccur || options[GRAMMAR_OUT].doesOccur) {
-			std::cerr << "Error: Binary grammars cannot be rewritten." << std::endl;
-			CG3Quit(1);
-		}
-		if (options[STATISTICS].doesOccur) {
-			std::cerr << "Error: Statistics cannot be gathered with a binary grammar." << std::endl;
-			CG3Quit(1);
-		}
-		if (options[OPTIMIZE_UNSAFE].doesOccur || options[OPTIMIZE_SAFE].doesOccur) {
-			std::cerr << "Error: Binary grammars cannot be further optimized." << std::endl;
-			CG3Quit(1);
-		}
-	}
-
 	if (options[STATISTICS].doesOccur && !(options[GRAMMAR_BIN].doesOccur || options[GRAMMAR_OUT].doesOccur)) {
 		std::cerr << "Error: Does not make sense to gather statistics if you are not writing the compiled grammar back out somehow." << std::endl;
 		CG3Quit(1);
@@ -358,9 +343,9 @@ int main(int argc, char *argv[]) {
 
 	if (options[OPTIMIZE_UNSAFE].doesOccur) {
 		std::vector<uint32_t> bad;
-		foreach (ir, grammar.rule_by_number) {
-			if ((*ir)->num_match == 0) {
-				bad.push_back((*ir)->number);
+		for (auto ir : grammar.rule_by_number) {
+			if (ir->num_match == 0) {
+				bad.push_back(ir->number);
 			}
 		}
 		reverse_foreach (br, bad) {
@@ -374,17 +359,17 @@ int main(int argc, char *argv[]) {
 	}
 	if (options[OPTIMIZE_SAFE].doesOccur) {
 		CG3::RuleVector bad;
-		foreach (ir, grammar.rule_by_number) {
-			if ((*ir)->num_match == 0) {
-				bad.push_back(*ir);
+		for (auto ir : grammar.rule_by_number) {
+			if (ir->num_match == 0) {
+				bad.push_back(ir);
 			}
 		}
 		reverse_foreach (br, bad) {
 			grammar.rule_by_number.erase(grammar.rule_by_number.begin() + (*br)->number);
 		}
-		foreach (br, bad) {
-			(*br)->number = grammar.rule_by_number.size();
-			grammar.rule_by_number.push_back(*br);
+		for (auto br : bad) {
+			br->number = grammar.rule_by_number.size();
+			grammar.rule_by_number.push_back(br);
 		}
 		std::cerr << "Optimizer moved " << bad.size() << " rules." << std::endl;
 		grammar.reindex();
@@ -480,6 +465,9 @@ void GAppSetOpts(CG3::GrammarApplicator& applicator, UConverter *conv) {
 	}
 	if (options[TRACE].doesOccur) {
 		applicator.trace = true;
+		if (options[TRACE].value) {
+			CG3::GAppSetOpts_ranged(options[TRACE].value, applicator.trace_rules, false);
+		}
 	}
 	if (options[TRACE_NAME_ONLY].doesOccur) {
 		applicator.trace = true;
@@ -519,9 +507,8 @@ void GAppSetOpts(CG3::GrammarApplicator& applicator, UConverter *conv) {
 			buf[0] = 0;
 			ucnv_toUChars(conv, buf, sn * 3, options[RULE].value, sn, &status);
 
-			foreach (riter, applicator.grammar->rule_by_number) {
-				const CG3::Rule *rule = *riter;
-				if (rule->name && u_strcmp(rule->name, buf) == 0) {
+			for (auto rule : applicator.grammar->rule_by_number) {
+				if (rule->name == buf) {
 					applicator.valid_rules.push_back(rule->number);
 				}
 			}
diff --git a/src/options.hpp b/src/options.hpp
index 2f90ba5..db4e635 100644
--- a/src/options.hpp
+++ b/src/options.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -123,7 +123,7 @@ UOption options[] = {
 	UOPTION_DEF_D("no-sections",           0, UOPT_NO_ARG,       "disables all rules in SECTION parts"),
 	UOPTION_DEF_D("no-after-sections",     0, UOPT_NO_ARG,       "disables all rules in AFTER-SECTIONS parts"),
 
-	UOPTION_DEF_D("trace",               't', UOPT_NO_ARG,       "prints debug output alongside with normal output"),
+	UOPTION_DEF_D("trace",               't', UOPT_OPTIONAL_ARG, "prints debug output alongside normal output; optionally stops execution"),
 	UOPTION_DEF_D("trace-name-only",       0, UOPT_NO_ARG,       "if a rule is named, omit the line number; implies --trace"),
 	UOPTION_DEF_D("trace-no-removed",      0, UOPT_NO_ARG,       "does not print removed readings; implies --trace"),
 	UOPTION_DEF_D("trace-encl",            0, UOPT_NO_ARG,       "traces which enclosure pass is currently happening; implies --trace"),
@@ -152,7 +152,7 @@ UOption options[] = {
 	UOPTION_DEF_D("split-mappings",        0, UOPT_NO_ARG,       "keep mapped readings separate in output"),
 	UOPTION_DEF_D("show-end-tags",       'e', UOPT_NO_ARG,       "allows the <<< tags to appear in output"),
 	UOPTION_DEF_D("show-unused-sets",      0, UOPT_NO_ARG,       "prints a list of unused sets and their line numbers; implies --grammar-only"),
-	UOPTION_DEF_D("show-tags",             0, UOPT_NO_ARG,       "prints a list of unique tags; implies --grammar-only"),
+	UOPTION_DEF_D("show-tags",             0, UOPT_NO_ARG,       "prints a list of unique used tags; implies --grammar-only"),
 	UOPTION_DEF_D("show-tag-hashes",       0, UOPT_NO_ARG,       "prints a list of tags and their hashes as they are parsed during the run"),
 	UOPTION_DEF_D("show-set-hashes",       0, UOPT_NO_ARG,       "prints a list of sets and their hashes; implies --grammar-only"),
 	UOPTION_DEF_D("dump-ast",              0, UOPT_NO_ARG,       "prints the grammar parse tree; implies --grammar-only"),
diff --git a/src/options_conv.hpp b/src/options_conv.hpp
index 6267835..49cacfd 100644
--- a/src/options_conv.hpp
+++ b/src/options_conv.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -37,6 +37,7 @@ enum OPTIONS {
 	IN_APERTIUM,
 	IN_FST,
 	IN_PLAIN,
+	ADD_TAGS,
 	OUT_CG,
 	OUT_CG2,
 	OUT_APERTIUM,
@@ -62,13 +63,14 @@ UOption options[] = {
 	UOPTION_DEF_D("in-apertium",  'a', UOPT_NO_ARG,       "sets input format to Apertium"),
 	UOPTION_DEF_D("in-fst",       'f', UOPT_NO_ARG,       "sets input format to HFST/XFST"),
 	UOPTION_DEF_D("in-plain",     'p', UOPT_NO_ARG,       "sets input format to plain text"),
+	UOPTION_DEF_D("add-tags",       0, UOPT_NO_ARG,       "adds minimal analysis to readings (implies -p)"),
 	UOPTION_DEF_D("out-cg",       'C', UOPT_NO_ARG,       "sets output format to CG (default)"),
 	UOPTION_DEF_D("V",            'V', UOPT_NO_ARG,       "!"),
 	UOPTION_DEF_D("out-apertium", 'A', UOPT_NO_ARG,       "sets output format to Apertium"),
 	UOPTION_DEF_D("out-matxin",   'M', UOPT_NO_ARG,       "sets output format to Matxin"),
 	UOPTION_DEF_D("out-niceline", 'N', UOPT_NO_ARG,       "sets output format to Niceline CG"),
 	UOPTION_DEF_D("out-plain",    'P', UOPT_NO_ARG,       "sets output format to plain text"),
-	UOPTION_DEF_D("wfactor",      'W', UOPT_REQUIRES_ARG, "FST weight factor (defaults to 100.0)"),
+	UOPTION_DEF_D("wfactor",      'W', UOPT_REQUIRES_ARG, "FST weight factor (defaults to 1.0)"),
 	UOPTION_DEF_D("wtag",           0, UOPT_REQUIRES_ARG, "FST weight tag prefix (defaults to W)"),
 	UOPTION_DEF_D("sub-delim",    'S', UOPT_REQUIRES_ARG, "FST sub-reading delimiters (defaults to #)"),
 	UOPTION_DEF_D("rtl",          'r', UOPT_NO_ARG,       "sets sub-reading direction to RTL (default)"),
diff --git a/src/parser_helpers.hpp b/src/parser_helpers.hpp
index 318d5a9..30d914f 100644
--- a/src/parser_helpers.hpp
+++ b/src/parser_helpers.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -102,7 +102,7 @@ Tag *parseTag(const UChar *to, const UChar *p, State& state) {
 			size_t oldlength = length;
 
 			// Parse the suffixes r, i, v but max only one of each.
-			while (tmp[length - 1] == 'i' || tmp[length - 1] == 'r' || tmp[length - 1] == 'v') {
+			while (tmp[length - 1] == 'i' || tmp[length - 1] == 'r' || tmp[length - 1] == 'v' || tmp[length - 1] == 'l') {
 				if (!(tag->type & T_VARSTRING) && tmp[length - 1] == 'v') {
 					tag->type |= T_VARSTRING;
 					length--;
@@ -118,6 +118,12 @@ Tag *parseTag(const UChar *to, const UChar *p, State& state) {
 					length--;
 					continue;
 				}
+				if (!(tag->type & T_REGEXP_LINE) && tmp[length - 1] == 'l') {
+					tag->type |= T_REGEXP;
+					tag->type |= T_REGEXP_LINE;
+					length--;
+					continue;
+				}
 				break;
 			}
 
@@ -157,20 +163,31 @@ Tag *parseTag(const UChar *to, const UChar *p, State& state) {
 			state.error("%s: Error: Parsing tag %S resulted in an empty tag on line %u near `%S` - cannot continue!\n", tag->tag.c_str(), p);
 		}
 
-		foreach (iter, state.get_grammar()->regex_tags) {
+		// ToDo: Remove for real ordered mode
+		if (tag->type & T_REGEXP_LINE) {
+			constexpr UChar uu[] = { '_', '_', 0 };
+			constexpr UChar rx[] = { '(', '^', '|', '$', '|', ' ', '|', ' ', '.', '+', '?', ' ', ')', 0 }; // (^|$| | .+? )
+			size_t pos;
+			while ((pos = tag->tag.find(uu)) != UString::npos) {
+				tag->tag.replace(pos, 2, rx);
+				length += size(rx) - size(uu);
+			}
+		}
+
+		for (auto iter : state.get_grammar()->regex_tags) {
 			UErrorCode status = U_ZERO_ERROR;
-			uregex_setText(*iter, tag->tag.c_str(), tag->tag.length(), &status);
+			uregex_setText(iter, tag->tag.c_str(), tag->tag.size(), &status);
 			if (status != U_ZERO_ERROR) {
 				state.error("%s: Error: uregex_setText(parseTag) returned %s on line %u near `%S` - cannot continue!\n", u_errorName(status), p);
 			}
 			status = U_ZERO_ERROR;
-			if (uregex_matches(*iter, 0, &status)) {
+			if (uregex_matches(iter, 0, &status)) {
 				tag->type |= T_TEXTUAL;
 			}
 		}
-		foreach (iter, state.get_grammar()->icase_tags) {
+		for (auto iter : state.get_grammar()->icase_tags) {
 			UErrorCode status = U_ZERO_ERROR;
-			if (u_strCaseCompare(tag->tag.c_str(), tag->tag.length(), (*iter)->tag.c_str(), (*iter)->tag.length(), U_FOLD_CASE_DEFAULT, &status) == 0) {
+			if (u_strCaseCompare(tag->tag.c_str(), tag->tag.size(), iter->tag.c_str(), iter->tag.size(), U_FOLD_CASE_DEFAULT, &status) == 0) {
 				tag->type |= T_TEXTUAL;
 			}
 			if (status != U_ZERO_ERROR) {
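
The `l` suffix handled above (T_REGEXP_LINE, parsed in the suffix loop earlier in this file) rewrites every `__` in a regex tag to the alternation `(^|$| | .+? )`, so the pattern is matched against tag boundaries across the whole reading line. A self-contained sketch of the resulting patterns, using std::regex in place of the ICU uregex the library actually uses; the sample line echoes the tag sequence from the T_MapAdd_Different input later in this diff:

    #include <cassert>
    #include <regex>
    #include <string>

    int main() {
        const std::string line = "a b c wanted d e f";
        const std::string sep  = "(^|$| | .+? )";  // what each __ turns into

        // /__d\ e\ f__/l : "d e f" must appear as adjacent tags -> matches.
        assert(std::regex_search(line, std::regex(sep + "d e f" + sep)));

        // /__d__f__/l : other tags may sit between "d" and "f" -> matches.
        assert(std::regex_search(line, std::regex(sep + "d" + sep + "f" + sep)));

        // /__d\ f__/l : needs "d f" adjacent, which this line lacks -> no match.
        assert(!std::regex_search(line, std::regex(sep + "d f" + sep)));
    }
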
@@ -183,6 +200,19 @@ Tag *parseTag(const UChar *to, const UChar *p, State& state) {
 		if (tag->tag[0] == '<' && tag->tag[length - 1] == '>') {
 			tag->parseNumeric();
 		}
+		/*
+		if (tag->tag[0] == '#') {
+			uint32_t dep_self = 0;
+			uint32_t dep_parent = 0;
+			if (u_sscanf(tag->tag.c_str(), "#%i->%i", &dep_self, &dep_parent) == 2 && dep_self != 0) {
+				tag->type |= T_DEPENDENCY;
+			}
+			constexpr UChar local_dep_unicode[] = { '#', '%', 'i', L'\u2192', '%', 'i', 0 };
+			if (u_sscanf_u(tag->tag.c_str(), local_dep_unicode, &dep_self, &dep_parent) == 2 && dep_self != 0) {
+				tag->type |= T_DEPENDENCY;
+			}
+		}
+		//*/
 
 		if (u_strcmp(tag->tag.c_str(), stringbits[S_ASTERIK].getTerminatedBuffer()) == 0) {
 			tag->type |= T_ANY;
@@ -299,9 +329,9 @@ Set *parseSet(const UChar *name, const UChar *p, State& state) {
 	}
 	Set *tmp = state.get_grammar()->getSet(sh);
 	if (!tmp) {
-		if (!state.strict_tags.empty()) {
+		if (!state.strict_tags.empty() || !state.list_tags.empty()) {
 			Tag *tag = parseTag(name, p, state);
-			if (state.strict_tags.count(tag->plain_hash)) {
+			if (state.strict_tags.count(tag->plain_hash) || state.list_tags.count(tag->plain_hash)) {
 				Set *ns = state.get_grammar()->allocateSet();
 				ns->line = state.get_grammar()->lines;
 				ns->setName(name);
diff --git a/src/process.hpp b/src/process.hpp
index 0747637..820a655 100644
--- a/src/process.hpp
+++ b/src/process.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
diff --git a/src/scoped_stack.hpp b/src/scoped_stack.hpp
index 13a2b8c..10c6943 100644
--- a/src/scoped_stack.hpp
+++ b/src/scoped_stack.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
diff --git a/src/sorted_vector.hpp b/src/sorted_vector.hpp
index 9623ba6..de16326 100644
--- a/src/sorted_vector.hpp
+++ b/src/sorted_vector.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -26,7 +26,7 @@
 #include <vector>
 #include <algorithm>
 #include <functional>
-#include <stdint.h> // C99 or C++0x or C++ TR1 will have this header. ToDo: Change to <cstdint> when C++0x broader support gets under way.
+#include <cstdint>
 
 namespace CG3 {
 namespace detail {
@@ -116,8 +116,8 @@ public:
 		elements.erase(it, elements.end());
 	}
 
-	bool push_back(T t) {
-		return insert(t);
+	void push_back(T t) {
+		insert(t);
 	}
 
 	bool erase(T t) {
@@ -143,6 +143,13 @@ public:
 		return elements.erase(elements.begin() + o);
 	}
 
+	template<typename It>
+	void erase(It b, It e) {
+		for (; b != e; ++b) {
+			erase(*b);
+		}
+	}
+
 	const_iterator find(T t) const {
 		if (elements.empty()) {
 			return elements.end();
@@ -242,6 +249,10 @@ public:
 		std::sort(elements.begin(), elements.end(), Comp());
 	}
 
+	void pop_back() {
+		elements.pop_back();
+	}
+
 	container& get() {
 		return elements;
 	}
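
The sorted_vector changes above add pop_back(), make push_back() return void, and add a range erase() that removes values: it runs the single-value erase for each element in [b, e), unlike std::vector::erase, which removes the iterator range itself. A small sketch, assuming uint32SortedVector (as used in TextualParser.hpp) is CG3's sorted_vector of uint32_t and sorted_vector.hpp is on the include path:

    #include <cassert>
    #include <cstdint>
    #include "sorted_vector.hpp"

    int main() {
        CG3::uint32SortedVector sv;
        sv.push_back(3);
        sv.push_back(1);
        sv.push_back(2);             // kept sorted internally: 1 2 3

        const uint32_t drop[] = { 1, 3 };
        sv.erase(drop, drop + 2);    // value-based range erase: removes 1 and 3
        assert(sv.count(2) && !sv.count(1) && !sv.count(3));

        sv.pop_back();               // removes the remaining element
        assert(sv.empty());
    }
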
diff --git a/src/stdafx.hpp b/src/stdafx.hpp
index 650f276..350d046 100644
--- a/src/stdafx.hpp
+++ b/src/stdafx.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -49,6 +49,8 @@
 #include <string>
 #include <set>
 #include <map>
+#include <unordered_map>
+#include <unordered_set>
 #include <stack>
 #include <limits>
 #include <ctime>
@@ -57,7 +59,7 @@
 #include <cassert>
 #include <ciso646>
 #include <sys/stat.h>
-#include <stdint.h> // C99 or C++0x or C++ TR1 will have this header. ToDo: Change to <cstdint> when C++0x broader support gets under way.
+#include <cstdint>
 #include <cycle.h>
 
 // cycle.h doesn't know all platforms (such as ARM), so fall back on clock()
@@ -73,26 +75,17 @@
 	#define HAVE_TICK_COUNTER
 #endif
 
-#include <boost/unordered_set.hpp>
-#include <boost/unordered_map.hpp>
 #include <boost/container/flat_set.hpp>
 #include <boost/container/flat_map.hpp>
 #include <boost/dynamic_bitset.hpp>
-#include <boost/scoped_ptr.hpp>
-#include <boost/shared_ptr.hpp>
-#include <boost/typeof/typeof.hpp>
-#include <boost/foreach.hpp>
-#define boost_foreach BOOST_FOREACH
-#define stdext boost
-#define hash_map unordered_map
 
 #define foreach(iter, container) \
 	if (!(container).empty())    \
-		for (BOOST_AUTO(iter, (container).begin()), iter##_end = (container).end(); iter != iter##_end; ++iter)
+		for (auto iter = (container).begin(), iter##_end = (container).end(); iter != iter##_end; ++iter)
 
 #define reverse_foreach(iter, container) \
 	if (!(container).empty())            \
-		for (BOOST_AUTO(iter, (container).rbegin()), iter##_end = (container).rend(); iter != iter##_end; ++iter)
+		for (auto iter = (container).rbegin(), iter##_end = (container).rend(); iter != iter##_end; ++iter)
 
 #ifdef _WIN32
 	#include <winsock.h> // for hton() and family.
diff --git a/src/test_libcg3.c b/src/test_libcg3.c
index 66c69ee..49d68b0 100644
--- a/src/test_libcg3.c
+++ b/src/test_libcg3.c
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
diff --git a/src/uextras.cpp b/src/uextras.cpp
index b3978c8..227c416 100644
--- a/src/uextras.cpp
+++ b/src/uextras.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
diff --git a/src/uextras.hpp b/src/uextras.hpp
index b9f6e01..6458ee2 100644
--- a/src/uextras.hpp
+++ b/src/uextras.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -63,6 +63,8 @@ inline int ux_isSetOp(const UChar *it) {
 			return S_MINUS;
 		case '^':
 			return S_FAILFAST;
+		case '\\':
+			return S_SET_DIFF;
 		case 8745:
 			return S_SET_ISECT_U;
 		case 8710:
diff --git a/src/version.hpp b/src/version.hpp
index 650d88b..e187a79 100644
--- a/src/version.hpp
+++ b/src/version.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2007-2016, GrammarSoft ApS
+* Copyright (C) 2007-2017, GrammarSoft ApS
 * Developed by Tino Didriksen <mail at tinodidriksen.com>
 * Design by Eckhard Bick <eckhard.bick at mail.dk>, Tino Didriksen <mail at tinodidriksen.com>
 *
@@ -23,16 +23,16 @@
 #ifndef c6d28b7452ec699b_VERSION_H
 #define c6d28b7452ec699b_VERSION_H
 
-#include <stdint.h>
+#include <cstdint>
 
-const char *const CG3_COPYRIGHT_STRING = "Copyright (C) 2007-2016 GrammarSoft ApS. Licensed under GPLv3+";
+constexpr auto CG3_COPYRIGHT_STRING = "Copyright (C) 2007-2017 GrammarSoft ApS. Licensed under GPLv3+";
 
-const uint32_t CG3_VERSION_MAJOR = 0;
-const uint32_t CG3_VERSION_MINOR = 9;
-const uint32_t CG3_VERSION_PATCH = 9;
-const uint32_t CG3_REVISION = 11621;
-const uint32_t CG3_FEATURE_REV = 10575;
-const uint32_t CG3_TOO_OLD = 10373;
-const uint32_t CG3_EXTERNAL_PROTOCOL = 7226;
+constexpr uint32_t CG3_VERSION_MAJOR = 1;
+constexpr uint32_t CG3_VERSION_MINOR = 0;
+constexpr uint32_t CG3_VERSION_PATCH = 0;
+constexpr uint32_t CG3_REVISION = 12253;
+constexpr uint32_t CG3_FEATURE_REV = 12235;
+constexpr uint32_t CG3_TOO_OLD = 10373;
+constexpr uint32_t CG3_EXTERNAL_PROTOCOL = 7226;
 
 #endif
diff --git a/test/Apertium/T_Flush/expected.txt b/test/Apertium/T_Flush/expected.txt
new file mode 100644
index 0000000..9fcd785
Binary files /dev/null and b/test/Apertium/T_Flush/expected.txt differ
diff --git a/test/Apertium/T_Flush/grammar.cg3 b/test/Apertium/T_Flush/grammar.cg3
new file mode 100644
index 0000000..2376240
--- /dev/null
+++ b/test/Apertium/T_Flush/grammar.cg3
@@ -0,0 +1,7 @@
+DELIMITERS = "<$.>" ;
+
+LIST ASet = wanted ;
+
+SECTION
+
+SELECT ASet ;
diff --git a/test/Apertium/T_Flush/input.txt b/test/Apertium/T_Flush/input.txt
new file mode 100644
index 0000000..a294a13
Binary files /dev/null and b/test/Apertium/T_Flush/input.txt differ
diff --git a/test/Apertium/T_Flush/run.pl b/test/Apertium/T_Flush/run.pl
new file mode 100755
index 0000000..d39f5de
--- /dev/null
+++ b/test/Apertium/T_Flush/run.pl
@@ -0,0 +1,61 @@
+#!/usr/bin/perl
+use strict;
+use warnings;
+use Cwd qw(realpath);
+use FileHandle;
+use IPC::Open2;
+
+my ($bindir, $sep) = $0 =~ /^(.*)(\\|\/).*/;
+$bindir = realpath $bindir;
+chdir $bindir or die("Error: Could not change directory to $bindir !");
+
+my $bpath = $ARGV[0];
+my $binary = $bpath."cg-proc";
+my $compiler = $bpath."cg-comp";
+
+`"$compiler" grammar.cg3 grammar.bin  >stdout.txt 2>stderr.txt`;
+
+my $input = do {
+  local $/;
+  open my $fh, '<', 'input.txt' or die "Couldn't open input.txt for reading: $!\n";
+  <$fh>;
+};
+
+# Server:
+my $pid = open2(*Reader, *Writer, "$binary", "-z", "-d", "grammar.bin");
+
+# Client:
+my $tries = 10;
+my $timeout = 2;
+eval {
+  local $SIG{ALRM} = sub { die "timed out"; };
+  alarm($timeout);
+  do {
+    print Writer $input;
+    do {
+      local $/ = "\0";
+      my $got = <Reader> . "\n"; # input/expected have a final newline
+      open my $out, '>', "output.txt" or die "Couldn't open output.txt for writing: $!\n";
+      print $out $got;
+
+      `diff -a -B expected.txt output.txt >diff.txt`;
+      if (-s "diff.txt") {
+        last;
+      }
+      else {
+        $tries--;
+      }
+    };
+  } while ($tries > 0);
+  alarm(0);
+};
+
+if (my $e = $@) {
+  print STDERR "Fail: $e\n";
+}
+elsif ($tries != 0) {
+  print STDERR "Fail.\n";
+}
+else {
+  print STDERR "Success.\n";
+}
diff --git a/test/T_Append/expected.txt b/test/T_Append/expected.txt
index 1f91bf6..abc7b4f 100644
--- a/test/T_Append/expected.txt
+++ b/test/T_Append/expected.txt
@@ -5,13 +5,13 @@
 	"newword" appended1 @A @B
 	"newword" appended3 @A @C
 	"newword" appended2
-	"word" wanted copy copied
+	"word" copy copied wanted
 	"word" wanted copy also-copied
 	"word" wanted noguard-copied
-	"word" wanted copy copied noguard-copied
+	"word" copy copied wanted noguard-copied
 	"word" wanted copy also-copied noguard-copied
 	"word" wanted copied-except
-	"word" wanted copied copied-except
+	"word" copied wanted copied-except
 "<word>"
 	"word" notwanted
 "<waffle>"
diff --git a/test/T_Append/grammar.cg3 b/test/T_Append/grammar.cg3
index 7c7c57e..37be386 100644
--- a/test/T_Append/grammar.cg3
+++ b/test/T_Append/grammar.cg3
@@ -3,7 +3,7 @@ DELIMITERS = "<$.>" ;
 "<word>" APPEND ("newword" appended1 @A @B "newword" appended3 @A @C) (*) (NOT 0 (appended1)) ;
 "<word>" APPEND ("newword" appended2) (*) (NOT 0 (appended2)) ;
 
-COPY (copy copied) (wanted) - (copy) ;
+COPY (copy copied) BEFORE (wanted) (wanted) - (copy) ;
 COPY (copy also-copied) (wanted) - (copy) ;
 COPY (noguard-copied) (wanted) ;
 COPY (copied-except) EXCEPT (also-copied copy noguard-copied) (wanted) ;
diff --git a/test/T_Dependency_Loops/grammar.cg3 b/test/T_Dependency_Loops/grammar.cg3
index ab20672..a6431ff 100644
--- a/test/T_Dependency_Loops/grammar.cg3
+++ b/test/T_Dependency_Loops/grammar.cg3
@@ -1,5 +1,5 @@
 DELIMITERS = "<$.>" ;
-STRICT-TAGS += vis N @>N PRP @P< @ADVL ADV ;
+LIST-TAGS += vis N @>N PRP @P< @ADVL ADV ;
 
 MAP (vis) _S_DELIMITERS_ ;
 SETPARENT _S_DELIMITERS_ TO (@0 (*)) ;
diff --git a/test/T_External/run.pl b/test/T_External/run.pl
index 9bf9c09..2c8ec5f 100755
--- a/test/T_External/run.pl
+++ b/test/T_External/run.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
 use strict;
 use warnings;
 use Cwd qw(realpath);
diff --git a/test/T_JumpExecute/expected.txt b/test/T_JumpExecute/expected.txt
index 68346e7..fb91db1 100644
--- a/test/T_JumpExecute/expected.txt
+++ b/test/T_JumpExecute/expected.txt
@@ -1,19 +1,19 @@
 "<He>"
-	"he" <*> <NonMod> PRON PERS MASC NOM SG3 SUBJ @b @a @c
+	"he" <*> <NonMod> PRON PERS MASC NOM SG3 SUBJ @a @b @c
 "<ate>"
-	"eat" <SVO> <SV> V PAST VFIN @b @a @c
+	"eat" <SVO> <SV> V PAST VFIN @a @b @c
 "<a>"
-	"a" <Indef> DET CENTRAL ART SG @b @a @c
+	"a" <Indef> DET CENTRAL ART SG @a @b @c
 "<cow>"
-	"cow" N NOM SG @b @a @c
+	"cow" N NOM SG @a @b @c
 "<with>"
-	"with" PREP @b @a @c
+	"with" PREP @a @b @c
 "<biscuits>"
-	"biscuit" N NOM PL @b @a @c
+	"biscuit" N NOM PL @a @b @c
 "<and>"
-	"and" CC @b @a @c
+	"and" CC @a @b @c
 "<lemonade>"
-	"lemonade" <-Indef> N NOM SG @b @a @c
+	"lemonade" <-Indef> N NOM SG @a @b @c
 "<$.>"
-	"$." <<< @b @a @c
+	"$." <<< @a @b @c
 
diff --git a/test/T_MapAdd_Different/expected.txt b/test/T_MapAdd_Different/expected.txt
index 6cdc91c..ecd3eb7 100644
--- a/test/T_MapAdd_Different/expected.txt
+++ b/test/T_MapAdd_Different/expected.txt
@@ -1,6 +1,8 @@
 "<word>"
 	"word" notwanted
-	"matchme" wanted $tag £tag @tag ADD:8 ADD:9 ADD:10 MAP:12
+	"matchme" a b $tag c wanted d £tag e f @order-good ADD:6 ADD:7 ADD:12 ADD:13 ADD:14 MAP:16
 	"word" notmeeither
-	"matchme" wanted $tag £tag @mapped ADD:8 ADD:9 ADD:10 MAP:12
+	"matchme" a b $tag c wanted d £tag e f @order-skip ADD:6 ADD:7 ADD:12 ADD:13 ADD:14 MAP:16
+	"matchme" a b $tag c wanted d £tag e f @tag ADD:6 ADD:7 ADD:12 ADD:13 ADD:14 MAP:16
+	"matchme" a b $tag c wanted d £tag e f @mapped ADD:6 ADD:7 ADD:12 ADD:13 ADD:14 MAP:16
 
diff --git a/test/T_MapAdd_Different/grammar.cg3 b/test/T_MapAdd_Different/grammar.cg3
index 79b57d1..84d4766 100644
--- a/test/T_MapAdd_Different/grammar.cg3
+++ b/test/T_MapAdd_Different/grammar.cg3
@@ -3,10 +3,14 @@ DELIMITERS = "<$.>" ;
 
 LIST ASet = ".atc[hm]{2}.*"r ;
 
+ADD (@order-good) (/__d\ e\ f__/l) ;
+ADD (@order-skip) (/__d__f__/l) ;
+ADD (@order-bad) (/__d\ f__/l) ;
+
 SECTION
 
-ADD ($tag) ASet ;
-ADD (£tag) ASet ;
+ADD ($tag) BEFORE (c wanted d) ASet ;
+ADD (£tag) AFTER (c wanted d) ASet ;
 ADD (@tag) ASet ;
 
 MAP (@mapped) (£tag) ;
diff --git a/test/T_MapAdd_Different/grammar.cg3b.10043 b/test/T_MapAdd_Different/grammar.cg3b.10043
deleted file mode 100644
index 4efa53a..0000000
Binary files a/test/T_MapAdd_Different/grammar.cg3b.10043 and /dev/null differ
diff --git a/test/T_MapAdd_Different/input.txt b/test/T_MapAdd_Different/input.txt
index c60aa30..dad7ab3 100644
--- a/test/T_MapAdd_Different/input.txt
+++ b/test/T_MapAdd_Different/input.txt
@@ -1,4 +1,4 @@
 "<word>"
 	"word" notwanted
-	"matchme" wanted
+	"matchme" a b c wanted d e f
 	"word" notmeeither
diff --git a/test/T_Movement/expected.txt b/test/T_Movement/expected.txt
index cf147b2..2621d8d 100644
--- a/test/T_Movement/expected.txt
+++ b/test/T_Movement/expected.txt
@@ -1,7 +1,17 @@
+"<anchor>"
+	"anchor" #1->1
+"<target>"
+	"target" 1 #2->2
+"<target>"
+	"target" 2 #3->3
+"<target>"
+	"target" 3 #4->4
+"<$.>"
+
 "<word>"
 	"word" wanted third #1->6
 "<word>"
-	"word" wanted first #2->10
+	"word" wanted first #2->9
 "<word>"
 	"word" wanted seventh #3->8
 "<word>"
diff --git a/test/T_Movement/grammar.cg3 b/test/T_Movement/grammar.cg3
index d872341..f44df38 100644
--- a/test/T_Movement/grammar.cg3
+++ b/test/T_Movement/grammar.cg3
@@ -26,3 +26,5 @@ MOVE (seventh) AFTER (-1* (first)) ;
 
 # Switch
 SWITCH (fourth) WITH (*1 (eighth)) ;
+
+MOVE REPEAT ("target") AFTER (1* ("anchor") BARRIER ("target")) ;
diff --git a/test/T_Movement/grammar.cg3b.10043 b/test/T_Movement/grammar.cg3b.10043
deleted file mode 100644
index d533bf0..0000000
Binary files a/test/T_Movement/grammar.cg3b.10043 and /dev/null differ
diff --git a/test/T_Movement/input.txt b/test/T_Movement/input.txt
index 4c6397c..9d938e6 100644
--- a/test/T_Movement/input.txt
+++ b/test/T_Movement/input.txt
@@ -1,5 +1,15 @@
-"<word>"
-	"word" wanted first #1->4
+"<target>"
+	"target" 1
+"<target>"
+	"target" 2
+"<target>"
+	"target" 3
+"<anchor>"
+	"anchor"
+"<$.>"
+
+"<word>"
+	"word" wanted first #1->2
 "<word>"
 	"word" wanted second #2->0
 "<word>"
diff --git a/test/T_MweSplit/run.pl b/test/T_MweSplit/run.pl
index 9d31e1f..2c99795 100755
--- a/test/T_MweSplit/run.pl
+++ b/test/T_MweSplit/run.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
 use strict;
 use warnings;
 use Cwd qw(realpath);
@@ -14,7 +14,7 @@ if (!$binary_mwesplit || $binary_mwesplit eq '' || !(-x $binary_mwesplit)) {
 }
 
 `"$binary_mwesplit" < input.txt > output.txt 2>>stderr.txt`;
-`diff -B expected.txt output.txt >diff.txt`;
+`diff -ZB expected.txt output.txt >diff.txt`;
 
 if (-s "diff.txt") {
 	print STDERR "Fail.\n";
diff --git a/test/T_RelabelList/run.pl b/test/T_RelabelList/run.pl
index 96094bc..797e816 100755
--- a/test/T_RelabelList/run.pl
+++ b/test/T_RelabelList/run.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
 use strict;
 use warnings;
 use Cwd qw(realpath);
diff --git a/test/T_RelabelList_Apertium/run.pl b/test/T_RelabelList_Apertium/run.pl
index 8a2485c..9b10675 100755
--- a/test/T_RelabelList_Apertium/run.pl
+++ b/test/T_RelabelList_Apertium/run.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
 use strict;
 use warnings;
 use Cwd qw(realpath);
diff --git a/test/T_RelabelSet/run.pl b/test/T_RelabelSet/run.pl
index ec674b3..98cbeea 100755
--- a/test/T_RelabelSet/run.pl
+++ b/test/T_RelabelSet/run.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
 use strict;
 use warnings;
 use Cwd qw(realpath);
diff --git a/test/T_RemCohort/expected.txt b/test/T_RemCohort/expected.txt
index d3e812a..1f2c317 100644
--- a/test/T_RemCohort/expected.txt
+++ b/test/T_RemCohort/expected.txt
@@ -1,32 +1,36 @@
 "<word>"
-	"word" notwanted @stillhere ADD:17 ADD:17 ADD:17
-	"word" notmeeither @stillhere ADD:17 ADD:17 ADD:17
+	"word" notwanted @stillhere #1->0 ADD:20 ADD:20 ADD:20
+	"word" notmeeither @stillhere #1->0 ADD:20 ADD:20 ADD:20
 "<wordform1>"
-	"baseform1" tag1 @stillhere ADDCOHORT-BEFORE:12 ADD:17 ADD:17 ADD:17
+	"baseform1" tag1 @stillhere #2->2 ADDCOHORT-BEFORE:12 ADD:20 ADD:20 ADD:20
 "<wordform3>"
-	"baseform3" tag3 @stillhere ADDCOHORT-BEFORE:14 ADD:17 ADD:17 ADD:17
-	"baseform4" tag4 @stillhere ADDCOHORT-BEFORE:14 ADD:17 ADD:17 ADD:17
-	"baseform5" @stillhere ADDCOHORT-BEFORE:14 ADD:17 ADD:17 ADD:17
+	"baseform3" tag3 @stillhere #3->3 ADDCOHORT-BEFORE:14 ADD:20 ADD:20 ADD:20
+	"baseform4" tag4 @stillhere #3->3 ADDCOHORT-BEFORE:14 ADD:20 ADD:20 ADD:20
+	"baseform5" @stillhere #3->3 ADDCOHORT-BEFORE:14 ADD:20 ADD:20 ADD:20
+"<wordform4>"
+	"baseform4" tag4 @stillhere #4->4 ADDCOHORT-BEFORE:16 ADD:20 ADD:20 ADD:20
 ; "<word>"
-;	"word" notwanted REMCOHORT:16
-;	"matchme" wanted ADDCOHORT-BEFORE:12 ADDCOHORT-AFTER:13 ADDCOHORT-BEFORE:14 REMCOHORT:16
-;	"word" notmeeither REMCOHORT:16
+;	"word" notwanted REMCOHORT:19
+;	"matchme" wanted ADDCOHORT-BEFORE:12 ADDCOHORT-AFTER:13 ADDCOHORT-BEFORE:14 ADDCOHORT-BEFORE:16 ADDCOHORT-AFTER:17 REMCOHORT:19
+;	"word" notmeeither REMCOHORT:19
 "<(>"
-	"(" @stillhere ADD:17
+	"(" @stillhere #5->1 ADD:20
 "<inside>"
-	"inside" inside @stillhere ADD:17
+	"inside" inside @stillhere #6->1 ADD:20
 "<)>"
-	")" @stillhere ADD:17
+	")" @stillhere #7->1 ADD:20
 "<wordform2>"
-	"baseform2" tag2 @stillhere ADDCOHORT-AFTER:13 ADD:17 ADD:17 ADD:17
+	"baseform2" tag2 @stillhere #8->8 ADDCOHORT-AFTER:13 ADD:20 ADD:20 ADD:20
 "<word>"
-	"word" notwanted @stillhere @last-after-rem ADD:6 ADDCOHORT-AFTER:7 ADD:17 ADD:17 ADD:6 ADD:17
-	"word" notwanted @last-after-rem ADD:6 ADDCOHORT-AFTER:7 ADD:17 ADD:17 ADD:6 ADDCOHORT-AFTER:7 ADD:17
-	"word" notmeeither @last-after-rem @stillhere ADD:6 ADD:17 ADD:17 ADD:6 ADD:17
+	"word" notwanted @last-after-rem @stillhere #9->1 ADD:6 ADDCOHORT-AFTER:7 ADD:20 ADD:20 ADD:20
+	"word" notmeeither @last-after-rem @stillhere #9->1 ADD:6 ADD:20 ADD:20 ADD:20
 ; "<last>"
 ;	"last" last cohort @last-old ADD:4 REMCOHORT:5
+"<wordform5>"
+	"baseform5" tag5 @stillhere @last-after-rem #10->10 ADDCOHORT-AFTER:17 ADD:20 ADD:20 ADD:6 ADD:20
+	"baseform5" tag5 @stillhere #10->10 ADDCOHORT-AFTER:17 ADD:20 ADD:20 ADD:6 ADDCOHORT-AFTER:7 ADD:20
 ; "<new last>"
-;	"new last" new last cohort @last-after-add @stillhere @last-old ADDCOHORT-AFTER:7 ADD:8 ADD:17 ADD:17 ADD:4 REMCOHORT:5
+;	"new last" new last cohort @last-after-add @stillhere @last-old ADDCOHORT-AFTER:7 ADD:8 ADD:20 ADD:20 ADD:4 REMCOHORT:5
 "<new last>"
-	"new last" new last cohort @last-after-add @stillhere ADDCOHORT-AFTER:7 ADD:8 ADD:17
+	"new last" new last cohort @last-after-add @stillhere #11->11 ADDCOHORT-AFTER:7 ADD:8 ADD:20
 
diff --git a/test/T_RemCohort/grammar.cg3 b/test/T_RemCohort/grammar.cg3
index 8c6c8f2..bf53022 100644
--- a/test/T_RemCohort/grammar.cg3
+++ b/test/T_RemCohort/grammar.cg3
@@ -13,5 +13,8 @@ ADDCOHORT ("<wordform1>" "baseform1" tag1) BEFORE ("matchme") ;
 ADDCOHORT ("<wordform2>" "baseform2" tag2) AFTER ("matchme") ;
 ADDCOHORT ("<wordform3>" "baseform3" tag3 "baseform4" tag4 "baseform5") BEFORE ("matchme") ;
 
+ADDCOHORT ("<wordform4>" "baseform4" tag4) BEFORE WITHCHILD (*) ("matchme") ;
+ADDCOHORT ("<wordform5>" "baseform5" tag5) AFTER WITHCHILD (*) ("matchme") ;
+
 REMCOHORT ("matchme") ;
 ADD (@stillhere) (*) ;
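
The two new ADDCOHORT rules attach WITHCHILD (*) to the position keyword. Judging
from the dependency-annotated input and the expected output above, this appears
to position the new cohort relative to the matched cohort together with all of
its dependency children, rather than immediately adjacent to the match. A minimal
sketch with hypothetical wordforms, assuming that reading of WITHCHILD:

    # Plain BEFORE/AFTER inserts directly next to the matched "head" cohort;
    # with WITHCHILD (*) the insertion point is taken relative to "head"
    # together with all of its dependency children.
    ADDCOHORT ("<note>" "note" tag) BEFORE WITHCHILD (*) ("head") ;
    ADDCOHORT ("<note>" "note" tag) AFTER  WITHCHILD (*) ("head") ;
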
diff --git a/test/T_RemCohort/grammar.cg3b.10043 b/test/T_RemCohort/grammar.cg3b.10043
deleted file mode 100644
index f02d4c4..0000000
Binary files a/test/T_RemCohort/grammar.cg3b.10043 and /dev/null differ
diff --git a/test/T_RemCohort/input.txt b/test/T_RemCohort/input.txt
index 3c70c31..5d04c6e 100644
--- a/test/T_RemCohort/input.txt
+++ b/test/T_RemCohort/input.txt
@@ -1,18 +1,18 @@
 "<word>"
-	"word" notwanted
-	"word" notmeeither
+	"word" notwanted #1->0
+	"word" notmeeither #1->0
 "<word>"
-	"word" notwanted
-	"matchme" wanted
-	"word" notmeeither
+	"word" notwanted #2->1
+	"matchme" wanted #2->1
+	"word" notmeeither #2->1
 "<(>"
-	"("
+	"(" #3->2
 "<inside>"
-	"inside" inside
+	"inside" inside #4->2
 "<)>"
-	")"
+	")" #5->2
 "<word>"
-	"word" notwanted
-	"word" notmeeither
+	"word" notwanted #6->2
+	"word" notmeeither #6->2
 "<last>"
-	"last" last cohort
+	"last" last cohort #7->1
diff --git a/test/T_SetOps/expected.txt b/test/T_SetOps/expected.txt
index 6fce0a2..bbdf8db 100644
--- a/test/T_SetOps/expected.txt
+++ b/test/T_SetOps/expected.txt
@@ -1,22 +1,22 @@
 "<setops>"
-	"setop" a @union @difference @symdiff
-	"setop" b @union @difference @symdiff
+	"setop" a @union @except @difference @symdiff
+	"setop" b @union @except @difference @symdiff
 	"setop" c @union @intersect @product
 	"setop" d @union @intersect @product
 	"setop" e @union @symdiff
 	"setop" f @union @symdiff
 "<setops>"
-	"setop" a b @union @difference @symdiff
-	"setop" b c @union @symdiff @intersect @product
+	"setop" a b @union @except @difference @symdiff
+	"setop" b c @union @difference @symdiff @intersect @product
 	"setop" c d @union @intersect @product
 	"setop" d e @union @symdiff @intersect @product
 	"setop" e f @union @symdiff
-	"setop" f a @union @symdiff @product
+	"setop" f a @union @difference @symdiff @product
 "<setops>"
-	"setop" a e @union @symdiff @product
-	"setop" b f @union @symdiff @product
-	"setop" c a @union @symdiff @intersect @product
-	"setop" d b @union @symdiff @intersect @product
+	"setop" a e @union @difference @symdiff @product
+	"setop" b f @union @difference @symdiff @product
+	"setop" c a @union @difference @symdiff @intersect @product
+	"setop" d b @union @difference @symdiff @intersect @product
 	"setop" e c @union @symdiff @intersect @product
 	"setop" f d @union @symdiff @intersect @product
 
diff --git a/test/T_SetOps/grammar.cg3 b/test/T_SetOps/grammar.cg3
index ca209c2..5a037b8 100644
--- a/test/T_SetOps/grammar.cg3
+++ b/test/T_SetOps/grammar.cg3
@@ -4,7 +4,8 @@ LIST A = a b c d ;
 LIST B = c d e f ;
 
 ADD (@union) A OR B ;
-ADD (@difference) A - B ;
+ADD (@except) A - B ;
+ADD (@difference) A \ B ;
 ADD (@symdiff) A ∆ B ;
 ADD (@intersect) A ∩ B ;
 ADD (@product) A + B ;
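
The grammar now separates the pre-existing "-" operator (relabelled @except) from
a new "\" operator (@difference). A small sketch of just those two rules, with a
note on what the updated expected.txt shows:

    LIST A = a b c d ;
    LIST B = c d e f ;
    ADD (@except)     A - B ;   # existing "except" operator
    ADD (@difference) A \ B ;   # new set-difference operator under test
    # Per expected.txt, a reading carrying both "b" (in A only) and "c" (in both
    # A and B) now gains @difference but still not @except, so the two operators
    # diverge on readings that mix members of A and B.
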
diff --git a/test/T_SetOps/grammar.cg3b.10043 b/test/T_SetOps/grammar.cg3b.10043
deleted file mode 100644
index 27ed3dc..0000000
Binary files a/test/T_SetOps/grammar.cg3b.10043 and /dev/null differ
diff --git a/test/T_SubReadings_Apertium/run.pl b/test/T_SubReadings_Apertium/run.pl
index ca3b296..530350b 100755
--- a/test/T_SubReadings_Apertium/run.pl
+++ b/test/T_SubReadings_Apertium/run.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
 use strict;
 use warnings;
 use Cwd qw(realpath);
diff --git a/test/T_SubstituteNil/grammar.cg3 b/test/T_SubstituteNil/grammar.cg3
index 9bbc788..108cd1b 100644
--- a/test/T_SubstituteNil/grammar.cg3
+++ b/test/T_SubstituteNil/grammar.cg3
@@ -1,3 +1,6 @@
 DELIMITERS = "<$.>" ;
 
-SUBSTITUTE (wanted) (*) (wanted) ;
+SUBSTITUTE REPEAT (/hubba\\d/r) (*) (before) ;
+
+SECTION
+SUBSTITUTE ITERATE (/wanted\\d/r) (*) (before) ;
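
The rewritten test exercises SUBSTITUTE under the REPEAT and ITERATE rule flags
and uses /.../r regular-expression tags to match the numbered tags in the new
input. A minimal sketch mirroring those constructs with hypothetical tag names
(escaping copied from the grammar above; flag and replacement semantics inferred
from the test, not verified here):

    DELIMITERS = "<$.>" ;
    # /foo\\d/r is a regex tag matching a digit-suffixed tag (foo1, foo2, ...);
    # (*) as the replacement apparently adds no new tags, so the matched tags
    # are simply removed -- hence the test name T_SubstituteNil.
    SUBSTITUTE REPEAT  (/foo\\d/r) (*) (bar) ;
    SUBSTITUTE ITERATE (/baz\\d/r) (*) (bar) ;
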
diff --git a/test/T_SubstituteNil/grammar.cg3b.10043 b/test/T_SubstituteNil/grammar.cg3b.10043
deleted file mode 100644
index 59c82d1..0000000
Binary files a/test/T_SubstituteNil/grammar.cg3b.10043 and /dev/null differ
diff --git a/test/T_SubstituteNil/input.txt b/test/T_SubstituteNil/input.txt
index 217688b..e0846f8 100644
--- a/test/T_SubstituteNil/input.txt
+++ b/test/T_SubstituteNil/input.txt
@@ -1,4 +1,4 @@
 "<word>"
 	"word" notwanted
-	"word" before wanted after
+	"word" before wanted1 wanted2 wanted3 wanted4 hubba1 hubba2 hubba3 after
 	"word" notmeeither
diff --git a/test/clean.sh b/test/clean.sh
index c7aff37..1cd4770 100755
--- a/test/clean.sh
+++ b/test/clean.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 rm -f T*/diff*.txt
 rm -f T*/output*.txt
 rm -f T*/stdout*.txt
diff --git a/test/runall.pl b/test/runall.pl
index 20db8cc..1ae6997 100755
--- a/test/runall.pl
+++ b/test/runall.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
 use strict;
 use warnings;
 use Cwd qw(realpath);
@@ -9,6 +9,8 @@ chdir $bindir or die("Error: Could not change directory to $bindir !");
 
 # Search paths for the binary
 my @binlist = (
+	"../../build/VS15/src/Debug/vislcg3",
+	"../../build/VS15/src/Release/vislcg3",
 	"../../build/VS14/src/Debug/vislcg3",
 	"../../build/VS14/src/Release/vislcg3",
 	"../src/Debug/vislcg3",

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/cg3.git


