[cylc] 04/11: new upstream 7.4.0

Alastair McKinstry mckinstry at moszumanska.debian.org
Wed Jul 19 12:33:18 UTC 2017


This is an automated email from the git hooks/post-receive script.

mckinstry pushed a commit to branch debian/master
in repository cylc.

commit d03fabbd6ca238ac39561b6319231c66c0157eb4
Author: Alastair McKinstry <mckinstry at debian.org>
Date:   Tue Jun 20 13:24:16 2017 +0100

    new upstream 7.4.0
---
 .gitignore                                         |   26 +-
 CHANGES.md                                         |  127 +-
 INSTALL.md                                         |   77 +-
 Makefile                                           |    4 +-
 README.md                                          |   13 +-
 bin/cylc                                           |    4 -
 bin/cylc-cat-log                                   |   63 +-
 bin/cylc-cycle-point                               |   16 +
 bin/cylc-documentation                             |    2 +-
 bin/cylc-edit                                      |    2 +-
 bin/cylc-get-suite-config                          |    7 +-
 bin/cylc-graph                                     |   13 +-
 bin/cylc-gscan                                     |   16 +-
 bin/cylc-gui                                       |   29 +-
 bin/cylc-job-logs-retrieve                         |   81 --
 bin/cylc-job-submit                                |   56 -
 bin/cylc-jobs-kill                                 |    4 +-
 bin/cylc-jobs-poll                                 |    4 +-
 bin/cylc-jobs-submit                               |    4 +-
 bin/cylc-monitor                                   |   54 +-
 bin/cylc-profile-battery                           |   33 +
 bin/cylc-random                                    |   54 -
 bin/cylc-reset                                     |   22 +-
 bin/cylc-scan                                      |    9 +-
 bin/cylc-submit                                    |   81 +-
 bin/cylc-suite-state                               |    2 +-
 bin/cylc-trigger                                   |    7 +-
 bin/cylc-validate                                  |   19 +-
 bin/cylc-view                                      |   10 +-
 conf/cylc.lang                                     |   15 +-
 conf/cylc.xml                                      |   14 +-
 dev/profile-experiments/example                    |    4 +
 dev/suites/chains/suite.rc                         |    9 +-
 dev/suites/complex/suite.rc                        |   32 +-
 dev/suites/diamond/suite.rc                        |    9 +-
 dev/suites/integer/one/suite.rc                    |    8 +-
 doc/Makefile                                       |   70 +-
 doc/README                                         |   28 +-
 doc/gscanrc.tex                                    |   43 -
 doc/index.css                                      |   85 --
 doc/scripts/make-index.sh                          |  155 --
 doc/src/cylc-logo.png                              |  Bin 0 -> 3823 bytes
 doc/{ => src/cylc-user-guide}/Makefile             |   49 +-
 doc/{ => src/cylc-user-guide}/README               |    0
 doc/{ => src/cylc-user-guide}/abstract.tex         |    0
 doc/{ => src/cylc-user-guide}/cug-html.cfg         |    0
 doc/{ => src/cylc-user-guide}/cug-html.tex         |    0
 doc/{ => src/cylc-user-guide}/cug-pdf.tex          |    0
 doc/{ => src/cylc-user-guide}/cug.tex              |  838 +++--------
 doc/{ => src/cylc-user-guide}/gcylcrc.tex          |    2 +-
 doc/{ => src/cylc-user-guide}/gpl-3.0.tex          |    0
 .../png/orig/QuickStartA-ControlRunning.png        |  Bin
 .../png/orig/QuickStartA-ControlStalled.png        |  Bin
 .../png/orig/QuickStartA-ControlStart00.png        |  Bin
 .../png/orig/QuickStartA-ControlStart06.png        |  Bin
 .../graphics/png/orig/QuickStartA-ModelState.png   |  Bin
 .../graphics/png/orig/QuickStartA-graph18.png      |  Bin
 .../graphics/png/orig/QuickStartB-graph18.png      |  Bin
 .../graphics/png/orig/conditional-triggers.png     |  Bin
 .../graphics/png/orig/dep-eg-1.png                 |  Bin
 .../graphics/png/orig/dep-multi-cycle.png          |  Bin
 .../graphics/png/orig/dep-one-cycle.png            |  Bin
 .../graphics/png/orig/dep-two-cycles-linked.png    |  Bin
 .../graphics/png/orig/dep-two-cycles.png           |  Bin
 .../cylc-user-guide}/graphics/png/orig/ecox-1.png  |  Bin
 .../graphics/png/orig/eg2-dynamic.png              |  Bin
 .../graphics/png/orig/eg2-static.png               |  Bin
 .../png/orig/gcylc-graph-and-dot-views.png         |  Bin
 .../graphics/png/orig/gcylc-text-view.png          |  Bin
 .../cylc-user-guide}/graphics/png/orig/gscan.png   |  Bin
 .../graphics/png/orig/inherit-2.png                |  Bin
 .../graphics/png/orig/inherit-3.png                |  Bin
 .../graphics/png/orig/inherit-4.png                |  Bin
 .../graphics/png/orig/inherit-5.png                |  Bin
 .../graphics/png/orig/inherit-6.png                |  Bin
 .../graphics/png/orig/inherit-7.png                |  Bin
 .../graphics/png/orig/jinja2-ensemble-graph.png    |  Bin
 .../graphics/png/orig/jinja2-suite-graph.png       |  Bin
 .../cylc-user-guide}/graphics/png/orig/logo.png    |  Bin
 .../graphics/png/orig/niwa-colour-small.png        |  Bin
 .../graphics/png/orig/niwa-colour.png              |  Bin
 .../cylc-user-guide}/graphics/png/orig/params1.png |  Bin
 .../graphics/png/orig/satellite.png                |  Bin
 .../cylc-user-guide}/graphics/png/orig/suicide.png |  Bin
 .../graphics/png/orig/suite-log.png                |  Bin
 .../graphics/png/orig/suite-output.png             |  Bin
 .../graphics/png/orig/suiterc-jinja2.png           |  Bin
 .../graphics/png/orig/task-pool.png                |  Bin
 .../cylc-user-guide}/graphics/png/orig/test1.png   |  Bin
 .../cylc-user-guide}/graphics/png/orig/test2.png   |  Bin
 .../cylc-user-guide}/graphics/png/orig/test4.png   |  Bin
 .../cylc-user-guide}/graphics/png/orig/test5.png   |  Bin
 .../cylc-user-guide}/graphics/png/orig/test6.png   |  Bin
 .../graphics/png/orig/timeline-one-a.png           |  Bin
 .../graphics/png/orig/timeline-one-c.png           |  Bin
 .../graphics/png/orig/timeline-one.png             |  Bin
 .../graphics/png/orig/timeline-three.png           |  Bin
 .../png/orig/timeline-two-cycles-optimal.png       |  Bin
 .../graphics/png/orig/timeline-two.png             |  Bin
 .../graphics/png/orig/timeline-zero.png            |  Bin
 .../graphics/png/orig/tut-cyc-int.png              |  Bin
 .../graphics/png/orig/tut-four.png                 |  Bin
 .../graphics/png/orig/tut-hello-multi-1.png        |  Bin
 .../graphics/png/orig/tut-hello-multi-2.png        |  Bin
 .../graphics/png/orig/tut-hello-multi-3.png        |  Bin
 .../cylc-user-guide}/graphics/png/orig/tut-one.png |  Bin
 .../graphics/png/orig/tut-three.png                |  Bin
 .../cylc-user-guide}/graphics/png/orig/tut-two.png |  Bin
 .../png/scaled/QuickStartA-ControlRunning.png      |  Bin
 .../png/scaled/QuickStartA-ControlStalled.png      |  Bin
 .../png/scaled/QuickStartA-ControlStart00.png      |  Bin
 .../png/scaled/QuickStartA-ControlStart06.png      |  Bin
 .../graphics/png/scaled/QuickStartA-ModelState.png |  Bin
 .../graphics/png/scaled/QuickStartA-graph18.png    |  Bin
 .../graphics/png/scaled/QuickStartB-graph18.png    |  Bin
 .../graphics/png/scaled/conditional-triggers.png   |  Bin
 .../graphics/png/scaled/dep-eg-1.png               |  Bin
 .../graphics/png/scaled/dep-multi-cycle.png        |  Bin
 .../graphics/png/scaled/dep-one-cycle.png          |  Bin
 .../graphics/png/scaled/dep-two-cycles-linked.png  |  Bin
 .../graphics/png/scaled/dep-two-cycles.png         |  Bin
 .../graphics/png/scaled/ecox-1.png                 |  Bin
 .../graphics/png/scaled/eg2-dynamic.png            |  Bin
 .../graphics/png/scaled/eg2-static.png             |  Bin
 .../png/scaled/gcylc-graph-and-dot-views.png       |  Bin
 .../graphics/png/scaled/gcylc-text-view.png        |  Bin
 .../cylc-user-guide}/graphics/png/scaled/gscan.png |  Bin
 .../graphics/png/scaled/inherit-2.png              |  Bin
 .../graphics/png/scaled/inherit-3.png              |  Bin
 .../graphics/png/scaled/inherit-4.png              |  Bin
 .../graphics/png/scaled/inherit-5.png              |  Bin
 .../graphics/png/scaled/inherit-6.png              |  Bin
 .../graphics/png/scaled/inherit-7.png              |  Bin
 .../graphics/png/scaled/jinja2-ensemble-graph.png  |  Bin
 .../graphics/png/scaled/jinja2-suite-graph.png     |  Bin
 .../cylc-user-guide}/graphics/png/scaled/logo.png  |  Bin
 .../graphics/png/scaled/niwa-colour-small.png      |  Bin
 .../graphics/png/scaled/niwa-colour.png            |  Bin
 .../graphics/png/scaled/params1.png                |  Bin
 .../graphics/png/scaled/satellite.png              |  Bin
 .../graphics/png/scaled/suicide.png                |  Bin
 .../graphics/png/scaled/suite-log.png              |  Bin
 .../graphics/png/scaled/suite-output.png           |  Bin
 .../graphics/png/scaled/suiterc-jinja2.png         |  Bin
 .../graphics/png/scaled/task-pool.png              |  Bin
 .../cylc-user-guide}/graphics/png/scaled/test1.png |  Bin
 .../cylc-user-guide}/graphics/png/scaled/test2.png |  Bin
 .../cylc-user-guide}/graphics/png/scaled/test4.png |  Bin
 .../cylc-user-guide}/graphics/png/scaled/test5.png |  Bin
 .../cylc-user-guide}/graphics/png/scaled/test6.png |  Bin
 .../graphics/png/scaled/timeline-one-a.png         |  Bin
 .../graphics/png/scaled/timeline-one-c.png         |  Bin
 .../graphics/png/scaled/timeline-one.png           |  Bin
 .../graphics/png/scaled/timeline-three.png         |  Bin
 .../png/scaled/timeline-two-cycles-optimal.png     |  Bin
 .../graphics/png/scaled/timeline-two.png           |  Bin
 .../graphics/png/scaled/timeline-zero.png          |  Bin
 .../graphics/png/scaled/tut-cyc-int.png            |  Bin
 .../graphics/png/scaled/tut-four.png               |  Bin
 .../graphics/png/scaled/tut-hello-multi-1.png      |  Bin
 .../graphics/png/scaled/tut-hello-multi-2.png      |  Bin
 .../graphics/png/scaled/tut-hello-multi-3.png      |  Bin
 .../graphics/png/scaled/tut-one.png                |  Bin
 .../graphics/png/scaled/tut-three.png              |  Bin
 .../graphics/png/scaled/tut-two.png                |  Bin
 .../cylc-user-guide}/graphics/scale-images.sh      |    0
 .../cylc-user-guide}/graphics/vector/README.txt    |    0
 .../graphics/vector/eps/dep-multi-cycle.eps        |    0
 .../graphics/vector/eps/dep-one-cycle.eps          |    0
 .../graphics/vector/eps/dep-two-cycles-linked.eps  |    0
 .../graphics/vector/eps/dep-two-cycles.eps         |    0
 .../graphics/vector/eps/task-pool.eps              |    0
 .../graphics/vector/eps/timeline-one-a.eps         |    0
 .../graphics/vector/eps/timeline-one-c.eps         |    0
 .../graphics/vector/eps/timeline-one.eps           |    0
 .../graphics/vector/eps/timeline-three.eps         |    0
 .../vector/eps/timeline-two-cycles-optimal.eps     |    0
 .../graphics/vector/eps/timeline-two.eps           |    0
 .../graphics/vector/eps/timeline-zero.eps          |    0
 .../graphics/vector/svg/dep-multi-cycle.svg        |    0
 .../graphics/vector/svg/dep-one-cycle.svg          |    0
 .../graphics/vector/svg/dep-two-cycles-linked.svg  |    0
 .../graphics/vector/svg/dep-two-cycles.svg         |    0
 .../graphics/vector/svg/task-pool.svg              |    0
 .../graphics/vector/svg/timeline-one-a.svg         |    0
 .../graphics/vector/svg/timeline-one-c.svg         |    0
 .../graphics/vector/svg/timeline-one.svg           |    0
 .../graphics/vector/svg/timeline-three-0.svg       |    0
 .../graphics/vector/svg/timeline-three.svg         |    0
 .../vector/svg/timeline-two-cycles-optimal.svg     |    0
 .../graphics/vector/svg/timeline-two.svg           |    0
 .../graphics/vector/svg/timeline-zero.svg          |    0
 doc/{ => src/cylc-user-guide}/graphviz.txt         |    0
 doc/src/cylc-user-guide/gscanrc.tex                |   57 +
 doc/{ => src/cylc-user-guide}/scripts/get-deps.sh  |    7 +-
 .../cylc-user-guide}/scripts/make-commands.sh      |   27 +-
 doc/{ => src/cylc-user-guide}/scripts/make-html.sh |   23 +-
 doc/{ => src/cylc-user-guide}/scripts/make-pdf.sh  |   17 +-
 doc/{ => src/cylc-user-guide}/siterc.tex           |   38 +-
 doc/{ => src/cylc-user-guide}/suiterc.tex          |  161 ++-
 doc/{ => src/cylc-user-guide}/titlepic.sty         |    0
 doc/{ => src/cylc-user-guide}/titlepic/README      |    0
 .../cylc-user-guide}/titlepic/titlepic-manual.pdf  |  Bin
 .../cylc-user-guide}/titlepic/titlepic-manual.tex  |    0
 .../cylc-user-guide}/titlepic/titlepic.sty         |    0
 doc/src/index.css                                  |   86 ++
 doc/src/make-index.sh                              |  164 +++
 Makefile => doc/src/suite-design-guide/Makefile    |   15 +-
 doc/src/suite-design-guide/document.tex            |   11 +
 doc/src/suite-design-guide/efficiency.tex          |  355 +++++
 doc/src/suite-design-guide/general-principles.tex  |  682 +++++++++
 doc/src/suite-design-guide/introduction.tex        |   28 +
 doc/src/suite-design-guide/portable-suites.tex     |  493 +++++++
 doc/src/suite-design-guide/preamble.tex            |   87 ++
 .../resources/png/failure-recovery.png             |  Bin 0 -> 10003 bytes
 .../resources/png/fam-to-fam-1.png                 |  Bin 0 -> 54466 bytes
 .../resources/png/fam-to-fam-2.png                 |  Bin 0 -> 53147 bytes
 .../suite-design-guide/resources/png/param-1.png   |  Bin 0 -> 74693 bytes
 .../suite-design-guide/resources/png/param-2.png   |  Bin 0 -> 129846 bytes
 .../suite-design-guide/resources/png/rose-logo.png |  Bin 0 -> 19084 bytes
 .../suite-design-guide/resources/tex/cylc-logo.pdf |  Bin 0 -> 6186 bytes
 doc/src/suite-design-guide/roadmap.tex             |   75 +
 doc/src/suite-design-guide/style-guide.tex         |  281 ++++
 doc/src/suite-design-guide/title-page.tex          |   16 +
 examples/clock-expire/suite.rc                     |    7 +-
 examples/delayed-retry/suite.rc                    |    3 +-
 examples/family/extra/suite.rc                     |    2 +-
 examples/inherit/single/two/suite.rc               |    3 -
 examples/satellite/ext-trigger/suite.rc            |   20 +-
 examples/satellite/task-retries/suite.rc           |   21 +-
 .../task-states/bin/change-my-job-sub-method.sh    |    3 +-
 examples/task-states/suite.rc                      |   12 +-
 examples/tutorial/oneoff/retry/suite.rc            |    3 +-
 lib/cylc/batch_sys_handlers/background.py          |    1 -
 lib/cylc/batch_sys_handlers/sge.py                 |    7 +-
 lib/cylc/batch_sys_manager.py                      |  109 +-
 lib/cylc/cfgspec/globalcfg.py                      |   32 +-
 lib/cylc/cfgspec/gscan.py                          |    3 +-
 lib/cylc/cfgspec/suite.py                          |  118 +-
 lib/cylc/config.py                                 |  252 +++-
 lib/cylc/cycling/__init__.py                       |  147 +-
 lib/cylc/cycling/integer.py                        |   74 +-
 lib/cylc/cycling/iso8601.py                        |  176 ++-
 lib/cylc/graph_parser.py                           |   43 +-
 lib/cylc/gui/app_gcylc.py                          |  108 +-
 lib/cylc/gui/gpanel.py                             |   32 +-
 lib/cylc/gui/gscan.py                              |  393 +++--
 lib/cylc/gui/scanutil.py                           |  293 +++-
 lib/cylc/gui/tailer.py                             |    3 +-
 lib/cylc/gui/updater.py                            |   99 +-
 lib/cylc/gui/updater_dot.py                        |   48 +-
 lib/cylc/gui/updater_graph.py                      |    4 -
 lib/cylc/job.sh                                    |   52 +-
 lib/cylc/job_file.py                               |   99 +-
 lib/cylc/job_host.py                               |  207 ---
 lib/cylc/message_output.py                         |   51 -
 lib/cylc/mp_pool.py                                |   47 +-
 lib/cylc/network/__init__.py                       |    2 +-
 lib/cylc/network/https/base_client.py              |  396 +++--
 lib/cylc/network/https/client_reporter.py          |   14 +-
 lib/cylc/network/https/port_scan.py                |  236 +--
 lib/cylc/network/https/suite_broadcast_server.py   |   35 +-
 lib/cylc/network/https/suite_command_server.py     |   11 +-
 lib/cylc/network/https/suite_identifier_server.py  |    3 +-
 lib/cylc/network/https/suite_info_client.py        |    8 +-
 lib/cylc/network/https/suite_log_server.py         |   28 +-
 lib/cylc/network/https/suite_state_client.py       |   10 -
 lib/cylc/network/https/suite_state_server.py       |   24 +-
 lib/cylc/network/suite_state_client.py             |    2 +-
 lib/cylc/param_expand.py                           |   24 +-
 lib/cylc/profiling/git.py                          |    8 +-
 lib/cylc/profiling/profile.py                      |   30 +-
 lib/cylc/remote.py                                 |   22 +-
 lib/cylc/rundb.py                                  |    5 +
 lib/cylc/scheduler.py                              | 1090 ++++----------
 lib/cylc/scheduler_cli.py                          |    4 +-
 lib/cylc/suite_db_mgr.py                           |  392 +++++
 lib/cylc/suite_events.py                           |  182 +++
 lib/cylc/suite_logging.py                          |   10 +
 lib/cylc/suite_srv_files_mgr.py                    |    8 +-
 lib/cylc/task_action_timer.py                      |  105 ++
 lib/cylc/task_events_mgr.py                        |  845 +++++++++++
 lib/cylc/task_job_mgr.py                           |  948 ++++++++++++
 lib/cylc/task_outputs.py                           |  193 ++-
 lib/cylc/task_pool.py                              | 1073 ++++++--------
 lib/cylc/task_proxy.py                             | 1515 ++------------------
 lib/cylc/task_state.py                             |  331 +----
 lib/cylc/task_state_prop.py                        |   99 ++
 lib/cylc/task_trigger.py                           |   26 +-
 lib/cylc/taskdef.py                                |   11 +
 lib/cylc/time_parser.py                            |   25 +-
 lib/isodatetime/data.py                            |    2 +-
 lib/jinja2/__init__.py                             |   18 +-
 lib/jinja2/_compat.py                              |   22 +-
 lib/jinja2/_stringdefs.py                          |  127 +-
 lib/jinja2/bccache.py                              |    4 +-
 lib/jinja2/compiler.py                             | 1059 +++++++-------
 lib/jinja2/constants.py                            |    2 +-
 lib/jinja2/debug.py                                |   42 +-
 lib/jinja2/defaults.py                             |   13 +-
 lib/jinja2/environment.py                          |  175 ++-
 lib/jinja2/exceptions.py                           |    2 +-
 lib/jinja2/ext.py                                  |   35 +-
 lib/jinja2/filters.py                              |  213 ++-
 lib/jinja2/idtracking.py                           |  273 ++++
 lib/jinja2/lexer.py                                |   49 +-
 lib/jinja2/loaders.py                              |    6 +-
 lib/jinja2/meta.py                                 |   11 +-
 lib/jinja2/nodes.py                                |   96 +-
 lib/jinja2/optimizer.py                            |   25 +-
 lib/jinja2/parser.py                               |  109 +-
 lib/jinja2/runtime.py                              |  244 +++-
 lib/jinja2/sandbox.py                              |  128 +-
 lib/jinja2/tests.py                                |   16 +-
 lib/jinja2/utils.py                                |  117 +-
 lib/jinja2/visitor.py                              |    2 +-
 tests/api-suite-info/03-get-graph-raw-4/suite.rc   |    1 +
 .../api-suite-info/04-api-suite-info-unit-tests.t  |   18 +-
 tests/authentication/00-identity.t                 |   22 +-
 tests/authentication/01-description.t              |    2 +-
 tests/authentication/02-state-totals.t             |    2 +-
 tests/authentication/03-full-read.t                |    2 +-
 tests/authentication/04-shutdown.t                 |    2 +-
 tests/authentication/05-full-control.t             |    3 +-
 tests/authentication/06-suite-override.t           |    2 +-
 tests/authentication/07-sha-hash.t                 |    3 +-
 tests/authentication/11-suite2-stop-suite1.t       |    2 +-
 tests/cyclers/23-no_final_cycle_point.t            |    6 +-
 tests/cyclers/exclusions/graph.plain.ref           |    7 +
 tests/cyclers/exclusions/reference.log             |   41 +-
 tests/cyclers/exclusions/suite.rc                  |    5 +
 tests/cyclers/integer1/graph.plain.ref             |    2 +
 tests/cyclers/integer1/reference.log               |   94 +-
 tests/cyclers/integer1/suite.rc                    |    3 +
 tests/cylc-cat-log/00-local.t                      |    2 +-
 tests/cylc-cat-log/01-remote.t                     |    2 +-
 tests/cylc-get-config/00-simple/section2.stdout    |  240 ++--
 tests/cylc-get-config/04-dummy-mode-output.t       |   39 +-
 .../04-dummy-mode-output/reference.log             |    4 +
 .../cylc-get-config/04-dummy-mode-output/suite.rc  |   23 +-
 .../03-host-bool-override.t}                       |   24 +-
 tests/cylc-poll/03-poll-all/suite.rc               |   44 +-
 .../12-err-script.t => cylc-reset/02-output-1.t}   |   14 +-
 tests/cylc-reset/02-output-1/reference.log         |    6 +
 tests/cylc-reset/02-output-1/suite.rc              |   39 +
 .../12-err-script.t => cylc-reset/03-output-2.t}   |   27 +-
 tests/cylc-reset/03-output-2/reference.log         |    4 +
 tests/cylc-reset/03-output-2/suite.rc              |   30 +
 .../10-bad-syntax.t => cylc-scan/03-monitor.t}     |   29 +-
 .../reference-untz.log                             |   35 -
 tests/cylc-submit/00-bg.t                          |    2 +-
 tests/cylc-submit/11-multi.t                       |   63 +
 tests/database/00-simple.t                         |    9 +-
 tests/database/00-simple/schema.out                |    1 +
 tests/database/00-simple/select-inheritance.out    |    4 +
 tests/database/00-simple/select-task-events.out    |   12 +-
 tests/deprecations/00-all.t                        |    4 +
 tests/deprecations/00-all/suite.rc                 |    7 +-
 tests/events/08-task-event-handler-retry.t         |   12 +-
 tests/events/08-task-event-handler-retry/suite.rc  |    4 +-
 tests/events/10-task-event-job-logs-retrieve.t     |   12 +-
 .../events/11-cycle-task-event-job-logs-retrieve.t |    6 +-
 .../15-host-task-event-handler-retry-globalcfg.t   |    4 +-
 tests/events/20-suite-event-handlers.t             |    4 +-
 tests/events/20-suite-event-handlers/suite.rc      |    3 +-
 tests/events/32-task-event-job-logs-retrieve-2.t   |    2 +-
 tests/events/33-task-event-job-logs-retrieve-3.t   |    8 +-
 tests/graph_parser/00-unittests.t                  |    3 +-
 .../09-ref-graph.t}                                |   23 +-
 tests/graphing/09-ref-graph/graph.ref              |    7 +
 tests/graphing/09-ref-graph/suite.rc               |   14 +
 .../hold-release/20-reset-waiting-output/suite.rc  |    8 +-
 tests/job-file-trap/01-loadleveler.t               |   17 +-
 tests/job-file-trap/01-loadleveler/suite.rc        |    2 +-
 tests/job-file-trap/02-pipefail.t                  |    1 -
 tests/job-submission/05-activity-log.t             |    8 +-
 .../09-activity-log-host-bad-submit/suite.rc       |    2 +-
 tests/jobscript/00-torture/foo.ref-jobfile         |    2 +-
 tests/jobscript/00-torture/suite.rc                |    2 +-
 tests/jobscript/10-bad-syntax.t                    |   23 +-
 tests/jobscript/11-shell-ksh.t                     |    2 +-
 tests/jobscript/12-err-script.t                    |    2 +-
 .../jobscript/{10-bad-syntax.t => 14-sge-format.t} |   34 +-
 tests/jobscript/14-sge-format/suite.rc             |   13 +
 .../jobscript/{12-err-script.t => 15-semicolon.t}  |    8 +-
 tests/jobscript/15-semicolon/reference.log         |    4 +
 tests/jobscript/15-semicolon/suite.rc              |   17 +
 tests/jobscript/{12-err-script.t => 16-midfail.t}  |    7 +-
 tests/jobscript/16-midfail/reference.log           |    4 +
 tests/jobscript/16-midfail/suite.rc                |   18 +
 tests/jobscript/{12-err-script.t => 17-envfail.t}  |    9 +-
 tests/jobscript/17-envfail/reference.log           |    3 +
 tests/jobscript/17-envfail/suite.rc                |   10 +
 .../03-dummy-env.t}                                |   23 +-
 tests/modes/03-dummy-env/reference.log             |    3 +
 tests/modes/03-dummy-env/suite.rc                  |   18 +
 tests/modes/dummy/suite.rc                         |    4 +-
 tests/modes/simulation/suite.rc                    |    2 -
 .../01-basic.t}                                    |   33 +-
 tests/param_expand/01-basic/graph.ref              |   48 +
 tests/param_expand/01-basic/suite.rc               |   18 +
 tests/profile-battery/00-compatability.t           |   23 +-
 tests/reload/17-graphing-change.t                  |   12 +-
 tests/restart/15-state-to-db/cylc-suite-db.dump    |   24 +-
 tests/restart/24-upgrade-db-611/cylc-suite-db.dump |   27 +-
 tests/suite-state/00-polling.t                     |   17 +-
 tests/suite-state/01-polling.t                     |   17 +-
 tests/tutorial/cycling/reflogs/tut.five            |   60 -
 tests/tutorial/cycling/reflogs/tut.four            |   59 -
 tests/tutorial/cycling/reflogs/tut.one             |   50 -
 tests/tutorial/cycling/reflogs/tut.three           |   60 -
 tests/tutorial/cycling/reflogs/tut.two             |   50 -
 tests/tutorial/oneoff/reflogs/tut.basic            |   18 -
 tests/tutorial/oneoff/reflogs/tut.external         |   18 -
 tests/tutorial/oneoff/reflogs/tut.ftrigger1        |   35 -
 tests/tutorial/oneoff/reflogs/tut.ftrigger2        |   46 -
 tests/tutorial/oneoff/reflogs/tut.goodbye          |   27 -
 tests/tutorial/oneoff/reflogs/tut.inherit          |   27 -
 tests/tutorial/oneoff/reflogs/tut.jinja2           |   44 -
 tests/tutorial/oneoff/reflogs/tut.minimal          |   18 -
 tests/tutorial/oneoff/reflogs/tut.remote           |   18 -
 tests/tutorial/oneoff/reflogs/tut.retry            |   40 -
 tests/tutorial/oneoff/reflogs/tut.suicide          |   28 -
 .../04-builtin-suites.t}                           |   53 +-
 tests/validate/62-null-task-name.t                 |    5 +-
 ...-task-name.t => 63-collapse-secondary-parent.t} |   27 +-
 426 files changed, 11533 insertions(+), 8398 deletions(-)

diff --git a/.gitignore b/.gitignore
index 16735ca..46a5ac6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,16 +2,20 @@
 # python bytecode
 *.pyc
 
-# generated documentation
-doc/commands/
-doc/categories/
-doc/cylc.txt
-doc/commands.tex
-doc/graphics/png-scaled/
-doc/html/
-doc/pdf/
-doc/index.html
-doc/cylc-version.txt
+# Generated documentation files.
+# - User Guide.
+doc/src/cylc-user-guide/pdf
+doc/src/cylc-user-guide/html
+doc/src/cylc-user-guide/commands.tex
+doc/src/cylc-user-guide/cylc-version.txt
+# - Suite Design Guide.
+doc/src/suite-design-guide/*.aux
+doc/src/suite-design-guide/*.out
+doc/src/suite-design-guide/*.pdf
+doc/src/suite-design-guide/*.log
+doc/src/suite-design-guide/*.toc
+# Installed docs.
+doc/install/
 
 # VERSION FILE
 VERSION
@@ -37,3 +41,5 @@ conf/*/*.rc.processed
 # suite passphrases
 passphrase
 
+# profiling
+.profiling
diff --git a/CHANGES.md b/CHANGES.md
index 7e9c8e7..82030c4 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -4,6 +4,127 @@ For the full list of all changes for each release see [closed
 milestones](https://github.com/cylc/cylc/milestones?state=closed).
 
 -------------------------------------------------------------------------------
+## __cylc-7.4.0 (2017-05-16)__
+
+Enhancements and fixes.
+
+### Highlighted Changes
+
+[#2260](https://github.com/cylc/cylc/pull/2260): Open job logs in your text
+editor, from CLI (`cylc cat-log`) or GUI.
+
+[#2259](https://github.com/cylc/cylc/pull/2259): `cylc gscan` - various
+improvements: right-click menu is now for suite operations only; other items
+moved to a main menubar and toolbar (which can be hidden to retain gscan's
+popular minimalist look); added all suite stop options (was just the default
+clean stop); task-state colour-key popup updates in-place if theme changed; new
+collapse/expand-all toolbar buttons.
+
+[#2275](https://github.com/cylc/cylc/pull/2275): Pass suite and task URLs to
+event handlers.
+
+[#2272](https://github.com/cylc/cylc/pull/2272): Efficiency - reduce memory
+footprint.
+
+[#2157](https://github.com/cylc/cylc/pull/2157): 
+  * internal efficiency improvements
+  * allow reset of individual message outputs
+  * "cylc submit" can now submit families
+
+[#2244](https://github.com/cylc/cylc/pull/2244): Graph cycling configuration:
+multiple exclusion points.
+
+[#2240](https://github.com/cylc/cylc/pull/2240): Stepped integer parameters.
+
+### Fixes
+
+[#2269](https://github.com/cylc/cylc/pull/2269): Fix auto suite-polling tasks
+(i.e. inter-suite dependence graph syntax) - Broken in 7.3.0.
+
+[#2282](https://github.com/cylc/cylc/pull/2282): Fix global config processing
+of boolean settings - users could not override a site True setting to False.
+
+[#2279](https://github.com/cylc/cylc/pull/2279): Bundle Jinja2 2.9.6. (up from
+2.8) - fixes a known issue with Jinja2 "import with context".
+
+[#2255](https://github.com/cylc/cylc/pull/2255): Fix handling of suite script
+items that contain nothing but comments.
+
+[#2247](https://github.com/cylc/cylc/pull/2247): Allow `cylc graph --help`
+in the absence of an X environment.
+
+### Other Changes
+
+[#2270](https://github.com/cylc/cylc/pull/2270): Detect and fail null tasks in
+graph.
+
+[#2257](https://github.com/cylc/cylc/pull/2257): `cylc gscan` - graceful exit
+via Ctrl-C.
+
+[#2252](https://github.com/cylc/cylc/pull/2252): `ssh`: add `-Y` (X Forwarding)
+only if necessary.
+
+[#2245](https://github.com/cylc/cylc/pull/2245): SSL certificate: add serial
+number (issue number). This allows curl, browsers, etc. to connect to
+suite daemons.
+
+[#2265](https://github.com/cylc/cylc/pull/2265): `cylc gpanel` - restored
+sorting of items by suite name.
+
+[#2250](https://github.com/cylc/cylc/issues/2250): Updated installation docs
+for HTTPS-related requirements.
+
+-------------------------------------------------------------------------------
+## __cylc-7.3.0 (2017-04-10)__
+
+New Suite Design Guide, plus other enhancements and fixes.
+
+### Highlighted Changes
+
+[#2211](https://github.com/cylc/cylc/pull/2211): New comprehensive Suite Design
+Guide document to replace the outdated Suite Design section in the User Guide.
+
+[#2232](https://github.com/cylc/cylc/pull/2232): `cylc gscan` GUI: stop, hold,
+and release suites or groups of suites.
+
+[#2220](https://github.com/cylc/cylc/pull/2220): dummy and simulation mode improvements:
+ * new `dummy-local` mode runs dummy tasks as local background jobs (allows
+   dummy running other-site suites).
+ * proportional run length, if tasks configure an `execution time limit`
+ * single common `[simulation]` configuration section for dummy, dummy-local, and
+   simulation modes.
+ * dummy or simulated tasks can be made to fail at specific cycle points, and
+   for first-try only, or all tries.
+ * custom message outputs now work in simulation mode as well as the dummy modes.
+
+[#2218](https://github.com/cylc/cylc/pull/2218): fix error trapping in job
+scripts (degraded since job file refactoring in 7.1.1)
+
+[#2215](https://github.com/cylc/cylc/pull/2215): SGE batch system support -
+fixed formatting of directives with a space in the name.
+
+### Other Notable Changes
+
+[#2233](https://github.com/cylc/cylc/pull/2233): Upgraded the built-in example
+suites to cylc-7 syntax.
+
+[#2221](https://github.com/cylc/cylc/pull/2221): `cylc gui` GUI dot view - maintain
+user selection during update.
+
+[#2217](https://github.com/cylc/cylc/pull/2217): `cylc gscan` GUI - fix
+tracebacks emitted during suite initialization.
+
+[#2219](https://github.com/cylc/cylc/pull/2219): add `user at host` option to
+`cylc monitor` and `cylc gui`. Allows suite selection at startup using `cylc
+scan` output.
+
+[#2222](https://github.com/cylc/cylc/pull/2222): `cylc gui` GUI graph view -
+fixed right-click "view prerequisites" sub-menu.
+
+[#2213](https://github.com/cylc/cylc/pull/2213): Record family inheritance
+structure in the run database.
+
+-------------------------------------------------------------------------------
 ## __cylc-7.2.1 (2017-03-23)__
 
 Minor enhancements and fixes.
@@ -25,13 +146,13 @@ installation section.
 
 ### Other Notable Changes
 
-[#2191](https://github.com/cylc/cylc/pull/)2191: Clearer task prerequisites
+[#2191](https://github.com/cylc/cylc/pull/2191): Clearer task prerequisites
 print-out.
 
-[#2196](https://github.com/cylc/cylc/pull/2196): Removed the bundled external
+[#2197](https://github.com/cylc/cylc/pull/2197): Removed the bundled external
 OrderedDict package.
 
-[#2914](https://github.com/cylc/cylc/pull/2914): `cylc gscan` - better handling
+[#2194](https://github.com/cylc/cylc/pull/2194): `cylc gscan` - better handling
 of suites that are still initializing.
 
 -------------------------------------------------------------------------------
diff --git a/INSTALL.md b/INSTALL.md
index c76ca11..73eab75 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -3,51 +3,64 @@
 **See [The Cylc User Guide](https://cylc.github.io/cylc/documentation.html) for
 detailed instructions.**
 
-Note: *to run distributed suites cylc must be installed on task hosts as well as suite
-hosts.*
+Cylc must be installed on suite and task job hosts, although the external
+software packages (below) are not required on job hosts.
 
-### External Software Packages.
+### Required External Software Packages
 
-Several external packages required on suite hosts are not needed on task hosts:
-*graphviz*, and *pygraphviz*.  These should only need to be installed
-once, and then updated infrequently.
+These can be installed once on suite hosts and updated infrequently.
 
-### Installing Cylc Releases
+ * graphviz
+ * pygraphviz
 
-Download the latest release tarball from https://github.com/cylc/cylc/releases.
+### Installing Cylc
 
-Cylc releases should be installed in parallel under a top level `cylc`
-directory such as `/opt/cylc/` or `/home/admin/cylc/`.
+Download the latest tarball from https://github.com/cylc/cylc/releases.
+
+Successive Cylc releases should be installed side-by-side under a location such
+as `/opt`:
 
 ```bash
-cd /home/admin/cylc/
-tar xzf ~/Downloads/cylc-6.10.0.tar.gz
-cd cylc-6.10.0
+cd /opt
+tar xzf cylc-7.4.0.tar.gz
+# DO NOT CHANGE THE NAME OF THE UNPACKED CYLC SOURCE DIRECTORY.
+cd cylc-7.4.0
 export PATH=$PWD/bin:$PATH
-make  # (see below)
+make
 ```
 
-Cylc is accessed via a central wrapper script can select between installed
-versions. This allows long-running suites to stick with older cylc versions
-if necessary. The wrapper should be modified slightly to point to your
-local installation (see comments in-script) and then installed (once) in
-`$PATH` for users, e.g.:
+When you type `make`:
+  * A file called VERSION is created, containing the Cylc version number
+    * The version number is taken from the name of the parent directory: DO NOT
+      CHANGE THE NAME OF THE UNPACKED CYLC SOURCE DIRECTORY
+  * The Cylc documentation is generated from source and put in doc/install/
+
+Once installed, Cylc commands should be invoked via the supplied central
+wrapper script that selects between the available versions. This allows
+long-running suites (and their task jobs) to stick with older versions if
+necessary. The wrapper should be edited to point to the Cylc install location:
+
 ```bash
-cp admin/cylc-wrapper /usr/local/bin/cylc
+cp /opt/cylc-7.4.0/admin/cylc-wrapper /usr/local/bin/cylc
+# (now edit '/usr/local/bin/cylc' as per in-file instructions...)
 ```
 
-When you type `make`: 
-  * A file called VERSION will be created to hold the cylc version string,
-  e.g. "6.10.0".  This is taken from the name of the parent directory: *do not
-  change the name of the unpacked cylc source directory*.
-  * The Cylc User Guide will be generated from LaTeX source files (in PDF if
-  `pdflatex` is installed, and HTML if `tex4ht` and *ImageMagick* are
-  installed).
+Finally, make a symlink to the latest installed version:
+```bash
+ln -s /opt/cylc-7.4.0 /opt/cylc
+```
+(This will be the default version invoked by the wrapper if a specific version is not requested via `$CYLC_VERSION`.)
+
+### Installing The Documentation
+
+After running `make` you can copy the entire `doc/install` directory to a
+convenient location such as `/var/www/html/`, and update your Cylc site config 
+file to point to the intranet location.
 
 ### Cloning The Cylc Repository
 
-To get the latest bleeding-edge cylc version and participate in cylc
-development, fork [cylc on GitHub](https://github.com/cylc/cylc), clone your
-fork locally, develop changes locally in a new branch, then push the branch to
-your fork and issue a Pull Request to the cylc development team.  Please
-discuss proposed changes before you begin work, however.
+To participate in Cylc development fork [Cylc on
+GitHub](https://github.com/cylc/cylc) and clone it locally.  Changes should be
+developed in feature branches then pushed to your GitHub fork before issuing a
+Pull Request to the team. Please discuss proposed changes before you begin
+work.
diff --git a/Makefile b/Makefile
index 7577cf0..7012f10 100644
--- a/Makefile
+++ b/Makefile
@@ -14,12 +14,12 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-all: version docs
+all: version documentation
 
 version:
 	admin/create-version-file
 
-docs:
+documentation:
 	cd doc && $(MAKE)
 
 clean:
diff --git a/README.md b/README.md
index d171f87..d47d098 100644
--- a/README.md
+++ b/README.md
@@ -1,18 +1,15 @@
-# The Cylc Suite Engine
+# The Cylc Workflow Engine
 
 [![Build Status](https://travis-ci.org/cylc/cylc.svg?branch=master)](https://travis-ci.org/cylc/cylc)
 [![DOI](https://zenodo.org/badge/1836229.svg)](https://zenodo.org/badge/latestdoi/1836229)
 
-
-## A Workflow Engine and Meta-Scheduler
-
-Cylc specialises in continuous workflows of cycling (repeating) tasks such as
-those used in weather and climate forecasting and research, but it can also be
-used for non-cycling systems.
+Cylc (“silk”) orchestrates complex distributed suites of interdependent cycling
+tasks. It was originally designed for environmental forecasting systems at
+[NIWA](https://www.niwa.co.nz).
 
 ### Copyright and Terms of Use
 
-Copyright (C) 2008-2017 [NIWA](https://www.niwa.co.nz)
+Copyright (C) 2008-2017 NIWA
  
 Cylc is free software: you can redistribute it and/or modify it under the terms
 of the GNU General Public License as published by the Free Software Foundation,
diff --git a/bin/cylc b/bin/cylc
index 7600417..544f9c5 100755
--- a/bin/cylc
+++ b/bin/cylc
@@ -234,7 +234,6 @@ control_commands['checkpoint'] = ['checkpoint']
 utility_commands = OrderedDict()
 utility_commands['cycle-point'] = [
     'cycle-point', 'cyclepoint', 'datetime', 'cycletime']
-utility_commands['random'] = ['random', 'rnd']
 utility_commands['scp-transfer'] = ['scp-transfer']
 utility_commands['suite-state'] = ['suite-state']
 utility_commands['ls-checkpoints'] = ['ls-checkpoints']
@@ -282,7 +281,6 @@ task_commands['message'] = ['message', 'task-message']
 task_commands['jobs-kill'] = ['jobs-kill']
 task_commands['jobs-poll'] = ['jobs-poll']
 task_commands['jobs-submit'] = ['jobs-submit']
-task_commands['job-submit'] = ['job-submit']
 
 all_commands = OrderedDict()
 for dct in [
@@ -432,11 +430,9 @@ comsum['broadcast'] = 'Change suite [runtime] settings on the fly'
 comsum['jobs-kill'] = '(Internal) Kill task jobs'
 comsum['jobs-poll'] = '(Internal) Retrieve status for task jobs'
 comsum['jobs-submit'] = '(Internal) Submit task jobs'
-comsum['job-submit'] = '(Internal) Submit a job'
 
 # utility
 comsum['cycle-point'] = 'Cycle point arithmetic and filename templating'
-comsum['random'] = 'Generate a random integer within a given range'
 comsum['jobscript'] = 'Generate a task job script and print it to stdout'
 comsum['scp-transfer'] = 'Scp-based file transfer for cylc suites'
 comsum['suite-state'] = 'Query the task states in a suite'
diff --git a/bin/cylc-cat-log b/bin/cylc-cat-log
index 71a53bd..8e949c7 100755
--- a/bin/cylc-cat-log
+++ b/bin/cylc-cat-log
@@ -29,6 +29,8 @@ if remrun().execute():
     sys.exit(0)
 
 import os
+import re
+from tempfile import NamedTemporaryFile
 from pipes import quote
 import shlex
 from subprocess import Popen, PIPE
@@ -41,6 +43,7 @@ from cylc.suite_host import is_remote_host
 from cylc.suite_logging import get_logs
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from cylc.task_id import TaskID
+from parsec.fileparse import read_and_proc
 
 
 NAME_DEFAULT = "log"
@@ -123,6 +126,16 @@ def get_option_parser():
         help="Task job log only: List log directory on the job host",
         action="store_const", const=LIST_MODE_REMOTE, dest="list_mode")
 
+    parser.add_option(
+        "-g", "--geditor",
+        help="force use of the configured GUI editor.",
+        action="store_true", default=False, dest="geditor")
+
+    parser.add_option(
+        "-b", "--teditor",
+        help="force use of the configured Non-GUI editor.",
+        action="store_true", default=False, dest="editor")
+
     return parser
 
 
@@ -271,6 +284,32 @@ def main():
         else:
             owner, host = (None, user_at_host)
 
+    cylc_tmpdir = GLOBAL_CFG.get_tmpdir()
+    if options.geditor:
+            editor = GLOBAL_CFG.get(['editors', 'gui'])
+    elif options.editor:
+            editor = GLOBAL_CFG.get(['editors', 'terminal'])
+
+    if options.editor or options.geditor:
+        if os.path.exists(filename):
+            lines = read_and_proc(filename)
+        else:
+            print >> sys.stderr, 'No such file or directory: %s' % filename
+            sys.exit(1)
+
+        viewfile = NamedTemporaryFile(
+            suffix='.' + os.path.basename(filename),
+            prefix=suite.replace('/', '_') + '.', dir=cylc_tmpdir
+        )
+
+        for line in lines:
+            viewfile.write(line + '\n')
+        viewfile.seek(0, 0)
+
+        os.chmod(viewfile.name, 0400)
+
+        modtime1 = os.stat(viewfile.name).st_mtime
+
     # Construct the shell command
     commands = []
     if options.location_mode:
@@ -296,19 +335,23 @@ def main():
             cmd_tmpl = str(GLOBAL_CFG.get_host_item(
                 "local tail command template"))
             commands.append(shlex.split(cmd_tmpl % {"filename": filename}))
+    elif options.geditor or options.editor:
+        command_list = shlex.split(editor)
+        command_list.append(viewfile.name)
+        commands.append(command_list)
     else:
         commands.append(["cat", filename])
 
     # Deal with remote [user@]host
     if user_at_host:
-        ssh = str(GLOBAL_CFG.get_host_item(
-            "remote shell template", host, owner))
+        ssh = str(GLOBAL_CFG.get_host_item("ssh command", host, owner))
         for i, command in enumerate(commands):
             commands[i] = shlex.split(ssh) + ["-n", user_at_host] + command
 
     err = None
     for command in commands:
         stderr = PIPE
+
         if options.debug:
             sys.stderr.write(
                 " ".join([quote(item) for item in command]) + "\n")
@@ -316,8 +359,24 @@ def main():
         proc = Popen(command, stderr=stderr)
         err = proc.communicate()[1]
         ret_code = proc.wait()
+
+        if options.editor or options.geditor:
+            if ret_code != 0:
+                print >>sys.stderr, command, 'failed:', ret_code
+                sys.exit(1)
+
+            modtime2 = os.stat(viewfile.name).st_mtime
+
+            if modtime2 > modtime1:
+                print >> sys.stderr, (
+                    'WARNING: YOU HAVE EDITED A TEMPORARY READ_ONLY COPY : ')
+                print >> sys.stderr, viewfile.name
+
+            viewfile.close()
+
         if ret_code == 0:
             break
+
     if ret_code and err:
         sys.stderr.write(err)
     sys.exit(ret_code)
diff --git a/bin/cylc-cycle-point b/bin/cylc-cycle-point
index e295f1c..ece0811 100755
--- a/bin/cylc-cycle-point
+++ b/bin/cylc-cycle-point
@@ -97,6 +97,11 @@ def main():
         action="store", dest="offset")
 
     parser.add_option(
+        "--equal", metavar="POINT2",
+        help="Succeed if POINT2 is equal to POINT (format agnostic).",
+        action="store", dest="point2")
+
+    parser.add_option(
         "--template", metavar="TEMPLATE",
         help="Filename template string or variable",
         action="store", dest="template")
@@ -198,6 +203,17 @@ def main():
     except ValueError as exc:
         parser.error('ERROR: invalid cycle: %s' % exc)
 
+    if options.point2:
+        try:
+            cycle_point2 = iso_point_parser.parse(
+                options.point2, dump_as_parsed=(template is None))
+        except ValueError as exc:
+            parser.error('ERROR: invalid cycle: %s' % exc)
+        if cycle_point2 == cycle_point:
+            sys.exit(0)
+        else:
+            sys.exit(1)
+
     offset_props = {}
 
     if options.offsethours:
diff --git a/bin/cylc-documentation b/bin/cylc-documentation
index 4be7bd3..dd772b3 100755
--- a/bin/cylc-documentation
+++ b/bin/cylc-documentation
@@ -36,7 +36,7 @@ import sys
 for arg in sys.argv[1:]:
     if arg.startswith('--host=') or arg.startswith('--user='):
         from cylc.remote import remrun
-        if remrun().execute(force_required=True):
+        if remrun().execute(force_required=True, forward_x11=True):
             sys.exit(0)
 
 import os
diff --git a/bin/cylc-edit b/bin/cylc-edit
index 7682d4e..1049fb6 100755
--- a/bin/cylc-edit
+++ b/bin/cylc-edit
@@ -58,7 +58,7 @@ See also 'cylc [prep] view'."""
 
 import sys
 from cylc.remote import remrun
-if remrun().execute():
+if remrun().execute(forward_x11=True):
     sys.exit(0)
 
 import os
diff --git a/bin/cylc-get-suite-config b/bin/cylc-get-suite-config
index 3ef74d2..0e998f0 100755
--- a/bin/cylc-get-suite-config
+++ b/bin/cylc-get-suite-config
@@ -109,6 +109,11 @@ def main():
              "[DEPRECATED: use 'cylc list SUITE'].",
         action="store_true", default=False, dest="tasks")
 
+    parser.add_option(
+        "-u", "--run-mode",
+        help="Get config for suite run mode.", action="store", default="live",
+        dest="run_mode", choices=['live', 'dummy', 'simulation'])
+
     (options, args) = parser.parse_args()
     suite, suiterc = SuiteSrvFilesManager().parse_suite_arg(options, args[0])
 
@@ -120,7 +125,7 @@ def main():
     config = SuiteConfig(
         suite, suiterc,
         load_template_vars(options.templatevars, options.templatevars_file),
-        cli_initial_point_string=options.icp)
+        cli_initial_point_string=options.icp, run_mode=options.run_mode)
     if options.tasks:
         for task in config.get_task_name_list():
             print prefix + task
diff --git a/bin/cylc-graph b/bin/cylc-graph
index ba656e5..3feeea7 100755
--- a/bin/cylc-graph
+++ b/bin/cylc-graph
@@ -76,11 +76,14 @@ try:
     from xdot import DotWindow
     from cylc.cylc_xdot import (
         MyDotWindow, MyDotWindow2, get_reference_from_plain_format)
-except ImportError as exc:
+except (ImportError, RuntimeError) as exc:
     # Allow command help generation without a graphical environment.
     print >> sys.stderr, 'WARNING: no X environment? %s' % exc
+    no_x = True
+else:
+    no_x = False
 
-if remrun().execute():
+if remrun().execute(forward_x11=True):
     sys.exit(0)
 
 from cylc.option_parsers import CylcOptionParser as COP
@@ -204,6 +207,9 @@ def main():
 
     (options, args) = parser.parse_args()
 
+    if no_x:
+        sys.exit(1)
+
     if options.filename:
         if len(args) != 0:
             parser.error(
@@ -265,7 +271,8 @@ def main():
             if options.output_filename == "-":
                 sys.stdout.write(output_text)
             else:
-                open(options.output_filename).write(output_text)
+                with open(options.output_filename, 'w') as f:
+                    f.write(output_text)
         else:
             if options.output_filename == "-":
                 window.graph.draw(sys.stdout, format="plain", prog="dot")
diff --git a/bin/cylc-gscan b/bin/cylc-gscan
index 1c14652..2d87ce1 100755
--- a/bin/cylc-gscan
+++ b/bin/cylc-gscan
@@ -28,13 +28,18 @@ import sys
 if "--use-ssh" in sys.argv[1:]:
     sys.argv.remove("--use-ssh")
     from cylc.remote import remrun
-    if remrun().execute():
+    if remrun().execute(forward_x11=True):
         sys.exit(0)
 
 import gtk
 import warnings
 warnings.filterwarnings('ignore', 'use the new', Warning)
 
+gtk.settings_get_default().set_long_property(
+    "gtk-button-images", True, "main")
+gtk.settings_get_default().set_long_property(
+    "gtk-menu-images", True, "main")
+
 from cylc.gui.gscan import ScanApp
 from cylc.option_parsers import CylcOptionParser as COP
 from cylc.owner import USER
@@ -80,13 +85,18 @@ def main():
         options.patterns_owner = [r".*"]
     elif not options.patterns_owner:
         options.patterns_owner = [USER]
-    ScanApp(
+    scan_app = ScanApp(
         hosts=args,
         patterns_name=options.patterns_name,
         patterns_owner=options.patterns_owner,
         comms_timeout=options.comms_timeout,
         poll_interval=options.interval)
-    gtk.main()
+    try:
+        gtk.main()
+    except KeyboardInterrupt:
+        print >> sys.stderr, "Stopping..."
+        scan_app.window.destroy()
+        raise SystemExit(1)
 
 
 if __name__ == "__main__":
diff --git a/bin/cylc-gui b/bin/cylc-gui
index ddcdc11..23be9d6 100755
--- a/bin/cylc-gui
+++ b/bin/cylc-gui
@@ -16,11 +16,14 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-"""cylc gui [OPTIONS] [REG]
-gcylc [OPTIONS] [REG]
+"""cylc gui [OPTIONS] [REG] [USER_AT_HOST]
+gcylc [OPTIONS] [REG] [USER_AT_HOST]
 
 This is the cylc Graphical User Interface.
 
+The USER_AT_HOST argument allows suite selection by 'cylc scan' output:
+  cylc gui $(cylc scan | grep <suite_name>)
+
 Local suites can be opened and switched between from within gcylc. To connect
 to running remote suites (whose passphrase you have installed) you must
 currently use --host and/or --user on the gcylc command line.
@@ -39,7 +42,7 @@ import sys
 if '--use-ssh' in sys.argv[1:]:
     sys.argv.remove('--use-ssh')
     from cylc.remote import remrun
-    if remrun().execute():
+    if remrun().execute(forward_x11=True):
         sys.exit(0)
 
 import os
@@ -55,8 +58,11 @@ def main():
     sys.path.append(
         os.path.dirname(os.path.realpath(os.path.abspath(__file__))) + '/../')
 
-    parser = COP(__doc__, comms=True, noforce=True, jset=True,
-                 argdoc=[('[REG]', 'Suite name')])
+    parser = COP(
+        __doc__, comms=True, noforce=True, jset=True, argdoc=[
+            ('[REG]', 'Suite name'),
+            ('[USER_AT_HOST]', 'user at host:port, shorthand for --user, --host '
+             '& --port.')])
 
     parser.add_option(
         "-r", "--restricted",
@@ -90,10 +96,17 @@ def main():
     gtk.settings_get_default().set_long_property(
         "gtk-menu-images", True, "main")
 
-    if len(args) == 1:
+    suite = None
+    if len(args) > 0:
         suite = args[0]
-    else:
-        suite = None
+    if len(args) == 2:
+        try:
+            user_at_host, options.port = args[1].split(':')
+            options.owner, options.host = user_at_host.split('@')
+        except ValueError:
+            print >> sys.stderr, ('USER_AT_HOST must take the form '
+                                  '"user at host:port"')
+            sys.exit(1)
     app = ControlApp(
         suite, options.owner, options.host,
         options.port, options.comms_timeout,
diff --git a/bin/cylc-job-logs-retrieve b/bin/cylc-job-logs-retrieve
deleted file mode 100755
index 2f9aaef..0000000
--- a/bin/cylc-job-logs-retrieve
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2017 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""cylc [hook] job-logs-retrieve [OPTIONS] HOST:HOST-PATH LOCALHOST-PATH
-
-(This command is for internal use.)
-Retrieve logs from a remote host for a task job.
-
-"""
-
-
-import os
-from subprocess import check_call
-import shlex
-import sys
-import traceback
-
-from cylc.cfgspec.globalcfg import GLOBAL_CFG
-from cylc.option_parsers import CylcOptionParser as COP
-from cylc.wallclock import get_time_string_from_unix_time
-
-
-def main():
-    """CLI main."""
-    # Options and arguments
-    opt_parser = COP(__doc__, argdoc=[
-        ("HOST:HOST-PATH", "Path to remote job logs directory"),
-        ("LOCALHOST-PATH", "Path to local job logs directory"),
-    ])
-    opt_parser.add_option(
-        "--max-size",
-        help="Don't transfer any file larger than SIZE.",
-        action="store", default="10M", dest="max_size", metavar="SIZE")
-    opts, args = opt_parser.parse_args()
-
-    # Determine the remote shell template to use
-    source, target = args
-    source_auth, source_path = source.split(":", 1)
-    if "@" in source_auth:
-        source_owner, source_host = source_auth.split("@", 1)
-    else:
-        source_owner, source_host = (None, source_auth)
-    ssh_tmpl = str(GLOBAL_CFG.get_host_item(
-        "remote shell template", source_host, source_owner))
-
-    # Retrieve remote job logs
-    # N.B. "scp" does not have a "max-size" option.
-    check_call([
-        "rsync", "-a", "--rsh=" + ssh_tmpl, "--max-size=" + opts.max_size,
-        source + "/", target])
-
-    filenames = os.listdir(target)
-    if "job.out" not in filenames:
-        sys.exit("ERROR: job.out: file not found")
-    sys.stdout.write("%s:\n" % os.path.basename(sys.argv[0]))
-    for filename in filenames:
-        stat = os.stat(os.path.join(target, filename))
-        sys.stdout.write("%s\t%s\t%s\n" % (
-            get_time_string_from_unix_time(stat.st_mtime),
-            stat.st_size,
-            filename))
-
-    os.listdir(target)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/bin/cylc-job-submit b/bin/cylc-job-submit
deleted file mode 100755
index 75c94c0..0000000
--- a/bin/cylc-job-submit
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2017 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-"""cylc [task] job-submit [--remote-mode] JOB-FILE-PATH
-
-(This command is for internal use. Users should use "cylc submit".)
-Submit a job file.
-
-"""
-
-
-import sys
-from cylc.remote import remrun
-
-
-def main():
-    """CLI main."""
-    parser = COP(
-        __doc__,
-        argdoc=[("JOB-FILE-PATH", "the path of the job file")])
-    parser.add_option(
-        "--remote-mode",
-        help="Is this being run on a remote job host?",
-        action="store_true", dest="remote_mode", default=False)
-    opts, args = parser.parse_args()
-    ret_code, out, err, job_id = BATCH_SYS_MANAGER.job_submit(
-        args[0], opts.remote_mode)
-    if err:
-        sys.stderr.write(err)
-    if out:
-        sys.stdout.write(out)
-    if job_id:
-        sys.stdout.write(
-            "%s=%s\n" % (BATCH_SYS_MANAGER.CYLC_BATCH_SYS_JOB_ID, job_id))
-    sys.exit(ret_code)
-
-
-if __name__ == "__main__" and not remrun().execute():
-    from cylc.option_parsers import CylcOptionParser as COP
-    from cylc.batch_sys_manager import BATCH_SYS_MANAGER
-    main()
diff --git a/bin/cylc-jobs-kill b/bin/cylc-jobs-kill
index 8d8825a..452a243 100755
--- a/bin/cylc-jobs-kill
+++ b/bin/cylc-jobs-kill
@@ -35,10 +35,10 @@ def main():
         ("JOB-LOG-ROOT", "The log/job sub-directory for the suite"),
         ("[JOB-LOG-DIR ...]", "A point/name/submit_num sub-directory")])
     args = parser.parse_args()[1]
-    BATCH_SYS_MANAGER.jobs_kill(args[0], args[1:])
+    BatchSysManager().jobs_kill(args[0], args[1:])
 
 
 if __name__ == "__main__" and not remrun().execute():
     from cylc.option_parsers import CylcOptionParser as COP
-    from cylc.batch_sys_manager import BATCH_SYS_MANAGER
+    from cylc.batch_sys_manager import BatchSysManager
     main()
diff --git a/bin/cylc-jobs-poll b/bin/cylc-jobs-poll
index 8267ba0..476bbf9 100755
--- a/bin/cylc-jobs-poll
+++ b/bin/cylc-jobs-poll
@@ -34,10 +34,10 @@ def main():
         ("JOB-LOG-ROOT", "The log/job sub-directory for the suite"),
         ("[JOB-LOG-DIR ...]", "A point/name/submit_num sub-directory")])
     args = parser.parse_args()[1]
-    BATCH_SYS_MANAGER.jobs_poll(args[0], args[1:])
+    BatchSysManager().jobs_poll(args[0], args[1:])
 
 
 if __name__ == "__main__" and not remrun().execute():
     from cylc.option_parsers import CylcOptionParser as COP
-    from cylc.batch_sys_manager import BATCH_SYS_MANAGER
+    from cylc.batch_sys_manager import BatchSysManager
     main()
diff --git a/bin/cylc-jobs-submit b/bin/cylc-jobs-submit
index 98a5855..c710467 100755
--- a/bin/cylc-jobs-submit
+++ b/bin/cylc-jobs-submit
@@ -38,11 +38,11 @@ def main():
         help="Is this being run on a remote job host?",
         action="store_true", dest="remote_mode", default=False)
     opts, args = parser.parse_args()
-    BATCH_SYS_MANAGER.jobs_submit(
+    BatchSysManager().jobs_submit(
         args[0], args[1:], remote_mode=opts.remote_mode)
 
 
 if __name__ == "__main__" and not remrun().execute():
     from cylc.option_parsers import CylcOptionParser as COP
-    from cylc.batch_sys_manager import BATCH_SYS_MANAGER
+    from cylc.batch_sys_manager import BatchSysManager
     main()
diff --git a/bin/cylc-monitor b/bin/cylc-monitor
index 409aadd..3d5cd77 100755
--- a/bin/cylc-monitor
+++ b/bin/cylc-monitor
@@ -29,18 +29,18 @@ if '--use-ssh' in sys.argv[1:]:
     sys.exit("No '--use-ssh': this command requires a local terminal.")
 
 import os
-from time import sleep
+from time import sleep, time
 
 from parsec.OrderedDict import OrderedDict
 from cylc.option_parsers import CylcOptionParser as COP
 from cylc.network.suite_state_client import (
-    SUITE_STATUS_SPLIT_REC, get_suite_status_string, StateSummaryClient,
-    SuiteStillInitialisingError)
+    SUITE_STATUS_SPLIT_REC, get_suite_status_string, StateSummaryClient)
 from cylc.wallclock import get_time_string_from_unix_time
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from cylc.task_state import (
-    TaskState, TASK_STATUS_RUNAHEAD, TASK_STATUSES_ORDERED,
+    TASK_STATUS_RUNAHEAD, TASK_STATUSES_ORDERED,
     TASK_STATUSES_RESTRICTED)
+from cylc.task_state_prop import get_status_prop
 
 
 class SuiteMonitor(object):
@@ -49,8 +49,14 @@ class SuiteMonitor(object):
         self.parser = COP(
             """cylc [info] monitor [OPTIONS] ARGS
 
-A terminal-based live suite monitor.  Exit with 'Ctrl-C'.""",
-            comms=True, noforce=True)
+A terminal-based live suite monitor.  Exit with 'Ctrl-C'.
+
+The USER_AT_HOST argument allows suite selection by 'cylc scan' output:
+  cylc monitor $(cylc scan | grep <suite_name>)
+""",
+            argdoc=[('REG', 'Suite name'),
+                    ('[USER_AT_HOST]', 'user at host:port, shorthand for --user, '
+                     '--host & --port.')], comms=True, noforce=True)
 
         self.parser.add_option(
             "-a", "--align",
@@ -95,13 +101,22 @@ A terminal-based live suite monitor.  Exit with 'Ctrl-C'.""",
         (options, args) = self.parser.parse_args()
         suite = args[0]
 
+        if len(args) > 1:
+            try:
+                user_at_host, options.port = args[1].split(':')
+                options.owner, options.host = user_at_host.split('@')
+            except ValueError:
+                print >> sys.stderr, ('USER_AT_HOST must take the form '
+                                      '"user at host:port"')
+                sys.exit(1)
+
         client_name = os.path.basename(sys.argv[0])
         if options.restricted:
             client_name += " -r"
 
         legend = ''
         for state in TASK_STATUSES_ORDERED:
-            legend += TaskState.get_status_prop(state, 'ascii_ctrl')
+            legend += get_status_prop(state, 'ascii_ctrl')
         legend = legend.rstrip()
 
         len_header = sum(len(s) for s in TASK_STATUSES_ORDERED)
@@ -110,16 +125,25 @@ A terminal-based live suite monitor.  Exit with 'Ctrl-C'.""",
             suite, options.owner, options.host, options.port,
             options.comms_timeout)
 
+        is_cont = False
         while True:
+            if is_cont:
+                if options.once:
+                    break
+                else:
+                    sleep(float(options.update_interval))
+            is_cont = True
             try:
                 glbl, task_summaries, fam_summaries = (
                     self.pclient.get_suite_state_summary())
-            except SuiteStillInitialisingError as exc:
-                print str(exc)
             except Exception as exc:
                 print >> sys.stderr, "\033[1;37;41mERROR\033[0m", str(exc)
                 self.pclient.reset()
             else:
+                if not glbl:
+                    print >> sys.stderr, (
+                        "\033[1;37;41mWARNING\033[0m suite initialising")
+                    continue
                 states = [t["state"] for t in task_summaries.values() if (
                           "state" in t)]
                 n_tasks_total = len(states)
@@ -134,6 +158,8 @@ A terminal-based live suite monitor.  Exit with 'Ctrl-C'.""",
                 try:
                     updated_at = get_time_string_from_unix_time(
                         glbl['last_updated'])
+                except KeyError:
+                    updated_at = time()
                 except (TypeError, ValueError):
                     # Older suite.
                     updated_at = glbl['last_updated'].isoformat()
@@ -158,7 +184,7 @@ A terminal-based live suite monitor.  Exit with 'Ctrl-C'.""",
                     name_list.add(name)
                     if point_string not in task_info:
                         task_info[point_string] = {}
-                    task_info[point_string][name] = TaskState.get_status_prop(
+                    task_info[point_string][name] = get_status_prop(
                         state, 'ascii_ctrl', subst=name)
 
                 # Sort the tasks in each cycle point.
@@ -204,8 +230,7 @@ A terminal-based live suite monitor.  Exit with 'Ctrl-C'.""",
                         state_totals[state] += 1
                 for state, tot in state_totals.items():
                     subst = " %d " % tot
-                    summary += TaskState.get_status_prop(state,
-                                                         'ascii_ctrl', subst)
+                    summary += get_status_prop(state, 'ascii_ctrl', subst)
                 blit.append(summary)
 
                 # Print a divider line containing the suite status string.
@@ -245,11 +270,6 @@ A terminal-based live suite monitor.  Exit with 'Ctrl-C'.""",
                 for ix in indxs:
                     print blitlines[ix]
 
-            if options.once:
-                break
-            else:
-                sleep(float(options.update_interval))
-
 
 if __name__ == "__main__":
     monitor = SuiteMonitor()
diff --git a/bin/cylc-profile-battery b/bin/cylc-profile-battery
index 71b51b5..c9f25bb 100755
--- a/bin/cylc-profile-battery
+++ b/bin/cylc-profile-battery
@@ -24,6 +24,7 @@ import json
 import optparse
 import os
 import random
+import re
 import shutil
 import sys
 import tempfile
@@ -375,6 +376,38 @@ def install_experiments(experiment_ids, experiments, install_dir,
                 sdir, suite_dirs[key]['install dir'])
             run['suite dir'] = suite_dirs[key]['install dir']
 
+    # Global config sourcing.
+    os.mkdir(os.path.join(install_sdir, 'globalrc'))
+    for experiment in experiments:
+        for run in experiment['config']['runs']:
+            if 'globalrc' in run:
+                string = ''
+                for setting in run['globalrc']:
+                    indent = 0
+                    setting = re.split('[\[\]]+', setting.strip())
+                    for part in setting[:-1]:  # Key hierarchy.
+                        if not part:
+                            continue
+                        string += '%s%s%s%s\n' % (
+                            '    ' * indent,
+                            '[' * (indent + 1),
+                            part,
+                            ']' * (indent + 1)
+                        )
+                        indent += 1
+                    string += '%s%s\n' % ('    ' * indent, setting[-1])
+                hash_ = hashlib.sha256()
+                hash_.update(string)
+                dirname = os.path.join(install_sdir, 'globalrc',
+                                       hash_.hexdigest()[:10])
+                if not os.path.exists(dirname):
+                    # If an identical globalrc has been written do nothing.
+                    os.mkdir(dirname)
+                    with open(os.path.join(dirname, 'global.rc'),
+                              'w+') as globalrc_file:
+                        globalrc_file.write(string)
+                run['globalrc'] = dirname
+
 
 def get_experiments(experiment_names):
     """Returns a dictionary of experiment names against experiment ids (which
diff --git a/bin/cylc-random b/bin/cylc-random
deleted file mode 100755
index bb9b367..0000000
--- a/bin/cylc-random
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2017 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-"""cylc [util] random A B
-
-Generate a random integer in the range [A,B). This is just a command
-interface to Python's random.randrange() function.
-
-Arguments:
-   A     start of the range interval (inclusive)
-   B     end of the random range (exclusive, so must be > A)"""
-
-import os
-import sys
-import random
-from optparse import OptionParser
-
-import cylc.flags
-
-
-def main():
-    parser = OptionParser(__doc__)
-    (options, args) = parser.parse_args()
-
-    if len(args) != 2:
-        parser.error("Two integer arguments required")
-
-    start = int(args[0])
-    end = int(args[1])
-    print random.randrange(start, end)
-
-
-if __name__ == "__main__":
-    try:
-        main()
-    except Exception as exc:
-        if cylc.flags.debug:
-            raise
-        sys.exit(str(exc))
diff --git a/bin/cylc-reset b/bin/cylc-reset
index cff4731..fd9da3f 100755
--- a/bin/cylc-reset
+++ b/bin/cylc-reset
@@ -54,12 +54,25 @@ def main():
         "-s", "--state", metavar="STATE",
         help="Reset task state to STATE, can be %s" % (
             ', '.join(TASK_STATUSES_CAN_RESET_TO)),
-        action="store", default=None, dest="state")
+        choices=list(TASK_STATUSES_CAN_RESET_TO),
+        action="store", dest="state")
+
+    parser.add_option(
+        "--output", "-O",
+        metavar="OUTPUT",
+        help=("Find task output by message string or trigger string, " +
+              "set complete or incomplete with !OUTPUT, " +
+              "'*' to set all complete, '!*' to set all incomplete. " +
+              "Can be used more than once to reset multiple task outputs."),
+        action="append", default=[], dest="outputs")
 
     options, args = parser.parse_args()
 
     suite = args.pop(0)
 
+    if not options.state and not options.outputs:
+        parser.error("Neither --state=STATE nor --output=OUTPUT is set")
+
     if options.state == "spawn":
         # Back compat.
         sys.stderr.write(
@@ -72,8 +85,8 @@ def main():
                 exc.filename = cmd
             raise SystemExit(exc)
 
-    if options.state not in TASK_STATUSES_CAN_RESET_TO:
-        parser.error("Illegal STATE value: " + options.state)
+    if not options.state:
+        options.state = ''
 
     prompt('Reset task(s) %s in %s' % (args, suite), options.force)
     pclient = SuiteCommandClient(
@@ -82,7 +95,8 @@ def main():
         print_uuid=options.print_uuid)
     items = parser.parse_multitask_compat(options, args)
     pclient.put_command(
-        'reset_task_states', items=items, state=options.state)
+        'reset_task_states', items=items, state=options.state,
+        outputs=options.outputs)
 
 
 if __name__ == "__main__":
diff --git a/bin/cylc-scan b/bin/cylc-scan
index bb5ba77..2bd868b 100755
--- a/bin/cylc-scan
+++ b/bin/cylc-scan
@@ -52,7 +52,8 @@ from cylc.network.port_scan import scan_all
 from cylc.option_parsers import CylcOptionParser as COP
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from cylc.owner import USER
-from cylc.task_state import TaskState, TASK_STATUSES_ORDERED
+from cylc.task_state import TASK_STATUSES_ORDERED
+from cylc.task_state_prop import get_status_prop
 
 
 NO_BOLD = False
@@ -201,7 +202,7 @@ def main():
     if options.color:
         n_states = len(TASK_STATUSES_ORDERED)
         for index, state in enumerate(TASK_STATUSES_ORDERED):
-            state_legend += TaskState.get_status_prop(state, 'ascii_ctrl')
+            state_legend += get_status_prop(state, 'ascii_ctrl')
             if index == n_states / 2:
                 state_legend += "\n"
         state_legend = state_legend.rstrip()
@@ -305,7 +306,7 @@ def get_point_state_count_lines(state_count_totals, state_count_cycles,
     for state, tot in sorted(state_count_totals.items()):
         if use_color:
             subst = " %d " % tot
-            line += TaskState.get_status_prop(state, 'ascii_ctrl', subst)
+            line += get_status_prop(state, 'ascii_ctrl', subst)
         else:
             line += '%s:%d ' % (state, tot)
     yield ("", line.strip())
@@ -315,7 +316,7 @@ def get_point_state_count_lines(state_count_totals, state_count_cycles,
         for state, tot in sorted(state_count_cycles[point_string].items()):
             if use_color:
                 subst = " %d " % tot
-                line += TaskState.get_status_prop(state, 'ascii_ctrl', subst)
+                line += get_status_prop(state, 'ascii_ctrl', subst)
             else:
                 line += '%s:%d ' % (state, tot)
         yield (point_string, line.strip())
diff --git a/bin/cylc-submit b/bin/cylc-submit
index 047f5d6..80e2681 100755
--- a/bin/cylc-submit
+++ b/bin/cylc-submit
@@ -40,13 +40,13 @@ from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from cylc.config import SuiteConfig
 from cylc.cycling.loader import get_point
 import cylc.flags
-from cylc.get_task_proxy import get_task_proxy
-from cylc.job_file import JobFile
-from cylc.job_host import RemoteJobHostManager
 from cylc.mp_pool import SuiteProcPool
 from cylc.option_parsers import CylcOptionParser as COP
+from cylc.suite_db_mgr import SuiteDatabaseManager
 from cylc.suite_srv_files_mgr import SuiteSrvFilesManager
 from cylc.task_id import TaskID
+from cylc.task_job_mgr import TaskJobManager
+from cylc.task_proxy import TaskProxy
 from cylc.task_state import TASK_STATUS_SUBMIT_FAILED
 from cylc.templatevars import load_template_vars
 import cylc.version  # Ensures '$CYLC_VERSION' is set.
@@ -64,7 +64,7 @@ def main():
     parser = COP(
         __doc__, jset=True, icp=True,
         argdoc=[("REG", "Suite name"),
-                ("TASK", "Target task (" + TaskID.SYNTAX + ")")])
+                ("TASK [...]", "Family or task ID (%s)" % TaskID.SYNTAX)])
     parser.set_defaults(sched=False, dry_run=False)
     parser.add_option(
         "-d", "--dry-run",
@@ -73,26 +73,37 @@ def main():
     (options, args) = parser.parse_args()
     if options.debug:
         cylc.flags.debug = True
-    suite, task_id = args
-    if not TaskID.is_valid_id(task_id):
-        sys.exit("Invalid task ID " + task_id)
-    suiterc = SuiteSrvFilesManager().get_suite_rc(suite)
+    suite = args.pop(0)
+    for arg in args:
+        if not TaskID.is_valid_id(arg):
+            sys.exit("Invalid task ID %s" % arg)
+    suite_srv_mgr = SuiteSrvFilesManager()
+    suiterc = suite_srv_mgr.get_suite_rc(suite)
     suite_dir = os.path.dirname(suiterc)
     # For user-defined batch system handlers
     sys.path.append(os.path.join(suite_dir, 'python'))
-    suite_run_dir = GLOBAL_CFG.get_derived_host_item(
-        suite, 'suite run directory')
 
-    # load suite config
+    # Load suite config and tasks
     config = SuiteConfig.get_inst(
         suite, suiterc,
         load_template_vars(options.templatevars, options.templatevars_file),
         cli_initial_point_string=options.icp)
-
+    itasks = []
+    for arg in args:
+        name_str, point_str = TaskID.split(arg)
+        taskdefs = config.find_taskdefs(name_str)
+        if not taskdefs:
+            sys.exit("No task found for %s" % arg)
+        for taskdef in taskdefs:
+            itasks.append(TaskProxy(
+                taskdef, get_point(point_str).standardise(), is_startup=True))
+
+    # Initialise job submit environment
     GLOBAL_CFG.create_cylc_run_tree(suite)
-
-    RemoteJobHostManager.get_inst().single_task_mode = True
-    JobFile.get_inst().set_suite_env({
+    task_job_mgr = TaskJobManager(
+        suite, SuiteProcPool(), SuiteDatabaseManager(), suite_srv_mgr)
+    task_job_mgr.single_task_mode = True
+    task_job_mgr.job_file_writer.set_suite_env({
         'CYLC_UTC': str(config.cfg['cylc']['UTC mode']),
         'CYLC_DEBUG': str(cylc.flags.debug),
         'CYLC_VERBOSE': str(cylc.flags.verbose),
@@ -104,27 +115,29 @@ def main():
             config.cfg['scheduling']['final cycle point']),
     })
 
-    task_name, point_string = TaskID.split(task_id)
-    point = get_point(point_string).standardise()
-    # Try to get a graphed task of the given name.
-    itask = get_task_proxy(task_name, point, is_startup=True)
-
-    if itask.prep_submit(dry_run=options.dry_run) is None:
-        sys.exit(1)
+    ret_code = 0
     if options.dry_run:
-        print "JOB SCRIPT=%s" % itask.get_job_log_path(
-            itask.HEAD_MODE_LOCAL, tail=itask.JOB_FILE_BASE)
+        task_job_mgr.prep_submit_task_jobs(suite, itasks, dry_run=True)
+        for itask in itasks:
+            if itask.local_job_file_path:
+                print('JOB SCRIPT=%s' % itask.local_job_file_path)
+            else:
+                print >> sys.stderr, (
+                    'Unable to prepare job file for %s' % itask.identity)
+                ret_code = 1
     else:
-        proc_pool = SuiteProcPool.get_inst(pool_size=1)
-        itask.submit()
-        while proc_pool.results:
-            proc_pool.handle_results_async()
-        proc_pool.close()
-        proc_pool.join()
-        if itask.summary['submit_method_id'] is not None:
-            print 'Job ID:', itask.summary['submit_method_id']
-
-    sys.exit(itask.state.status == TASK_STATUS_SUBMIT_FAILED)
+        task_job_mgr.submit_task_jobs(suite, itasks)
+        while task_job_mgr.proc_pool.results:
+            task_job_mgr.proc_pool.handle_results_async()
+        task_job_mgr.proc_pool.close()
+        task_job_mgr.proc_pool.join()
+        for itask in itasks:
+            if itask.summary['submit_method_id'] is not None:
+                print('[%s] Job ID: %s' % (
+                    itask.identity, itask.summary['submit_method_id']))
+            if itask.state.status == TASK_STATUS_SUBMIT_FAILED:
+                ret_code = 1
+    sys.exit(ret_code)
 
 
 if __name__ == "__main__":
diff --git a/bin/cylc-suite-state b/bin/cylc-suite-state
index dec2032..886d9f9 100755
--- a/bin/cylc-suite-state
+++ b/bin/cylc-suite-state
@@ -188,7 +188,7 @@ def main():
         action="store", dest="msg", default=None)
 
     SuitePoller.add_to_cmd_options(parser)
-    (options, args) = parser.parse_args(remove_opts=["--db", "--debug"])
+    (options, args) = parser.parse_args(remove_opts=["--db"])
 
     suite = args[0]
 
diff --git a/bin/cylc-trigger b/bin/cylc-trigger
index f61ba39..a00fc96 100755
--- a/bin/cylc-trigger
+++ b/bin/cylc-trigger
@@ -38,7 +38,7 @@ if '--host' in sys.argv[1:] and '--edit' in sys.argv[1:]:
 if '--use-ssh' in sys.argv[1:]:
     sys.argv.remove('--use-ssh')
     from cylc.remote import remrun
-    if remrun().execute(force_required=True):
+    if remrun().execute(force_required=True, forward_x11=True):
         sys.exit(0)
 
 import re
@@ -102,14 +102,11 @@ def main():
 
         # Get the job filename from the suite daemon - the task cycle point may
         # need standardising to the suite cycle point format.
-        jobfile_path, compat = info_client.get_info(
+        jobfile_path = info_client.get_info(
             'get_task_jobfile_path', task_id=task_id)
         if not jobfile_path:
             sys.exit('ERROR: task not found')
 
-        if isinstance(jobfile_path, bool):
-            jobfile_path = compat
-
         # Note: localhost time and file system time may be out of sync,
         #       so the safe way to detect whether a new file is modified
         #       or is to detect whether time stamp has changed or not.
diff --git a/bin/cylc-validate b/bin/cylc-validate
index 682cf92..979c7f7 100755
--- a/bin/cylc-validate
+++ b/bin/cylc-validate
@@ -42,7 +42,7 @@ from cylc.profiler import Profiler
 
 
 def main():
-
+    """cylc validate CLI."""
     parser = COP(__doc__, jset=True, prep=True, icp=True)
 
     parser.add_option(
@@ -62,6 +62,11 @@ def main():
         "--profile", help="Output profiling (performance) information",
         action="store_true", default=False, dest="profile_mode")
 
+    parser.add_option(
+        "-u", "--run-mode", help="Validate for run mode.", action="store",
+        default="live", dest="run_mode",
+        choices=['live', 'dummy', 'dummy-local', 'simulation'])
+
     (options, args) = parser.parse_args()
 
     profiler = Profiler(options.profile_mode)
@@ -73,7 +78,7 @@ def main():
         suite, suiterc,
         load_template_vars(options.templatevars, options.templatevars_file),
         cli_initial_point_string=options.icp,
-        validation=True, strict=options.strict,
+        validation=True, strict=options.strict, run_mode=options.run_mode,
         output_fname=options.output,
         mem_log_func=profiler.log_memory)
 
@@ -85,19 +90,15 @@ def main():
     for name in cfg.taskdefs.keys():
         try:
             itask = TaskProxy(
-                cfg.taskdefs[name],
-                cfg.start_point,
-                is_startup=True,
-                validate_mode=True)
+                cfg.taskdefs[name], cfg.start_point, is_startup=True)
         except Exception as exc:
             print >> sys.stderr, str(exc)
             raise SuiteConfigError(
                 'ERROR, failed to instantiate task %s' % name)
         if itask.point is None:
             if cylc.flags.verbose:
-                print >> sys.stderr, (
-                    " + Task out of bounds for " + str(cfg.start_point) +
-                    ": " + itask.name)
+                print >> sys.stderr, (" + Task out of bounds for %s: %s" % (
+                    cfg.start_point, itask.tdef.name))
             continue
 
         # Warn for purely-implicit-cycling tasks (these are deprecated).
diff --git a/bin/cylc-view b/bin/cylc-view
index c4a202d..23c1487 100755
--- a/bin/cylc-view
+++ b/bin/cylc-view
@@ -33,12 +33,12 @@ See also 'cylc [prep] edit'."""
 
 import sys
 from cylc.remote import remrun
-if remrun().execute():
+if remrun().execute(forward_x11=True):
     sys.exit(0)
 
 import os
-import re
 from tempfile import NamedTemporaryFile
+import shlex
 import subprocess
 
 import cylc.flags
@@ -135,7 +135,9 @@ def main():
 
     # write to a temporary file
     viewfile = NamedTemporaryFile(
-        suffix=".suite.rc", prefix=suite + '.', dir=cylc_tmpdir)
+        suffix=".suite.rc", prefix=suite.replace('/', '_') + '.',
+        dir=cylc_tmpdir
+    )
     for line in lines:
         viewfile.write(line + '\n')
     viewfile.seek(0, 0)
@@ -148,7 +150,7 @@ def main():
     modtime1 = os.stat(viewfile.name).st_mtime
 
     # in case editor has options, e.g. 'emacs -nw':
-    command_list = re.split(' ', editor)
+    command_list = shlex.split(editor)
     command_list.append(viewfile.name)
     command = ' '.join(command_list)
     # THIS BLOCKS UNTIL THE COMMAND COMPLETES
diff --git a/conf/cylc.lang b/conf/cylc.lang
index 0768d55..4c0e43a 100644
--- a/conf/cylc.lang
+++ b/conf/cylc.lang
@@ -110,14 +110,19 @@
         <keyword>started handler</keyword>
         <keyword>stalled handler</keyword>
         <keyword>simulation mode suite timeout</keyword>
-        <keyword>simulate failure</keyword>
+        <keyword>disable suite event handlers</keyword>
+        <keyword>default run length</keyword>
+        <keyword>speedup factor</keyword>
+        <keyword>time limit buffer</keyword>
+        <keyword>fail cycle points</keyword>
+        <keyword>fail try 1 only</keyword>
+        <keyword>disable task event handlers</keyword>
         <keyword>shutdown handler</keyword>
         <keyword>shell</keyword>
         <keyword>sequential</keyword>
         <keyword>script</keyword>
         <keyword>runahead limit</keyword>
         <keyword>run-dir</keyword>
-        <keyword>run time range</keyword>
         <keyword>root</keyword>
         <keyword>retry handler</keyword>
         <keyword>retrieve job logs retry delays</keyword>
@@ -174,12 +179,6 @@
         <keyword>exclude</keyword>
         <keyword>env-script</keyword>
         <keyword>enable resurrection</keyword>
-        <keyword>dummy mode suite timeout</keyword>
-        <keyword>disable task event hooks</keyword>
-        <keyword>disable suite event hooks</keyword>
-        <keyword>disable retries</keyword>
-        <keyword>disable pre-script</keyword>
-        <keyword>disable post-script</keyword>
         <keyword>disable automatic shutdown</keyword>
         <keyword>description</keyword>
         <keyword>default node attributes</keyword>
diff --git a/conf/cylc.xml b/conf/cylc.xml
index c4b4325..d7a5879 100644
--- a/conf/cylc.xml
+++ b/conf/cylc.xml
@@ -37,14 +37,19 @@
         <RegExpr attribute='Keyword' String=' started handler '/>
         <RegExpr attribute='Keyword' String=' stalled handler '/>
         <RegExpr attribute='Keyword' String=' simulation mode suite timeout '/>
-        <RegExpr attribute='Keyword' String=' simulate failure '/>
+        <RegExpr attribute='Keyword' String=' disable suite event handlers '/>
+        <RegExpr attribute='Keyword' String=' default run length '/>
+        <RegExpr attribute='Keyword' String=' speedup factor '/>
+        <RegExpr attribute='Keyword' String=' time limit buffer '/>
+        <RegExpr attribute='Keyword' String=' fail cycle points '/>
+        <RegExpr attribute='Keyword' String=' fail try 1 only '/>
+        <RegExpr attribute='Keyword' String=' disable task event handlers '/>
         <RegExpr attribute='Keyword' String=' shutdown handler '/>
         <RegExpr attribute='Keyword' String=' shell '/>
         <RegExpr attribute='Keyword' String=' sequential '/>
         <RegExpr attribute='Keyword' String=' script '/>
         <RegExpr attribute='Keyword' String=' runahead limit '/>
         <RegExpr attribute='Keyword' String=' run-dir '/>
-        <RegExpr attribute='Keyword' String=' run time range '/>
         <RegExpr attribute='Keyword' String=' root '/>
         <RegExpr attribute='Keyword' String=' retry handler '/>
         <RegExpr attribute='Keyword' String=' retrieve job logs retry delays '/>
@@ -102,11 +107,6 @@
         <RegExpr attribute='Keyword' String=' env-script '/>
         <RegExpr attribute='Keyword' String=' enable resurrection '/>
         <RegExpr attribute='Keyword' String=' dummy mode suite timeout '/>
-        <RegExpr attribute='Keyword' String=' disable task event hooks '/>
-        <RegExpr attribute='Keyword' String=' disable suite event hooks '/>
-        <RegExpr attribute='Keyword' String=' disable retries '/>
-        <RegExpr attribute='Keyword' String=' disable pre-script '/>
-        <RegExpr attribute='Keyword' String=' disable post-script '/>
         <RegExpr attribute='Keyword' String=' disable automatic shutdown '/>
         <RegExpr attribute='Keyword' String=' description '/>
         <RegExpr attribute='Keyword' String=' default node attributes '/>
diff --git a/dev/profile-experiments/example b/dev/profile-experiments/example
index 4281be4..09ef6cd 100644
--- a/dev/profile-experiments/example
+++ b/dev/profile-experiments/example
@@ -22,6 +22,10 @@
             #  - `sleep_time`: User for [task]script = sleep _
             "options": ["setting=value", "foo=bar"],
 
+            # Config items for the global.rc file this suite is run with.
+            # --- optional, default=[] ---
+           "globalrc": ['[cylc][events]mail events=timeout'],
+
             # Number of REPEATS to perform, if zero the suite will run once,
             # if one it will run twice!
             # --- optional, default=0 ---
diff --git a/dev/suites/chains/suite.rc b/dev/suites/chains/suite.rc
index 8542c35..f6f4d76 100644
--- a/dev/suites/chains/suite.rc
+++ b/dev/suites/chains/suite.rc
@@ -14,6 +14,11 @@
 {% if not tasks_per_chain is defined %}
     {% set tasks_per_chain = 1 %}
 {% endif %}
+{% if not sleep_time is defined %}
+    {% set script = 'true' %}
+{% else %}
+    {% set script = 'sleep ' + sleep_time %}
+{% endif %}
 
 [scheduling]
     cycling mode = integer
@@ -34,11 +39,11 @@
 [runtime]
     [[root]]
     {% if cylc_compat_mode is defined and cylc_compat_mode == '6' %}
-        command scripting = true
+        command scripting = {{ script }}
         [[[job submission]]]
             method = {{ batch_system }}
     {% else %}
-        script = true
+        script = {{ script }}
         [[[job]]]
             batch system = {{ batch_system }}
     {% endif %}
diff --git a/dev/suites/complex/suite.rc b/dev/suites/complex/suite.rc
index a83ca75..ac239e6 100644
--- a/dev/suites/complex/suite.rc
+++ b/dev/suites/complex/suite.rc
@@ -14,7 +14,7 @@ title = "Complex Suite Demo"
 description = "This is a super-elaborate obfuscated version of a suite with no Jinja2 to condense it down."
 [cylc]
     UTC mode = True
-    [[event hooks]]
+    [[events]]
         timeout handler = rose suite-hook --mail --shutdown
         timeout = P1D                       # 1 day.
 [scheduling]
@@ -22,7 +22,7 @@ description = "This is a super-elaborate obfuscated version of a suite with no J
     final cycle point = 20150220T00
     runahead limit = PT6H
     [[special tasks]]
-        clock-triggered = \
+        clock-trigger = \
                           long_start_00(PT2H40M), brief_start_00(PT6H15M), \
                           long_start_06(PT2H40M), brief_start_06(PT6H15M), \
                           long_start_12(PT2H40M), brief_start_12(PT6H15M), \
@@ -1137,8 +1137,6 @@ description = "This is a super-elaborate obfuscated version of a suite with no J
         inherit = LONG_OBS_PROCESS_LOOK_AT, OBS_PARALLEL, RETRY_TASK
     [[brief_observations_db_stuff_wagtail]]
         inherit = BRIEF_OBS_DB_STUFF, OBS_SHARED
-    [[brief_observations_db_stuff_wagtail]]
-        inherit = BRIEF_OBS_DB_STUFF, OBS_SHARED
     [[long_observations_db_stuff_wagtail]]
         inherit = LONG_OBS_DB_STUFF, OBS_SHARED
     [[brief_observations_process_screen]]
@@ -1216,10 +1214,8 @@ description = "This is a super-elaborate obfuscated version of a suite with no J
         inherit = BRIEF, HPC, BRIEF_HPC, CORETYPE_SHARED
     [[long_local_preflight_checks, long_local]]
         inherit = LONG, HPC, LONG_HPC, CORETYPE_SHARED
-        inherit = LONG, HPC, LONG_HPC
     [[long_bigleftright]]
         inherit = LONG, HPC, LONG_HPC, CORETYPE_SHARED
-        inherit = LONG, HPC, LONG_HPC
     [[long_happyland_ingest]]
         inherit = LONG, HPC, LONG_HPC, CORETYPE_SHARED
     [[long_bogus_listing]]
@@ -1342,8 +1338,6 @@ description = "This is a super-elaborate obfuscated version of a suite with no J
         inherit = LONG_FORECAST_TRIGGER, LONG_HPC
     [[long_forecast_tD]]
         inherit = LONG_FORECAST_TRIGGER, LONG_HPC
-    [[long_forecast_tD]]
-        inherit = LONG_FORECAST_TRIGGER, LONG_HPC
     [[long_forecast_tE]]
         inherit = LONG_FORECAST_TRIGGER, LONG_HPC
     [[long_forecast_tF]]
@@ -1414,14 +1408,6 @@ description = "This is a super-elaborate obfuscated version of a suite with no J
         inherit = LONG_FORECAST_TRIGGER, LONG_HPC
     [[long_forecast_tZE]]
         inherit = LONG_FORECAST_TRIGGER, LONG_HPC
-    [[long_forecast_tL]]
-        inherit = LONG_FORECAST_TRIGGER, LONG_HPC
-    [[long_forecast_tZF]]
-        inherit = LONG_FORECAST_TRIGGER, LONG_HPC
-    [[long_forecast_tZG]]
-        inherit = LONG_FORECAST_TRIGGER, LONG_HPC
-    [[long_forecast_tZH]]
-        inherit = LONG_FORECAST_TRIGGER, LONG_HPC
     [[long_forecast_tN]]
         inherit = LONG_FORECAST_TRIGGER, LONG_HPC
     [[long_forecast_tZI]]
@@ -1468,8 +1454,6 @@ description = "This is a super-elaborate obfuscated version of a suite with no J
         inherit = LONG_SUBJOB
     [[long_forecast_subjob_tD]]
         inherit = LONG_SUBJOB
-    [[long_forecast_subjob_tD]]
-        inherit = LONG_SUBJOB
     [[long_forecast_subjob_tE]]
         inherit = LONG_SUBJOB
     [[long_forecast_subjob_tF]]
@@ -1520,14 +1504,8 @@ description = "This is a super-elaborate obfuscated version of a suite with no J
         inherit = LONG_SUBJOB
     [[long_forecast_subjob_tKF]]
         inherit = LONG_SUBJOB
-    [[long_forecast_subjob_tL]]
-        inherit = LONG_SUBJOB
     [[long_forecast_subjob_tZF]]
         inherit = LONG_SUBJOB
-    [[long_forecast_subjob_tZG]]
-        inherit = LONG_SUBJOB
-    [[long_forecast_subjob_tZH]]
-        inherit = LONG_SUBJOB
     [[long_forecast_subjob_tZA]]
         inherit = LONG_SUBJOB
     [[long_forecast_subjob_tZB]]
@@ -1542,8 +1520,6 @@ description = "This is a super-elaborate obfuscated version of a suite with no J
         inherit = LONG_SUBJOB
     [[long_forecast_subjob_tL]]
         inherit = LONG_SUBJOB
-    [[long_forecast_subjob_tZF]]
-        inherit = LONG_SUBJOB
     [[long_forecast_subjob_tZG]]
         inherit = LONG_SUBJOB
     [[long_forecast_subjob_tZH]]
@@ -3106,8 +3082,6 @@ description = "This is a super-elaborate obfuscated version of a suite with no J
             """
 [runtime]
     [[recover1, recover2, recover3]]
-        environment scripting = sleep 1
-[runtime]
     [[ENSEMBLE]]
     [[ENCOLD_START]]
         inherit = ENSEMBLE
@@ -4460,10 +4434,8 @@ description = "This is a super-elaborate obfuscated version of a suite with no J
         inherit = ENSEMBLE_THUNDERBIRDS_ARE_GO
     [[ensemble_local]]
         inherit = ENSEMBLE, HPC, ENSEMBLE_HPC, CORETYPE_SHARED
-        inherit = ENSEMBLE, HPC, ENSEMBLE_HPC
     [[ensemble_downscale]]
         inherit = ENSEMBLE, HPC, ENSEMBLE_HPC, CORETYPE_SHARED
-        inherit = ENSEMBLE, HPC, ENSEMBLE_HPC
     [[ensemble_subjob_hook]]
         inherit = ENSEMBLE, HPC, ENSEMBLE_HPC, CORETYPE_SHARED
 [scheduling]
diff --git a/dev/suites/diamond/suite.rc b/dev/suites/diamond/suite.rc
index 9b51dcf..57ac6e8 100644
--- a/dev/suites/diamond/suite.rc
+++ b/dev/suites/diamond/suite.rc
@@ -11,6 +11,11 @@
 {% if not tasks is defined %}
     {% set tasks = 100 %}
 {% endif %}
+{% if not sleep_time is defined %}
+    {% set script = 'true' %}
+{% else %}
+    {% set script = 'sleep ' + sleep_time %}
+{% endif %}
 
 [scheduling]
     cycling mode = integer
@@ -29,11 +34,11 @@
 [runtime]
     [[root]]
     {% if cylc_compat_mode is defined and cylc_compat_mode == '6' %}
-        command scripting = true
+        command scripting = {{ script }}
         [[[job submission]]]
             method = {{ batch_system }}
     {% else %}
-        script = true
+        script = {{ script }}
         [[[job]]]
             batch system = {{ batch_system }}
     {% endif %}
diff --git a/dev/suites/integer/one/suite.rc b/dev/suites/integer/one/suite.rc
index 7aec5cc..3ddd975 100644
--- a/dev/suites/integer/one/suite.rc
+++ b/dev/suites/integer/one/suite.rc
@@ -28,13 +28,9 @@
                     """
 [runtime]
     [[foo]]
-        script = """
-sleep 5
-cylc task message "the cheese is ready for $(( CYLC_TASK_CYCLE_TIME + 3 ))"
-sleep 5
-                            """
+        script = "sleep 5; cylc task message 'the cheese is ready!'; sleep 5"
         [[[outputs]]]
-            out1 = "the cheese is ready for [T+3]"
+            out1 = "the cheese is ready!"
 
 [visualization]
     default node attributes = "style=filled"
diff --git a/doc/Makefile b/doc/Makefile
index 4ac08b9..b6e8204 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -16,63 +16,37 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-CYLC=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))../bin/cylc
+.PHONY: all cug sdg cug-pdf cug-html clean install installclean
 
-.PHONY: all index clean html html-multi html-single pdf
+all: sdg cug install
 
-DEPS := $(shell ./scripts/get-deps.sh)
+sdg: src/suite-design-guide/document.pdf
 
-all: index
+cug: cug-pdf cug-html
+	
+cug-pdf: src/cylc-user-guide/pdf/cug-pdf.pdf
 
-index: $(DEPS)
-	./scripts/make-index.sh
+cug-html: src/cylc-user-guide/html/single/cug-html.html \
+          src/cylc-user-guide/html/multi/cug-html.html
 
-html: html-multi html-single
+src/suite-design-guide/document.pdf:
+	cd src/suite-design-guide && $(MAKE)
 
-html-multi: html/multi/cug-html.html
+src/cylc-user-guide/pdf/cug-pdf.pdf:
+	cd src/cylc-user-guide && $(MAKE) pdf
 
-html-single: html/single/cug-html.html
+src/cylc-user-guide/html/single/cug-html.html:
+	cd src/cylc-user-guide && $(MAKE) html-single
 
-pdf: pdf/cug-pdf.pdf
+src/cylc-user-guide/html/multi/cug-html.html:
+	cd src/cylc-user-guide && $(MAKE) html-multi
 
+install:
+	./src/make-index.sh
 
-cylc.txt: ../bin/cylc
-	$< --help > $@
-
-cats = $(shell ../bin/cylc categories) 
-catx = $(cats:%=categories/%.txt)
-
-cmds = $(shell ../bin/cylc commands)
-cmdx = $(cmds:%=commands/%.txt)
-
-$(cmdx): commands/%.txt: ../bin/cylc-%
-	if test ! -d commands; then mkdir -p commands/; fi
-	$(patsubst ../bin/cylc-%,../bin/cylc %,$<) --help > $@
-
-$(catx): categories/%.txt: ../bin/cylc
-	if test ! -d categories; then mkdir -p categories/; fi
-	cylc $(subst .txt,,$(subst categories/,,$@)) --help > $@
-
-commands.tex: $(cmdx) $(catx) cylc.txt
-	./scripts/make-commands.sh
-
-html/multi/cug-html.html: commands.tex cug-html.tex cug.tex suiterc.tex siterc.tex gcylcrc.tex cug-html.cfg
-	./scripts/make-html.sh multi
-
-html/single/cug-html.html: commands.tex cug-html.tex cug.tex suiterc.tex siterc.tex gcylcrc.tex cug-html.cfg
-	./scripts/make-html.sh single
-
-pdf/cug-pdf.pdf: commands.tex cug-pdf.tex cug.tex suiterc.tex siterc.tex gcylcrc.tex
-	./scripts/make-pdf.sh
-
-# delete all generated files:
 clean:
-	rm -r pdf \
-	html \
-	cylc.txt \
-	commands \
-	categories \
-	commands.tex \
-	index.html \
-	cylc-version.txt
+	cd src/suite-design-guide && $(MAKE) clean
+	cd src/cylc-user-guide && $(MAKE) clean
 
+installclean:
+	rm -r install
diff --git a/doc/README b/doc/README
index f3b3f9d..3597803 100644
--- a/doc/README
+++ b/doc/README
@@ -1,15 +1,23 @@
+------------------------------------------
+Cylc document generation from LaTeX source
+------------------------------------------
 
-To generate pdf and html (single and multi-page) Cylc User Guides:
+cug = Cylc User Guide
+sdg = Suite Design Guide
 
- | cd $CYLC_DIR/doc
- | make
+make all - create cug and sdg, and install to 'doc/install/'
 
-The following make targets are also avaialable:
- | make pdf
- | make html
- | make html-single
- | make html-multi
+make sdg - just create sdg (PDF)
 
-This will generate the documents under pdf/ and html/ sub-directories,
-and an index.html file at the top level that links to them.
+make cug - just create cug (PDF and HTML)
+make cug-pdf - just create cug-pdf
+make cug-html - just create cug-html (single and multi-page versions)
 
+make install - install created docs to 'doc/install/', create index.html
+make clean - remove all generated files (except those in 'doc/install/')
+make installclean - remove the 'doc/install/' directory
+
+-----------------------------------------------------------------------
+NOTE 'doc/install/' can be moved wholesale to another location (add the
+new location to your global.rc so that 'cylc doc' works properly).
+-----------------------------------------------------------------------
diff --git a/doc/gscanrc.tex b/doc/gscanrc.tex
deleted file mode 100644
index f856280..0000000
--- a/doc/gscanrc.tex
+++ /dev/null
@@ -1,43 +0,0 @@
-
-\section{Cylc Gscan Config File Reference}
-\label{GscanRCReference}
-
-\lstset{language=bash}
-
-This section defines all legal items and values for the gscan config
-file which should be located in
-\lstinline=$HOME/.cylc/gscan.rc=.
-
-\subsection{Top Level Items}
-
-
-\subsubsection{activate on startup}
-Set whether
-\lstinline=cylc gpanel=
-will activate automatically when the gui is loaded or not.
-
-\begin{myitemize}
-    \item {\em type:} boolean (True or False)
-\item {\em legal values:} ``True'', ``False''
-\item {\em default:} ``False''
-\item {\em example:} \lstinline at activate on startup = True@
-\end{myitemize}
-
-
-\subsubsection{columns}
-Set the data fields displayed initially when the
-\lstinline=cylc gscan=
-GUI starts. This
-can be changed later using the right click context menu.
-\newline
-Note that the order in
-which the fields are specified does not affect the order in which they are
-displayed.
-
-\begin{myitemize}
-\item {\em type:} string (a list of one or more view names)
-\item {\em legal values:} ``host'', ``owner'', ``status'', ``suite'',  ``title'',
-        ``updated''
-\item {\em default:} ``status'', ``suite''
-\item {\em example:} \lstinline at columns = suite, title, status@
-\end{myitemize}
diff --git a/doc/index.css b/doc/index.css
deleted file mode 100644
index bf418c2..0000000
--- a/doc/index.css
+++ /dev/null
@@ -1,85 +0,0 @@
-
-body { 
-		background:#bbc;
-}
-
-div.uberpage {
-		border:2px solid #bbc;
-		margin:0 auto;
-		width:700px;
-		background-image:url(images/niwa-colour-small.png);
-		background-repeat:no-repeat;
-		background-position: bottom right;
-}
-
-div.page {
-		background:#ffffff;
-		border:2px solid #0a0;
-		margin:0 auto;
-		margin-top:20px;
-		padding:50px;
-		width:600px;
-}
-
-.lbox {
-		width: 60%;
-}
-
-.rbox {
-		float:right;
-	/*	font-size:80%;*/
-}
-
-.info {
-		color:#666;
-		font-size:80%;
-}
-
-
-a:link, a:visited { 
-		font-weight:bold;
-		color: #04a;
-}
-
-h1 {
-		border-bottom:2px solid #0a0;
-		margin-top:0;
-		color:#0a0;
-}
-h2 {
-		border-bottom:2px solid #00cc00;
-		margin-right:25%;
-		margin-left:30px;
-		color:#0a0;
-}
-h3 {
- 		border-bottom:1px solid #080;
-		color:#080;
-		margin-right:50%;
-		margin-left:60px;
-		padding-top:20px;
-}
-
-code {
-		color:#ff0033;
-    font-family: "Courier 10 Pitch", Courier, monospace; 
-		font-size: 90%;
-		font-weight:bold;
-}
-
-pre.code {
-		display:block;
-		/*color:#3e7c63;*/
-		color:#ff0033;
-		margin-top:-10px;
-		padding:10px;
-    font-family: "Courier 10 Pitch", Courier, monospace; 
-		font-size: 90%;
-		font-weight:bold;
-		white-space: pre-wrap; /* css-3 */
-		white-space: -moz-pre-wrap !important; 
-		white-space: -pre-wrap; /* Opera 4-6 */
-		white-space: -o-pre-wrap; /* Opera 7 */
-		word-wrap: break-word; /* Internet Explorer 5.5+ */
-}
-
diff --git a/doc/scripts/make-index.sh b/doc/scripts/make-index.sh
deleted file mode 100755
index bcfd435..0000000
--- a/doc/scripts/make-index.sh
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/bin/bash
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2017 NIWA
-# 
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-# This script generates an HTML index page linking to cylc
-# documentation. It is intended to be executed automatically 
-# during the document generation process (see Makefile). The resulting
-# index file will link to whichever documentation formats have been
-# generated (PDF and/or HTML single page and/or HTML multi-page).
-# It can however be executed manually from within the doc directory.
-
-set -e
-
-echo "Updating index.html"
-
-CYLC_VERSION=$($(dirname $0)/../../bin/cylc --version)
-INDEX=index.html
-
-cat > $INDEX <<END
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">
-<html>
-		<head>
-				<title>Cylc Documentation Index</title>
-				<link rel="stylesheet" href="index.css">
-		</head>
-
-<body>
-
-<div class="uberpage">
-<div class="page">
-
-<div style="float:right">
-<b>
-END
-
-echo $CYLC_VERSION >> $INDEX
-
-cat >> $INDEX <<END
-</b>
-</div>
-
-<h1>Cylc Documentation</h1>
-
-<p>Run the <code>cylc documentation</code> command to get here 
-(see <code>cylc doc --help</code>).<p>
-
-
-<div class="rbox">
-<h3 style="margin:10px">Command Help</h3>
-<pre class="code">
-cylc --help
-cylc COMMAND --help
-</pre>
-<h3 style="margin:10px">Misc</h3>
-<ul>
-<li><a href="https://github.com/cylc/cylc/blob/master/CHANGES.md">changes</a></li>
-</ul>
-</div>
-
-<div class="lbox">
-<h3 style="margin:10px">User Guide</h3>
-<p>For this cylc version: 
-END
-echo $CYLC_VERSION >> $INDEX
-
-cat >> $INDEX <<END
-</p>
-<ul>
-END
-
-if [[ -f pdf/cug-pdf.pdf ]]; then
-    cat >> $INDEX <<END
-<li> <a href="pdf/cug-pdf.pdf">PDF format</a> </li>
-END
-else
-    cat >> $INDEX <<END
-    <li>PDF format <i>(not generated)</i></li>
-END
-fi
-
-if [[ -f html/single/cug-html.html ]]; then
-    cat >> $INDEX <<END
-<li> <a href="html/single/cug-html.html">HTML single-page</a> </li>
-END
-else
-    cat >> $INDEX <<END
-    <li> HTML single page <i>(not generated)</i></li>
-END
-
-fi
-
-if [[ -f html/multi/cug-html.html ]]; then
-    cat >> $INDEX <<END
-<li> <a href="html/multi/cug-html.html">HTML multi-page</a> </li>
-END
-else
-    cat >> $INDEX <<END
-    <li> HTML multi-page <i>(not generated)</i></li>
-END
-fi
-
-cat >> $INDEX <<END
-</ul>
-</div>
-
-<div class="lbox">
-<h3 style="margin:10px">Internet</h3>
-<p>For the latest cylc release</p>
-<ul>
-<li> <a href="http://cylc.github.com/cylc/#">Project Homepage</a> </li>
-<li> <a href="http://cylc.github.com/cylc/#documentation">Online Documentation</a> </li>
-<li> <a href="https://github.com/cylc/cylc">Github Source Repository</a> </li>
-</ul>
-</div>
-</div>
-
-<div class="info">
-<p>Document generation:</p>
-<ul>
-<li> user: <b>
-END
-whoami >> $INDEX
-cat >> $INDEX <<END
-</b> </li>
-<li> host: <b>
-END
-hostname -f >> $INDEX
-cat >> $INDEX <<END
-</b> </li>
-<li> date: <b>
-END
-date >> $INDEX
-
-cat >> $INDEX <<END
-</div>
-</div>
-
-</body>
-</html>
-END
-
diff --git a/doc/src/cylc-logo.png b/doc/src/cylc-logo.png
new file mode 100644
index 0000000..9654cc4
Binary files /dev/null and b/doc/src/cylc-logo.png differ
diff --git a/doc/Makefile b/doc/src/cylc-user-guide/Makefile
similarity index 57%
copy from doc/Makefile
copy to doc/src/cylc-user-guide/Makefile
index 4ac08b9..77d1f94 100644
--- a/doc/Makefile
+++ b/doc/src/cylc-user-guide/Makefile
@@ -16,16 +16,14 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-CYLC=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))../bin/cylc
-
-.PHONY: all index clean html html-multi html-single pdf
+.PHONY: all pdf html html-multi html-single clean
 
 DEPS := $(shell ./scripts/get-deps.sh)
+CYLC = ../../../bin/cylc
 
-all: index
+all: $(DEPS)
 
-index: $(DEPS)
-	./scripts/make-index.sh
+pdf: pdf/cug-pdf.pdf
 
 html: html-multi html-single
 
@@ -33,46 +31,17 @@ html-multi: html/multi/cug-html.html
 
 html-single: html/single/cug-html.html
 
-pdf: pdf/cug-pdf.pdf
-
-
-cylc.txt: ../bin/cylc
-	$< --help > $@
-
-cats = $(shell ../bin/cylc categories) 
-catx = $(cats:%=categories/%.txt)
-
-cmds = $(shell ../bin/cylc commands)
-cmdx = $(cmds:%=commands/%.txt)
-
-$(cmdx): commands/%.txt: ../bin/cylc-%
-	if test ! -d commands; then mkdir -p commands/; fi
-	$(patsubst ../bin/cylc-%,../bin/cylc %,$<) --help > $@
-
-$(catx): categories/%.txt: ../bin/cylc
-	if test ! -d categories; then mkdir -p categories/; fi
-	cylc $(subst .txt,,$(subst categories/,,$@)) --help > $@
-
-commands.tex: $(cmdx) $(catx) cylc.txt
+commands.tex: $(CYLC)-*
 	./scripts/make-commands.sh
 
 html/multi/cug-html.html: commands.tex cug-html.tex cug.tex suiterc.tex siterc.tex gcylcrc.tex cug-html.cfg
-	./scripts/make-html.sh multi
+	- ./scripts/make-html.sh multi
 
 html/single/cug-html.html: commands.tex cug-html.tex cug.tex suiterc.tex siterc.tex gcylcrc.tex cug-html.cfg
-	./scripts/make-html.sh single
+	- ./scripts/make-html.sh single
 
 pdf/cug-pdf.pdf: commands.tex cug-pdf.tex cug.tex suiterc.tex siterc.tex gcylcrc.tex
-	./scripts/make-pdf.sh
+	- ./scripts/make-pdf.sh
 
-# delete all generated files:
 clean:
-	rm -r pdf \
-	html \
-	cylc.txt \
-	commands \
-	categories \
-	commands.tex \
-	index.html \
-	cylc-version.txt
-
+	rm -rf pdf html commands.tex cylc-version.txt
diff --git a/doc/README b/doc/src/cylc-user-guide/README
similarity index 100%
copy from doc/README
copy to doc/src/cylc-user-guide/README
diff --git a/doc/abstract.tex b/doc/src/cylc-user-guide/abstract.tex
similarity index 100%
rename from doc/abstract.tex
rename to doc/src/cylc-user-guide/abstract.tex
diff --git a/doc/cug-html.cfg b/doc/src/cylc-user-guide/cug-html.cfg
similarity index 100%
rename from doc/cug-html.cfg
rename to doc/src/cylc-user-guide/cug-html.cfg
diff --git a/doc/cug-html.tex b/doc/src/cylc-user-guide/cug-html.tex
similarity index 100%
rename from doc/cug-html.tex
rename to doc/src/cylc-user-guide/cug-html.tex
diff --git a/doc/cug-pdf.tex b/doc/src/cylc-user-guide/cug-pdf.tex
similarity index 100%
rename from doc/cug-pdf.tex
rename to doc/src/cylc-user-guide/cug-pdf.tex
diff --git a/doc/cug.tex b/doc/src/cylc-user-guide/cug.tex
similarity index 91%
rename from doc/cug.tex
rename to doc/src/cylc-user-guide/cug.tex
index 49c92c5..7896237 100644
--- a/doc/cug.tex
+++ b/doc/src/cylc-user-guide/cug.tex
@@ -388,13 +388,22 @@ Cylc runs on Unix variants, usually Linux, and including Apple OS X.
 {\bf Python \lstinline@>=@ 2.6} is required (but not yet Python 3). Python
 should already be installed in your Linux system. \url{https://python.org}.
 
+For Cylc's HTTPS communications layer:
+\begin{myitemize}
+  \item {\bf OpenSSL} - \url{https://www.openssl.org/}
+  \item {\bf pyOpenSSL} - \url{http://www.pyopenssl.org/}
+  \item {\bf python-requests} - \url{http://docs.python-requests.org/}
+  \item ({\bf python-urllib3} - should be bundled with python-requests)
+\end{myitemize}
+
 The following packages are highly recommended, but are technically optional as
 you can construct and run suites without dependency graph visualisation or
 the Cylc GUIs:
 
 \begin{myitemize}
-  \item {\bf PyGTK} - GUI toolkit (should be in your system Python):
-    \url{http://www.pygtk.org}.
+  \item {\bf PyGTK} - GUI toolkit \url{http://www.pygtk.org}.  {\em Note PyGTK
+    typically comes with your system Python. It is allegedly quite
+    difficult to install if you need to do so for another Python version.}
     \item {\bf Graphviz} - graph layout engine (tested 2.36.0):
       \url{http://www.graphviz.org}.
     \item {\bf Pygraphviz} - Python Graphviz interface (tested 1.2):
@@ -455,7 +464,7 @@ separately.
     (recommended) or else pure Python via {\bf urllib2}.
 \newline \url{http://www.cherrypy.org/}
 \newline \url{http://docs.python-requests.org/}
-  \item {\bf Jinja2 2.8}: a full featured template engine for Python, and its
+  \item {\bf Jinja2 2.9.6}: a full featured template engine for Python, and its
     dependency {\bf MarkupSafe 0.23}; both BSD licensed.
 \newline \url{http://jinja.pocoo.org/}
 \newline \url{http://www.pocoo.org/projects/markupsafe/}
@@ -469,18 +478,24 @@ separately.
 Cylc releases can be downloaded from from \url{https://cylc.github.io/cylc}.
 
 The wrapper script \lstinline=admin/cylc-wrapper= should be installed as
-\lstinline=cylc= somewhere in the system executable search path (e.g.\
+\lstinline=cylc= in the system executable search path (e.g.\
 \lstinline=/usr/local/bin/=) and modified slightly to point to a location
-such as \lstinline=/opt/cylc/= where successive Cylc releases will be unpacked
+such as \lstinline=/opt= where successive Cylc releases will be unpacked
 side by side.
 
 To install Cylc for the first time simply unpack the release tarball in that
-location, type \lstinline=make= in it, and set your site defaults in a
-site config file (below).
+location, e.g.\ \lstinline=/opt/cylc-7.4.0=, type \lstinline=make= inside
+the unpacked release directory, and set site defaults - if necessary - in a
+site global config file (below).
+
+In the installed location, make a symbolic link from \lstinline=cylc= to the
+latest installed version: \lstinline=ln -s /opt/cylc-7.4.0 cylc=. This is the
+version of Cylc that will be invoked by the central wrapper if a specific
+version is not requested e.g.\ by \lstinline at CYLC_VERSION=7.4.0 at .
 
 Installing subsequent releases is just a matter of unpacking the new tarballs
 next to the previous releases, running \lstinline=make= in them, and copying
-in (possibly with modifications) the previous site config file.
+in (possibly with modifications) the previous site global config file.
 
 \subsubsection{Local User Installation}
 \label{LocalInstall}
@@ -502,6 +517,7 @@ where to install it. All legal site and user global config items are defined
 in~\ref{SiteRCReference}.
 
 \subsubsection{Configure Site Environment on Job Hosts}
+\label{Configure Site Environment on Job Hosts}
 
 If your users submit task jobs to hosts other than the hosts they use to run
 their suites, you should ensure that the job hosts have the correct environment
@@ -856,7 +872,7 @@ $ echo $?
 Here's the traditional {\em Hello World} program rendered as a cylc
 suite:
 \lstset{language=suiterc}
-\lstinputlisting{../examples/tutorial/oneoff/basic/suite.rc}
+\lstinputlisting{../../../examples/tutorial/oneoff/basic/suite.rc}
 \lstset{language=transcript}
 
 Cylc suites feature a clean separation of scheduling configuration,
@@ -877,7 +893,7 @@ prototyping of new suites, it is submitted to run as a background job on
 the suite host. In fact cylc even provides a default task implementation
 that makes the entire \lstinline=[runtime]= section technically optional:
 \lstset{language=suiterc}
-\lstinputlisting{../examples/tutorial/oneoff/minimal/suite.rc}
+\lstinputlisting{../../../examples/tutorial/oneoff/minimal/suite.rc}
 \lstset{language=transcript}
 (the resulting {\em dummy task} just prints out some identifying
 information and exits).
@@ -1386,7 +1402,7 @@ represents a task, or if it is inherited by other namespaces, a {\em
 family}. This allows common configuration to be factored out of related
 tasks very efficiently.
 \lstset{language=suiterc}
-\lstinputlisting{../examples/tutorial/oneoff/inherit/suite.rc}
+\lstinputlisting{../../../examples/tutorial/oneoff/inherit/suite.rc}
 The \lstinline=[root]= namespace provides defaults for all tasks in the suite.
 Here both tasks inherit \lstinline=script= from \lstinline=root=, which they
 customize with different values of the environment variable
@@ -1425,7 +1441,7 @@ i.e.\ if \lstinline=foo= succeeds, trigger all members of
 \lstinline=GREETERS= at once. Here's the full suite with runtime
 hierarchy shown:
 \lstset{language=suiterc}
-\lstinputlisting{../examples/tutorial/oneoff/ftrigger1/suite.rc}
+\lstinputlisting{../../../examples/tutorial/oneoff/ftrigger1/suite.rc}
 
 (Note that we recommend given ALL-CAPS names to task families to help
 distinguish them from task names. However, this is just a convention).
@@ -1433,7 +1449,7 @@ distinguish them from task names. However, this is just a convention).
 Experiment with the \lstinline=tut/oneoff/ftrigger1= suite to see
 how this works.
 
-\subsection{Triggering Off Families}
+\subsection{Triggering Off Of Families}
 
 \hilight{ suite: \lstinline=tut/oneoff/ftrigger2= }
 \vspace{3mm}
@@ -1557,11 +1573,11 @@ basic Hello World suite and cut the implementation of the task
 \lstinline=hello= out to a file \lstinline=hello.sh= in the suite
 bin directory:
 \lstset{language=bash}
-\lstinputlisting{../examples/tutorial/oneoff/external/bin/hello.sh}
+\lstinputlisting{../../../examples/tutorial/oneoff/external/bin/hello.sh}
 Make the task script executable, and change the \lstinline=hello= task
 runtime section to invoke it:
 \lstset{language=suiterc}
-\lstinputlisting{../examples/tutorial/oneoff/external/suite.rc}
+\lstinputlisting{../../../examples/tutorial/oneoff/external/suite.rc}
 
 If you run the suite now the new greeting from the external task script
 should appear in the \lstinline=hello= task stdout log. This works
@@ -1598,7 +1614,7 @@ belonging to that sequence may only run once.
 
 Open the \lstinline=tut/cycling/one= suite:
 \lstset{language=suiterc}
-\lstinputlisting{../examples/tutorial/cycling/one/suite.rc}
+\lstinputlisting{../../../examples/tutorial/cycling/one/suite.rc}
 The difference between cycling and non-cycling suites is all in the
 \lstinline=[scheduling]= section, so we will leave the
 \lstinline=[runtime]= section alone for now (this will result in
@@ -1628,7 +1644,7 @@ clock triggers in real time operation.
 
 Experiment with \lstinline=tut/cycling/one= to see how cycling tasks work.
 
-\paragraph{ISO 8601 Date-Time Syntax}
+\subsubsection{ISO 8601 Date-Time Syntax}
 
 The suite above is a very simple example of a cycling date-time workflow. More
 generally, cylc comprehensively supports the ISO 8601 standard for date-time
@@ -1800,7 +1816,7 @@ date-time based.
 Open the \lstinline=tut/cycling/integer= suite, which is plotted in
 Figure~\ref{fig-tut-int}.
 \lstset{language=suiterc}
-\lstinputlisting{../examples/tutorial/cycling/integer/suite.rc}
+\lstinputlisting{../../../examples/tutorial/cycling/integer/suite.rc}
 
 \begin{figure}
     \begin{center}
@@ -1840,7 +1856,7 @@ uses of Jinja2: changing suite content or structure based on the value
 of a logical switch; and iteratively generating dependencies and runtime
 configuration for groups of related tasks:
 \lstset{language=suiterc}
-\lstinputlisting{../examples/tutorial/oneoff/jinja2/suite.rc}
+\lstinputlisting{../../../examples/tutorial/oneoff/jinja2/suite.rc}
 
 To view the result of Jinja2 processing with the Jinja2 flag
 \lstinline at MULTI@ set to \lstinline=False=:
@@ -1893,7 +1909,7 @@ An environment variable \lstinline=$CYLC_TASK_TRY_NUMBER= increments
 from $1$ on each successive try, and is passed to the task to allow
 different behaviour on the retry:
 \lstset{language=suiterc}
-\lstinputlisting{../examples/tutorial/oneoff/retry/suite.rc}
+\lstinputlisting{../../../examples/tutorial/oneoff/retry/suite.rc}
 
 When a task with configured retries fails, its cylc task proxy goes
 into the {\em retrying} state until the next retry delay is up, then it
@@ -2623,8 +2639,11 @@ Datetimes can be excluded from a recurrence by an exclamation mark for example
 \lstinline=[[[ PT1D!20000101 ]]]= means run daily except on the
 first of January 2000.
 
-This syntax can only be used to exclude one datetime from a recurrence. Note
-that the \lstinline=^= and \lstinline=$= symbols (shorthand for the initial
+This syntax can be used to exclude one or multiple datetimes from a recurrence.
+Multiple datetimes are excluded using the syntax 
+\lstinline=[[[ PT1D!(20000101,20000102,...) ]]]=. All datetimes listed within
+the parentheses after the exclamation mark will be excluded. Note that the 
+\lstinline=^= and \lstinline=$= symbols (shorthand for the initial
 and final cycle points) are both datetimes so \lstinline=[[[ T12!$-PT1D ]]]=
 is valid.
 
@@ -3070,7 +3089,7 @@ The example suite \lstinline=$CYLC_DIR/examples/message-triggers= illustrates
 message triggering.
 
 \lstset{language=suiterc}
-\lstinputlisting{../examples/message-triggers/suite.rc}
+\lstinputlisting{../../../examples/message-triggers/suite.rc}
 
 \paragraph{Job Submission Triggers}
 
@@ -3948,7 +3967,7 @@ The following listing of the {\em inherit.single.one} example suite
 illustrates basic runtime inheritance with single parents.
 
 \lstset{language=suiterc}
-\lstinputlisting{../examples/inherit/single/one/suite.rc}
+\lstinputlisting{../../../examples/inherit/single/one/suite.rc}
 \lstset{language=transcript}
 
 \subsubsection{Runtime Inheritance - Multiple}
@@ -3967,7 +3986,7 @@ The {\em inherit.multi.one} example suite, listed here, makes use of
 multiple inheritance:
 
 \lstset{language=suiterc}
-\lstinputlisting{../examples/inherit/multi/one/suite.rc}
+\lstinputlisting{../../../examples/inherit/multi/one/suite.rc}
 \lstset{language=transcript}
 
 \lstinline=cylc get-suite-config= provides an easy way to check the result of
@@ -4378,6 +4397,7 @@ Parameter values can be lists of strings, or integer ranges (with inclusive boun
     [[parameters]]
         obs = ship, buoy, plane
         run = 1..5  # 1, 2, 3, 4, 5
+        idx = 1..9..2  # 1, 3, 5, 7, 9
 \end{lstlisting}
 Then angle brackets denote use of these parameters throughout the suite
 definition. For the values above, this parameterized name:
@@ -4863,7 +4883,7 @@ with the New York City task family expanded, in
 Figure~\ref{fig-jinja2-cities}.
 
 \lstset{language=suiterc}
-\lstinputlisting{../examples/jinja2/cities/suite.rc}
+\lstinputlisting{../../../examples/jinja2/cities/suite.rc}
 \lstset{language=transcript}
 
 \begin{figure}
@@ -6057,7 +6077,7 @@ tracking (see these with \lstinline=cylc get-site-config=):
 # Setup the communication method details. This is required for
 # communications between cylc clients and servers (i.e. between
 # suite-connecting commands and guis, and running suite server processes).
-[communications]
+[communication]
 
     # Configure the choice of communication method. Only https is supported
     # at the moment.
@@ -6266,7 +6286,6 @@ As a suite runs its task proxies may pass through the following states:
     (see~\ref{InternalQueues}).
 
     \item {\bf held} - will not be submitted even if ready to run.
-    Tasks that spawn past the final cycle point are held automatically.
 
     \item {\bf ready} - ready to run (prerequisites satisfied) and
     handed to cylc's job submission sub-system.
@@ -6275,7 +6294,7 @@ As a suite runs its task proxies may pass through the following states:
     (could be waiting in an external batch scheduler queue).
 
     \item {\bf submit-failed} - job submission failed {\em or}
-    submitted job killed before commencing execution.
+    submitted job killed (cancelled) before commencing execution.
 
     \item {\bf submit-retrying} - job submission failed, but a submission retry
     was configured. Will only enter the {\em submit-failed} state if all
@@ -6409,7 +6428,7 @@ queue. The {\em queues} example suite illustrates how queues work by
 running two task trees side by side (as seen in the graph GUI) each
 limited to 2 and 3 tasks respectively:
 \lstset{language=suiterc}
-\lstinputlisting{../examples/queues/suite.rc}
+\lstinputlisting{../../../examples/queues/suite.rc}
 
 \subsection{Automatic Task Retry On Failure}
 \label{TaskRetries}
@@ -6645,7 +6664,7 @@ With \lstinline=cylc trigger --edit= (also in the gcylc right-click task menu)
 you can edit the generated task job script to make one-off changes before the
 task submits.
 
-\subsection{Runtime Settings Broadcast and Communication Between Tasks}
+\subsection{Cylc Broadcast}
 \label{cylc-broadcast}
 
 The \lstinline=cylc broadcast= command overrides \lstinline=[runtime]=
@@ -6697,79 +6716,60 @@ different behaviour may be required, or in a normal mid-run cycle point.
 Note however that an initial \lstinline=R1= graph section is now the preferred
 way to get different behaviour at suite start-up.
 
-\subsection{The Simulation And Dummy Run Modes}
+\subsection{Simulating Suite Behaviour}
 \label{SimulationMode}
 
-Since cylc-4.6.0 any cylc suite can run in {\em live}, {\em simulation},
-or {\em dummy} mode. Prior to that release simulation mode was a
-hybrid mode that replaced real tasks with local dummy tasks. This
-allowed local simulation testing of any suite, to get the scheduling
-right without running real tasks, but running dummy tasks locally does
-not add much value over a pure simulation (in which no tasks are
-submitted at all) because all job submission configuration has to be
-ignored and most task job script sections have to be cut out to avoid
-any code that could potentially be specific to the intended task host.
-So at 4.6.0 we replaced this with a pure simulation mode (task proxies
-go through the {\em running} state automatically within cylc, and no
-dummy tasks are submitted to run) and a new dummy mode in which only the
-real task scripting  is dummied out - each dummy task is
-submitted exactly as the task it represents on the correct host and in
-the same execution environment. A successful dummy run confirms not only
-that the scheduling works correctly but also tests real job submission,
-communication from remote task hosts, and the real task job scripts (in
-which errors such as use of undefined variables will cause a task to
-fail).
-
-The run mode, which defaults to {\em live}, is set on the command line
-(for run and restart):
+Several suite run modes allow you to simulate suite behaviour quickly without
+running the suite's real jobs - which may be long-running and resource-hungry:
+
+\begin{myitemize}
+  \item {\em dummy mode} - runs dummy tasks as background jobs on configured
+    job hosts.
+    \begin{myitemize}
+      \item simulates scheduling, job host connectivity, and
+        generates all job files on suite and job hosts.
+    \end{myitemize}
+  \item {\em dummy-local mode} - runs real dummy tasks as background jobs on
+    the suite host, which allows dummy-running suites from other sites.
+    \begin{myitemize}
+      \item simulates scheduling and generates all job files on the
+        suite host.
+    \end{myitemize}
+  \item {\em simulation mode} - does not run any real tasks.
+    \begin{myitemize}
+      \item simulates scheduling without generating any job files.
+    \end{myitemize}
+\end{myitemize}
+ 
+Set the run mode (default {\em live}) in the GUI suite start dialog box, or on
+the command line:
 \lstset{language=transcript}
 \begin{lstlisting}
 $ cylc run --mode=dummy SUITE
+$ cylc restart --mode=dummy SUITE
 \end{lstlisting}
-but you can configure the suite to force a particular run mode:
-\lstset{language=suiterc}
-\begin{lstlisting}
-[cylc]
-    force run mode = simulation
-\end{lstlisting}
-This can be used, for example, for demo suites that necessarily run out
-of their original context; or to temporarily prevent accidental
-execution of expensive real tasks during suite development.
 
-Dummy mode task scripting just prints a message and sleeps for ten
-seconds by default, but you can override this behaviour for particular
-tasks or task groups if you like. Here's how to make a task sleep for
-twenty seconds and then fail in dummy mode:
-\lstset{language=suiterc}
-\begin{lstlisting}
-[runtime]
-    [[foo]]
-        script = "run-real-task.sh"
-        [[[dummy mode]]]
-            script = """
-echo "hello from dummy task $CYLC_TASK_ID"
-sleep 20
-echo "ABORTING"
-/bin/false"""
-\end{lstlisting}
+You can get specified tasks to fail in these modes, for more flexible suite
+testing. See Section~\ref{suiterc-sim-config} for simulation configuration.
 
-Finally, in simulation mode each task takes between 1 and 15 seconds to
-``run'' by default, but you can also alter this for particular tasks or
-groups of tasks:
-\lstset{language=suiterc}
-\begin{lstlisting}
-[runtime]
-    [[foo]]
-       [[[simulation mode]]]
-        run time range = PT20S,PT31S # (between 20 and 30 seconds)
-\end{lstlisting}
-Note that to get a failed simulation or dummy mode task to succeed on
-re-triggering, just change the suite.rc file appropriately and reload
-the suite definition at run time with \lstinline=cylc reload SUITE=
-before re-triggering the task.
+\subsubsection{Proportional Simulated Run Length}
+
+If task \lstinline=[job]execution time limit= is set, Cylc divides it by
+\lstinline=[simulation]speedup factor= (default \lstinline=10.0=) to compute
+simulated task run lengths (default 10 seconds).
 
-Dummy mode is equivalent to removing all user-defined task scripting
-to expose the default scripting.
+\subsubsection{Limitations Of Suite Simulation}
+
+Dummy mode ignores batch scheduler settings because Cylc does not know which
+job resource directives (requested memory, number of compute nodes, etc.) would
+need to be changed for the dummy jobs.  If you need to dummy-run jobs on a
+batch scheduler, manually comment out \lstinline=script= items and modify
+directives in your live suite, or else use a custom live mode test suite.
+
+Note that the dummy modes ignore all configured task \lstinline=script= items
+including \lstinline=init-script=. If your \lstinline=init-script= is required
+to run even dummy tasks on a job host, note that host environment setup should
+be done elsewhere - see~\ref{Configure Site Environment on Job Hosts}.
 
 \subsubsection{Restarting Suites With A Different Run Mode?}
 
@@ -6846,7 +6846,7 @@ you wish:
         abort on timeout = True
 \end{lstlisting}
 
-\subsection{Inter-suite Dependence: Triggering Off Task States In Other Suites}
+\subsection{Triggering Off Of Tasks In Other Suites}
 \label{SuiteStatePolling}
 
 The \lstinline=cylc suite-state= command interrogates suite run databases. It
@@ -6926,6 +6926,93 @@ Note that the remote suite does not have to be running when polling commences
 because the command interrogates the suite run database, not the suite server
 process.
 
+\subsection{Suite Server Logs}
+\label{Suite Server Logs}
+
+Each suite maintains its own log of time-stamped events under the {\em suite
+server log directory}:
+
+\begin{lstlisting}
+$HOME/cylc-run/<SUITE-NAME>/log/suite/
+\end{lstlisting}
+
+By way of example, we will show the complete server log generated (at
+cylc-7.2.0) by a small suite that runs two 30-second dummy tasks
+\lstinline=foo= and \lstinline=bar= for a single cycle point
+\lstinline=2017-01-01T00Z= before shutting down:
+
+\lstset{language=suiterc,breaklines=true}
+\begin{lstlisting}
+[cylc]
+    cycle point format = %Y-%m-%dT%HZ
+[scheduling]
+    initial cycle point = 2017-01-01T00Z
+    final cycle point = 2017-01-01T00Z
+    [[dependencies]]
+        graph = "foo => bar"
+[runtime]
+    [[foo]]
+        script = sleep 30; /bin/false
+    [[bar]]
+        script = sleep 30; /bin/true
+\end{lstlisting}
+
+By the task scripting defined above, this suite will stall when \lstinline=foo=
+fails. Then, the suite owner {\em vagrant at cylon} manually resets the failed
+task's state to {\em succeeded}, allowing \lstinline=bar= to trigger and the
+suite to finish and shut down.  Here's the complete suite log for this run:
+
+\lstset{language=transcript}
+\begin{lstlisting}
+$ cylc log SUITE-NAME
+2017-03-30T09:46:10Z INFO - Suite starting: server=localhost:43086 pid=3483
+2017-03-30T09:46:10Z INFO - Run mode: live
+2017-03-30T09:46:10Z INFO - Initial point: 2017-01-01T00Z
+2017-03-30T09:46:10Z INFO - Final point: 2017-01-01T00Z
+2017-03-30T09:46:10Z INFO - Cold Start 2017-01-01T00Z
+2017-03-30T09:46:11Z INFO - [foo.2017-01-01T00Z] -submit_method_id=3507
+2017-03-30T09:46:11Z INFO - [foo.2017-01-01T00Z] -submission succeeded
+2017-03-30T09:46:11Z INFO - [foo.2017-01-01T00Z] -(current:submitted)> started at 2017-03-30T09:46:10Z
+2017-03-30T09:46:41Z CRITICAL - [foo.2017-01-01T00Z] -(current:running)> Task job script received signal EXIT at 2017-03-30T09:46:40Z
+2017-03-30T09:46:41Z CRITICAL - [foo.2017-01-01T00Z] -(current:running)> failed at 2017-03-30T09:46:40Z
+2017-03-30T09:46:42Z WARNING - suite stalled
+2017-03-30T09:46:42Z WARNING - Unmet prerequisites for bar.2017-01-01T00Z:
+2017-03-30T09:46:42Z WARNING -  * foo.2017-01-01T00Z succeeded
+2017-03-30T09:47:58Z INFO - [client-command] reset_task_states vagrant at cylon:cylc-reset 1e0d8e9f-2833-4dc9-a0c8-9cf263c4c8c3
+2017-03-30T09:47:58Z INFO - [foo.2017-01-01T00Z] -resetting state to succeeded
+2017-03-30T09:47:58Z INFO - Command succeeded: reset_task_states([u'foo.2017'], state=succeeded)
+2017-03-30T09:47:59Z INFO - [bar.2017-01-01T00Z] -submit_method_id=3565
+2017-03-30T09:47:59Z INFO - [bar.2017-01-01T00Z] -submission succeeded
+2017-03-30T09:47:59Z INFO - [bar.2017-01-01T00Z] -(current:submitted)> started at 2017-03-30T09:47:58Z
+2017-03-30T09:48:29Z INFO - [bar.2017-01-01T00Z] -(current:running)> succeeded at 2017-03-30T09:48:28Z
+2017-03-30T09:48:30Z INFO - Waiting for the command process pool to empty for shutdown
+2017-03-30T09:48:30Z INFO - Suite shutting down - AUTOMATIC
+\end{lstlisting}
+
+The information logged here includes:
+
+\begin{myitemize}
+  \item event timestamps, at the start of each line
+  \item suite server host, port and process ID
+  \item suite initial and final cycle points
+  \item suite start type (cold start in this case)
+  \item task events (task started, succeeded, failed, etc.)
+  \item suite stalled warning (in this suite nothing else can run when
+    \lstinline=foo= fails)
+  \item the client command issued by {\em vagrant at cylon} to reset
+    \lstinline=foo= to {\em succeeded}
+  \item job IDs  - in this case process IDs for background jobs (or PBS job IDs
+    etc.)
+  \item state changes due to incoming task progress messages ("started at ..."
+    etc.), and suite shutdown time and reasons (AUTOMATIC means "all tasks
+    finished and nothing else to do")
+\end{myitemize}
+
+Note that suite log files are primarily intended for human eyes. If you need
+an external system to monitor suite events automatically, interrogate
+the sqlite {\em suite run database} (see~\ref{Suite Run
+Databases}) rather than parse the log files.
+
 \subsection{Suite Run Databases}
 \label{Suite Run Databases}
 
@@ -6947,8 +7034,7 @@ $ sqlite3 ~/cylc-run/foo/log/db << _END_
 > select * from task_events where name is "foo";
 > _END_
 name|cycle|time|submit_num|event|message
-foo|1|2017-03-12T11:06:08Z|1|incrementing submit number|
-foo|1|2017-03-12T11:06:09Z|1|submission succeeded|
+foo|1|2017-03-12T11:06:09Z|1|submitted|
 foo|1|2017-03-12T11:06:09Z|1|output completed|started
 foo|1|2017-03-12T11:06:09Z|1|started|
 foo|1|2017-03-12T11:06:19Z|1|output completed|succeeded
@@ -6979,558 +7065,6 @@ cylc. It is available under the open source GPL license.
     \item Rose source repository: \url{https://github.com/metomi/rose}
 \end{myitemize}
 
-
-\section{Suite Design Principles}
-\label{SuiteDesignPrinciples}
-
-%Simplicity, flexibility, efficiency, and portability of cylc suites.
-
-\subsection{Make Fine-Grained Suites}
-\label{Granularity}
-
-A suite can contain a small number of large, internally complex tasks; a
-large number of small, simple tasks; or anything in between. Cylc can
-easily handle a large number of tasks, however, so there are definite
-advantages to fine-graining:
-
-\begin{myitemize}
-    \item A more modular and transparent suite.
-
-    \item More functional parallelism (multiple tasks running
-        at once).
-
-    \item Faster debugging and failure recovery: rerun just the tasks(s)
-        that failed.
-
-    \item More code reuse: similar tasks may be able to call the same
-        underlying script or command with differing input parameters.
-
-\end{myitemize}
-
-\subsection{Make Tasks Re-runnable}
-
-It should be possible to rerun a task by simply resubmitting it for the
-same cycle point. In other words, failure at any point during execution
-of a task should not render a rerun impossible by corrupting the state
-of some internal-use file, or whatever. It is difficult to overstate the
-usefulness of being able to rerun the same task multiple times,
-either outside of the suite with \lstinline=cylc submit=, or by
-re-triggering it within the running suite, when debugging a problem.
-
-\subsection{Make Models Re-runnable}
-
-If a warm-cycled model uses the exact same file names for its restart files
-regardless of current cycle point, the only cycle point that can subsequently
-run successfully is the next one. Instead, restart files should be labelled
-with current cycle point and maintained in a simple rolling archive. Then you
-can easily rerun the task for any cycle point still in the archive.
-
-\subsection{Avoid False Dependence}
-\label{LimitPID}
-
-If a task does not depend on files generated by another task then generally
-speaking it should not trigger off that task in the suite scheduling graph.
-Unnecessary dependence between tasks restricts functional parallelism at run
-time, and it makes the suite more difficult to understand. If you need to
-restrict the number of tasks that are active at once, use runahead limiting
-(\ref{RunaheadLimit}) and internal queues (\ref{InternalQueues}).
-
-
-\subsection{Put Task Cycle Point In Output File Paths}
-\label{PutCycleTimeinIO}
-
-Putting task cycle point in output file or directory names makes archiving and
-cleanup easier, and it facilitates re-runnability by ensuring that important
-files do not get overwritten from one cycle to the next.
-
-The \lstinline=cylc cycle-point= command computes offsets from a given or
-current cycle point, and can insert the resulting computed date-time into
-a filename template string.
-
-\subsection{Managing Input/Output File Dependencies}
-\label{HandlingDependencies}
-
-Dependence between tasks usually, although not always, take the form of
-files generated by one task and used by others. It is possible to manage these
-files across a suite without compromising suite flexibility and portability with
-hard wired I/O locations.
-
-\subsubsection{Common I/O Workspaces}
-
-You may be able to have all tasks, or groups of tasks that need to cooperate,
-read and write from a common workspace, thereby avoiding the need to explicitly
-move files around. The suite share directory
-(\lstinline=$CYLC_SUITE_SHARE_DIR=) is provided for this purpose.
-Similarly, task work directories are private to each task by default but they
-can be shared to allow multiple tasks to simply read and write from their
-current working directory. Even if you use other custom I/O directories, define
-their locations in the suite.rc file rather than hard wiring them into task
-implementation. Shared workspace locations can be passed to tasks as needed
-without modifying the task implementation, like this:
-
-\lstset{language=suiterc}
-\begin{lstlisting}
-[runtime]
-   [[SHARE]]
-      [[[environment]]]
-         WORKDIR = $CYLC_SUITE_SHARE_DIR/workspace1
-   [[foo]]
-      inherit = SHARE
-      script = generate-data.exe
-      [[[environment]]]
-         MY_OUTPUT_DIR = $WORKDIR
-   [[bar]]
-      inherit = SHARE
-      script = use-data.exe
-      [[[environment]]]
-         MY_INPUT_DIR = $WORKDIR
-\end{lstlisting}
-
-\subsubsection{Connector Tasks}
-
-Special tasks can be used to move files around, from one task's output
-directory to another's input directory. This should only be necessary across
-host or filesystem boundaries, however; otherwise simply reference shared
-locations as shown above.
-
-\subsection{Reuse Task Implementation}
-
-If your suite contains multiple logically distinct tasks that have similar
-functionality (e.g.\ tasks that move files around or generate similar products
-from different datasets) just have them all call the same underlying command,
-script, or executable, but provide different input parameters as required.
-
-
-\subsection{Make Suites Portable}
-
-If all I/O is automatically done in suite-specific locations, such as the under
-suite share and work directories (\lstinline=$CYLC_SUITE_SHARE_DIRECTORY=
-and \lstinline=$CYLC_TASK_WORK_DIRECTORY=), you should be able to run multiple
-copies of the same suite without interference between them, and other users
-should be able to copy and run your suites with minimal modification.
-
-\subsection{Make Tasks As Independent As Possible}
-
-Where possible a task should not rely on the action of another task, except for
-the inputs embodied in the suite dependency graph that it has no choice but to
-depend on. This makes it as easy as possible to run single tasks alone during
-suite development and debugging. For example, tasks should create their own
-output directories if necessary rather than assuming their existence due to the
-action of another task. Note that if the existing task implementation does not
-handle output directory creation you can do it in suite
-\lstinline=pre-script= or similar.
-
-\subsection{Make Suites As Self-Contained As Possible}
-
-Tasks can (of course) run external commands, scripts, and executables; and they
-can read or otherwise make use of external files. In some cases this may be
-necessary, but it does leave suites vulnerable to external breakages.
-Alternatively, suites can be more or less completely self-contained (aside from
-exposure to network, filesystem, and OS problems) if they have private copies
-of every file they need at run time. Tasks can access files stored under their
-suite definition directory via \lstinline=$CYLC_SUITE_DEF_PATH=, and the suite
-bin directory is automatically added to \lstinline=$PATH= in the task execution
-environment. If you have multiple suites there may be a tradeoff between
-self-containment and duplication of files, but this does not particularly
-matter if you can automatically extract, build, and install suite files from
-external repositories prior to, or at the start of, a suite run.
-
-\subsubsection{Distinguish Between Source and Installed Suites}
-
-A suite definition and any files stored with it should be version controlled,
-and a particular revision extracted before a run. The extracted source suite
-will be a repository clone or working copy, depending on your choice of
-revision control software, and can be used for further development. The source
-files should then be {\em installed} to another location where the suite will
-actually be executed (the cylc suite run directory is ideal for this).
-External files may also be installed into the suite at this time, prior to the
-run, or by special deployment tasks that run at suite start-up. This makes
-self-containment easier to achieve, and the clean separation of source and
-installed suite allows further development without breaking a running suite.
-Rose (\ref{Rose}) supports this mode of working with cylc suites.
-
-
-\subsection{Orderly Product Generation?}
-\label{OrderlyProductGeneration}
-
-Correct scheduling is not necessarily equivalent to orderly generation of
-products in strict date-time order. Under cylc a product generation task will
-trigger as soon as its private prerequisites are satisfied regardless of
-whether other tasks at the same cycle point have finished or have yet to run.
-If your product presentation system demands that all products are uploaded in
-order, then be aware that this may be quite inefficient if your suite ever has
-to catch up from a delay or run over historical data, but if necessary you can
-force tasks to run in the right order even if their true dependencies do not
-require that. One way to do this is to declare the product upload task to be
-{\em sequential}, which is equivalent to making it depend on its own previous
-instance (see~\ref{SequentialTasks}).
-
-\subsection{Use Of Clock-Trigger Tasks}
-
-Tasks that wait on external real time data should have a clock-trigger to delay
-submission until roughly the expected time of data availability
-(see~\ref{ClockTriggerTasks}), otherwise they could clutter up your batch
-scheduler queue by submitting hours earlier. Similarly, suite polling tasks
-(for inter-suite dependence in real time operation) should use a clock-trigger
-to delay their submission until the expected time of the remote suite event.
-
-\subsection{Tasks That Wait On Something}
-
-Some tasks wait on external events and therefore need to repeatedly check and
-wait for the event before reporting eventual success (or perhaps failure after
-a timeout). For example, a task that waits for a file to appear on an ftp
-server. Typically these should be clock-trigger tasks (see above), but once
-triggered there are two ways to handle the repeated checking: the task itself
-could implement a check-and-wait loop; or you could just configure multiple
-retries for the task (see~\ref{TaskRetries}).
-
-\subsection{Do Not Treat Real Time Operation As Special}
-
-Cylc suites, without modification, can handle real time and delayed operation
-equally well. In caught-up real time operation, clock-trigger tasks
-constrain the behaviour of the whole suite, or at least of any tasks
-downstream of them in the dependency graph. In delayed or historical operation
-clock-trigger tasks will not constrain the suite at all, and cylc's cycle
-point interleaving abilities come to the fore, because the clock-trigger times
-have already passed. But if a clock-trigger task catches up to the wall
-clock, it will automatically wait again. In this way cylc suites naturally
-transition between delayed and real time operation as required.
-
-\subsection{Factor Out Common Configuration}
-
-To help avoid suite maintenance errors in the future, properties shared by
-multiple tasks (job submission settings, environment variables, scripting,
-etc.) should be defined only once, using runtime inheritance
-(\ref{NIORP}) or Jinja2 variables (\ref{Jinja2}).
-
-Multiple inheritance is efficient when tasks share many properties, but Jinja2
-variables may be preferred when a small number of properties are shared by
-tasks that don't have anything else in common (e.g.\ a single environment
-variable for the location of a shared file).
-
-For environment variables in particular it may be tempting to define all
-variables for all tasks once under \lstinline=[root]=, but this is analagous to
-overuse of global variables in programming and it can make it difficult to
-determine which variables matter to which tasks. Environment filters
-(\ref{EnvironmentFilter}) can be used to make this safer, but generally
-it is best to provide each task with only the variables that it needs. It is
-difficult to be sure if a task really needs a variable that is passed to it,
-but you can be sure that it does not use a variable that is not passed to it.
-
-Finally, Jinja2 can also be used to avoid polluting task environments with
-variables used for the sole purpose of deriving other variables at task
-run time. Instead of this:
-\lstset{language=suiterc}
-\begin{lstlisting}
-[runtime]
-    [[root]]
-        [[[environment]]]
-            OUTPUT_DIR=/my/top/outputdir
-    [[foo]]
-        [[[environment]]]
-            FOO_OUTPUT_DIR=$OUTPUT_DIR/foo
-            BAR_OUTPUT_DIR=$OUTPUT_DIR/bar
-\end{lstlisting}
-        do this:
-\lstset{language=suiterc}
-\begin{lstlisting}
-{% set OUTPUT_DIR = "/my/top/outputdir" %}
-[runtime]
-    [[foo]]
-        [[[environment]]]
-            FOO_OUTPUT_DIR={{ OUTPUT_DIR }}/foo
-            BAR_OUTPUT_DIR={{ OUTPUT_DIR }}/bar
-\end{lstlisting}
-
-If the values of these Jinja2 variables are needed in external
-scripts, just translate them directly in environment sections:
-\lstset{language=suiterc}
-\begin{lstlisting}
-    [[[environment]]]
-        OUTPUT_DIR = {{ OUTPUT_DIR }}
-\end{lstlisting}
-
-
-\subsection{Use The Graph For Scheduling}
-
-If you find yourself writing runtime scripting to change a task's behaviour
-in some cycle points, consider that the graph is usually the proper place to
-express this sort of thing. Use different task names, but have them inherit
-common properties to avoid duplication. Instead of this:
-\lstset{language=suiterc}
-\begin{lstlisting}
-[scheduling]
-    [[dependencies]]
-        [[[T00,T06,T12,T18]]]
-            graph = "foo => shout => baz"
-[runtime]
-    [[shout]]
-        script = """
-if [[ $( cylc cycle-point --print-hour ) == 06 || \
-      $( cylc cycle-point --print-hour ) == 18 ]]; then
-    SENTENCE="the quick brown fox"
-else
-    SENTENCE="the lazy dog"
-fi
-echo $SENTENCE"""
-        # (...other config...)
-\end{lstlisting}
-        do this:
-\lstset{language=suiterc}
-\begin{lstlisting}
-[scheduling]
-    [[dependencies]]
-        [[[T00,T12]]]
-            graph = "foo => shout_dog => baz"
-        [[[T06,T18]]]
-            graph = "foo => shout_fox => baz"
-[runtime]
-    [[SHOUT]]
-        # (... other config...)
-        script = "echo $SENTENCE"
-    [[shout_fox]]
-        inherit = SHOUT
-        [[[environment]]]
-            SENTENCE = "the quick brown fox"
-    [[shout_dog]]
-        inherit = SHOUT
-        [[[environment]]]
-            SENTENCE = "the lazy dog"
-\end{lstlisting}
-
-Similarly, if your task has a different behaviour at the initial or final
-cycle point, consider using an \lstinline=R1= syntax to separate out the
-functionality.
-
-\subsection{Use Suite Visualization}
-
-Effective visualization can make complex suites easier to understand.
-Collapsible task families for visualization are defined by the {\em first
-parents} in the runtime namespace hierarchy. Tasks should generally be grouped
-into visualization families that reflect their purpose within the structure of
-the suite rather than technical detail such as common batch system or
-task host. This often coincides nicely with common configuration inheritance
-requirements, but if it doesn't you can use an empty namespace as a first
-parent for visualization:
-\lstset{language=suiterc}
-\begin{lstlisting}
-[runtime]
-    [[OBSPROC]]
-    [[obs1, obs2, obs3]]
-        inherit = OBSPROC
-\end{lstlisting}
-    and you can demote parents from primary to secondary:
-\lstset{language=suiterc}
-\begin{lstlisting}
-[runtime]
-    [[HOSTX]]
-        # common settings for tasks on host HOSTX
-    [[foo]]
-        inherit = None, HOSTX
-\end{lstlisting}
-
-\section{Style Guide}
-
-Good style is to some extent a matter of taste. That said, for collaborative
-development of complex systems it is important to settle on a clear and
-consistent style, and you may find the following suggestions useful. Note that
-the boundary between this section (style) and the previous (design) is somewhat
-arbitrary.
-
-\subsection{Indentation}
-
-The suite.rc file format consists of \lstinline at item = value@ pairs
-under nested section headings. Clear indentation is the best way to show
-local nesting level inside large blocks.
-
-\begin{myitemize}
-    \item Indent suite.rc syntax four spaces per nesting level.
-
-\lstset{language=suiterc}
-\begin{lstlisting}
-[SECTION]
-    title = the quick brown fox
-    [[SUBSECTION]]
-        a short item = value1
-        a very very long item = value2
-\end{lstlisting}
-Don't align \lstinline at item = value@ pairs on the \lstinline@=@ character -
-this does not show nesting level clearly and it pushes everything off to
-the right:
-\lstset{language=suiterc}
-\begin{lstlisting}
-[SECTION]
-             a short item = value1
-    a very very long item = value2
-\end{lstlisting}
-
-The following layout does preserve proper indentation on the left,
-but the whole block may need reformatting after changing one line, which
-pollutes your revision history with spurious changes:
-\lstset{language=suiterc}
-\begin{lstlisting}
-[SECTION]
-    a short item          = value1
-    a very very long item = value2
-\end{lstlisting}
-
-    \item Set your text editor to convert {\em TAB characters} to spaces -
-        tabs may be displayed differently in different editors, so a
-        mixture of space and tab indentations can render to a mess.
-
-    \item {\em Line comments} should be indented to the same level as the
-        section or item they refer to. Consistent local indentation makes
-        block re-indentation operations easier in text editors.
-
-    \item  {\em script strings} are interpreted by the
-        associated task job script, not by cylc, so strictly speaking
-        their internal lines should not be indented as if part of the
-        suite.rc syntax. This, for example:
-\lstset{language=suiterc}
-\begin{lstlisting}
-[runtime]
-    [[foo]]
-        script = \
-"""echo Hello World!
-echo Goodbye World!"""
-\end{lstlisting}
-is preferred over this (or similar):
-\lstset{language=suiterc}
-\begin{lstlisting}
-[runtime]
-    [[foo]]
-        script ="""
-            echo Hello World!
-            echo Goodbye World!
-                           """
-\end{lstlisting}
-        The extra whitespace here translates directly to spurious
-        indentation in the task job script. As it happens this is just
-        an aesthetic problem in bash scripts, but for Python job scripts
-        (which cylc may support in the future) it would be a technical error.
-
-    \item The positioning of string-delimiting triple quotes is of no
-        practical consequence either, but the following forms are
-        suggested for the same reason - to avoid including spurious
-        whitespace in the string:
-\begin{lstlisting}
-[runtime]
-    [[foo]]
-        # best:
-        script = \
-"""echo Hello World!
-echo Goodbye World!"""
-        # or (short first line):
-        script ="""echo Hello World!
-echo Goodbye World!"""
-        # or (adds a single extra newline character):
-        script ="""
-echo Hello World!
-echo Goodbye World!"""
-\end{lstlisting}
-
-    \item Multiline dependency \lstinline at graph@ strings have no meaning
-        outside of the suite definition, so they can be free-form in
-        order to most clearly present the structure of the suite:
-\lstset{language=suiterc}
-\begin{lstlisting}
-[scheduling]
-    [[dependencies]]
-        graph = """
-   foo => bar => baz => qux
-     # failure recovery:
-     qux:fail => recover
-                """
-\end{lstlisting}
-
-   \item Embedded {\em Jinja2} code is not part of the suite.rc syntax, so
-       it should be indented from the left margin on its own terms.
-\lstset{language=suiterc}
-\begin{lstlisting}
-    [[OPS]]
-{% for T in OPS_TASKS %}
-    [[ops_{{T}}]]
-        inherit = OPS
-        # ...
-{% endfor %}
-\end{lstlisting}
-   \end{myitemize}
-
-\subsection{Comments}
-
-Comments should be {\em minimal}, but not too minimal. If context and clear
-item names will do, leave it at that. Extremely verbose comments tend to be
-neglected and eventually get out of sync with the code, a result that may be
-worse than having no comments at all.
-
-    \begin{myitemize}
-        \item {\em Indent line comments} to section or item level, as
-            described above.
-
-        \item Avoid {\em numbered comments} - future changes can create a
-            renumbering nightmare.
-
-        \item Avoid {\em full page width ``section divider'' comments} -
-            these assume a particular line width, which can be a problem
-            for text editors that auto line break on a smaller line width.
-
-        \item Use the \lstinline=title= and \lstinline=description= items
-            instead of comments to describe tasks and families under
-            \lstinline=[runtime]= - these get displayed by mouse hover in
-            gcylc.
-
-    \end{myitemize}
-
-\subsection{Line Length}
-
-Keep to the standard maximum line length of 79 characters where possible. Very
-long lines affect readability, may pose a problem for auto-line-breaking in
-text editors, and make side-by-side diff display less effective.
-
-\begin{myitemize}
-    \item Line continuation markers can be used anywhere to break up long
-        lines:
-\lstset{language=suiterc}
-\begin{lstlisting}
-[scheduling]
-    [[dependencies]]
-        graph = "prep => one => two => three \
-            => four => five six => seven => eight"
-[runtime]
-    [[MY_TASKS]]
-    [[one, two, three, four, five, \
-        six, seven, eight ]]
-        inherit = MY_TASKS
-\end{lstlisting}
-Graph lines can also be split up without line breaks, like this:
-\lstset{language=suiterc}
-\begin{lstlisting}
-[scheduling]
-    [[dependencies]]
-        graph = """prep => one => two => three => four
-                   four => five six => seven => eight"""
-\end{lstlisting}
-
-\end{myitemize}
-
-\subsection{Task Naming Convention}
-
-Use \lstinline=UPPERCASE_NAMES= for families and \lstinline=lowercase_names=
-for tasks, so that you can tell which is which at a glance.
-
-\begin{myitemize}
-    \item Put the most general components of task names first, for natural
-        grouping in the GUI (under alphanumeric sorting) and in listings, e.g.\
-        \lstinline=obsproc_sonde=, \lstinline=obsproc_radar=.
-\end{myitemize}
-
-\subsection{Inlined Task Scripting}
-
-Trivial task scripting may be inlined in the suite definition but anything
-more should be written to a script file. This keeps the suite definition tidy,
-it allows proper shell-mode text editing, and it allows separate command line
-testing of the script during development or debugging.
-
 \pagebreak
 
 \appendix
@@ -7587,12 +7121,12 @@ mitigate any jumping layout problems:
 
 \section{Cylc README File}
 
-\lstinputlisting{../README.md}
+\lstinputlisting{../../../README.md}
 
 \section{Cylc INSTALL File}
 \label{INSTALL}
 
-\lstinputlisting{../INSTALL.md}
+\lstinputlisting{../../../INSTALL.md}
 
 \section{Cylc Development History - Major Changes}
 
diff --git a/doc/gcylcrc.tex b/doc/src/cylc-user-guide/gcylcrc.tex
similarity index 99%
rename from doc/gcylcrc.tex
rename to doc/src/cylc-user-guide/gcylcrc.tex
index f4375b5..d30aa41 100644
--- a/doc/gcylcrc.tex
+++ b/doc/src/cylc-user-guide/gcylcrc.tex
@@ -180,7 +180,7 @@ to list your available themes.
 Sets the size (in pixels) of the cylc GUI at startup.
 
 \begin{myitemize}
-    \item {\em type:} integer list (x, y)
+    \item {\em type:} integer list: x, y
     \item {\em legal values:} positive integers
     \item {\em default:} 800, 500
     \item {\em example:} \lstinline at window size = 1000, 700@
diff --git a/doc/gpl-3.0.tex b/doc/src/cylc-user-guide/gpl-3.0.tex
similarity index 100%
rename from doc/gpl-3.0.tex
rename to doc/src/cylc-user-guide/gpl-3.0.tex
diff --git a/doc/graphics/png/orig/QuickStartA-ControlRunning.png b/doc/src/cylc-user-guide/graphics/png/orig/QuickStartA-ControlRunning.png
similarity index 100%
rename from doc/graphics/png/orig/QuickStartA-ControlRunning.png
rename to doc/src/cylc-user-guide/graphics/png/orig/QuickStartA-ControlRunning.png
diff --git a/doc/graphics/png/orig/QuickStartA-ControlStalled.png b/doc/src/cylc-user-guide/graphics/png/orig/QuickStartA-ControlStalled.png
similarity index 100%
rename from doc/graphics/png/orig/QuickStartA-ControlStalled.png
rename to doc/src/cylc-user-guide/graphics/png/orig/QuickStartA-ControlStalled.png
diff --git a/doc/graphics/png/orig/QuickStartA-ControlStart00.png b/doc/src/cylc-user-guide/graphics/png/orig/QuickStartA-ControlStart00.png
similarity index 100%
rename from doc/graphics/png/orig/QuickStartA-ControlStart00.png
rename to doc/src/cylc-user-guide/graphics/png/orig/QuickStartA-ControlStart00.png
diff --git a/doc/graphics/png/orig/QuickStartA-ControlStart06.png b/doc/src/cylc-user-guide/graphics/png/orig/QuickStartA-ControlStart06.png
similarity index 100%
rename from doc/graphics/png/orig/QuickStartA-ControlStart06.png
rename to doc/src/cylc-user-guide/graphics/png/orig/QuickStartA-ControlStart06.png
diff --git a/doc/graphics/png/orig/QuickStartA-ModelState.png b/doc/src/cylc-user-guide/graphics/png/orig/QuickStartA-ModelState.png
similarity index 100%
rename from doc/graphics/png/orig/QuickStartA-ModelState.png
rename to doc/src/cylc-user-guide/graphics/png/orig/QuickStartA-ModelState.png
diff --git a/doc/graphics/png/orig/QuickStartA-graph18.png b/doc/src/cylc-user-guide/graphics/png/orig/QuickStartA-graph18.png
similarity index 100%
rename from doc/graphics/png/orig/QuickStartA-graph18.png
rename to doc/src/cylc-user-guide/graphics/png/orig/QuickStartA-graph18.png
diff --git a/doc/graphics/png/orig/QuickStartB-graph18.png b/doc/src/cylc-user-guide/graphics/png/orig/QuickStartB-graph18.png
similarity index 100%
rename from doc/graphics/png/orig/QuickStartB-graph18.png
rename to doc/src/cylc-user-guide/graphics/png/orig/QuickStartB-graph18.png
diff --git a/doc/graphics/png/orig/conditional-triggers.png b/doc/src/cylc-user-guide/graphics/png/orig/conditional-triggers.png
similarity index 100%
rename from doc/graphics/png/orig/conditional-triggers.png
rename to doc/src/cylc-user-guide/graphics/png/orig/conditional-triggers.png
diff --git a/doc/graphics/png/orig/dep-eg-1.png b/doc/src/cylc-user-guide/graphics/png/orig/dep-eg-1.png
similarity index 100%
rename from doc/graphics/png/orig/dep-eg-1.png
rename to doc/src/cylc-user-guide/graphics/png/orig/dep-eg-1.png
diff --git a/doc/graphics/png/orig/dep-multi-cycle.png b/doc/src/cylc-user-guide/graphics/png/orig/dep-multi-cycle.png
similarity index 100%
rename from doc/graphics/png/orig/dep-multi-cycle.png
rename to doc/src/cylc-user-guide/graphics/png/orig/dep-multi-cycle.png
diff --git a/doc/graphics/png/orig/dep-one-cycle.png b/doc/src/cylc-user-guide/graphics/png/orig/dep-one-cycle.png
similarity index 100%
rename from doc/graphics/png/orig/dep-one-cycle.png
rename to doc/src/cylc-user-guide/graphics/png/orig/dep-one-cycle.png
diff --git a/doc/graphics/png/orig/dep-two-cycles-linked.png b/doc/src/cylc-user-guide/graphics/png/orig/dep-two-cycles-linked.png
similarity index 100%
rename from doc/graphics/png/orig/dep-two-cycles-linked.png
rename to doc/src/cylc-user-guide/graphics/png/orig/dep-two-cycles-linked.png
diff --git a/doc/graphics/png/orig/dep-two-cycles.png b/doc/src/cylc-user-guide/graphics/png/orig/dep-two-cycles.png
similarity index 100%
rename from doc/graphics/png/orig/dep-two-cycles.png
rename to doc/src/cylc-user-guide/graphics/png/orig/dep-two-cycles.png
diff --git a/doc/graphics/png/orig/ecox-1.png b/doc/src/cylc-user-guide/graphics/png/orig/ecox-1.png
similarity index 100%
rename from doc/graphics/png/orig/ecox-1.png
rename to doc/src/cylc-user-guide/graphics/png/orig/ecox-1.png
diff --git a/doc/graphics/png/orig/eg2-dynamic.png b/doc/src/cylc-user-guide/graphics/png/orig/eg2-dynamic.png
similarity index 100%
rename from doc/graphics/png/orig/eg2-dynamic.png
rename to doc/src/cylc-user-guide/graphics/png/orig/eg2-dynamic.png
diff --git a/doc/graphics/png/orig/eg2-static.png b/doc/src/cylc-user-guide/graphics/png/orig/eg2-static.png
similarity index 100%
rename from doc/graphics/png/orig/eg2-static.png
rename to doc/src/cylc-user-guide/graphics/png/orig/eg2-static.png
diff --git a/doc/graphics/png/orig/gcylc-graph-and-dot-views.png b/doc/src/cylc-user-guide/graphics/png/orig/gcylc-graph-and-dot-views.png
similarity index 100%
rename from doc/graphics/png/orig/gcylc-graph-and-dot-views.png
rename to doc/src/cylc-user-guide/graphics/png/orig/gcylc-graph-and-dot-views.png
diff --git a/doc/graphics/png/orig/gcylc-text-view.png b/doc/src/cylc-user-guide/graphics/png/orig/gcylc-text-view.png
similarity index 100%
rename from doc/graphics/png/orig/gcylc-text-view.png
rename to doc/src/cylc-user-guide/graphics/png/orig/gcylc-text-view.png
diff --git a/doc/graphics/png/orig/gscan.png b/doc/src/cylc-user-guide/graphics/png/orig/gscan.png
similarity index 100%
rename from doc/graphics/png/orig/gscan.png
rename to doc/src/cylc-user-guide/graphics/png/orig/gscan.png
diff --git a/doc/graphics/png/orig/inherit-2.png b/doc/src/cylc-user-guide/graphics/png/orig/inherit-2.png
similarity index 100%
rename from doc/graphics/png/orig/inherit-2.png
rename to doc/src/cylc-user-guide/graphics/png/orig/inherit-2.png
diff --git a/doc/graphics/png/orig/inherit-3.png b/doc/src/cylc-user-guide/graphics/png/orig/inherit-3.png
similarity index 100%
rename from doc/graphics/png/orig/inherit-3.png
rename to doc/src/cylc-user-guide/graphics/png/orig/inherit-3.png
diff --git a/doc/graphics/png/orig/inherit-4.png b/doc/src/cylc-user-guide/graphics/png/orig/inherit-4.png
similarity index 100%
rename from doc/graphics/png/orig/inherit-4.png
rename to doc/src/cylc-user-guide/graphics/png/orig/inherit-4.png
diff --git a/doc/graphics/png/orig/inherit-5.png b/doc/src/cylc-user-guide/graphics/png/orig/inherit-5.png
similarity index 100%
rename from doc/graphics/png/orig/inherit-5.png
rename to doc/src/cylc-user-guide/graphics/png/orig/inherit-5.png
diff --git a/doc/graphics/png/orig/inherit-6.png b/doc/src/cylc-user-guide/graphics/png/orig/inherit-6.png
similarity index 100%
rename from doc/graphics/png/orig/inherit-6.png
rename to doc/src/cylc-user-guide/graphics/png/orig/inherit-6.png
diff --git a/doc/graphics/png/orig/inherit-7.png b/doc/src/cylc-user-guide/graphics/png/orig/inherit-7.png
similarity index 100%
rename from doc/graphics/png/orig/inherit-7.png
rename to doc/src/cylc-user-guide/graphics/png/orig/inherit-7.png
diff --git a/doc/graphics/png/orig/jinja2-ensemble-graph.png b/doc/src/cylc-user-guide/graphics/png/orig/jinja2-ensemble-graph.png
similarity index 100%
rename from doc/graphics/png/orig/jinja2-ensemble-graph.png
rename to doc/src/cylc-user-guide/graphics/png/orig/jinja2-ensemble-graph.png
diff --git a/doc/graphics/png/orig/jinja2-suite-graph.png b/doc/src/cylc-user-guide/graphics/png/orig/jinja2-suite-graph.png
similarity index 100%
rename from doc/graphics/png/orig/jinja2-suite-graph.png
rename to doc/src/cylc-user-guide/graphics/png/orig/jinja2-suite-graph.png
diff --git a/doc/graphics/png/orig/logo.png b/doc/src/cylc-user-guide/graphics/png/orig/logo.png
similarity index 100%
rename from doc/graphics/png/orig/logo.png
rename to doc/src/cylc-user-guide/graphics/png/orig/logo.png
diff --git a/doc/graphics/png/orig/niwa-colour-small.png b/doc/src/cylc-user-guide/graphics/png/orig/niwa-colour-small.png
similarity index 100%
rename from doc/graphics/png/orig/niwa-colour-small.png
rename to doc/src/cylc-user-guide/graphics/png/orig/niwa-colour-small.png
diff --git a/doc/graphics/png/orig/niwa-colour.png b/doc/src/cylc-user-guide/graphics/png/orig/niwa-colour.png
similarity index 100%
rename from doc/graphics/png/orig/niwa-colour.png
rename to doc/src/cylc-user-guide/graphics/png/orig/niwa-colour.png
diff --git a/doc/graphics/png/orig/params1.png b/doc/src/cylc-user-guide/graphics/png/orig/params1.png
similarity index 100%
rename from doc/graphics/png/orig/params1.png
rename to doc/src/cylc-user-guide/graphics/png/orig/params1.png
diff --git a/doc/graphics/png/orig/satellite.png b/doc/src/cylc-user-guide/graphics/png/orig/satellite.png
similarity index 100%
rename from doc/graphics/png/orig/satellite.png
rename to doc/src/cylc-user-guide/graphics/png/orig/satellite.png
diff --git a/doc/graphics/png/orig/suicide.png b/doc/src/cylc-user-guide/graphics/png/orig/suicide.png
similarity index 100%
rename from doc/graphics/png/orig/suicide.png
rename to doc/src/cylc-user-guide/graphics/png/orig/suicide.png
diff --git a/doc/graphics/png/orig/suite-log.png b/doc/src/cylc-user-guide/graphics/png/orig/suite-log.png
similarity index 100%
rename from doc/graphics/png/orig/suite-log.png
rename to doc/src/cylc-user-guide/graphics/png/orig/suite-log.png
diff --git a/doc/graphics/png/orig/suite-output.png b/doc/src/cylc-user-guide/graphics/png/orig/suite-output.png
similarity index 100%
rename from doc/graphics/png/orig/suite-output.png
rename to doc/src/cylc-user-guide/graphics/png/orig/suite-output.png
diff --git a/doc/graphics/png/orig/suiterc-jinja2.png b/doc/src/cylc-user-guide/graphics/png/orig/suiterc-jinja2.png
similarity index 100%
rename from doc/graphics/png/orig/suiterc-jinja2.png
rename to doc/src/cylc-user-guide/graphics/png/orig/suiterc-jinja2.png
diff --git a/doc/graphics/png/orig/task-pool.png b/doc/src/cylc-user-guide/graphics/png/orig/task-pool.png
similarity index 100%
rename from doc/graphics/png/orig/task-pool.png
rename to doc/src/cylc-user-guide/graphics/png/orig/task-pool.png
diff --git a/doc/graphics/png/orig/test1.png b/doc/src/cylc-user-guide/graphics/png/orig/test1.png
similarity index 100%
rename from doc/graphics/png/orig/test1.png
rename to doc/src/cylc-user-guide/graphics/png/orig/test1.png
diff --git a/doc/graphics/png/orig/test2.png b/doc/src/cylc-user-guide/graphics/png/orig/test2.png
similarity index 100%
rename from doc/graphics/png/orig/test2.png
rename to doc/src/cylc-user-guide/graphics/png/orig/test2.png
diff --git a/doc/graphics/png/orig/test4.png b/doc/src/cylc-user-guide/graphics/png/orig/test4.png
similarity index 100%
rename from doc/graphics/png/orig/test4.png
rename to doc/src/cylc-user-guide/graphics/png/orig/test4.png
diff --git a/doc/graphics/png/orig/test5.png b/doc/src/cylc-user-guide/graphics/png/orig/test5.png
similarity index 100%
rename from doc/graphics/png/orig/test5.png
rename to doc/src/cylc-user-guide/graphics/png/orig/test5.png
diff --git a/doc/graphics/png/orig/test6.png b/doc/src/cylc-user-guide/graphics/png/orig/test6.png
similarity index 100%
rename from doc/graphics/png/orig/test6.png
rename to doc/src/cylc-user-guide/graphics/png/orig/test6.png
diff --git a/doc/graphics/png/orig/timeline-one-a.png b/doc/src/cylc-user-guide/graphics/png/orig/timeline-one-a.png
similarity index 100%
rename from doc/graphics/png/orig/timeline-one-a.png
rename to doc/src/cylc-user-guide/graphics/png/orig/timeline-one-a.png
diff --git a/doc/graphics/png/orig/timeline-one-c.png b/doc/src/cylc-user-guide/graphics/png/orig/timeline-one-c.png
similarity index 100%
rename from doc/graphics/png/orig/timeline-one-c.png
rename to doc/src/cylc-user-guide/graphics/png/orig/timeline-one-c.png
diff --git a/doc/graphics/png/orig/timeline-one.png b/doc/src/cylc-user-guide/graphics/png/orig/timeline-one.png
similarity index 100%
rename from doc/graphics/png/orig/timeline-one.png
rename to doc/src/cylc-user-guide/graphics/png/orig/timeline-one.png
diff --git a/doc/graphics/png/orig/timeline-three.png b/doc/src/cylc-user-guide/graphics/png/orig/timeline-three.png
similarity index 100%
rename from doc/graphics/png/orig/timeline-three.png
rename to doc/src/cylc-user-guide/graphics/png/orig/timeline-three.png
diff --git a/doc/graphics/png/orig/timeline-two-cycles-optimal.png b/doc/src/cylc-user-guide/graphics/png/orig/timeline-two-cycles-optimal.png
similarity index 100%
rename from doc/graphics/png/orig/timeline-two-cycles-optimal.png
rename to doc/src/cylc-user-guide/graphics/png/orig/timeline-two-cycles-optimal.png
diff --git a/doc/graphics/png/orig/timeline-two.png b/doc/src/cylc-user-guide/graphics/png/orig/timeline-two.png
similarity index 100%
rename from doc/graphics/png/orig/timeline-two.png
rename to doc/src/cylc-user-guide/graphics/png/orig/timeline-two.png
diff --git a/doc/graphics/png/orig/timeline-zero.png b/doc/src/cylc-user-guide/graphics/png/orig/timeline-zero.png
similarity index 100%
rename from doc/graphics/png/orig/timeline-zero.png
rename to doc/src/cylc-user-guide/graphics/png/orig/timeline-zero.png
diff --git a/doc/graphics/png/orig/tut-cyc-int.png b/doc/src/cylc-user-guide/graphics/png/orig/tut-cyc-int.png
similarity index 100%
rename from doc/graphics/png/orig/tut-cyc-int.png
rename to doc/src/cylc-user-guide/graphics/png/orig/tut-cyc-int.png
diff --git a/doc/graphics/png/orig/tut-four.png b/doc/src/cylc-user-guide/graphics/png/orig/tut-four.png
similarity index 100%
rename from doc/graphics/png/orig/tut-four.png
rename to doc/src/cylc-user-guide/graphics/png/orig/tut-four.png
diff --git a/doc/graphics/png/orig/tut-hello-multi-1.png b/doc/src/cylc-user-guide/graphics/png/orig/tut-hello-multi-1.png
similarity index 100%
rename from doc/graphics/png/orig/tut-hello-multi-1.png
rename to doc/src/cylc-user-guide/graphics/png/orig/tut-hello-multi-1.png
diff --git a/doc/graphics/png/orig/tut-hello-multi-2.png b/doc/src/cylc-user-guide/graphics/png/orig/tut-hello-multi-2.png
similarity index 100%
rename from doc/graphics/png/orig/tut-hello-multi-2.png
rename to doc/src/cylc-user-guide/graphics/png/orig/tut-hello-multi-2.png
diff --git a/doc/graphics/png/orig/tut-hello-multi-3.png b/doc/src/cylc-user-guide/graphics/png/orig/tut-hello-multi-3.png
similarity index 100%
rename from doc/graphics/png/orig/tut-hello-multi-3.png
rename to doc/src/cylc-user-guide/graphics/png/orig/tut-hello-multi-3.png
diff --git a/doc/graphics/png/orig/tut-one.png b/doc/src/cylc-user-guide/graphics/png/orig/tut-one.png
similarity index 100%
rename from doc/graphics/png/orig/tut-one.png
rename to doc/src/cylc-user-guide/graphics/png/orig/tut-one.png
diff --git a/doc/graphics/png/orig/tut-three.png b/doc/src/cylc-user-guide/graphics/png/orig/tut-three.png
similarity index 100%
rename from doc/graphics/png/orig/tut-three.png
rename to doc/src/cylc-user-guide/graphics/png/orig/tut-three.png
diff --git a/doc/graphics/png/orig/tut-two.png b/doc/src/cylc-user-guide/graphics/png/orig/tut-two.png
similarity index 100%
rename from doc/graphics/png/orig/tut-two.png
rename to doc/src/cylc-user-guide/graphics/png/orig/tut-two.png
diff --git a/doc/graphics/png/scaled/QuickStartA-ControlRunning.png b/doc/src/cylc-user-guide/graphics/png/scaled/QuickStartA-ControlRunning.png
similarity index 100%
rename from doc/graphics/png/scaled/QuickStartA-ControlRunning.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/QuickStartA-ControlRunning.png
diff --git a/doc/graphics/png/scaled/QuickStartA-ControlStalled.png b/doc/src/cylc-user-guide/graphics/png/scaled/QuickStartA-ControlStalled.png
similarity index 100%
rename from doc/graphics/png/scaled/QuickStartA-ControlStalled.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/QuickStartA-ControlStalled.png
diff --git a/doc/graphics/png/scaled/QuickStartA-ControlStart00.png b/doc/src/cylc-user-guide/graphics/png/scaled/QuickStartA-ControlStart00.png
similarity index 100%
rename from doc/graphics/png/scaled/QuickStartA-ControlStart00.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/QuickStartA-ControlStart00.png
diff --git a/doc/graphics/png/scaled/QuickStartA-ControlStart06.png b/doc/src/cylc-user-guide/graphics/png/scaled/QuickStartA-ControlStart06.png
similarity index 100%
rename from doc/graphics/png/scaled/QuickStartA-ControlStart06.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/QuickStartA-ControlStart06.png
diff --git a/doc/graphics/png/scaled/QuickStartA-ModelState.png b/doc/src/cylc-user-guide/graphics/png/scaled/QuickStartA-ModelState.png
similarity index 100%
rename from doc/graphics/png/scaled/QuickStartA-ModelState.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/QuickStartA-ModelState.png
diff --git a/doc/graphics/png/scaled/QuickStartA-graph18.png b/doc/src/cylc-user-guide/graphics/png/scaled/QuickStartA-graph18.png
similarity index 100%
rename from doc/graphics/png/scaled/QuickStartA-graph18.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/QuickStartA-graph18.png
diff --git a/doc/graphics/png/scaled/QuickStartB-graph18.png b/doc/src/cylc-user-guide/graphics/png/scaled/QuickStartB-graph18.png
similarity index 100%
rename from doc/graphics/png/scaled/QuickStartB-graph18.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/QuickStartB-graph18.png
diff --git a/doc/graphics/png/scaled/conditional-triggers.png b/doc/src/cylc-user-guide/graphics/png/scaled/conditional-triggers.png
similarity index 100%
rename from doc/graphics/png/scaled/conditional-triggers.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/conditional-triggers.png
diff --git a/doc/graphics/png/scaled/dep-eg-1.png b/doc/src/cylc-user-guide/graphics/png/scaled/dep-eg-1.png
similarity index 100%
rename from doc/graphics/png/scaled/dep-eg-1.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/dep-eg-1.png
diff --git a/doc/graphics/png/scaled/dep-multi-cycle.png b/doc/src/cylc-user-guide/graphics/png/scaled/dep-multi-cycle.png
similarity index 100%
rename from doc/graphics/png/scaled/dep-multi-cycle.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/dep-multi-cycle.png
diff --git a/doc/graphics/png/scaled/dep-one-cycle.png b/doc/src/cylc-user-guide/graphics/png/scaled/dep-one-cycle.png
similarity index 100%
rename from doc/graphics/png/scaled/dep-one-cycle.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/dep-one-cycle.png
diff --git a/doc/graphics/png/scaled/dep-two-cycles-linked.png b/doc/src/cylc-user-guide/graphics/png/scaled/dep-two-cycles-linked.png
similarity index 100%
rename from doc/graphics/png/scaled/dep-two-cycles-linked.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/dep-two-cycles-linked.png
diff --git a/doc/graphics/png/scaled/dep-two-cycles.png b/doc/src/cylc-user-guide/graphics/png/scaled/dep-two-cycles.png
similarity index 100%
rename from doc/graphics/png/scaled/dep-two-cycles.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/dep-two-cycles.png
diff --git a/doc/graphics/png/scaled/ecox-1.png b/doc/src/cylc-user-guide/graphics/png/scaled/ecox-1.png
similarity index 100%
rename from doc/graphics/png/scaled/ecox-1.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/ecox-1.png
diff --git a/doc/graphics/png/scaled/eg2-dynamic.png b/doc/src/cylc-user-guide/graphics/png/scaled/eg2-dynamic.png
similarity index 100%
rename from doc/graphics/png/scaled/eg2-dynamic.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/eg2-dynamic.png
diff --git a/doc/graphics/png/scaled/eg2-static.png b/doc/src/cylc-user-guide/graphics/png/scaled/eg2-static.png
similarity index 100%
rename from doc/graphics/png/scaled/eg2-static.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/eg2-static.png
diff --git a/doc/graphics/png/scaled/gcylc-graph-and-dot-views.png b/doc/src/cylc-user-guide/graphics/png/scaled/gcylc-graph-and-dot-views.png
similarity index 100%
rename from doc/graphics/png/scaled/gcylc-graph-and-dot-views.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/gcylc-graph-and-dot-views.png
diff --git a/doc/graphics/png/scaled/gcylc-text-view.png b/doc/src/cylc-user-guide/graphics/png/scaled/gcylc-text-view.png
similarity index 100%
rename from doc/graphics/png/scaled/gcylc-text-view.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/gcylc-text-view.png
diff --git a/doc/graphics/png/scaled/gscan.png b/doc/src/cylc-user-guide/graphics/png/scaled/gscan.png
similarity index 100%
rename from doc/graphics/png/scaled/gscan.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/gscan.png
diff --git a/doc/graphics/png/scaled/inherit-2.png b/doc/src/cylc-user-guide/graphics/png/scaled/inherit-2.png
similarity index 100%
rename from doc/graphics/png/scaled/inherit-2.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/inherit-2.png
diff --git a/doc/graphics/png/scaled/inherit-3.png b/doc/src/cylc-user-guide/graphics/png/scaled/inherit-3.png
similarity index 100%
rename from doc/graphics/png/scaled/inherit-3.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/inherit-3.png
diff --git a/doc/graphics/png/scaled/inherit-4.png b/doc/src/cylc-user-guide/graphics/png/scaled/inherit-4.png
similarity index 100%
rename from doc/graphics/png/scaled/inherit-4.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/inherit-4.png
diff --git a/doc/graphics/png/scaled/inherit-5.png b/doc/src/cylc-user-guide/graphics/png/scaled/inherit-5.png
similarity index 100%
rename from doc/graphics/png/scaled/inherit-5.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/inherit-5.png
diff --git a/doc/graphics/png/scaled/inherit-6.png b/doc/src/cylc-user-guide/graphics/png/scaled/inherit-6.png
similarity index 100%
rename from doc/graphics/png/scaled/inherit-6.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/inherit-6.png
diff --git a/doc/graphics/png/scaled/inherit-7.png b/doc/src/cylc-user-guide/graphics/png/scaled/inherit-7.png
similarity index 100%
rename from doc/graphics/png/scaled/inherit-7.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/inherit-7.png
diff --git a/doc/graphics/png/scaled/jinja2-ensemble-graph.png b/doc/src/cylc-user-guide/graphics/png/scaled/jinja2-ensemble-graph.png
similarity index 100%
rename from doc/graphics/png/scaled/jinja2-ensemble-graph.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/jinja2-ensemble-graph.png
diff --git a/doc/graphics/png/scaled/jinja2-suite-graph.png b/doc/src/cylc-user-guide/graphics/png/scaled/jinja2-suite-graph.png
similarity index 100%
rename from doc/graphics/png/scaled/jinja2-suite-graph.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/jinja2-suite-graph.png
diff --git a/doc/graphics/png/scaled/logo.png b/doc/src/cylc-user-guide/graphics/png/scaled/logo.png
similarity index 100%
rename from doc/graphics/png/scaled/logo.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/logo.png
diff --git a/doc/graphics/png/scaled/niwa-colour-small.png b/doc/src/cylc-user-guide/graphics/png/scaled/niwa-colour-small.png
similarity index 100%
rename from doc/graphics/png/scaled/niwa-colour-small.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/niwa-colour-small.png
diff --git a/doc/graphics/png/scaled/niwa-colour.png b/doc/src/cylc-user-guide/graphics/png/scaled/niwa-colour.png
similarity index 100%
rename from doc/graphics/png/scaled/niwa-colour.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/niwa-colour.png
diff --git a/doc/graphics/png/scaled/params1.png b/doc/src/cylc-user-guide/graphics/png/scaled/params1.png
similarity index 100%
rename from doc/graphics/png/scaled/params1.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/params1.png
diff --git a/doc/graphics/png/scaled/satellite.png b/doc/src/cylc-user-guide/graphics/png/scaled/satellite.png
similarity index 100%
rename from doc/graphics/png/scaled/satellite.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/satellite.png
diff --git a/doc/graphics/png/scaled/suicide.png b/doc/src/cylc-user-guide/graphics/png/scaled/suicide.png
similarity index 100%
rename from doc/graphics/png/scaled/suicide.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/suicide.png
diff --git a/doc/graphics/png/scaled/suite-log.png b/doc/src/cylc-user-guide/graphics/png/scaled/suite-log.png
similarity index 100%
rename from doc/graphics/png/scaled/suite-log.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/suite-log.png
diff --git a/doc/graphics/png/scaled/suite-output.png b/doc/src/cylc-user-guide/graphics/png/scaled/suite-output.png
similarity index 100%
rename from doc/graphics/png/scaled/suite-output.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/suite-output.png
diff --git a/doc/graphics/png/scaled/suiterc-jinja2.png b/doc/src/cylc-user-guide/graphics/png/scaled/suiterc-jinja2.png
similarity index 100%
rename from doc/graphics/png/scaled/suiterc-jinja2.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/suiterc-jinja2.png
diff --git a/doc/graphics/png/scaled/task-pool.png b/doc/src/cylc-user-guide/graphics/png/scaled/task-pool.png
similarity index 100%
rename from doc/graphics/png/scaled/task-pool.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/task-pool.png
diff --git a/doc/graphics/png/scaled/test1.png b/doc/src/cylc-user-guide/graphics/png/scaled/test1.png
similarity index 100%
rename from doc/graphics/png/scaled/test1.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/test1.png
diff --git a/doc/graphics/png/scaled/test2.png b/doc/src/cylc-user-guide/graphics/png/scaled/test2.png
similarity index 100%
rename from doc/graphics/png/scaled/test2.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/test2.png
diff --git a/doc/graphics/png/scaled/test4.png b/doc/src/cylc-user-guide/graphics/png/scaled/test4.png
similarity index 100%
rename from doc/graphics/png/scaled/test4.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/test4.png
diff --git a/doc/graphics/png/scaled/test5.png b/doc/src/cylc-user-guide/graphics/png/scaled/test5.png
similarity index 100%
rename from doc/graphics/png/scaled/test5.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/test5.png
diff --git a/doc/graphics/png/scaled/test6.png b/doc/src/cylc-user-guide/graphics/png/scaled/test6.png
similarity index 100%
rename from doc/graphics/png/scaled/test6.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/test6.png
diff --git a/doc/graphics/png/scaled/timeline-one-a.png b/doc/src/cylc-user-guide/graphics/png/scaled/timeline-one-a.png
similarity index 100%
rename from doc/graphics/png/scaled/timeline-one-a.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/timeline-one-a.png
diff --git a/doc/graphics/png/scaled/timeline-one-c.png b/doc/src/cylc-user-guide/graphics/png/scaled/timeline-one-c.png
similarity index 100%
rename from doc/graphics/png/scaled/timeline-one-c.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/timeline-one-c.png
diff --git a/doc/graphics/png/scaled/timeline-one.png b/doc/src/cylc-user-guide/graphics/png/scaled/timeline-one.png
similarity index 100%
rename from doc/graphics/png/scaled/timeline-one.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/timeline-one.png
diff --git a/doc/graphics/png/scaled/timeline-three.png b/doc/src/cylc-user-guide/graphics/png/scaled/timeline-three.png
similarity index 100%
rename from doc/graphics/png/scaled/timeline-three.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/timeline-three.png
diff --git a/doc/graphics/png/scaled/timeline-two-cycles-optimal.png b/doc/src/cylc-user-guide/graphics/png/scaled/timeline-two-cycles-optimal.png
similarity index 100%
rename from doc/graphics/png/scaled/timeline-two-cycles-optimal.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/timeline-two-cycles-optimal.png
diff --git a/doc/graphics/png/scaled/timeline-two.png b/doc/src/cylc-user-guide/graphics/png/scaled/timeline-two.png
similarity index 100%
rename from doc/graphics/png/scaled/timeline-two.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/timeline-two.png
diff --git a/doc/graphics/png/scaled/timeline-zero.png b/doc/src/cylc-user-guide/graphics/png/scaled/timeline-zero.png
similarity index 100%
rename from doc/graphics/png/scaled/timeline-zero.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/timeline-zero.png
diff --git a/doc/graphics/png/scaled/tut-cyc-int.png b/doc/src/cylc-user-guide/graphics/png/scaled/tut-cyc-int.png
similarity index 100%
rename from doc/graphics/png/scaled/tut-cyc-int.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/tut-cyc-int.png
diff --git a/doc/graphics/png/scaled/tut-four.png b/doc/src/cylc-user-guide/graphics/png/scaled/tut-four.png
similarity index 100%
rename from doc/graphics/png/scaled/tut-four.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/tut-four.png
diff --git a/doc/graphics/png/scaled/tut-hello-multi-1.png b/doc/src/cylc-user-guide/graphics/png/scaled/tut-hello-multi-1.png
similarity index 100%
rename from doc/graphics/png/scaled/tut-hello-multi-1.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/tut-hello-multi-1.png
diff --git a/doc/graphics/png/scaled/tut-hello-multi-2.png b/doc/src/cylc-user-guide/graphics/png/scaled/tut-hello-multi-2.png
similarity index 100%
rename from doc/graphics/png/scaled/tut-hello-multi-2.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/tut-hello-multi-2.png
diff --git a/doc/graphics/png/scaled/tut-hello-multi-3.png b/doc/src/cylc-user-guide/graphics/png/scaled/tut-hello-multi-3.png
similarity index 100%
rename from doc/graphics/png/scaled/tut-hello-multi-3.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/tut-hello-multi-3.png
diff --git a/doc/graphics/png/scaled/tut-one.png b/doc/src/cylc-user-guide/graphics/png/scaled/tut-one.png
similarity index 100%
rename from doc/graphics/png/scaled/tut-one.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/tut-one.png
diff --git a/doc/graphics/png/scaled/tut-three.png b/doc/src/cylc-user-guide/graphics/png/scaled/tut-three.png
similarity index 100%
rename from doc/graphics/png/scaled/tut-three.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/tut-three.png
diff --git a/doc/graphics/png/scaled/tut-two.png b/doc/src/cylc-user-guide/graphics/png/scaled/tut-two.png
similarity index 100%
rename from doc/graphics/png/scaled/tut-two.png
rename to doc/src/cylc-user-guide/graphics/png/scaled/tut-two.png
diff --git a/doc/graphics/scale-images.sh b/doc/src/cylc-user-guide/graphics/scale-images.sh
similarity index 100%
rename from doc/graphics/scale-images.sh
rename to doc/src/cylc-user-guide/graphics/scale-images.sh
diff --git a/doc/graphics/vector/README.txt b/doc/src/cylc-user-guide/graphics/vector/README.txt
similarity index 100%
rename from doc/graphics/vector/README.txt
rename to doc/src/cylc-user-guide/graphics/vector/README.txt
diff --git a/doc/graphics/vector/eps/dep-multi-cycle.eps b/doc/src/cylc-user-guide/graphics/vector/eps/dep-multi-cycle.eps
similarity index 100%
rename from doc/graphics/vector/eps/dep-multi-cycle.eps
rename to doc/src/cylc-user-guide/graphics/vector/eps/dep-multi-cycle.eps
diff --git a/doc/graphics/vector/eps/dep-one-cycle.eps b/doc/src/cylc-user-guide/graphics/vector/eps/dep-one-cycle.eps
similarity index 100%
rename from doc/graphics/vector/eps/dep-one-cycle.eps
rename to doc/src/cylc-user-guide/graphics/vector/eps/dep-one-cycle.eps
diff --git a/doc/graphics/vector/eps/dep-two-cycles-linked.eps b/doc/src/cylc-user-guide/graphics/vector/eps/dep-two-cycles-linked.eps
similarity index 100%
rename from doc/graphics/vector/eps/dep-two-cycles-linked.eps
rename to doc/src/cylc-user-guide/graphics/vector/eps/dep-two-cycles-linked.eps
diff --git a/doc/graphics/vector/eps/dep-two-cycles.eps b/doc/src/cylc-user-guide/graphics/vector/eps/dep-two-cycles.eps
similarity index 100%
rename from doc/graphics/vector/eps/dep-two-cycles.eps
rename to doc/src/cylc-user-guide/graphics/vector/eps/dep-two-cycles.eps
diff --git a/doc/graphics/vector/eps/task-pool.eps b/doc/src/cylc-user-guide/graphics/vector/eps/task-pool.eps
similarity index 100%
rename from doc/graphics/vector/eps/task-pool.eps
rename to doc/src/cylc-user-guide/graphics/vector/eps/task-pool.eps
diff --git a/doc/graphics/vector/eps/timeline-one-a.eps b/doc/src/cylc-user-guide/graphics/vector/eps/timeline-one-a.eps
similarity index 100%
rename from doc/graphics/vector/eps/timeline-one-a.eps
rename to doc/src/cylc-user-guide/graphics/vector/eps/timeline-one-a.eps
diff --git a/doc/graphics/vector/eps/timeline-one-c.eps b/doc/src/cylc-user-guide/graphics/vector/eps/timeline-one-c.eps
similarity index 100%
rename from doc/graphics/vector/eps/timeline-one-c.eps
rename to doc/src/cylc-user-guide/graphics/vector/eps/timeline-one-c.eps
diff --git a/doc/graphics/vector/eps/timeline-one.eps b/doc/src/cylc-user-guide/graphics/vector/eps/timeline-one.eps
similarity index 100%
rename from doc/graphics/vector/eps/timeline-one.eps
rename to doc/src/cylc-user-guide/graphics/vector/eps/timeline-one.eps
diff --git a/doc/graphics/vector/eps/timeline-three.eps b/doc/src/cylc-user-guide/graphics/vector/eps/timeline-three.eps
similarity index 100%
rename from doc/graphics/vector/eps/timeline-three.eps
rename to doc/src/cylc-user-guide/graphics/vector/eps/timeline-three.eps
diff --git a/doc/graphics/vector/eps/timeline-two-cycles-optimal.eps b/doc/src/cylc-user-guide/graphics/vector/eps/timeline-two-cycles-optimal.eps
similarity index 100%
rename from doc/graphics/vector/eps/timeline-two-cycles-optimal.eps
rename to doc/src/cylc-user-guide/graphics/vector/eps/timeline-two-cycles-optimal.eps
diff --git a/doc/graphics/vector/eps/timeline-two.eps b/doc/src/cylc-user-guide/graphics/vector/eps/timeline-two.eps
similarity index 100%
rename from doc/graphics/vector/eps/timeline-two.eps
rename to doc/src/cylc-user-guide/graphics/vector/eps/timeline-two.eps
diff --git a/doc/graphics/vector/eps/timeline-zero.eps b/doc/src/cylc-user-guide/graphics/vector/eps/timeline-zero.eps
similarity index 100%
rename from doc/graphics/vector/eps/timeline-zero.eps
rename to doc/src/cylc-user-guide/graphics/vector/eps/timeline-zero.eps
diff --git a/doc/graphics/vector/svg/dep-multi-cycle.svg b/doc/src/cylc-user-guide/graphics/vector/svg/dep-multi-cycle.svg
similarity index 100%
rename from doc/graphics/vector/svg/dep-multi-cycle.svg
rename to doc/src/cylc-user-guide/graphics/vector/svg/dep-multi-cycle.svg
diff --git a/doc/graphics/vector/svg/dep-one-cycle.svg b/doc/src/cylc-user-guide/graphics/vector/svg/dep-one-cycle.svg
similarity index 100%
rename from doc/graphics/vector/svg/dep-one-cycle.svg
rename to doc/src/cylc-user-guide/graphics/vector/svg/dep-one-cycle.svg
diff --git a/doc/graphics/vector/svg/dep-two-cycles-linked.svg b/doc/src/cylc-user-guide/graphics/vector/svg/dep-two-cycles-linked.svg
similarity index 100%
rename from doc/graphics/vector/svg/dep-two-cycles-linked.svg
rename to doc/src/cylc-user-guide/graphics/vector/svg/dep-two-cycles-linked.svg
diff --git a/doc/graphics/vector/svg/dep-two-cycles.svg b/doc/src/cylc-user-guide/graphics/vector/svg/dep-two-cycles.svg
similarity index 100%
rename from doc/graphics/vector/svg/dep-two-cycles.svg
rename to doc/src/cylc-user-guide/graphics/vector/svg/dep-two-cycles.svg
diff --git a/doc/graphics/vector/svg/task-pool.svg b/doc/src/cylc-user-guide/graphics/vector/svg/task-pool.svg
similarity index 100%
rename from doc/graphics/vector/svg/task-pool.svg
rename to doc/src/cylc-user-guide/graphics/vector/svg/task-pool.svg
diff --git a/doc/graphics/vector/svg/timeline-one-a.svg b/doc/src/cylc-user-guide/graphics/vector/svg/timeline-one-a.svg
similarity index 100%
rename from doc/graphics/vector/svg/timeline-one-a.svg
rename to doc/src/cylc-user-guide/graphics/vector/svg/timeline-one-a.svg
diff --git a/doc/graphics/vector/svg/timeline-one-c.svg b/doc/src/cylc-user-guide/graphics/vector/svg/timeline-one-c.svg
similarity index 100%
rename from doc/graphics/vector/svg/timeline-one-c.svg
rename to doc/src/cylc-user-guide/graphics/vector/svg/timeline-one-c.svg
diff --git a/doc/graphics/vector/svg/timeline-one.svg b/doc/src/cylc-user-guide/graphics/vector/svg/timeline-one.svg
similarity index 100%
rename from doc/graphics/vector/svg/timeline-one.svg
rename to doc/src/cylc-user-guide/graphics/vector/svg/timeline-one.svg
diff --git a/doc/graphics/vector/svg/timeline-three-0.svg b/doc/src/cylc-user-guide/graphics/vector/svg/timeline-three-0.svg
similarity index 100%
rename from doc/graphics/vector/svg/timeline-three-0.svg
rename to doc/src/cylc-user-guide/graphics/vector/svg/timeline-three-0.svg
diff --git a/doc/graphics/vector/svg/timeline-three.svg b/doc/src/cylc-user-guide/graphics/vector/svg/timeline-three.svg
similarity index 100%
rename from doc/graphics/vector/svg/timeline-three.svg
rename to doc/src/cylc-user-guide/graphics/vector/svg/timeline-three.svg
diff --git a/doc/graphics/vector/svg/timeline-two-cycles-optimal.svg b/doc/src/cylc-user-guide/graphics/vector/svg/timeline-two-cycles-optimal.svg
similarity index 100%
rename from doc/graphics/vector/svg/timeline-two-cycles-optimal.svg
rename to doc/src/cylc-user-guide/graphics/vector/svg/timeline-two-cycles-optimal.svg
diff --git a/doc/graphics/vector/svg/timeline-two.svg b/doc/src/cylc-user-guide/graphics/vector/svg/timeline-two.svg
similarity index 100%
rename from doc/graphics/vector/svg/timeline-two.svg
rename to doc/src/cylc-user-guide/graphics/vector/svg/timeline-two.svg
diff --git a/doc/graphics/vector/svg/timeline-zero.svg b/doc/src/cylc-user-guide/graphics/vector/svg/timeline-zero.svg
similarity index 100%
rename from doc/graphics/vector/svg/timeline-zero.svg
rename to doc/src/cylc-user-guide/graphics/vector/svg/timeline-zero.svg
diff --git a/doc/graphviz.txt b/doc/src/cylc-user-guide/graphviz.txt
similarity index 100%
rename from doc/graphviz.txt
rename to doc/src/cylc-user-guide/graphviz.txt
diff --git a/doc/src/cylc-user-guide/gscanrc.tex b/doc/src/cylc-user-guide/gscanrc.tex
new file mode 100644
index 0000000..f259a2c
--- /dev/null
+++ b/doc/src/cylc-user-guide/gscanrc.tex
@@ -0,0 +1,57 @@
+
+\section{Cylc Gscan Config File Reference}
+\label{GscanRCReference}
+
+\lstset{language=bash}
+
+This section defines all legal items and values for the gscan config
+file which should be located in \lstinline=$HOME/.cylc/gscan.rc=. Some items
+also affect the gpanel panel app.
+
+The main menubar and toolbar appear when the mouse pointer is moved toward the
+top of the gscan window.  The View menu allows you to change which columns are
+displayed, which hosts to scan for running suites, and the task state icon
+theme.
+
+At startup, the task state icon theme and icon size are taken from the gcylc
+config file \lstinline=$HOME/.cylc/gcylc.rc=.
+
+
+\subsection{Top Level Items}
+
+\subsubsection{activate on startup}
+
+Set whether \lstinline=cylc gpanel= will activate automatically when the gui is
+loaded or not.
+
+\begin{myitemize}
+    \item {\em type:} boolean (True or False)
+\item {\em legal values:} ``True'', ``False''
+\item {\em default:} ``False''
+\item {\em example:} \lstinline at activate on startup = True@
+\end{myitemize}
+
+\subsubsection{columns}
+
+Set the columns to display when the \lstinline=cylc gscan= GUI starts. This can
+be changed later with the View menu.  The order in which the columns are
+specified here does not affect the display order.
+
+\begin{myitemize}
+\item {\em type:} string (a list of one or more view names)
+\item {\em legal values:} ``host'', ``owner'', ``status'', ``suite'',
+  ``title'', ``updated''
+\item {\em default:} ``status'', ``suite''
+\item {\em example:} \lstinline at columns = suite, title, status@
+\end{myitemize}
+
+\subsubsection{window size}
+
+Sets the size in pixels of the \lstinline=cylc gscan= GUI at startup.
+
+\begin{myitemize}
+    \item {\em type:} integer list: x, y
+    \item {\em legal values:} positive integers
+    \item {\em default:} 300, 200
+    \item {\em example:} \lstinline at window size = 1000, 700@
+\end{myitemize}
diff --git a/doc/scripts/get-deps.sh b/doc/src/cylc-user-guide/scripts/get-deps.sh
similarity index 63%
rename from doc/scripts/get-deps.sh
rename to doc/src/cylc-user-guide/scripts/get-deps.sh
index 4e33915..1f9eec1 100755
--- a/doc/scripts/get-deps.sh
+++ b/doc/src/cylc-user-guide/scripts/get-deps.sh
@@ -7,7 +7,7 @@ CONVERT=$(  which convert  2> /dev/null )
 WARNED=false
 
 if [[ -z $PDFLATEX ]]; then
-    echo "*** WARNING: to generate the PDF User Guide install LaTeX pdflatex ***" >&2
+    echo "*** WARNING: to generate PDF Cylc documentation install LaTeX pdflatex ***" >&2
     WARNED=true
 else
     DEPS="pdf"
@@ -15,12 +15,12 @@ fi
 
 if [[ -z $HTLATEX ]]; then
     echo
-    echo "*** WARNING: to generate the HTML User Guides install LaTeX tex4ht ***" >&2
+    echo "*** WARNING: to generate HTML Cylc documentation install LaTeX tex4ht ***" >&2
     WARNED=true
 fi
 
 if [[ -z $CONVERT ]]; then
-    echo "*** WARNING: to generate the HTML User Guides install ImageMagick convert ***" >&2
+    echo "*** WARNING: to generate HTML Cylc documentation install ImageMagick convert ***" >&2
     WARNED=true
 fi
 
@@ -34,4 +34,3 @@ if $WARNED; then
 fi
 
 echo $DEPS
-
diff --git a/doc/scripts/make-commands.sh b/doc/src/cylc-user-guide/scripts/make-commands.sh
similarity index 69%
rename from doc/scripts/make-commands.sh
rename to doc/src/cylc-user-guide/scripts/make-commands.sh
index 5e92e55..4377637 100755
--- a/doc/scripts/make-commands.sh
+++ b/doc/src/cylc-user-guide/scripts/make-commands.sh
@@ -16,19 +16,27 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-CYLC=../bin/cylc
+# Create cylc-version.txt and commands.tex for inclusion in LaTeX doc.
+
+CYLC=$(dirname $0)/../../../../bin/cylc
+
+$CYLC --version > cylc-version.txt
 
 cat > commands.tex <<END
 \label{help}
-\lstinputlisting{cylc.txt}
+\begin{lstlisting}
+$($CYLC --help)
+\end{lstlisting}
 \subsection{Command Categories}
 END
 
-for CAT in $( $CYLC categories ); do
+for CAT in $($CYLC categories); do
 	cat >> commands.tex <<END
 \subsubsection{$CAT}
 \label{$CAT}
-\lstinputlisting{categories/${CAT}.txt}
+\begin{lstlisting}
+$($CYLC $CAT --help)
+\end{lstlisting}
 END
 done
 
@@ -36,11 +44,12 @@ cat >> commands.tex <<END
 \subsection{Commands}
 END
 
-for COMMAND in $( $CYLC commands ); do
+for COM in $($CYLC commands); do
 	cat >> commands.tex <<END
-\subsubsection{$COMMAND}
-\label{$COMMAND}
-\lstinputlisting{commands/${COMMAND}.txt}
+\subsubsection{$COM}
+\label{$COM}
+\begin{lstlisting}
+$($CYLC $COM --help)
+\end{lstlisting}
 END
 done
-
diff --git a/doc/scripts/make-html.sh b/doc/src/cylc-user-guide/scripts/make-html.sh
similarity index 87%
rename from doc/scripts/make-html.sh
rename to doc/src/cylc-user-guide/scripts/make-html.sh
index 4272abd..6e8604c 100755
--- a/doc/scripts/make-html.sh
+++ b/doc/src/cylc-user-guide/scripts/make-html.sh
@@ -16,31 +16,17 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-set -e
+# Make HTML Cylc User Guide (called from Makefile).
 
-CYLC=$(dirname $0)/../../bin/cylc
+set -e
 
-function usage {
-    echo "USAGE make-html.sh [multi|single]"
-}
-
-if [[ $# != 1 ]]; then
-    usage
-    exit 1
-fi
-
-TYPE=$1
-if [[ $TYPE != multi ]] && [[ $TYPE != single ]]; then
-    usage
-    exit 1
-fi
+TYPE=$1  # "multi" or "single"
 
 DEST=html/$TYPE
 rm -rf $DEST; mkdir -p $DEST
 
-$CYLC -v > cylc-version.txt
-
 cp -r *.tex cug-html.cfg cylc-version.txt titlepic.sty $DEST
+
 cd $DEST
 ls *.tex | xargs -n 1 perl -pi -e 's at graphics/png/orig at ../../graphics/png/scaled at g'
 ls *.tex | xargs -n 1 perl -pi -e 's@\.\./examples/@../../../examples/@g'
@@ -59,4 +45,3 @@ if [[ $TYPE == multi ]]; then
 else
     htlatex cug-html.tex "cug-html.cfg,html,1,fn-in" "" "" "-halt-on-error"
 fi
-
diff --git a/doc/scripts/make-pdf.sh b/doc/src/cylc-user-guide/scripts/make-pdf.sh
similarity index 90%
rename from doc/scripts/make-pdf.sh
rename to doc/src/cylc-user-guide/scripts/make-pdf.sh
index 9ea323d..4e32390 100755
--- a/doc/scripts/make-pdf.sh
+++ b/doc/src/cylc-user-guide/scripts/make-pdf.sh
@@ -16,25 +16,15 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-set -e
-
-CYLC=$(dirname $0)/../../bin/cylc
+# Make PDF Cylc User Guide (called from Makefile).
 
-function usage {
-    echo "USAGE make.sh"
-}
-
-if [[ $# != 0 ]]; then
-    usage
-    exit 1
-fi
+set -e
 
 DEST=pdf
 rm -rf $DEST; mkdir -p $DEST
 
-$CYLC -v > cylc-version.txt
-
 cp -r *.tex cylc-version.txt titlepic.sty $DEST
+
 cd $DEST
 ls *.tex | xargs -n 1 perl -pi -e 's at graphics/png/orig at ../graphics/png/orig at g'
 ls *.tex | xargs -n 1 perl -pi -e 's@\.\./examples/@../../examples/@g'
@@ -49,4 +39,3 @@ perl -pi -e 's@\.\./INSTALL at ../../INSTALL at g' cug.tex
 pdflatex -halt-on-error cug-pdf.tex
 pdflatex -halt-on-error cug-pdf.tex
 pdflatex -halt-on-error cug-pdf.tex
-
diff --git a/doc/siterc.tex b/doc/src/cylc-user-guide/siterc.tex
similarity index 95%
rename from doc/siterc.tex
rename to doc/src/cylc-user-guide/siterc.tex
index b640d00..8683f29 100644
--- a/doc/siterc.tex
+++ b/doc/src/cylc-user-guide/siterc.tex
@@ -491,7 +491,7 @@ value is probably sufficient for job submission polling.
 \item {\em example:} (see the execution polling example above)
 \end{myitemize}
 
-\paragraph[remote copy template]{[hosts] \textrightarrow [[HOST]] \textrightarrow remote copy template }
+\paragraph[scp command]{[hosts] \textrightarrow [[HOST]] \textrightarrow scp command }
 
 A string for the command used to copy files to a remote host. This is not used
 on the suite host unless you run local tasks under another user account. The
@@ -504,7 +504,7 @@ that implements a similar interface to \lstinline=scp=.
 \item {\em localhost default:} \lstinline at scp -oBatchMode=yes -oConnectTimeout=10@
 \end{myitemize}
 
-\paragraph[remote shell template]{[hosts] \textrightarrow [[HOST]] \textrightarrow remote shell template }
+\paragraph[ssh command]{[hosts] \textrightarrow [[HOST]] \textrightarrow ssh command }
 
 A string for the command used to invoke commands on this host. This is not
 used on the suite host unless you run local tasks under another user account.
@@ -892,39 +892,39 @@ See ~\ref{task-event-mail-interval} for details.
 You can define site defaults for each of the following options, details
 of which can be found under ~\ref{SuiteEventHandling}:
 
-\subparagraph[handlers]{[cylc] \textrightarrow [[events]] \textrightarrow handlers}
+\paragraph[handlers]{[cylc] \textrightarrow [[events]] \textrightarrow handlers}
 
-\subparagraph[handler events]{[cylc] \textrightarrow [[events]] \textrightarrow handler events}
+\paragraph[handler events]{[cylc] \textrightarrow [[events]] \textrightarrow handler events}
 
-\subparagraph[startup handler]{[cylc] \textrightarrow [[events]] \textrightarrow startup handler}
+\paragraph[startup handler]{[cylc] \textrightarrow [[events]] \textrightarrow startup handler}
 
-\subparagraph[shutdown handler]{[cylc] \textrightarrow [[events]] \textrightarrow shutdown handler}
+\paragraph[shutdown handler]{[cylc] \textrightarrow [[events]] \textrightarrow shutdown handler}
 
-\subparagraph[mail events]{[cylc] \textrightarrow [[events]] \textrightarrow mail events}
+\paragraph[mail events]{[cylc] \textrightarrow [[events]] \textrightarrow mail events}
 
-\subparagraph[mail footer]{[cylc] \textrightarrow [[events]] \textrightarrow mail footer}
+\paragraph[mail footer]{[cylc] \textrightarrow [[events]] \textrightarrow mail footer}
 
-\subparagraph[mail from]{[cylc] \textrightarrow [[events]] \textrightarrow mail from}
+\paragraph[mail from]{[cylc] \textrightarrow [[events]] \textrightarrow mail from}
 
-\subparagraph[mail smtp]{[cylc] \textrightarrow [[events]] \textrightarrow mail smtp}
+\paragraph[mail smtp]{[cylc] \textrightarrow [[events]] \textrightarrow mail smtp}
 
-\subparagraph[mail to]{[cylc] \textrightarrow [[events]] \textrightarrow mail to}
+\paragraph[mail to]{[cylc] \textrightarrow [[events]] \textrightarrow mail to}
 
-\subparagraph[timeout handler]{[cylc] \textrightarrow [[events]] \textrightarrow timeout handler}
+\paragraph[timeout handler]{[cylc] \textrightarrow [[events]] \textrightarrow timeout handler}
 
-\subparagraph[timeout]{[cylc] \textrightarrow [[events]] \textrightarrow timeout}
+\paragraph[timeout]{[cylc] \textrightarrow [[events]] \textrightarrow timeout}
 
-\subparagraph[abort on timeout]{[cylc] \textrightarrow [[events]] \textrightarrow abort on timeout}
+\paragraph[abort on timeout]{[cylc] \textrightarrow [[events]] \textrightarrow abort on timeout}
 
-\subparagraph[stalled handler]{[cylc] \textrightarrow [[events]] \textrightarrow stalled handler}
+\paragraph[stalled handler]{[cylc] \textrightarrow [[events]] \textrightarrow stalled handler}
 
-\subparagraph[abort on stalled]{[cylc] \textrightarrow [[events]] \textrightarrow abort on stalled}
+\paragraph[abort on stalled]{[cylc] \textrightarrow [[events]] \textrightarrow abort on stalled}
 
-\subparagraph[inactivity handler]{[cylc] \textrightarrow [[events]] \textrightarrow inactivity handler}
+\paragraph[inactivity handler]{[cylc] \textrightarrow [[events]] \textrightarrow inactivity handler}
 
-\subparagraph[inactivity]{[cylc] \textrightarrow [[events]] \textrightarrow inactivity}
+\paragraph[inactivity]{[cylc] \textrightarrow [[events]] \textrightarrow inactivity}
 
-\subparagraph[abort on inactivity]{[cylc] \textrightarrow [[events]] \textrightarrow abort on inactivity}
+\paragraph[abort on inactivity]{[cylc] \textrightarrow [[events]] \textrightarrow abort on inactivity}
 
 \subsection{[authentication]}
 \label{GlobalAuth}
diff --git a/doc/suiterc.tex b/doc/src/cylc-user-guide/suiterc.tex
similarity index 95%
rename from doc/suiterc.tex
rename to doc/src/cylc-user-guide/suiterc.tex
index 06c528d..c3e03b5 100644
--- a/doc/suiterc.tex
+++ b/doc/src/cylc-user-guide/suiterc.tex
@@ -72,7 +72,7 @@ their normal operational context; or to prevent accidental submission of
 expensive real tasks during suite development.
 \begin{myitemize}
     \item {\em type:} string
-    \item {\em legal values:} live, dummy, simulation
+    \item {\em legal values:} live, dummy, dummy-local, simulation
     \item {\em default:} None
 \end{myitemize}
 
@@ -234,13 +234,13 @@ used for cylc testing and development.
     \item {\em default:} False
 \end{myitemize}
 
-\subsubsection[parameters]{[cylc] \textrightarrow parameters}
+\subsubsection[{[[}parameters{]]}]{[cylc] \textrightarrow [[parameters]]}
 
 Define parameter values here for use in expanding {\em parameterized tasks} -
 see Section~\ref{Parameterized Tasks}.
 \begin{myitemize}
     \item {\em type:} list of strings, or an integer range
-        \lstinline=LOWER..UPPER= (two dots, inclusive bounds)
+        \lstinline=LOWER..UPPER..STEP= (two dots, inclusive bounds, STEP optional)
     \item {\em default:} (none)
     \item {\em examples:}
         \begin{myitemize}
@@ -249,7 +249,7 @@ see Section~\ref{Parameterized Tasks}.
         \end{myitemize}
 \end{myitemize}
 
-\subsubsection[parameter templates]{[cylc] \textrightarrow parameter templates}
+\subsubsection[{[[}parameter templates{]]}]{[cylc] \textrightarrow [[parameter templates]]}
 \label{RefParameterTemplates}
 
 Parameterized task names (see previous item, and Section~\ref{Parameterized
@@ -301,6 +301,7 @@ substituted with actual values:
 \begin{myitemize}
     \item \%(event)s: event name (see below)
     \item \%(suite)s: suite name
+    \item \%(suite\_url)s: suite URL
     \item \%(message)s: event message, if any
 \end{myitemize}
 
@@ -581,7 +582,7 @@ in another run mode.
 
 \begin{myitemize}
     \item {\em type:} string
-    \item {\em legal values:} live, dummy, simulation
+    \item {\em legal values:} live, dummy, dummy-local, simulation
     \item {\em default:} None
 \end{myitemize}
 
@@ -662,6 +663,21 @@ See~\ref{GlobalAuth} for more information.
 The client privilege level granted for public access - i.e.\ no suite passphrase
 required.  See~\ref{GlobalAuth} for legal values.
 
+\subsubsection[{[[}simulation{]]} ]{[cylc] \textrightarrow [[simulation]]}
+
+Suite-level configuration for the {\em simulation} and {\em dummy} run modes
+described in Section~\ref{SimulationMode}.
+
+\paragraph[disable suite event handlers]{[cylc] \textrightarrow [[simulation]] \textrightarrow disable suite event handlers}
+
+If this is set to \lstinline=True= configured suite event handlers will not be
+called in simulation or dummy modes.
+
+\begin{myitemize}
+    \item {\em type:} boolean
+    \item {\em default:} \lstinline=True=
+\end{myitemize}
+
 \subsection{[scheduling]}
 
 This section allows cylc to determine when tasks are ready to run.
@@ -912,8 +928,7 @@ trigger message string and pass the cycle point to the \lstinline=cylc ext-trigg
 Sequential tasks are automatically given dependence on their own
 predecessor. This is equivalent to use of explicit inter-cycle triggers
 in the graph, except that the automatic version does not show in suite
-graph visualization. For more on sequential tasks see~\ref{SequentialTasks}
-and~\ref{LimitPID}.
+graph visualization. For more on sequential tasks see~\ref{SequentialTasks}.
 
 \begin{myitemize}
     \item {\em type:} Comma-separated list of task or family names.
@@ -1190,7 +1205,7 @@ single command or multiple lines of scripting. See also \lstinline=init-script=,
 
 \begin{myitemize}
 \item {\em type:} string
-\item {\em root default:} \lstinline=echo "Dummy task"; $(cylc rnd 1 16)=
+\item {\em root default:} (none)
 \end{myitemize}
 
 \paragraph[post-script]{ [runtime] \textrightarrow [[\_\_NAME\_\_]] \textrightarrow post-script}
@@ -1247,57 +1262,6 @@ for a job with higher immediate priority. See also~\ref{PreemptionHPC}
 \item {\em default:} False
 \end{myitemize}
 
-
-\paragraph[{[[[}dummy mode{]]]}]{[runtime] \textrightarrow [[\_\_NAME\_\_]] \textrightarrow [[[dummy mode]]]}
-
-Dummy mode configuration.
-
-\subparagraph[script]{[runtime] \textrightarrow [[\_\_NAME\_\_]] \textrightarrow [[[dummy mode]]] \textrightarrow script}
-
-The main \lstinline=script= item for tasks in {\em dummy mode}.
-See~\ref{ScriptItem} for documentation.
-
-\begin{myitemize}
-\item {\em type:} string
-\item {\em root default:} \lstinline=echo "Dummy task"; sleep $(cylc rnd 1 16)=
-\end{myitemize}
-
-\subparagraph[disable pre-script]{[runtime] \textrightarrow [[\_\_NAME\_\_]] \textrightarrow [[[dummy mode]]] \textrightarrow disable pre-script}
-
-This disables the task pre-script in dummy mode.
-
-\begin{myitemize}
-\item {\em type:} boolean
-\item {\em root default:} True
-\end{myitemize}
-
-\subparagraph[disable post-script]{[runtime] \textrightarrow [[\_\_NAME\_\_]] \textrightarrow [[[dummy mode]]] \textrightarrow disable post-script}
-
-This disables the task post-script in dummy mode.
-
-\begin{myitemize}
-\item {\em type:} boolean
-\item {\em root default:} True
-\end{myitemize}
-
-\paragraph[{[[[}simulation mode{]]]}]{[runtime] \textrightarrow [[\_\_NAME\_\_]] \textrightarrow [[[simulation mode]]]}
-
-Simulation mode configuration.
-
-\paragraph[run time range]{[runtime] \textrightarrow [[\_\_NAME\_\_]] \textrightarrow [[[simulation mode]]] \textrightarrow run time range}
-
-This defines a minimum and a maximum duration (expressed as ISO 8601
-duration/intervals) which define a range from which the simulation mode task
-run length will be randomly chosen.
-
-\begin{myitemize}
-    \item {\em type:} Comma-separated list containing two ISO 8601
-        duration/interval representations.
-    \item {\em example:} \lstinline=PT1S,PT20S= - a range of 1 second to 20
-    seconds
-    \item {\em default:} (1, 16)
-\end{myitemize}
-
 \paragraph[{[[[}job{]]]}]{[runtime] \textrightarrow [[\_\_NAME\_\_]] \textrightarrow [[[job]]]}
 
 This section configures the means by which cylc submits task job scripts to run.
@@ -1582,6 +1546,8 @@ substituted with actual values:
     \item \%(suite)s: suite name
     \item \%(point)s: cycle point
     \item \%(name)s: task name
+    \item \%(suite\_url)s: suite URL
+    \item \%(task\_url)s: task URL
     \item \%(submit\_num)s: submit number
     \item \%(id)s: task ID (i.e. \%(name)s.\%(point)s)
     \item \%(message)s: event message, if any
@@ -1948,6 +1914,83 @@ Run the polling \lstinline=cylc suite-state= command in verbose output mode.
     \item {\em default:} False
 \end{myitemize}
 
+\paragraph[{[[[}simulation{]]]}]{[runtime] \textrightarrow [[\_\_NAME\_\_]] \textrightarrow [[[simulation]]]}
+\label{suiterc-sim-config}
+
+\lstset{language=transcript}
+
+Task configuration for the suite {\em simulation} and {\em dummy} run modes
+described in Section~\ref{SimulationMode}.
+
+\subparagraph[default run length]{[runtime] \textrightarrow [[\_\_NAME\_\_]] \textrightarrow [[[simulation]]] \textrightarrow default run length}
+
+The default simulated job run length, if \lstinline=[job]execution time limit=
+and \lstinline=[simulation]speedup factor= are not set.
+
+\begin{myitemize}
+    \item {\em type:} ISO 8601 duration/interval representation (e.g.
+ \lstinline=PT10S=, 10 seconds, or \lstinline=PT1M=, 1 minute).
+    \item {\em default:} \lstinline=PT10S=
+\end{myitemize}
+
+\subparagraph[speedup factor]{[runtime] \textrightarrow [[\_\_NAME\_\_]] \textrightarrow [[[simulation]]] \textrightarrow speedup factor}
+
+If \lstinline=[job]execution time limit= is set, the task simulated run length
+is computed by dividing it by this factor.
+
+\begin{myitemize}
+    \item {\em type:} float
+    \item {\em default:} (none) - i.e.\ do not use proportional run length
+    \item {\em example:} \lstinline=10.0=
+\end{myitemize}
+
+\subparagraph[time limit buffer]{[runtime] \textrightarrow [[\_\_NAME\_\_]] \textrightarrow [[[simulation]]] \textrightarrow time limit buffer}
+
+For dummy jobs, a new \lstinline=[job]execution time limit= is set to the
+simulated task run length plus this buffer interval, to avoid job kill due to
+exceeding the time limit.
+
+\begin{myitemize}
+    \item {\em type:} ISO 8601 duration/interval representation (e.g.
+ \lstinline=PT10S=, 10 seconds, or \lstinline=PT1M=, 1 minute).
+    \item {\em default:} PT10S
+\end{myitemize}
+
+\subparagraph[fail cycle points]{[runtime] \textrightarrow [[\_\_NAME\_\_]] \textrightarrow [[[simulation]]] \textrightarrow fail cycle points}
+
+Configure simulated or dummy jobs to fail at certain cycle points.
+
+\begin{myitemize}
+    \item {\em type:} list of strings (cycle points), or {\em all}
+    \item {\em default:} (none) - no instances of the task will fail
+    \item {\em examples:}
+    \begin{myitemize}
+        \item \lstinline=all= - all instance of the task will fail
+        \item \lstinline=2017-08-12T06, 2017-08-12T18= - these instances of the
+        task will fail
+    \end{myitemize}
+\end{myitemize}
+
+\subparagraph[fail try 1 only]{[runtime] \textrightarrow [[\_\_NAME\_\_]] \textrightarrow [[[simulation]]] \textrightarrow fail try 1 only}
+
+If this is set to \lstinline=True= only the first run of the task instance will
+fail, otherwise retries will fail too.
+
+\begin{myitemize}
+    \item {\em type:} boolean
+    \item {\em default:} \lstinline=True=
+\end{myitemize}
+
+\subparagraph[disable task event handlers]{[runtime] \textrightarrow [[\_\_NAME\_\_]] \textrightarrow [[[simulation]]] \textrightarrow disable task event handlers}
+
+If this is set to \lstinline=True= configured task event handlers will not be called
+in simulation or dummy modes.
+
+\begin{myitemize}
+    \item {\em type:} boolean
+    \item {\em default:} \lstinline=True=
+\end{myitemize}
+
 \subsection{[visualization]}
 
 Configuration of suite graphing for the \lstinline=cylc graph= command (graph
diff --git a/doc/titlepic.sty b/doc/src/cylc-user-guide/titlepic.sty
similarity index 100%
rename from doc/titlepic.sty
rename to doc/src/cylc-user-guide/titlepic.sty
diff --git a/doc/titlepic/README b/doc/src/cylc-user-guide/titlepic/README
similarity index 100%
rename from doc/titlepic/README
rename to doc/src/cylc-user-guide/titlepic/README
diff --git a/doc/titlepic/titlepic-manual.pdf b/doc/src/cylc-user-guide/titlepic/titlepic-manual.pdf
similarity index 100%
rename from doc/titlepic/titlepic-manual.pdf
rename to doc/src/cylc-user-guide/titlepic/titlepic-manual.pdf
diff --git a/doc/titlepic/titlepic-manual.tex b/doc/src/cylc-user-guide/titlepic/titlepic-manual.tex
similarity index 100%
rename from doc/titlepic/titlepic-manual.tex
rename to doc/src/cylc-user-guide/titlepic/titlepic-manual.tex
diff --git a/doc/titlepic/titlepic.sty b/doc/src/cylc-user-guide/titlepic/titlepic.sty
similarity index 100%
rename from doc/titlepic/titlepic.sty
rename to doc/src/cylc-user-guide/titlepic/titlepic.sty
diff --git a/doc/src/index.css b/doc/src/index.css
new file mode 100644
index 0000000..e96adb8
--- /dev/null
+++ b/doc/src/index.css
@@ -0,0 +1,86 @@
+/* Cylc logo colors:
+ * red: #ff5966;
+ * yellow: #ffcc00;
+ * green: #00c697;
+ * blue: #00b3fd;
+ */
+
+body { 
+    font-family:courier;
+    font-weight:bold;
+    background:LightSteelBlue;
+}
+
+div.uberpage {
+    color:white;
+    margin:0 auto;
+    width:700px;
+    background-image:url(graphics/png/orig/niwa-colour-small.png);
+    background-repeat:no-repeat;
+    background-position: bottom right;
+}
+
+div.page {
+    color:#00b3fd;
+    background:white;
+    background-image:url(graphics/cylc-logo.png);
+    background-repeat:no-repeat;
+    background-position: top right;
+    margin:0 auto;
+    margin-top:50px;
+    padding:50px;
+    width:600px;
+}
+
+.lbox {
+    width: 60%;
+}
+
+.rbox {
+    float:right;
+    /*font-size:80%;*/
+}
+
+.info {
+    font-size:80%;
+}
+
+a:link, a:visited { 
+    color: SteelBlue;
+}
+
+h1 {
+    border-bottom:2px solid #00b3fd;
+    margin-top:0;
+}
+h2 {
+    border-bottom:2px solid #00b3fd;
+    margin-right:25%;
+    margin-left:30px;
+}
+h3 {
+    border-bottom:1px solid #00b3fd;
+    margin-right:50%;
+    margin-left:60px;
+    padding-top:20px;
+}
+
+code {
+    font-family: "Courier 10 Pitch", Courier, monospace; 
+    font-size: 90%;
+    font-weight:bold;
+}
+
+pre.code {
+    display:block;
+    margin-top:-10px;
+    padding:10px;
+    font-family: "Courier 10 Pitch", Courier, monospace; 
+    font-size: 90%;
+    font-weight:bold;
+    white-space: pre-wrap; /* css-3 */
+    white-space: -moz-pre-wrap !important; 
+    white-space: -pre-wrap; /* Opera 4-6 */
+    white-space: -o-pre-wrap; /* Opera 7 */
+    word-wrap: break-word; /* Internet Explorer 5.5+ */
+}
diff --git a/doc/src/make-index.sh b/doc/src/make-index.sh
new file mode 100755
index 0000000..e8f24da
--- /dev/null
+++ b/doc/src/make-index.sh
@@ -0,0 +1,164 @@
+#!/bin/bash
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2017 NIWA
+# 
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# Install to 'install/' and create an HTML index page to Cylc docs.
+
+set -e
+
+OUT=install
+rm -rf $OUT
+mkdir -p $OUT
+cp src/index.css $OUT
+cp -r src/cylc-user-guide/graphics $OUT
+cp src/cylc-logo.png $OUT/graphics
+
+CYLC_VERSION=$($(dirname $0)/../../bin/cylc --version)
+INDEX=$OUT/index.html
+
+CUG_PDF=src/cylc-user-guide/pdf/cug-pdf.pdf
+CUG_HTML_SINGLE=src/cylc-user-guide/html/single/
+CUG_HTML_MULTI=src/cylc-user-guide/html/multi/
+SDG_PDF=src/suite-design-guide/document.pdf
+
+cat > $INDEX <<__END__
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">
+<html>
+  <head>
+    <title>Cylc-${CYLC_VERSION}</title>
+    <link rel="stylesheet" href="index.css">
+  </head>
+<body>
+
+<div class="uberpage">
+<div class="page">
+
+<h1>Cylc Documentation</h1>
+
+<p>cylc-${CYLC_VERSION}</p>
+
+<div class="rbox">
+<h3 style="margin:10px; margin-top:0">Command Help</h3>
+<pre class="code">
+cylc --help
+cylc COMMAND --help
+</pre>
+<h3 style="margin:10px">Misc.</h3>
+<ul>
+<li><a href="https://github.com/cylc/cylc/blob/master/CHANGES.md">change log</a></li>
+</ul>
+</div>
+
+<div class="lbox">
+<h3 style="margin:10px">User Guide</h3>
+<ul>
+__END__
+
+if [[ -f $CUG_PDF ]]; then
+  cp $CUG_PDF $OUT/cylc-user-guide.pdf
+  cat >> $INDEX <<__END__
+  <li><a href="cylc-user-guide.pdf">PDF</a></li>
+__END__
+else
+    cat >> $INDEX <<__END__
+    <li>PDF <i>(not generated)</i></li>
+__END__
+fi
+
+mkdir -p $OUT/html
+if [[ -f $CUG_HTML_SINGLE/cug-html.html ]]; then
+  cp -r $CUG_HTML_SINGLE $OUT/html/single
+  cat >> $INDEX <<__END__
+  <li><a href="html/single/cug-html.html">HTML (single page)</a> </li>
+__END__
+else
+    cat >> $INDEX <<__END__
+    <li>HTML single page <i>(not generated)</i></li>
+__END__
+fi
+
+if [[ -f $CUG_HTML_MULTI/cug-html.html ]]; then
+  cp -r $CUG_HTML_MULTI $OUT/html/multi
+  cat >> $INDEX <<__END__
+  <li><a href="html/multi/cug-html.html">HTML (multi page)</a></li>
+__END__
+else
+    cat >> $INDEX <<__END__
+    <li>HTML multi page <i>(not generated)</i></li>
+__END__
+fi
+
+cat >> $INDEX <<__END__
+</ul>
+</div>
+
+<div class="lbox">
+<h3 style="margin:10px">Suite Design Guide</h3>
+<ul>
+__END__
+
+if [[ -f $SDG_PDF ]]; then
+  cp $SDG_PDF $OUT/suite-design-guide.pdf
+  cat >> $INDEX <<__END__
+  <li><a href="suite-design-guide.pdf">PDF</a></li>
+__END__
+else
+    cat >> $INDEX <<__END__
+    <li>PDF <i>(not generated)</i></li>
+__END__
+fi
+
+cat >> $INDEX <<__END__
+</ul>
+</div>
+
+<div class="lbox">
+<h3 style="margin:10px">Online Resources</h3>
+<ul>
+<li> <a href="http://cylc.github.io/cylc/">Cylc Web Site</a> </li>
+<ul>
+  <li> <a href="http://cylc.github.io/cylc/documentation.html">Online Documentation</a> </li>
+</ul>
+<li> <a href="https://github.com/cylc/cylc">Code Repository (GitHub)</a> </li>
+</ul>
+</div>
+</div>
+
+<div class="info">
+<p>Document generation:</p>
+<ul>
+<li> user: <b>
+__END__
+whoami >> $INDEX
+cat >> $INDEX <<__END__
+</b> </li>
+<li> host: <b>
+__END__
+hostname -f >> $INDEX
+cat >> $INDEX <<__END__
+</b> </li>
+<li> date: <b>
+__END__
+date >> $INDEX
+
+cat >> $INDEX <<__END__
+</div>
+</div>
+
+</body>
+</html>
+__END__
diff --git a/Makefile b/doc/src/suite-design-guide/Makefile
similarity index 79%
copy from Makefile
copy to doc/src/suite-design-guide/Makefile
index 7577cf0..2d49d34 100644
--- a/Makefile
+++ b/doc/src/suite-design-guide/Makefile
@@ -1,3 +1,5 @@
+#!/usr/bin/make -f
+
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2017 NIWA
 # 
@@ -14,13 +16,14 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-all: version docs
+.PHONY: all clean
 
-version:
-	admin/create-version-file
+all: document.pdf
 
-docs:
-	cd doc && $(MAKE)
+document.pdf: *.tex
+	- pdflatex document.tex
+	- pdflatex document.tex
+	- pdflatex document.tex
 
 clean:
-	cd doc && $(MAKE) clean
+	rm -f *.aux *.out *.toc *.log *.pdf
diff --git a/doc/src/suite-design-guide/document.tex b/doc/src/suite-design-guide/document.tex
new file mode 100644
index 0000000..d74d896
--- /dev/null
+++ b/doc/src/suite-design-guide/document.tex
@@ -0,0 +1,11 @@
+\include{preamble}
+\begin{document}
+\include{title-page}
+\tableofcontents
+\include{introduction}
+\include{style-guide}
+\include{general-principles}
+\include{efficiency}
+\include{portable-suites}
+\include{roadmap}
+\end{document}
diff --git a/doc/src/suite-design-guide/efficiency.tex b/doc/src/suite-design-guide/efficiency.tex
new file mode 100644
index 0000000..252904f
--- /dev/null
+++ b/doc/src/suite-design-guide/efficiency.tex
@@ -0,0 +1,355 @@
+\section{Efficiency And Maintainability}
+\label{Efficiency And Maintainability}
+
+Efficiency (in the sense of {\em economy of suite definition}) and
+maintainability go hand in hand. This section describes techniques for clean
+and efficient construction of complex workflows that are easy to understand,
+maintain, and modify.
+
+\subsection{The Task Family Hierarchy}
+\label{The Task Family Hierarchy}
+
+A properly designed family hierarchy fulfills three purposes in Cylc:
+
+\begin{itemize}
+  \item efficient sharing of all configuration common to groups of related
+    tasks
+  \item efficient bulk triggering, for clear scheduling graphs
+  \item clean suite visualization and monitoring, because families are
+    collapsible in the GUIs
+\end{itemize}
+
+\subsubsection{Sharing By Inheritance}
+\label{Sharing By Inheritance}
+
+Duplication is a maintenance risk because changes have to be repeated in
+multiple places without mistakes. On the other hand, unnecessary sharing of
+items via global variables is also bad because it is hard to be sure which
+tasks are using which variables. A properly designed runtime inheritance
+hierarchy can give every task exactly what it needs, and nothing that it
+doesn't need.
+
+If a group of related tasks has some configuration in common, it can be
+factored out into a task family inherited by all.
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[OBSPROC]]
+        # Settings common to all obs processing tasks.
+    [[obs1]]
+        inherit = OBSPROC
+    [[obs2]]
+        inherit = OBSPROC
+\end{lstlisting}
+
+If several families have settings in common, they can in turn inherit
+from higher-level families. 
+
+Multiple inheritance allows efficient sharing even for overlapping categories
+of tasks. For example consider that some obs processing tasks in the following
+suite run parallel jobs and some serial:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[SERIAL]]
+        # Serial job settings.
+    [[PARALLEL]]
+        # Parallel job settings.
+    [[OBSPROC]]
+        # Settings for all obs processing tasks.
+    [[obs1, obs2, obs3]]
+        # Serial obs processing tasks.
+        inherit = OBSPROC, SERIAL
+    [[obs4, obs5]]
+        # Parallel obs processing tasks.
+        inherit = OBSPROC, PARALLEL
+\end{lstlisting}
+
+Note that suite parameters should really be used to define family members
+efficiently - see Section~\ref{Generating Tasks}.
+
+Cylc provides tools to help make sense of your inheritance hierarchy:
+
+\begin{itemize}
+  \item \lstinline=cylc graph -n/--namespaces= - plot the full multiple
+      inheritance graph (not the dependency graph)
+  \item \lstinline=cylc get-config SUITE= - print selected sections or items
+      after inheritance processing
+  \item \lstinline=cylc graph SUITE= - plot the dependency graph, with
+      collapsible first-parent families (see~\ref{Task Families And Visualization})
+  \item \lstinline=cylc list -t/--tree SUITE= - print the first-parent
+    inheritance hierarchy
+  \item \lstinline=cylc list -m/--mro SUITE= - print the inheritance
+      precedence order for each runtime namespace
+\end{itemize}
+
+\subsubsection{Family Triggering}
+
+Task families can be used to simplify the scheduling graph wherever many
+tasks need to trigger at once:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[scheduling]
+    [[dependencies]]
+        graph = pre => MODELS
+[runtime]
+    [[MODELS]]
+    [[model1, model2, model3, ...]]
+        inherit = MODELS
+\end{lstlisting}
+
+To trigger {\em off of} many tasks at once, family names need to be qualified
+by \lstinline@<state>-all@ or \lstinline@<state>-any@ to indicate the desired
+member-triggering semantics:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[scheduling]
+    [[dependencies]]
+        graph = """pre => MODELS
+                MODELS:succeed-all => post"""
+\end{lstlisting}
+
+Note that this can be simplified further because Cylc ignores trigger
+qualifiers like \lstinline=:succeed-all= on the right of trigger arrows
+to allow chaining of dependencies:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[scheduling]
+    [[dependencies]]
+        graph = pre => MODELS:succeed-all => post
+\end{lstlisting}
+
+\subsubsection{Family-to-Family Triggering}
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[scheduling]
+    [[dependencies]]
+        graph = BIG_FAM_1:succeed-all => BIG_FAM_2
+\end{lstlisting}
+
+This means every member of \lstinline=BIG_FAM_2= depends on every member
+of \lstinline=BIG_FAM_1= succeeding. For very large families this can create so
+many dependencies that it affects the performance of Cylc at run time, as
+well as cluttering graph visualizations with unnecessary edges. Instead,
+interpose a dummy task that signifies completion of the first family:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[scheduling]
+    [[dependencies]]
+        graph = BIG_FAM_1:succeed-all => big_fam_1_done => BIG_FAM_2
+\end{lstlisting}
+
+For families with \lstinline=M= and \lstinline=N= members respectively, this 
+reduces the number of dependencies from \lstinline=M*N= to \lstinline=M+N=
+without affecting the scheduling.
+
+\includegraphics[width=\textwidth]{resources/png/fam-to-fam-1.png}
+\includegraphics[width=\textwidth]{resources/png/fam-to-fam-2.png}
+
+\subsubsection{Task Families And Visualization}
+\label{Task Families And Visualization}
+
+{\em First parents} in the inheritance hierarchy double as collapsible summary
+groups for visualization and monitoring. Tasks should generally be grouped into
+visualization families that reflect their logical purpose in the suite rather
+than technical detail such as inherited job submission or host settings. So in
+the example under Section~\ref{Sharing By Inheritance} above all
+\lstinline=obs<n>= tasks collapse into \lstinline=OBSPROC= but not into
+\lstinline=SERIAL= or \lstinline=PARALLEL=.
+
+If necessary you can introduce new namespaces just for visualization:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[MODEL]]
+        # (No settings here - just for visualization).
+    [[model1, model2]]
+        inherit = MODEL, HOSTX
+    [[model3, model4]]
+        inherit = MODEL, HOSTY
+\end{lstlisting}
+
+To stop a solo parent being used in visualization, demote it to secondary with
+a null parent like this:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[SERIAL]]
+    [[foo]]
+        # Inherit settings from SERIAL but don't use it in visualization.
+        inherit = None, SERIAL
+\end{lstlisting}
+
+\subsection{Generating Tasks Automatically}
+\label{Generating Tasks}
+
+Groups of tasks that are closely related such as an ensemble of model runs or 
+a family of obs processing tasks, or sections of workflow that are repeated
+with minor variations, can be generated automatically by iterating over
+some integer range (e.g.\ \lstinline=model<n>= for \lstinline@n = 1..10@) or
+list of strings (e.g.\ \lstinline=obs<type>= for
+\lstinline@type = ship, buoy, radiosonde, ...@).
+
+\subsubsection{Jinja2 Loops}
+
+Task generation was traditionally done in Cylc with explicit Jinja2 loops,
+like this:
+\lstset{language=suiterc}
+\begin{lstlisting}
+# Task generation the old way: Jinja2 loops (NO LONGER RECOMMENDED!)
+{% set PARAMS = range(1,11) %}
+[scheduling]
+    [[dependencies]]
+        graph = """
+{% for P in PARAMS %}
+      pre => model_p{{P}} => post
+      {% if P == 5 %}
+          model_p{{P}} => check
+      {% endif %}
+{% endfor %}    """
+[runtime]
+{% for P in PARAMS %}
+    [[model_p{{P}}]]
+        script = echo "my parameter value is {{P}}"
+    {% if P == 1 %}
+        # special case...
+    {% endif %}
+{% endfor %}
+\end{lstlisting}
+
+Unfortunately this makes a mess of the suite definition, particularly the
+scheduling graph, and it gets worse with nested loops over multiple parameters.
+
+\includegraphics[width=\textwidth]{resources/png/param-1.png}
+
+\subsubsection{Parameterized Tasks}
+\label{Parameterized Tasks}
+
+Cylc-6.11 introduced built-in {\em suite parameters} for generating tasks
+without destroying the clarity of the base suite definition. Here's the same
+example using suite parameters instead of Jinja2 loops:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+# Task generation the new way: suite parameters.
+[cylc]
+    [[parameters]]
+        p = 1..10
+[scheduling]
+    [[dependencies]]
+        graph = """pre => model<p> => post
+                model<p=5> => check"""
+[runtime]
+    [[model<p>]]
+        script = echo "my parameter value is ${CYLC_TASK_PARAM_p}"
+    [[model<p=7>]]
+        # special case ...
+\end{lstlisting}
+
+Here \lstinline@model<p>@ expands to \lstinline@model_p7@ for \lstinline@p=7@,
+and so on, via the default expansion template for integer-valued parameters,
+but custom templates can be defined if necessary. Parameters can also be
+defined as lists of strings, and you can define dependencies between different
+values: \lstinline@chunk<p-1> => chunk<p>@.  Here's a multi-parameter example:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[cylc]
+    [[parameters]]
+        run = a, b, c
+        m = 1..5
+[scheduling]
+    [[dependencies]]
+        graph = pre => init<run> => sim<run,m> => close<run> => post
+[runtime]
+    [[sim<run,m>]]
+\end{lstlisting}
+
+\includegraphics[width=\textwidth]{resources/png/param-2.png}
+
+If family members are defined by suite parameters, then parameterized
+trigger expressions are equivalent to family \lstinline=:<state>-all= triggers.
+For example, this:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[cylc]
+    [[parameters]]
+        n = 1..5
+[scheduling]
+    [[dependencies]]
+        graph = pre => model<n> => post
+[runtime]
+    [[MODELS]]
+    [[model<n>]]
+        inherit = MODELS
+\end{lstlisting}
+
+is equivalent to this:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[cylc]
+    [[parameters]]
+        n = 1..5
+[scheduling]
+    [[dependencies]]
+        graph = pre => MODELS:succeed-all => post
+[runtime]
+    [[MODELS]]
+    [[model<n>]]
+        inherit = MODELS
+\end{lstlisting}
+
+(but future plans for family triggering may make the second case more
+efficient for very large families).
+
+For more information on parameterized tasks see the Cylc user guide.
+
+\subsection{Optional App Config Files}
+\label{Optional App Config Files}
+
+Closely related tasks with few configuration differences between them - such as
+multiple UM forecast and reconfiguration apps in the same suite - should use
+the same Rose app configuration with the differences supplied by optional
+configs, rather than duplicating the entire app for each task.
+
+Optional app configs should be valid on top of the main app config and not
+dependent on the use of other optional app configs. This ensures they will
+work correctly with macros and can therefore be upgraded automatically.
+
+\note{Currently optional configs don't work very well with UM STASH
+  configuration - see Section~\ref{UM STASH in Optional App Configs}.}
+
+Optional app configs can be loaded by command line switch:
+
+\begin{lstlisting}
+rose task-run -O key1 -O key2
+\end{lstlisting}
+
+or by environment variable:
+
+\begin{lstlisting}
+ROSE_APP_OPT_CONF_KEYS = key1 key2
+\end{lstlisting}
+
+The environment variable is generally preferred in suites because you don't
+have to repeat and override the root-level script configuration: 
+
+\begin{lstlisting}
+[runtime]
+    [[root]]
+        script = rose task-run -v
+    [[foo]]
+        [[[environment]]]
+            ROSE_APP_OPT_CONF_KEYS = key1 key2
+\end{lstlisting}
diff --git a/doc/src/suite-design-guide/general-principles.tex b/doc/src/suite-design-guide/general-principles.tex
new file mode 100644
index 0000000..feb88a0
--- /dev/null
+++ b/doc/src/suite-design-guide/general-principles.tex
@@ -0,0 +1,682 @@
+\section{Basic Principles}
+\label{Basic Principles}
+
+This section covers general principles that should be kept in mind when writing
+any suite. More advanced topics are covered later: {\em Efficiency And
+Maintainability} (section~\ref{Efficiency And Maintainability}) and {\em
+Portable Suites} (section~\ref{Portable Suites}).
+
+\subsection{UTC Mode}
+
+Cylc has full timezone support if needed, but real time NWP suites should use
+UTC mode to avoid problems at the transition between local standard time and
+daylight saving time, and to enable the same suite to run the same way in
+different timezones.
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[cylc]
+    UTC mode = True
+\end{lstlisting}
+\subsection{Fine Or Coarse-Grained Suites}
+
+Suites can have many small simple tasks, fewer large complex tasks, or anything
+in between. A task that runs many distinct processes can be split into many
+distinct tasks. The fine-grained approach is more transparent and it allows
+more task level concurrency and quicker failure recovery - you can rerun just
+what failed without repeating anything unnecessarily.
+
+\subsubsection{rose bunch}
+
+One caveat to our fine-graining advice is that submitting a large number of
+small tasks at once may be a problem on some platforms. If you have many
+similar concurrent jobs you can use \lstinline=rose bunch= to pack them into a
+single task with incremental rerun capability: retriggering the task will rerun
+just the component jobs that did not successfully complete earlier.
+
+\subsection{Monolithic Or Interdependent Suites}
+\label{Monolithic Or Interdependent Suites}
+
+When writing suites from scratch you may need to decide between putting
+multiple loosely connected sub-workflows into a single large suite, or
+constructing a more modular system of smaller suites that depend on each other
+through inter-suite triggering. Each approach has its pros and cons, depending
+on your requirements and preferences with respect to the complexity and
+manageability of the resulting system.
+
+The \lstinline=cylc gscan= GUI lets you monitor multiple suites at a time, and
+you can define virtual groups of suites that collapse into a single state
+summary.
+
+\subsubsection{Inter-Suite Triggering}
+
+A task in one suite can explicitly trigger off of a task in another suite. The
+full range of possible triggering conditions is supported, including custom
+message triggers. Remote triggering involves repeatedly querying (``polling'')
+the remote suite run database, not the suite daemon, so it works even if the
+other suite is down at the time.
+
+There is special graph syntax to support triggering off of a task in another
+suite, or you can call the underlying \lstinline=cylc suite-state= command
+directly in task scripting.
+
+In real time suites you may want to use clock-triggers to delay the onset of
+inter-suite polling until roughly the expected completion time of the remote
+task.
+
+\subsection{Self-Contained Suites}
+\label{Self-Contained Suites}
+
+All files generated by Cylc during a suite run are confined to the {\em suite
+run directory} \lstinline=$HOME/cylc-run/<SUITE>=. However, Cylc has no control
+over the locations of the programs, scripts, and files, that are executed,
+read, or generated by your tasks at runtime.  It is up to you to ensure that
+all of this is confined to the suite run directory too, as far as possible.
+
+Self-contained suites are more robust, easier to work with, and more portable.
+Multiple instances of the same suite (with different suite names) should be
+able to run concurrently under the same user account without mutual
+interference.
+
+\subsubsection{Avoiding External Files}
+
+Suites that use external scripts, executables, and files beyond the essential
+system libraries and utilities are vulnerable to external changes: someone
+else might interfere with these files without telling you.
+
+In some cases you may need to symlink to large external files anyway, if space
+or copy speed is a problem, but otherwise suites with private copies of all the
+files they need are more robust.
+
+\subsubsection{Installing Files At Start-up}
+
+Use \lstinline=rose suite-run= {\em file creation mode} or \lstinline=R1=
+install tasks to copy files to the self-contained suite run directory at
+start-up.  Install tasks are preferred for time-consuming installations because
+they don't slow the suite start-up process, they can be monitored in the GUI,
+they can run directly on target platforms, and you can rerun them later without
+restarting the suite. 
+
+\subsubsection{Confining Output To The Run Directory}
+
+Output files should be confined to the suite run directory tree. Then all
+output is easy to find, multiple instances of the same suite can run
+concurrently without interference, and other users should be able to copy and
+run your suite with few modifications. Cylc provides a \lstinline at share@
+directory for generated files that are used by several tasks in a suite
+(see~\ref{Shared Task IO Paths}). Archiving tasks can use \lstinline=rose arch=
+to copy or move selected files to external locations as needed (see~\ref{Suite
+Housekeeping}).
+
+\subsection{Task Host Selection}
+
+At sites with multiple task hosts to choose from, use
+\lstinline=rose host-select= to dynamically select appropriate task hosts
+rather than hard coding particular hostnames. This enables your suite to
+adapt to particular machines being down or heavily overloaded by selecting
+from a group of hosts based on a series of criteria.
+\lstinline=rose host-select= will only return hosts that can be contacted by
+non-interactive SSH.
+
+\subsection{Task Scripting}
+
+Non-trivial task scripting should be held in external files rather than
+inlined in the suite.rc. This keeps the suite definition tidy, and it
+allows proper shell-mode text editing and independent testing of task scripts.
+
+For automatic access by task jobs, task-specific scripts should be kept in 
+Rose app bin directories, and shared scripts kept in (or installed to) the
+suite bin directory.
+
+\subsubsection{Coding Standards}
+
+When writing your own task scripts make consistent use of appropriate coding
+standards such as:
+
+\begin{itemize}
+    \item PEP8 for Python - \url{https://www.python.org/dev/peps/pep-0008/}
+    \item Google Shell Style Guide for Bash -
+      \url{https://google.github.io/styleguide/shell.xml}
+\end{itemize}
+
+\subsubsection{Basic Functionality}
+
+In consideration of future users who may not be expert on the internals of your
+suite and its tasks, all task scripts should:
+
+\begin{itemize}
+  \item Print clear usage information if invoked incorrectly (and via the
+    standard options \lstinline=-h, --help=).
+  \item Print useful diagnostic messages in case of error. For example, if a
+    file was not found, the error message should contain the full path to the
+    expected location.
+  \item Always return correct shell exit status - zero for success, non-zero
+    for failure. This is used by Cylc job wrapper code to detect success and
+    failure and report it back to the suite daemon.
+  \item In shell scripts use \lstinline=set -u= to abort on any reference to
+    an undefined variable. If you really need an undefined variable to evaluate
+    to an empty string, make it explicit: \lstinline at FOO=${FOO:-}@.
+  \item In shell scripts use \lstinline=set -e= to abort on any error without
+      having to failure-check each command explicitly.
+\end{itemize}
+
+
+\subsection{Rose Apps}
+
+Rose apps allow all non-shared task configuration - which is not relevant to
+workflow automation - to be moved from the suite definition into app config
+files. This makes suites tidier and easier to understand, and it allows
+\lstinline=rose edit= to provide a unified metadata-enhanced view of the suite
+and its apps (see~\ref{Rose Metadata Compliance}).
+
+Rose apps are a clear winner for tasks with complex configuration requirements.
+It matters less for those with little configuration, but for consistency and to
+take full advantage of \lstinline=rose edit= it makes sense to use Rose apps
+for most tasks.
+
+When most tasks are Rose apps, set the app-run command as a root-level default,
+and override it for the occasional non Rose app task:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[root]]
+        script = rose task-run -v
+    [[rose-app1]]
+        #...
+    [[rose-app2]]
+        #...
+    [[hello-world]]  # Not a Rose app.
+        script = echo "Hello World"
+\end{lstlisting}
+
+\subsection{Rose Metadata Compliance}
+\label{Rose Metadata Compliance}
+ 
+Rose metadata drives page layout and sort order in \lstinline=rose edit=, plus
+help information, input validity checking, macros for advanced checking and app
+version upgrades, and more.
+
+To ensure the suite and its constituent applications are being run as intended
+it should be valid against any provided metadata: launch the
+\lstinline=rose edit= GUI or run \lstinline=rose macro --validate= on the
+command line to highlight any errors, and correct them prior to use. If errors
+are flagged incorrectly you should endeavour to fix the metadata.
+
+When writing a new suite or application, consider creating metadata to
+facilitate ease of use by others.
+
+\subsection{Task Independence}
+
+Essential dependencies must be encoded in the suite graph, but tasks should
+not
+rely unnecessarily on the action of other tasks. For example, tasks should
+create their own output directories if they don't already exist, even if they
+would normally be created by an earlier task in the workflow. This makes it
+easier to run tasks alone during development and testing.
+
+
+\subsection{Clock-Triggered Tasks}
+\label{Clock-Triggered Tasks}
+Tasks that wait on real time data should use clock-triggers to delay job
+submission until the expected data arrival time:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[scheduling]
+    initial cycle point = now
+    [[special tasks]]
+        # Trigger 5 min after wall-clock time is equal to cycle point.
+        clock-trigger = get-data(PT5M)
+    [[dependencies]]
+        [[[T00]]]
+            graph = get-data => process-data
+\end{lstlisting}
+
+Clock-triggered tasks typically have to handle late data arrival. Task
+execution {\em retry delays} can be used to simply retrigger the task at
+intervals until the data is found, but frequently retrying small tasks probably
+should not go to a batch scheduler, and multiple task failures will be logged
+for what is essentially a normal condition (at least it is normal until the
+data is really late).
+
+Rather than using task execution retry delays to repeatedly trigger a task that
+checks for a file, it may be better to have the task itself repeatedly poll for
+the data (see~\ref{Rose App File Polling} for example).
+
+\subsection{Rose App File Polling}
+\label{Rose App File Polling}
+
+Rose apps have built-in polling functionality to check repeatedly for the
+existence of files before executing the main app. See the \lstinline=[poll]=
+section in Rose app config documentation. This is a good way to implement
+check-and-wait functionality in clock-triggered tasks (\ref{Clock-Triggered
+Tasks}), for example.
+
+It is important to note that frequent polling may be bad for some filesystems,
+so be sure to configure a reasonable interval between polls. 
+
+\subsection{Task Execution Time Limits}
+
+Instead of setting job wall clock limits directly in batch scheduler
+directives, use the \lstinline=execution time limit= suite config item.
+Cylc automatically derives the correct batch scheduler directives from this,
+and it is also used to run \lstinline=background= and \lstinline=at= jobs via
+the \lstinline=timeout= command, and to poll tasks that haven't reported as
+finished by the configured time limit.
+
+\subsection{Restricting Suite Activity}
+\label{Restricting Suite Activity}
+
+It may be possible for large suites to overwhelm a job host by submitting too
+many jobs at once:
+
+\begin{itemize}
+  \item Large suites that are not sufficiently limited by real time clock
+      triggering or inter-cycle dependence may generate a lot of {\em runahead}
+      (this refers to Cylc's ability to run multiple cycles at once, restricted
+      only by the dependencies of individual tasks).
+  \item Some suites may have large families of tasks whose members all
+    become ready at the same time.
+\end{itemize}
+
+These problems can be avoided with {\em runahead limiting} and {\em internal
+queues}, respectively.
+
+\subsubsection{Runahead Limiting}
+\label{Runahead Limiting}
+
+By default Cylc allows a maximum of three cycle points to be active at the same time, but this value is configurable:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[scheduling]
+    initial cycle point = 2020-01-01T00
+    # Don't allow any cycle interleaving:
+    max active cycle points = 1
+\end{lstlisting}
+
+\subsubsection{Internal Queues}
+
+Tasks can be assigned to named internal queues that limit the number of members
+that can be active (i.e.\ submitted or running) at the same time:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[scheduling]
+    initial cycle point = 2020-01-01T00
+    [[queues]]
+        # Allow only 2 members of BIG_JOBS to run at once:
+        [[[big_jobs_queue]]]
+            limit = 2
+            members = BIG_JOBS
+    [[dependencies]]
+        [[[T00]]]
+            graph = pre => BIG_JOBS
+[runtime]
+    [[BIG_JOBS]]
+    [[foo, bar, baz, ...]]
+        inherit = BIG_JOBS
+\end{lstlisting}
+
+\subsection{Suite Housekeeping}
+\label{Suite Housekeeping}
+
+Ongoing cycling suites can generate an enormous number of output files and logs
+so regular housekeeping is very important. Special housekeeping tasks,
+typically the last tasks in each cycle, should be included to archive selected
+important files and then delete everything at some offset from the current
+cycle point.
+
+The Rose built-in apps \lstinline=rose_arch= and \lstinline=rose_prune=
+provide an easy way to do this. They can be configured easily with
+file-matching patterns and cycle point offsets to perform various housekeeping
+operations on matched files.
+
+\subsection{Complex Jinja2 Code}
+
+The Jinja2 template processor provides general programming constructs,
+extensible with custom Python filters, that can be used to {\em generate} the
+suite definition. This makes it possible to write flexible multi-use
+suites with structure and content that varies according to various input
+switches. There is a cost to this flexibility however: excessive use of Jinja2
+can make a suite hard to understand and maintain. It is difficult to say
+exactly where to draw the line, but we recommend erring on the side of
+simplicity and clarity: write suites that are easy to understand and therefore
+easy to modify for other purposes, rather than extremely complicated suites
+that attempt to do everything out of the box but are hard to maintain and modify.
+
+Note that use of Jinja2 loops for generating tasks is now deprecated in favour
+of built-in parameterized tasks - see~\ref{Parameterized Tasks}.
+
+\subsection{Shared Configuration}
+
+Configuration that is common to multiple tasks should be defined in one
+place and used by all, rather than duplicated in each task. Duplication is
+a maintenance risk because changes have to be made consistently in several
+places at once.
+
+\subsubsection{Jinja2 Variables}
+
+In simple cases you can share by passing a Jinja2 variable to all the tasks
+that need it:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+{% set JOB_VERSION = 'A23' %}
+[runtime]
+    [[foo]]
+        script = run-foo --version={{JOB_VERSION}}
+    [[bar]]
+        script = run-bar --version={{JOB_VERSION}}
+\end{lstlisting}
+
+\subsubsection{Inheritance}
+
+Sharing by inheritance of task families is recommended when more than a few
+configuration items are involved.
+
+The simplest application of inheritance is to set global defaults in the
+\lstinline=[[runtime]][root]= namespace that is inherited by all tasks.
+However, this should only be done for settings that really are used
+by the vast majority of tasks. Over-sharing via root, particularly of
+environment variables, is a maintenance risk because it can be very
+difficult to be sure which tasks are {\em using} which global variables.
+
+Any \lstinline=[runtime]= settings can be shared - scripting, host
+and batch scheduler configuration, environment variables, and so on - from
+single items up to complete task or app configurations.  At the latter extreme,
+it is quite common to have several tasks that inherit the same complete
+job configuration followed by minor task-specific additions:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[FILE-CONVERT]]
+        script = convert-netcdf
+        #...
+    [[convert-a]]
+        inherit = FILE-CONVERT
+        [[[environment]]]
+              FILE_IN = file-a
+    [[convert-b]]
+        inherit = FILE-CONVERT
+        [[[environment]]]
+              FILE_IN = file-b
+\end{lstlisting}
+
+Inheritance is covered in more detail from an efficiency perspective in
+Section~\ref{The Task Family Hierarchy}.
+
+\subsubsection{Shared Task IO Paths}
+\label{Shared Task IO Paths}
+
+If one task uses files generated by another task (and both see the same
+filesystem) a common IO path should normally be passed to both tasks via a
+shared environment variable. As far as Cylc is concerned this is no different
+to other shared configuration items, but there are some additional aspects
+of usage worth addressing here.
+
+Primarily, for self-containment (see~\ref{Self-Contained Suites}) shared IO
+paths should be under the {\em suite share directory}, the location of which is
+passed to all tasks as \lstinline=$CYLC_SUITE_SHARE_PATH=.
+
+The \lstinline@rose task-env@ utility can provide additional environment
+variables that refer to static and cyclepoint-specific locations under the
+suite share directory.
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[my-task]]
+        env-script = $(eval rose task-env -T P1D -T P2D)
+\end{lstlisting}
+
+For a current cycle point of \lstinline=20170105= this will make the following
+variables available to tasks:
+
+\lstset{language=sh}
+\begin{lstlisting}
+ROSE_DATA=$CYLC_SUITE_SHARE_PATH/data
+ROSE_DATAC=$CYLC_SUITE_SHARE_PATH/cycle/20170105
+ROSE_DATACP1D=$CYLC_SUITE_SHARE_PATH/cycle/20170104
+ROSE_DATACP2D=$CYLC_SUITE_SHARE_PATH/cycle/20170103
+\end{lstlisting}
+
+Subdirectories of \lstinline@$ROSE_DATAC@ etc.\ should be agreed between
+different sub-systems of the suite; typically they are named for the
+file-generating tasks, and the file-consuming tasks should know to look there.
+
+The share-not-duplicate rule can be relaxed for shared files whose names are
+agreed by convention, so long as their locations under the share directory are
+proper shared suite variables. For instance the Unified Model uses a large
+number of files whose conventional names (\lstinline=glu_snow=, for example)
+can reasonably be expected not to change, so they are typically hardwired into
+app configurations (as \lstinline=$ROSE_DATA/glu_snow=, for example) to avoid
+cluttering the suite definition.
+
+Here two tasks share a workspace under the suite share directory by inheritance:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+# Sharing an I/O location via inheritance.
+[scheduling]
+    [[dependencies]]
+        graph = write_data => read_data
+[runtime]
+    [[root]]
+        env-script = $(eval rose task-env)
+    [[WORKSPACE]]
+        [[[environment]]]
+            DATA_DIR = ${ROSE_DATA}/png
+    [[write_data]]
+        inherit = WORKSPACE
+        script = """
+mkdir -p $DATA_DIR
+write-data.exe -o ${DATA_DIR}"""
+    [[read_data]]
+        inherit = WORKSPACE
+        script = read-data.exe -i ${DATA_DIR}
+\end{lstlisting}
+
+In simple cases where an appropriate family does not already exist paths can
+be shared via Jinja variables:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+# Sharing an I/O location with Jinja2.
+{% set DATA_DIR = '$ROSE_DATA/stuff' %}
+[scheduling]
+    [[dependencies]]
+        graph = write_data => read_data
+[runtime]
+    [[write_data]]
+        script = """
+mkdir -p {{DATA_DIR}}
+write-data.exe -o {{DATA_DIR}}"""
+    [[read_data]]
+        script = read-data.exe -i {{DATA_DIR}}
+\end{lstlisting}
+
+For completeness we note that it is also possible to configure multiple tasks
+to use the same work directory so they can all share files in \lstinline@$PWD@.
+(Cylc executes task jobs in special work directories that by default are unique
+to each task). This may simplify the suite slightly, and it may be useful if
+you are unfortunate enough to have executables that are designed for IO in
+\lstinline@$PWD@, {\em but it is not recommended.} There is a higher risk
+of interference between tasks; it will break \lstinline=rose task-run=
+incremental file creation mode; and \lstinline=rose task-run --new= will in
+effect delete the work directories of tasks other than its intended target.
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+# Shared work directory: tasks can read and write in $PWD - use with caution!
+[scheduling]
+    initial cycle point = 2018
+    [[dependencies]]
+        [[[P1Y]]]
+            graph = write_data => read_data
+[runtime]
+    [[WORKSPACE]]
+        work sub-directory = $CYLC_TASK_CYCLE_POINT/datadir
+    [[write_data]]
+        inherit = WORKSPACE
+        script = write-data.exe
+    [[read_data]]
+        inherit = WORKSPACE
+        script = read-data.exe
+\end{lstlisting}
+
+\subsection{Varying Behaviour By Cycle Point}
+
+To make a cycling job behave differently at different cycle points you {\em
+could} use a single task with scripting that reacts to the cycle point it finds
+itself running at, but it is better to use different tasks (in different
+cycling sections) that inherit the same base job configuration. This results
+in a more transparent suite that can be understood just by inspecting the
+graph:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+# Run the same job differently at different cycle points.
+[scheduling]
+    initial cycle point = 2020-01-01T00
+    [[dependencies]]
+        [[[T00]]]
+            graph = pre => long_fc => post
+        [[[T12]]]
+            graph = pre => short_fc => post
+[runtime]
+    [[MODEL]]
+        script = run-model.sh
+    [[long_fc]]
+        inherit = MODEL
+        [[[job]]]
+            execution time limit = PT30M
+        [[[environment]]]
+            RUN_LEN = PT48H
+    [[short_fc]]
+        inherit = MODEL
+        [[[job]]]
+            execution time limit = PT10M
+        [[[environment]]]
+            RUN_LEN = PT12H
+\end{lstlisting}
+
+The few differences between \lstinline=short_fc= and \lstinline=long_fc=,
+including batch scheduler resource requests, can be configured after common
+settings are inherited.
+
+\subsubsection{At Start-Up}
+
+Similarly, if a cycling job needs special behaviour at the initial (or any other)
+cycle point, just use a different logical task in an \lstinline=R1= graph and
+have it inherit the same job as the general cycling task, not a single task
+with scripting that behaves differently if it finds itself running at the
+initial cycle point.
+
+\subsection{Automating Failure Recovery}
+
+\subsubsection{Job Submission Retries}
+
+When submitting jobs to a remote host, use job submission retries to
+automatically resubmit tasks in the event of network outages. Note this is
+distinct from job retries for job execution failure (just below).
+
+Job submission retries should normally be host (or host-group for
+\lstinline=rose host-select=) specific, not task-specific, so configure them in
+a host (or host-group) specific family. The following suite.rc fragment
+configures all HPC jobs to retry on job submission failure up to 10
+times at 1 minute intervals, then another 5 times at 1 hour intervals:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[HPC]]  # Inherited by all jobs submitted to HPC.
+        [[[job]]]
+            submission retry delays = 10*PT1M, 5*PT1H
+\end{lstlisting}
+
+\subsubsection{Job Execution Retries}
+
+Automatic retry on job execution failure is useful if you have good reason to
+believe that a simple retry will usually succeed. This may be the case if the
+job host is known to be flaky, or if the job only ever fails for one known
+reason that can be fixed on a retry. For example, if a model fails occasionally
+with a numerical instability that can be remedied with a short timestep rerun,
+then an automatic retry may be appropriate:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[model]]
+        script = """
+if [[ $CYLC_TASK_TRY_NUMBER -gt 1 ]]; then
+    SHORT_TIMESTEP=true
+else
+    SHORT_TIMESTEP=false
+fi
+model.exe"""
+        [[[job]]]
+            execution retry delays = 1*PT0M
+\end{lstlisting}
+
+\subsubsection{Failure Recovery Workflows}
+
+For recovery from failures that require explicit diagnosis you can configure
+alternate routes through the workflow, together with {\em suicide triggers}
+that remove the unused route. In the following example, if the model fails a
+diagnosis task will trigger; if it determines the cause of the failure is a
+known numerical instability (e.g.\ by parsing model job logs) it will succeed,
+triggering a short timestep run. Postprocessing can proceed from either the
+original or the short-step model run, and suicide triggers remove the unused
+path from the workflow:
+
+%\begin{figure}[H]
+%\noindent\begin{minipage}[b]{0.65\textwidth} %
+\lstset{language=suiterc}
+\begin{lstlisting}
+[scheduling]
+    [[dependencies]]
+        graph = """
+            model | model_short => postproc
+            model:fail => diagnose => model_short
+              # Clean up with suicide triggers:
+            model => ! diagnose & ! model_short
+            model_short => ! model"""
+\end{lstlisting}
+%\end{minipage}\hfill
+%\begin{minipage}[b]{0.15\textwidth}
+  \includegraphics[width=0.18\textwidth]{resources/png/failure-recovery.png}
+%\end{minipage}
+%\end{figure}
+
+\subsection{Include Files}
+
+Include-files should not be overused, but they can sometimes be useful
+(e.g.\ see Portable Suites~\ref{Portable Suites}):
+
+\begin{lstlisting}
+#...
+{% include 'inc/foo.rc' %}
+\end{lstlisting}
+
+(Technically this inserts a Jinja2-rendered file template). Cylc also has a
+native include mechanism that pre-dates Jinja2 support and literally inlines
+the include-file:
+
+\begin{lstlisting}
+#...
+%include 'inc/foo.rc'
+\end{lstlisting}
+
+The two methods normally produce the same result, but use the Jinja2 version if
+you need to construct an include-file name from a variable (because Cylc
+include-files get inlined before Jinja2 processing is done):
+
+\begin{lstlisting}
+#...
+{% include 'inc/' ~ SITE ~ '.rc' %}
+\end{lstlisting}
+
+
diff --git a/doc/src/suite-design-guide/introduction.tex b/doc/src/suite-design-guide/introduction.tex
new file mode 100644
index 0000000..762d793
--- /dev/null
+++ b/doc/src/suite-design-guide/introduction.tex
@@ -0,0 +1,28 @@
+\section{Introduction}
+\label{Introduction}
+
+This document provides guidance on making complex Cylc + Rose workflows that
+are clear, maintainable, and portable. Note that best practice advice may
+evolve over time with the capabilities of Rose and Cylc.
+
+Content is drawn from the Rose and Cylc user guides, earlier Met Office suite
+design and operational suite review documents, experience with real suites
+across the Unified Model Consortium, and discussion among members of the UM
+TISD (Technical Infrastructure Suite Design) working group.
+
+We start with the most general topics (coding style, general principles),
+move on to more advanced topics (efficiency and maintainability, portable
+suites), and end with some pointers to future developments.
+
+{\em A good working knowledge of Cylc and Rose is assumed}.
+
+\begin{itemize}
+    \item Cylc: \url{http://cylc.github.io/cylc/documentation.html}
+    \item Rose: \url{http://metomi.github.io/rose/doc/rose.html}
+\end{itemize}
+
+\note{for non-Rose users: this document comes out of the Unified Model
+  Consortium wherein Cylc is used within the Rose {\em suite management
+  framework}. However, the bulk of the information in this guide is about
+Cylc suite design; which parts are Rose-specific should be clear from
+context.}
diff --git a/doc/src/suite-design-guide/portable-suites.tex b/doc/src/suite-design-guide/portable-suites.tex
new file mode 100644
index 0000000..63d0a0f
--- /dev/null
+++ b/doc/src/suite-design-guide/portable-suites.tex
@@ -0,0 +1,493 @@
+\section{Portable Suites}
+\label{Portable Suites}
+
+A {\em portable} or {\em interoperable} suite can run ``out of the box'' at
+different sites, or in different environments such as research and operations
+within a site.  For convenience we just use the term {\em site portability}.
+
+Lack of portability is a major barrier to collaborative development when
+sites need to run more or less the same workflow, because it is very
+difficult to translate changes manually between large, complicated suites.
+
+Most suites are riddled with site-specific details such as local build
+configurations, file paths, host names, and batch scheduler directives, etc.;
+but it is possible to cleanly factor all this out to make a portable suite.
+Significant variations in workflow structure can even be accommodated quite
+easily. If the site workflows are {\em too different}, however, you may decide
+that it is appropriate for each site to maintain separate suites.
+
+The recommended way to do this, which we expand on below, is:
+
+\begin{itemize}
+  \item Put all site-specific settings in include-files loaded at the end
+    of a generic ``core'' suite definition.
+  \item Use ``optional optional'' app config files for site-specific variations
+    in the core suite's Rose apps.
+  \item (Make minimal use of inlined site switches too, if necessary).
+\end{itemize}
+
+The result should actually be tidier than the original in one respect: all
+the messy platform-specific resource directives etc., will be hidden away in
+the site include-files.
+
+\subsection{The Jinja2 SITE Variable}
+
+First a suite Jinja2 variable called \lstinline=SITE= should be set to the site
+name, either in \lstinline=rose-suite.conf=, or in the suite definition itself
+(perhaps automatically, by querying the local environment in some way).
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+#!Jinja2
+{% set SITE = "niwa" %}
+#...
+\end{lstlisting}
+
+This will be used to select site-specific configuration, as described below.
+
+\subsection{Site Include-Files}
+
+If a section heading in a suite.rc file is repeated the items under it simply
+add to or override those defined under the same section earlier in the file
+(but note Section~\ref{List Item Override In Site Include-Files}).
+For example, this task definition:
+
+\begin{lstlisting}
+[runtime]
+    [[foo]]
+        script = run-foo.sh
+        [[[remote]]]
+            host = hpc1.niwa.co.nz
+\end{lstlisting}
+
+can equally be written like this:
+
+\begin{lstlisting}
+[runtime]  # Part 1 (site-agnostic).
+    [[foo]]
+        script = run-foo.sh
+[runtime]  # Part 2 (site-specific).
+    [[foo]]
+        [[[remote]]]
+            host = hpc1.niwa.co.nz
+\end{lstlisting}
+
+(Note that if Part 2 had also defined \lstinline=script= the new value would
+override the original. It can sometimes be useful to set a widely used
+default and override it in a few cases, but be aware that this can make it more 
+difficult to determine the origin of affected values.)
+
+In this way all site-specific \lstinline=[runtime]= settings, with their
+respective sub-section headings, can be moved to the end of the file, and then
+out into an include-file (file inclusion is essentially just literal inlining):
+
+\begin{lstlisting}
+#...
+{% set SITE = "niwa" %}
+
+# Core site-agnostic settings:
+#...
+[runtime]
+    [[foo]]
+        script = run-foo.sh
+#...
+
+# Site-specific settings:
+{% include 'site/' ~ SITE ~ '.rc' %}
+\end{lstlisting}
+
+where the site include-file \lstinline=site/niwa.rc= contains:
+
+\begin{lstlisting}
+# site/niwa.rc
+[runtime]
+    [[foo]]
+        [[[remote]]]
+            host = hpc1.niwa.co.nz
+\end{lstlisting}
+
+\subsection{Site-Specific Graphs}
+
+Repeated \lstinline=graph= strings under the same graph section headings are
+always additive (graph strings are the only exception to the normal repeat item
+override semantics). So, for instance, this graph:
+
+\begin{lstlisting}
+[scheduling]
+    initial cycle point = 2025
+    [[dependencies]]
+        [[[P1Y]]]
+            graph = "pre => model => post => niwa_archive"
+\end{lstlisting}
+
+can be written like this:
+
+\begin{lstlisting}
+[scheduling]
+    initial cycle point = 2025
+    [[dependencies]]
+        [[[P1Y]]]
+            graph = "pre => model => post"
+        [[[P1Y]]]
+            graph = "post => niwa_archive"
+\end{lstlisting}
+
+and again, the site-specific part can be taken out to a site include-file:
+
+\begin{lstlisting}
+#...
+{% set SITE = "niwa" %}
+
+# Core site-agnostic settings.
+#...
+[scheduling]
+    initial cycle point = 2025
+    [[dependencies]]
+        [[[P1Y]]]
+            graph = "pre => model => post"
+#...
+# Site-specific settings:
+{% include 'site/' ~ SITE ~ '.rc' %}
+\end{lstlisting}
+
+where the site include-file \lstinline=site/niwa.rc= contains:
+
+\begin{lstlisting}
+# site/niwa.rc
+[scheduling]
+    [[dependencies]]
+        [[[P1Y]]]
+            graph = "post => niwa_archive"
+\end{lstlisting}
+
+Note that the site-file graph needs to define the dependencies of the
+site-specific tasks, and thus their points of connection to the core suite
+- which is why the core task \lstinline=post= appears in the graph here (if
+\lstinline=post= had any site-specific runtime settings, to get it to run at
+this site, they would also be in the site-file).
+
+\subsection{Inlined Site-Switching}
+\label{Inlined Site-Switching}
+
+It may be tempting to use inlined switch blocks throughout the suite instead of
+site include-files, but {\em this is not recommended} - it is verbose and
+untidy (the greater the number of supported sites, the bigger the
+mess) and it exposes all site configuration to all users:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+#...
+[runtime]
+    [[model]]
+        script = run-model.sh
+{# Site switch blocks not recommended:#}
+{% if SITE == 'niwa' %}
+        [[[job]]]
+            batch system = loadleveler
+        [[[directives]]]
+            # NIWA Loadleveler directives...
+{% elif SITE == 'metoffice' %}
+        [[[job]]]
+            batch system = pbs
+        [[[directives]]]
+            # Met Office PBS directives...
+{% elif SITE == ... %}
+            #...
+{% else %}
+    {{raise('Unsupported site: ' ~ SITE)}}
+{% endif %}
+    #...
+\end{lstlisting}
+
+Inlined switches can be used, however, to configure exceptional behaviour at
+one site without requiring the other sites to duplicate the default behaviour.
+But be wary of accumulating too many of these switches:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+# (core suite.rc file)
+#...
+{% if SITE == 'small' %}
+   {# We can't run 100 members... #}
+   {% set ENSEMBLE_SIZE = 25 %}
+{% else %}
+   {# ...but everyone else can! #}
+   {% set ENSEMBLE_SIZE = 100 %}
+{% endif %}
+#...
+\end{lstlisting}
+
+Inlined switches can also be used to temporarily isolate a site-specific
+change to a hitherto non site-specific part of the suite, thereby avoiding the
+need to update all site include-files before getting agreement from the suite
+owner and collaborators.
+
+\subsection{Site-Specific Suite Variables}
+
+It can sometimes be useful to set site-specific values of suite variables that
+aren't exposed to users via \lstinline=rose-suite.conf=. For example, consider
+a suite that can run a special post-processing workflow of some kind at sites
+where IDL is available. The IDL-dependence switch can be set per site like this: 
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+#...
+{% from SITE ~ '-vars.rc' import HAVE_IDL, OTHER_VAR %}
+graph = """
+  pre => model => post
+{% if HAVE_IDL %}
+      post => idl-1 => idl-2 => idl-3
+{% endif %}
+        """
+\end{lstlisting}
+
+where for \lstinline@SITE = niwa@ the file \lstinline=niwa-vars.rc= contains:
+\lstset{language=suiterc}
+\begin{lstlisting}
+{# niwa-vars.rc #}
+{% set HAVE_IDL = True %}
+{% set OTHER_VAR = "the quick brown fox" %}
+\end{lstlisting}
+
+Note we are assuming there are significantly fewer options (IDL or not, in this
+case) than sites, otherwise the IDL workflow should just go in the site
+include-files of the sites that need it.
+
+\subsection{Site-Specific Optional Suite Configs}
+
+During development and testing of a portable suite you can use an optional Rose
+suite config file to automatically set site-specific suite inputs and thereby
+avoid the need to make manual changes every time you check out and run a new
+version. The site switch itself has to be set of course, but there may be other
+settings too such as model parameters for a standard local test domain. Just
+put these settings in \lstinline=opt/rose-suite-niwa.conf= (for site ``niwa'')
+and run the suite with \lstinline=rose suite-run -O niwa=.
+
+\subsection{Site-Specific Optional App Configs}
+
+Typically a few but not all apps will need some site customization, e.g.\ for
+local archive configuration, local science options, or whatever. To avoid
+explicit site-customization of individual task-run command lines use Rose's
+built-in {\em optional optional app config} capability:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[root]]
+        script = rose task-run -v -O '({{SITE}})'
+\end{lstlisting}
+
+Normally a missing optional app config is considered to be an error, but the 
+round parentheses here mean the named optional config is optional - i.e.\
+use it if it exists, otherwise ignore.
+
+With this setting in place we can simply add a
+\lstinline=opt/rose-app-niwa.conf= to any app that needs customization at
+\lstinline@SITE = niwa@.
+
+\subsection{An Example}
+
+The following small suite is not portable because all of its tasks are
+submitted to a NIWA HPC host; two tasks are entirely NIWA-specific in that they
+respectively install files from a local database and upload products to a local
+distribution system; and one task runs a somewhat NIWA-specific configuration
+of a model. The remaining tasks are site-agnostic apart from local job host
+and batch scheduler directives.
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[cylc]
+    UTC mode = True
+[scheduling]
+    initial cycle point = 2017-01-01
+    [[dependencies]]
+        [[[R1]]]
+            graph = install_niwa => preproc
+        [[[P1D]]]
+            graph = """
+                preproc & model[-P1D] => model => postproc => upload_niwa
+                postproc => idl-1 => idl-2 => idl-3"""
+[runtime]
+    [[root]]
+        script = rose task-run -v
+    [[HPC]]  # NIWA job host and batch scheduler settings.
+        [[[remote]]]
+            host = hpc1.niwa.co.nz
+        [[[job]]]
+            batch system = loadleveler
+        [[[directives]]]
+            account_no = NWP1623
+            class = General
+            job_type = serial  # (most jobs in this suite are serial)
+    [[install_niwa]]  # NIWA-specific file installation task.
+        inherit = HPC
+    [[preproc]]
+        inherit = HPC
+    [[model]]  # Run the model on a local test domain.
+        inherit = HPC
+        [[[directives]]]  # Override the serial job_type setting.
+            job_type = parallel
+        [[[environment]]]
+            SPEED = fast
+    [[postproc]]
+        inherit = HPC
+    [[upload_niwa]]  # NIWA-specific product upload.
+        inherit = HPC
+\end{lstlisting}
+
+To make this portable, refactor it into a core suite.rc file that contains the
+clean site-independent workflow configuration and loads all site-specific
+settings from an include-file at the end:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+# suite.rc: CORE SITE-INDEPENDENT CONFIGURATION.
+{% set SITE = 'niwa' %}
+{% from 'site/' ~ SITE ~ '-vars.rc' import HAVE_IDL %}
+[cylc]
+    UTC mode = True
+[scheduling]
+    initial cycle point = 2017-01-01
+    [[dependencies]]
+        [[[P1D]]]
+            graph = """
+preproc & model[-P1D] => model => postproc
+{% if HAVE_IDL %}
+    postproc => idl-1 => idl-2 => idl-3
+{% endif %}
+                    """
+[runtime]
+    [[root]]
+        script = rose task-run -v -O '({{SITE}})'
+    [[preproc]]
+        inherit = HPC
+    [[postproc]]
+        inherit = HPC
+    [[model]]
+        inherit = HPC
+        [[[environment]]]
+            SPEED = fast
+{% include 'site/' ~ SITE ~ '.rc' %}
+\end{lstlisting}
+
+plus site files \lstinline=site/niwa-vars.rc=:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+# site/niwa-vars.rc: NIWA SITE SETTINGS FOR THE EXAMPLE SUITE.
+{% set HAVE_IDL = True %}
+\end{lstlisting}
+
+and \lstinline=site/niwa.rc=:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+# site/niwa.rc: NIWA SITE SETTINGS FOR THE EXAMPLE SUITE.
+[scheduling]
+    [[dependencies]]
+        [[[R1]]]
+            graph = install_niwa => preproc
+        [[[P1D]]]
+            graph = postproc => upload_niwa
+[runtime]
+    [[HPC]]
+        [[[remote]]]
+            host = hpc1.niwa.co.nz
+        [[[job]]]
+            batch system = loadleveler
+        [[[directives]]]
+            account_no = NWP1623
+            class = General
+            job_type = serial  # (most jobs in this suite are serial)
+    [[install_niwa]]  # NIWA-specific file installation.
+    [[model]]
+        [[[directives]]]  # Override the serial job_type setting.
+            job_type = parallel
+    [[upload_niwa]]  # NIWA-specific product upload.
+\end{lstlisting}
+
+and finally, an optional app config file for the local model domain:
+
+\lstset{language=bash}
+\begin{lstlisting}
+app/model/rose-app.conf  # Main app config.
+app/model/opt/rose-app-niwa.conf  # NIWA site settings.
+\end{lstlisting}
+
+Some points to note:
+
+\begin{itemize}
+  \item It is straightforward to extend support to a new site by copying
+    existing site file(s) and adapting them to the new job host and batch
+    scheduler etc.
+
+  \item Batch system directives should be considered site-specific unless
+    all supported sites have the same batch system and the same host
+    architecture (including CPU clock speed and memory size etc.).
+
+  \item We've assumed that all tasks run on a single HPC host at both
+    sites. If that's not a valid assumption the \lstinline=HPC= family
+    inheritance relationships would have to become site-specific.
+
+  \item Core task runtime configurations aren't needed in site files at all
+    if their job host and batch system settings can be defined in common
+    families (\lstinline=HPC= in this case).
+\end{itemize}
+
+
+\subsection{Collaborative Development Model}
+\label{Collaborative Development Model}
+
+Official releases of a portable suite should be made from the suite trunk.
+
+Changes should be developed on feature branches so as not to affect other users
+of the suite.
+
+Site-specific changes shouldn't touch the core suite.rc file, just the relevant
+site include-file, and therefore should not need close scrutiny from other
+sites.
+
+Changes to the core suite.rc file should be agreed by all stakeholders, and
+should be carefully checked for effects on site include-files:
+
+\begin{itemize}
+  \item Changing the name of tasks or families in the core suite may break
+    sites that add configuration to the original runtime namespace.
+  \item Adding new tasks or families to the core suite may require
+    corresponding additions to the site files.
+  \item Deleting tasks or families from the core suite may require
+    corresponding parts of the site files to be removed. And also, check for
+    site-specific triggering off of deleted tasks or families.
+\end{itemize}
+
+However, if the owner site has to get some changes into the trunk before all
+collaborating sites have time to test them, version control will of course
+protect those lagging behind from any immediate ill effects.
+
+When a new feature is complete and tested at the developer's site, the suite
+owner should check out the branch, review and test it, and if necessary request
+that other sites do the same and report back. The owner can then merge the
+new feature to the trunk once satisfied.
+
+All planning and discussion associated with the change should be documented on
+MOSRS Trac tickets associated with the suite.
+
+\subsection{Research-To-Operations Transition}
+
+Under this collaborative development model it is {\em possible} to use the
+same suite in research and operations, largely eliminating the difficult
+translation between the two environments. Where appropriate, this can save
+a lot of work.
+
+Operations-specific parts of the suite should be factored out (as for site
+portability) into include-files that are only loaded in the operational
+environment. Improvements and upgrades can be developed on feature branches in
+the research environment. Operations staff can check out completed feature
+branches for testing in the operational environment before merging to trunk or
+refering back to research if problems are found. After sufficient testing the
+new suite version can be deployed into operations.
+
+\note{This obviously glosses over the myriad complexities of the technical
+  and scientific testing and validation of suite upgrades; it merely describes
+  what is possible from a suite design and collaborative development
+perspective.}
diff --git a/doc/src/suite-design-guide/preamble.tex b/doc/src/suite-design-guide/preamble.tex
new file mode 100644
index 0000000..e734eef
--- /dev/null
+++ b/doc/src/suite-design-guide/preamble.tex
@@ -0,0 +1,87 @@
+% ---- PREAMBLE ----
+% layout
+\documentclass{article}
+\usepackage[margin=3cm, headheight=1cm]{geometry}
+
+% Font stuff.
+\renewcommand{\familydefault}{\sfdefault}  % sans-serif
+\usepackage{parskip}  % regular paragraph separation
+
+% Imports.
+\usepackage[usenames]{color}
+\usepackage{graphicx}
+\usepackage{listings}
+  \usepackage{courier}
+\usepackage{hyperref}
+\usepackage{textcomp}
+
+% Hyperlinks.
+\definecolor{links}{rgb}{0.1,0.1,0.6}
+\hypersetup{colorlinks=true, linkcolor=links, urlcolor=links}
+\urlstyle{same}
+
+% Headers / Footers.
+\usepackage{fancyhdr}
+\lhead{Rose+Cylc Suite Design Best Practice Guide}  % TODO: hardcoded !!!
+\rhead{
+  \includegraphics[width=0.15\textwidth]{resources/png/rose-logo}
+  \includegraphics[width=0.15\textwidth]{resources/tex/cylc-logo}}
+\pagestyle{fancy}
+
+% Code listings: default style.
+\definecolor{keywords}{rgb}{0.8,0.4,0.0}
+\definecolor{comments}{rgb}{1.0,0.3,0.5}
+\definecolor{identifiers}{rgb}{0.3,0.4,0.5}
+\definecolor{strings}{rgb}{0.2,0.5,0.3}
+\definecolor{basic}{rgb}{0.2,0.3,0.4}
+\definecolor{command}{rgb}{0.0,0.2,0.1}
+\definecolor{transcr}{rgb}{0.0,0.2,0.4}
+\newcommand\mysmall{\fontsize{8}{9.2}\selectfont}
+
+% bold for courier font:
+\renewcommand{\ttdefault}{pcr}
+
+\lstset{
+basicstyle=\color{basic}\mysmall\ttfamily\bfseries,
+identifierstyle=\color{identifiers},
+keywordstyle=\color{keywords},
+commentstyle=\color{comments},
+stringstyle=\color{strings},
+showstringspaces=false,
+upquote=true,
+}
+
+% Code listings: suite.rc language support.
+\definecolor{level1}{rgb}{0.0,0.2,0.6}
+\definecolor{level2}{rgb}{0.0,0.3,0.7}
+\definecolor{level3}{rgb}{0.0,0.4,0.8}
+\definecolor{jinja2}{rgb}{0.7,0.5,0.3}
+\lstdefinelanguage{suiterc}
+{
+string=[b]{"},
+sensitive=true,
+comment=[l]{\#},
+morecomment=[s][\color{level1}]{[}{]},
+morecomment=[s][\color{level2}]{[[}{]]},
+morecomment=[s][\color{level3}]{[[[}{]]]},
+morecomment=[s][\color{jinja2}]{\{\%}{\%\}},
+morecomment=[s][\color{jinja2}]{\{\{}{\}\}},
+morecomment=[s][\color{jinja2}]{\{\#}{\#\}},
+}
+
+\definecolor{note}{rgb}{0.6,0.6,0.6}
+\newcommand{\note}[1]{{\color{note}\textbf{Note:} \textit{#1}}}
+
+\definecolor{terminology}{rgb}{0.7,0.5,0.2}
+\newcommand{\terminology}[1]{{\color{terminology} \textit{#1}}}
+
+\definecolor{todo}{rgb}{1.0,1.0,0.0}
+\newcommand{\TODO}[1]{{\colorbox{todo}{\textbf{TODO:} \textit{#1}}}}
+
+\usepackage{framed}
+\definecolor{shadecolor}{rgb}{0.95,0.95,0.95}
+
+%\usepackage{draftwatermark}
+%\SetWatermarkText{Draft 15/03/17}
+%\SetWatermarkScale{5}
+%\SetWatermarkColor[rgb]{0.95,0.90,0.90}
diff --git a/doc/src/suite-design-guide/resources/png/failure-recovery.png b/doc/src/suite-design-guide/resources/png/failure-recovery.png
new file mode 100644
index 0000000..625a11b
Binary files /dev/null and b/doc/src/suite-design-guide/resources/png/failure-recovery.png differ
diff --git a/doc/src/suite-design-guide/resources/png/fam-to-fam-1.png b/doc/src/suite-design-guide/resources/png/fam-to-fam-1.png
new file mode 100644
index 0000000..e263198
Binary files /dev/null and b/doc/src/suite-design-guide/resources/png/fam-to-fam-1.png differ
diff --git a/doc/src/suite-design-guide/resources/png/fam-to-fam-2.png b/doc/src/suite-design-guide/resources/png/fam-to-fam-2.png
new file mode 100644
index 0000000..a0c2b97
Binary files /dev/null and b/doc/src/suite-design-guide/resources/png/fam-to-fam-2.png differ
diff --git a/doc/src/suite-design-guide/resources/png/param-1.png b/doc/src/suite-design-guide/resources/png/param-1.png
new file mode 100644
index 0000000..6f839b0
Binary files /dev/null and b/doc/src/suite-design-guide/resources/png/param-1.png differ
diff --git a/doc/src/suite-design-guide/resources/png/param-2.png b/doc/src/suite-design-guide/resources/png/param-2.png
new file mode 100644
index 0000000..5c52451
Binary files /dev/null and b/doc/src/suite-design-guide/resources/png/param-2.png differ
diff --git a/doc/src/suite-design-guide/resources/png/rose-logo.png b/doc/src/suite-design-guide/resources/png/rose-logo.png
new file mode 100644
index 0000000..5007efe
Binary files /dev/null and b/doc/src/suite-design-guide/resources/png/rose-logo.png differ
diff --git a/doc/src/suite-design-guide/resources/tex/cylc-logo.pdf b/doc/src/suite-design-guide/resources/tex/cylc-logo.pdf
new file mode 100644
index 0000000..502327c
Binary files /dev/null and b/doc/src/suite-design-guide/resources/tex/cylc-logo.pdf differ
diff --git a/doc/src/suite-design-guide/roadmap.tex b/doc/src/suite-design-guide/roadmap.tex
new file mode 100644
index 0000000..60fc1c7
--- /dev/null
+++ b/doc/src/suite-design-guide/roadmap.tex
@@ -0,0 +1,75 @@
+\section{Roadmap}
+
+Several planned future developments in Rose and Cylc may have an impact on
+suite design.
+
+\subsection{List Item Override In Site Include-Files}
+\label{List Item Override In Site Include-Files}
+
+A few Cylc config items hold lists of task (or family) names, e.g.:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[scheduling]
+    [[special tasks]]
+        clock-triggered = get-data-a, get-data-b
+    #...
+#...
+\end{lstlisting}
+
+Currently a repeated config item completely overrides a previously set value
+(apart from graph strings which are always additive). This means a site
+include-file (for example) can't add a new site-specific clock-triggered task
+without writing out the complete list of all clock-triggered tasks in the
+suite, which breaks the otherwise clean separation into core and site files.
+
+{\em In the future we plan to support add, subtract, unset, and override
+semantics for all items - see \url{https://github.com/cylc/cylc/issues/1363}}.
+
+\subsection{UM STASH in Optional App Configs}
+\label{UM STASH in Optional App Configs}
+
+A caveat to the advice on use of optional app configs in Section~\ref{Optional
+App Config Files}: in general you might need the ability to turn off or modify
+some STASH requests in the main app, not just add additional site-specific
+STASH. But overriding STASH in optional configs is fragile because STASH
+namelists names are automatically generated from a {\em hash} of the precise
+content of the namelist. This makes it possible to uniquely identify the same
+STASH requests in different apps, but if any detail of a STASH request changes
+in a main app its namelist name will change and any optional configs that refer
+to it will become divorced from their intended target.
+
+Until this problem is solved we recommend that:
+
+\begin{itemize}
+  \item All STASH in main UM apps should be grouped into sensible {\em
+    packages} that can be turned on and off in optional configs without
+    referencing the individual STASH request namelists.
+  \item Or all STASH should be held in optional site configs and none in the
+    main app. Note however that STASH is difficult to configure outside of
+    \lstinline=rose edit=, and the editor does not yet allow you to edit
+    optional configs - see \url{https://github.com/metomi/rose/issues/1685}.
+\end{itemize}
+
+\subsection{Modular Suite Design}
+
+The modular suite design concept is that we should be able to import common
+workflow segments at install time rather than duplicating them in each suite:
+\url{https://github.com/cylc/cylc/issues/1829}. The content of a suite module
+will be encapsulated in a protected namespace to avoid clashing with the
+importing suite, and selected inputs and outputs exposed via a proper
+interface.
+
+This should aid portable suite design too by enabling site-specific parts of a
+workflow (local product generation for example) to be stored and imported
+on-site rather than polluting the source and revision control record of
+the core suite that everyone sees.
+
+We note that this can already be done to a limited extent by using
+\lstinline=rose suite-run= to install suite.rc fragments from an external
+location. However, as a literal inlining mechanism with no encapsulation or
+interface, the internals of the ``imported'' fragments would have to be
+compatible with the suite definition in every respect.
+
+See also~\ref{Monolithic Or Interdependent Suites} on modular {\em systems of
+suites} connected by inter-suite triggering.
diff --git a/doc/src/suite-design-guide/style-guide.tex b/doc/src/suite-design-guide/style-guide.tex
new file mode 100644
index 0000000..1459adc
--- /dev/null
+++ b/doc/src/suite-design-guide/style-guide.tex
@@ -0,0 +1,281 @@
+\section{Style Guidelines}
+
+Coding style is largely subjective, but for collaborative development of
+complex systems it is important to settle on a clear and consistent style to
+avoid getting into a mess. The following style rules are recommended.
+
+\subsection{Tab Characters}
+
+Do not use tab characters. Tab width depends on editor settings, so a mixture
+of tabs and spaces in the same file can render to a mess.
+
+Use \lstinline=grep -InPr "\t" *= to find tabs recursively in files in
+a directory.
+
+In {\em vim} use \lstinline=%retab= to convert existing tabs to spaces,
+and set \lstinline=expandtab= to automatically convert new tabs.
+
+In {\em emacs} use {\em whitespace-cleanup}.
+
+In {\em gedit}, use the {\em Draw Spaces} plugin to display tabs and spaces.
+
+\subsection{Trailing Whitespace}
+
+Trailing whitespace is untidy, it makes quick reformatting of paragraphs
+difficult, and it can result in hard-to-find bugs (space after intended
+line continuation markers).
+
+To remove existing trailing whitespace in a file use a \lstinline=sed= or
+\lstinline=perl= one-liner:
+
+\lstset{language=sh}
+\begin{lstlisting}
+$ perl -pi -e "s/ +$//g" /path/to/file
+# or:
+$ sed --in-place 's/[[:space:]]\+$//' path/to/file
+\end{lstlisting}
+
+Or do a similar search-and-replace operation in your editor. Editors like {\em
+vim} and {\em emacs} can also be configured to highlight or automatically
+remove trailing whitespace on the fly.
+
+\subsection{Indentation}
+
+Consistent indentation makes a suite definition more readable, it shows section
+nesting clearly, and it makes block re-indentation operations easier in text
+editors. Indent suite.rc syntax four spaces per nesting level:
+
+\subsubsection{Config Items}
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[SECTION]
+    # A comment.
+    title = the quick brown fox
+    [[SUBSECTION]]
+        # Another comment.
+        a short item = value1
+        a very very long item = value2
+\end{lstlisting}
+
+Don't align \lstinline at item = value@ pairs on the \lstinline@=@ character
+like this:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[SECTION]  # Avoid this.
+             a short item = value1
+    a very very long item = value2
+\end{lstlisting}
+
+or like this:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[SECTION]  # Avoid this.
+    a short item          = value1
+    a very very long item = value2
+\end{lstlisting}
+
+because the whole block may need re-indenting after a single change, which will
+pollute your revision history with spurious changes.
+
+Comments should be indented to the same level as the section or item they refer
+to, and trailing comments should be preceded by two spaces, as shown above.
+
+\subsubsection{Script String Lines}
+
+Script strings are written verbatim to task job scripts so they should really
+be indented from the left margin:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[foo]]
+        # Recommended.
+        post-script = """
+if [[ $RESULT == "bad" ]]; then
+    echo Goodbye World!
+    exit 1
+fi"""
+\end{lstlisting}
+
+Indentation is {\em mostly} ignored by the bash interpreter, however, so if you
+feel it aids readability it is {\em mostly} harmless to indent internal script
+lines as if part of the Cylc syntax, or even out to the triple quotes:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[foo]]
+        # OK, but...
+        post-script = """
+            if [[ $RESULT == "bad" ]]; then
+                echo Goodbye World!
+                exit 1
+            fi"""
+\end{lstlisting}
+
+But if you do this, watch your line length (see~\ref{Line Length}) and {\em do not
+indent here documents}:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[foo]]
+        # ... this is an error!
+        script = """
+        cat >> log.txt <<_EOF_
+            The quick brown fox jumped
+            over the lazy dog.
+        _EOF_
+                 """
+\end{lstlisting}
+
+(The leading whitespace would end up in \lstinline=log.txt=, and indenting the
+\lstinline=_EOF_= marker is actually an error).
+
+\subsubsection{Graph String Lines}
+
+Multiline \lstinline at graph@ strings can be entirely free-form:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[scheduling]
+    [[dependencies]]
+        graph = """
+     # Main workflow:
+  FAMILY:succeed-all => bar & baz => qux
+
+     # Housekeeping:
+  qux => rose_arch => rose_prune"""
+\end{lstlisting}
+
+Whitespace is ignored in graph string parsing, however, so internal graph lines
+can be indented as if part of the suite.rc syntax, or even out to the triple
+quotes, if you feel it aids readability (but watch line length with large
+indents; see~\ref{Line Length}):
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[scheduling]
+    [[dependencies]]
+        graph = """
+           # Main workflow:
+           FAMILY:succeed-all => bar & baz => qux
+
+           # Housekeeping:
+           qux => rose_arch => rose_prune"""
+\end{lstlisting}
+
+Both styles are acceptable; choose one and use it consistently.
+
+\subsubsection{Jinja2 Code}
+
+A suite.rc file with embedded Jinja2 code is essentially a Jinja2 program to
+generate a Cylc suite definition. It is not possible to consistently indent the
+Jinja2 as if it were part of the suite.rc syntax (which to the Jinja2 processor
+is just arbitrary text), so it should be indented from the left margin on
+its own terms:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[OPS]]
+{% for T in OPS_TASKS %}
+    {% for M in range(M_MAX) %}
+    [[ops_{{T}}_{{M}}]]
+        inherit = OPS
+    {% endfor %}
+{% endfor %}
+\end{lstlisting}
+
+\subsection{Comments}
+
+Comments should be minimal, but not too minimal. If context and clear
+task and variable names will do, leave it at that. Extremely verbose comments
+tend to get out of sync with the code they describe, which can be worse
+than having no comments.
+
+Avoid long lists of numbered comments - future changes may require mass
+renumbering.
+
+Avoid page-width ``section divider'' comments, especially if they are not
+strictly limited to the standard line length (see~\ref{Line Length}).
+
+Indent comments to the same level as the config items they describe.
+
+\subsection{Titles, Descriptions, And URLs}
+
+Document the suite and its tasks with \lstinline=title=,
+\lstinline=description=, and \lstinline=url= items instead of comments. These
+can be displayed, or linked to, by the GUI at runtime.
+
+\subsection{Line Length And Continuation}
+\label{Line Length}
+
+Keep to the standard maximum line length of 79 characters where possible. Very
+long lines affect readability and make side-by-side diffs hard to view.
+
+Backslash line continuation markers can be used anywhere in the suite.rc file
+but should be avoided if possible because they are easily broken by invisible
+trailing whitespace.
+
+Continuation markers are not needed in graph strings where trailing
+trigger arrows imply line continuation:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[scheduling]
+    [[dependencies]]
+        # No line continuation marker is needed here.
+        graph = """prep => one => two => three =>
+                four => five => six => seven => eight"""
+[runtime]
+    [[MY_TASKS]]
+    # A line continuation marker *is* needed here:
+    [[one, two, three, four, five, six, seven, eight, nine, ten, \
+      eleven, twelve, thirteen ]]
+        inherit = MY_TASKS
+\end{lstlisting}
+
+\subsection{Task Naming Conventions}
+
+Use \lstinline=UPPERCASE= for family names and \lstinline=lowercase=
+for tasks, so you can distinguish them at a glance.
+
+Choose a convention for multi-component names and use it consistently. Put the
+most general name components first for natural grouping in the GUI, e.g.\
+\lstinline=obs_sonde=, \lstinline=obs_radar= (not \lstinline=sonde_obs= etc.)
+
+Within your convention keep names as short as possible.
+
+\subsubsection{UM System Task Names}
+
+For UM System suites we recommend the following full task naming convention:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+model_system_function[_member]
+\end{lstlisting}
+
+For example, \lstinline=glu_ops_process_scatwind= where \lstinline=glu= refers
+to the global (deterministic model) update run, \lstinline=ops= is the system
+that owns the task, and \lstinline=process_scatwind= is the function it
+performs. The optional \lstinline=member= suffix is intended for use with
+ensembles as needed.
+
+Within this convention keep names as short as possible, e.g.\ use
+\lstinline=fcst= instead of \lstinline=forecast=.
+
+UM forecast apps should be given names that reflect their general science
+configuration rather than geographic domain, to allow use on other model
+domains without causing confusion.
+
+\subsection{Rose Config Files}
+
+Use \lstinline=rose config-dump= to load and re-save new Rose .conf files. This
+puts the files in a standard format (ordering of lines etc.) to ensure that
+spurious changes aren't generated when you next use \lstinline=rose edit=.
+
+See also~\ref{Optional App Config Files} on optional app config files.
diff --git a/doc/src/suite-design-guide/title-page.tex b/doc/src/suite-design-guide/title-page.tex
new file mode 100644
index 0000000..70921b6
--- /dev/null
+++ b/doc/src/suite-design-guide/title-page.tex
@@ -0,0 +1,16 @@
+\thispagestyle{empty}
+
+\begin{titlepage}
+    \begin{center}
+        \includegraphics[width=0.3\textwidth]{resources/png/rose-logo}
+        \includegraphics[width=0.3\textwidth]{resources/tex/cylc-logo}
+
+        \Huge{Cylc Rose Suite Design\\Best Practice Guide}
+
+      \small{Version 1.0 - 23 March 2017}
+
+      \small{Last updated for: Cylc-7.2.0 and Rose-2017.02.0}
+
+        \large{Hilary Oliver, Dave Matthews, Andy Clark, and Contributors}
+    \end{center}
+\end{titlepage}
diff --git a/examples/clock-expire/suite.rc b/examples/clock-expire/suite.rc
index 3ac1f72..8f31ce7 100644
--- a/examples/clock-expire/suite.rc
+++ b/examples/clock-expire/suite.rc
@@ -3,7 +3,7 @@ description = """
 Skip a daily post-processing workflow if the 'copy' task has expired."""
 
 [cylc]
-   cycle point format = %Y-%m-%dT%H
+    cycle point format = %Y-%m-%dT%H
 [scheduling]
     initial cycle point = now
     final cycle point = +P3D
@@ -16,5 +16,6 @@ Skip a daily post-processing workflow if the 'copy' task has expired."""
     [[dependencies]]
         [[[P1D]]]
             graph = """
-        model[-P1D] => model => copy => proc
-              copy:expired => !proc"""
+                model[-P1D] => model => copy => proc
+                copy:expired => !proc
+            """
diff --git a/examples/delayed-retry/suite.rc b/examples/delayed-retry/suite.rc
index ad87d80..60d843d 100644
--- a/examples/delayed-retry/suite.rc
+++ b/examples/delayed-retry/suite.rc
@@ -3,9 +3,10 @@
         graph = "foo => bar"
 [runtime]
     [[foo]]
-        retry delays = 3*PT6S
         script = """
 sleep 10
 if (( CYLC_TASK_TRY_NUMBER < 3 )); then
     bin/false
 fi"""
+        [[[job]]]
+            execution retry delays = 3*PT6S
diff --git a/examples/family/extra/suite.rc b/examples/family/extra/suite.rc
index 9b1d853..d304abf 100644
--- a/examples/family/extra/suite.rc
+++ b/examples/family/extra/suite.rc
@@ -35,5 +35,5 @@ y1 | H => I"""
     collapsed families = FAMX, FAMY
     use node color for edges = False
     [[node attributes]]
-        FAMx = 'color=red', 'shape=ellipse'
+        FAMX = 'color=red', 'shape=ellipse'
         FAMY = 'color=blue', 'shape=ellipse'
diff --git a/examples/inherit/single/two/suite.rc b/examples/inherit/single/two/suite.rc
index 0c4fba5..0992b2b 100644
--- a/examples/inherit/single/two/suite.rc
+++ b/examples/inherit/single/two/suite.rc
@@ -64,8 +64,5 @@ generated with 'cylc jobscript'."""
         inherit = OPS
         description = "OPS ATOVS postprocessing"
 
-    [[prepobs]]
-        description = "obs preprocessing"
-
 [visualization]
     collapsed families = OPS, VAR, BAZ
diff --git a/examples/satellite/ext-trigger/suite.rc b/examples/satellite/ext-trigger/suite.rc
index c1cc775..e8f820f 100644
--- a/examples/satellite/ext-trigger/suite.rc
+++ b/examples/satellite/ext-trigger/suite.rc
@@ -45,15 +45,15 @@ until triggered by an external system."""
 [runtime]
     [[prep]]
         title = clean the suite output directories
-        command scripting = \
+        script = \
 rm -rf $CYLC_SUITE_SHARE_DIR $CYLC_SUITE_WORK_DIR
 
     [[satsim]]
         title = simulate a satellite data feed
         description = """Generates {{N_DATASETS}} arbitrarily labelled
 datasets very quickly, to show parallel processing streams."""
-        pre-command scripting = mkdir -p {{DATA_IN_DIR}}
-        command scripting = """
+        pre-script = mkdir -p {{DATA_IN_DIR}}
+        script = """
 COUNT=0
 while true; do
     ((COUNT == {{N_DATASETS}})) && break
@@ -73,29 +73,29 @@ done"""
         # Define a common cycle-point-specific work-directory for all
         # processing tasks so that they all work on the same dataset.
         work sub-directory = proc-$CYLC_TASK_CYCLE_POINT
-        pre-command scripting = "DATASET=dataset-$CYLC_EXT_TRIGGER_ID"
-        post-command scripting = sleep 5
+        pre-script = "DATASET=dataset-$CYLC_EXT_TRIGGER_ID"
+        post-script = sleep 5
 
     [[get_data]]
         inherit = WORKDIR
         title = retrieve next dataset
         description = just do it - we know it exists already
-        command scripting = mv {{DATA_IN_DIR}}/${DATASET}.raw $PWD
+        script = mv {{DATA_IN_DIR}}/${DATASET}.raw $PWD
 
     [[proc1]]
         inherit = WORKDIR
         title = convert .raw dataset to .proc1 form
-        command scripting = mv ${DATASET}.raw ${DATASET}.proc1
+        script = mv ${DATASET}.raw ${DATASET}.proc1
 
     [[proc2]]
         inherit = WORKDIR
         title = convert .proc1 dataset to .proc2 form
-        command scripting = mv ${DATASET}.proc1 ${DATASET}.proc2
+        script = mv ${DATASET}.proc1 ${DATASET}.proc2
 
     [[products]]
         inherit = WORKDIR
         title = generate products from .proc2 processed dataset
-        command scripting = """
+        script = """
 mkdir -p {{PRODUCT_DIR}}
 mv ${DATASET}.proc2 {{PRODUCT_DIR}}/${DATASET}.prod"""
 
@@ -103,7 +103,7 @@ mv ${DATASET}.proc2 {{PRODUCT_DIR}}/${DATASET}.prod"""
         title = collate all products from the suite run
         # Note you might want to use "cylc suite-state" to check that
         # _all_ product tasks have finished before collating results.
-        command scripting = """
+        script = """
 echo PRODUCTS:
 ls {{PRODUCT_DIR}}
 sleep 20"""
diff --git a/examples/satellite/task-retries/suite.rc b/examples/satellite/task-retries/suite.rc
index 0d347b1..87691d8 100644
--- a/examples/satellite/task-retries/suite.rc
+++ b/examples/satellite/task-retries/suite.rc
@@ -39,15 +39,15 @@ automatically until they succeed."""
 [runtime]
     [[prep]]
         title = clean the suite output directories
-        command scripting = \
+        script = \
 rm -rf $CYLC_SUITE_SHARE_DIR $CYLC_SUITE_WORK_DIR
 
     [[satsim]]
         title = simulate a satellite data feed
         description = """Generates {{N_DATASETS}} arbitrarily labelled
 datasets after random durations."""
-        pre-command scripting = mkdir -p {{DATA_IN_DIR}}
-        command scripting = """
+        pre-script = mkdir -p {{DATA_IN_DIR}}
+        script = """
 COUNT=0
 while true; do
     ((COUNT == {{N_DATASETS}})) && break
@@ -63,36 +63,37 @@ done"""
         # Define a common cycle-point-specific work-directory for all
         # processing tasks so that they all work on the same dataset.
         work sub-directory = proc-$CYLC_TASK_CYCLE_POINT
-        post-command scripting = sleep 5
+        post-script = sleep 5
 
     [[get_data]]
         inherit = WORKDIR
         title = retreive next dataset
         description = grab ONE new dataset if available else retry
-        retry delays = 10*PT2S
-        command scripting = """
+        script = """
 DATASET=$( ls {{DATA_IN_DIR}}/dataset-*.raw 2>/dev/null | head -n 1 )
 [[ -z $DATASET ]] && exit 1
 mv $DATASET $PWD"""
+        [[[job]]]
+            execution retry delays = 10*PT2S
 
     [[proc1]]
         inherit = WORKDIR
         title = convert .raw dataset to .proc1 form
-        command scripting = """
+        script = """
 DATASET=$(ls dataset-*.raw)
 mv $DATASET ${DATASET%raw}proc1"""
 
     [[proc2]]
         inherit = WORKDIR
         title = convert .proc1 dataset to .proc2 form
-        command scripting = """
+        script = """
 DATASET=$(ls dataset-*.proc1)
 mv $DATASET ${DATASET%proc1}proc2"""
 
     [[products]]
         inherit = WORKDIR
         title = generate products from .proc2 processed dataset
-        command scripting = """
+        script = """
 mkdir -p {{PRODUCT_DIR}}
 DATASET=$( ls dataset-*.proc2 )
 mv $DATASET {{PRODUCT_DIR}}/${DATASET%proc2}prod"""
@@ -101,7 +102,7 @@ mv $DATASET {{PRODUCT_DIR}}/${DATASET%proc2}prod"""
         title = collate all products from the suite run
         # Note you might want to use "cylc suite-state" to check that
         # _all_ product tasks have finished before collating results.
-        command scripting = """
+        script = """
 echo PRODUCTS:
 ls {{PRODUCT_DIR}}
 sleep 20"""
diff --git a/examples/task-states/bin/change-my-job-sub-method.sh b/examples/task-states/bin/change-my-job-sub-method.sh
index e372d58..64ca590 100755
--- a/examples/task-states/bin/change-my-job-sub-method.sh
+++ b/examples/task-states/bin/change-my-job-sub-method.sh
@@ -9,6 +9,5 @@ echo "${0}:resetting job submission method with cylc broadcast"
 
 NAME=${TASKID%.*}
 CYCLE=${TASKID#*.}
+cylc broadcast -n $NAME -p $CYCLE --set '[job]batch system=background' $SUITE
 
-echo cylc broadcast -n $NAME -t $CYCLE --set "[job]batch system=background" $SUITE
-cylc broadcast -n $NAME -t $CYCLE --set "[job]batch system=background" $SUITE
diff --git a/examples/task-states/suite.rc b/examples/task-states/suite.rc
index fea9acc..5a0850a 100644
--- a/examples/task-states/suite.rc
+++ b/examples/task-states/suite.rc
@@ -3,6 +3,10 @@ title = "gcylc task state color theme demo"
 description = """Generate a lot of possible task states,
 to show what they look like live in gcylc."""
 
+[cylc]
+    UTC mode = True
+    cycle point format = %Y-%m-%dT%HZ
+
 [scheduling]
     initial cycle point = 20120808T00
     final cycle point = 20120812T00
@@ -32,19 +36,21 @@ to show what they look like live in gcylc."""
     [[m_x]]
         inherit = FAMILY
         title = "this task succeeds on the second try "
-        retry delays = PT18S
         script = """
 sleep 10
 if [[ $CYLC_TASK_TRY_NUMBER < 2 ]]; then
     cylc task message -p WARNING ABORTING
     exit 1
 fi"""
+        [[[job]]]
+            execution retry delays = PT18S
     [[bad]]
         title = "A task that tries and fails twice"
         description = """Failed instances of this task are removed from the suite
 at the end of each cycle by a suicide trigger."""
-        retry delays = PT12S
         script = "sleep 10; exit 1"
+        [[[job]]]
+            execution retry delays = PT12S
     [[bad2]]
         title = "A task that fails to submit twice"
         [[[job]]]
@@ -59,7 +65,7 @@ submission method for the retry."""
             submission retry delays = PT18S
         [[[events]]]
             submission retry handler = change-my-job-sub-method.sh
- 
+
 [visualization]
     use node color for labels = True
     [[node attributes]]
diff --git a/examples/tutorial/oneoff/retry/suite.rc b/examples/tutorial/oneoff/retry/suite.rc
index 29d2a3e..88f8417 100644
--- a/examples/tutorial/oneoff/retry/suite.rc
+++ b/examples/tutorial/oneoff/retry/suite.rc
@@ -4,7 +4,6 @@ title = "A task with automatic retry on failure"
         graph = "hello"
 [runtime]
     [[hello]]
-        retry delays = 2*PT6S # retry twice after 6-second delays
         script = """
 sleep 10
 if [[ $CYLC_TASK_TRY_NUMBER < 3 ]]; then
@@ -13,3 +12,5 @@ if [[ $CYLC_TASK_TRY_NUMBER < 3 ]]; then
 else
     echo "Hello World!"
 fi"""
+        [[[job]]]
+            execution retry delays = 2*PT6S # retry twice after 6-second delays
diff --git a/lib/cylc/batch_sys_handlers/background.py b/lib/cylc/batch_sys_handlers/background.py
index 8afa68c..4b85f96 100644
--- a/lib/cylc/batch_sys_handlers/background.py
+++ b/lib/cylc/batch_sys_handlers/background.py
@@ -22,7 +22,6 @@ import os
 import re
 from subprocess import Popen, STDOUT
 import sys
-from cylc.batch_sys_manager import BATCH_SYS_MANAGER
 
 
 class BgCommandHandler(object):
diff --git a/lib/cylc/batch_sys_handlers/sge.py b/lib/cylc/batch_sys_handlers/sge.py
index ee012ad..6da916c 100644
--- a/lib/cylc/batch_sys_handlers/sge.py
+++ b/lib/cylc/batch_sys_handlers/sge.py
@@ -50,9 +50,14 @@ class SGEHandler(object):
             directives[key] = value
         lines = []
         for key, value in directives.items():
-            if value:
+            if value and " " in key:
+                # E.g. -l h_rt=3:00:00
+                lines.append("%s%s=%s" % (self.DIRECTIVE_PREFIX, key, value))
+            elif value:
+                # E.g. -q queue_name
                 lines.append("%s%s %s" % (self.DIRECTIVE_PREFIX, key, value))
             else:
+                # E.g. -V
                 lines.append("%s%s" % (self.DIRECTIVE_PREFIX, key))
         return lines
 
diff --git a/lib/cylc/batch_sys_manager.py b/lib/cylc/batch_sys_manager.py
index 174bfbe..d2e59f8 100644
--- a/lib/cylc/batch_sys_manager.py
+++ b/lib/cylc/batch_sys_manager.py
@@ -18,8 +18,7 @@
 
 """Manage submission, poll and kill of a job to the batch systems.
 
-Export the symbol BATCH_SYS_MANAGER, which is the singleton object for the
-BatchSysManager class.
+Export the BatchSysManager class.
 
 Batch system handler (a.k.a. job submission method) modules should be placed
 under the "cylc.batch_sys_handlers" package. Each module should export the
@@ -104,7 +103,7 @@ batch_sys.SUBMIT_CMD_TMPL
     * A Python string template for getting the batch system command to submit a
       job file. The command is formed using the logic:
           batch_sys.SUBMIT_CMD_TMPL % {"job": job_file_path}
-      See also "batch_sys.job_submit".
+      See also "batch_sys._job_submit_impl".
 
 """
 
@@ -206,7 +205,7 @@ class BatchSysManager(object):
             if os.path.isdir(suite_py) and suite_py not in sys.path:
                 sys.path.append(suite_py)
 
-    def get_inst(self, batch_sys_name):
+    def _get_sys(self, batch_sys_name):
         """Return an instance of the class for "batch_sys_name"."""
         if batch_sys_name in self._INSTANCES:
             return self._INSTANCES[batch_sys_name]
@@ -224,20 +223,20 @@ class BatchSysManager(object):
 
     def format_directives(self, job_conf):
         """Format the job directives for a job file, if relevant."""
-        batch_sys = self.get_inst(job_conf['batch_system_name'])
+        batch_sys = self._get_sys(job_conf['batch_system_name'])
         if hasattr(batch_sys, "format_directives"):
             return batch_sys.format_directives(job_conf)
 
     def get_fail_signals(self, job_conf):
         """Return a list of failure signal names to trap in the job file."""
-        batch_sys = self.get_inst(job_conf['batch_system_name'])
+        batch_sys = self._get_sys(job_conf['batch_system_name'])
         if hasattr(batch_sys, "get_fail_signals"):
             return batch_sys.get_fail_signals(job_conf)
         return ["EXIT", "ERR", "TERM", "XCPU"]
 
     def get_vacation_signal(self, job_conf):
         """Return the vacation signal name for a job file."""
-        batch_sys = self.get_inst(job_conf['batch_system_name'])
+        batch_sys = self._get_sys(job_conf['batch_system_name'])
         if hasattr(batch_sys, "get_vacation_signal"):
             return batch_sys.get_vacation_signal(job_conf)
 
@@ -372,7 +371,7 @@ class BatchSysManager(object):
             st_file = open(st_file_path)
             for line in st_file:
                 if line.startswith(self.CYLC_BATCH_SYS_NAME + "="):
-                    batch_sys = self.get_inst(line.strip().split("=", 1)[1])
+                    batch_sys = self._get_sys(line.strip().split("=", 1)[1])
                     break
             else:
                 return (
@@ -412,54 +411,13 @@ class BatchSysManager(object):
         except IOError as exc:
             return (1, str(exc))
 
-    def job_submit(self, job_file_path, remote_mode):
-        """Submit a job file.
-
-        "job_file_path" is a string containing the path to the job file.
-        "remote_mode" is a boolean to indicate if submit is being initiated on
-        a remote job host.
-
-        Return a 4-element tuple (ret_code, out, err, job_id) where:
-        "ret_code" is the integer return code of the job submit command.
-        "out" is a string containing the standard output of the job submit
-        command.
-        "err" is a string containing the standard error output of the job
-        submit command.
-        "job_id" is a string containing the ID of the job submitted.
-
-        """
-        # SUITE_RUN_DIR/log/job/CYCLE/TASK/SUBMIT/job
-        if "$" in job_file_path:
-            job_file_path = os.path.expandvars(job_file_path)
-        self.configure_suite_run_dir(job_file_path.rsplit(os.sep, 6)[0])
-
-        if remote_mode:
-            batch_sys_name, submit_opts = (
-                self._job_submit_prepare_remote(job_file_path))
-        else:  # local mode
-            batch_sys_name = None
-            submit_opts = {}
-            for line in open(job_file_path):
-                if line.startswith(self.LINE_PREFIX_BATCH_SYS_NAME):
-                    batch_sys_name = line.replace(
-                        self.LINE_PREFIX_BATCH_SYS_NAME, "").strip()
-                elif line.startswith(self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL):
-                    submit_opts["batch_submit_cmd_tmpl"] = line.replace(
-                        self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL, "").strip()
-                elif line.startswith(self.LINE_PREFIX_EXECUTION_TIME_LIMIT):
-                    submit_opts["execution_time_limit"] = float(line.replace(
-                        self.LINE_PREFIX_EXECUTION_TIME_LIMIT, "").strip())
-
-        return self._job_submit_impl(
-            job_file_path, batch_sys_name, submit_opts)
-
     @classmethod
     def _create_nn(cls, job_file_path):
         """Create NN symbolic link, if necessary.
 
         If NN => 01, remove numbered directories with submit numbers greater
         than 01.
-        Helper for "self.submit".
+        Helper for "self._job_submit_impl".
 
         """
         job_file_dir = os.path.dirname(job_file_path)
@@ -554,11 +512,11 @@ class BatchSysManager(object):
         bad_job_ids = list(exp_job_ids)
         exp_pids = []
         bad_pids = []
-        items = [[self.get_inst(batch_sys_name), exp_job_ids, bad_job_ids]]
+        items = [[self._get_sys(batch_sys_name), exp_job_ids, bad_job_ids]]
         if getattr(items[0][0], "SHOULD_POLL_PROC_GROUP", False):
             exp_pids = [ctx.pid for ctx in my_ctx_list if ctx.pid is not None]
             bad_pids.extend(exp_pids)
-            items.append([self.get_inst("background"), exp_pids, bad_pids])
+            items.append([self._get_sys("background"), exp_pids, bad_pids])
         for batch_sys, exp_ids, bad_ids in items:
             if hasattr(batch_sys, "get_poll_many_cmd"):
                 # Some poll commands may not be as simple
@@ -639,7 +597,7 @@ class BatchSysManager(object):
         job_status_file.close()
 
         # Submit job
-        batch_sys = self.get_inst(batch_sys_name)
+        batch_sys = self._get_sys(batch_sys_name)
         proc_stdin_arg = None
         proc_stdin_value = None
         if hasattr(batch_sys, "get_submit_stdin"):
@@ -800,48 +758,3 @@ class BatchSysManager(object):
                     batch_sys_name = None
                     submit_opts = {}
         return items
-
-    def _job_submit_prepare_remote(self, job_file_path):
-        """Prepare a remote job file.
-
-        On remote mode, write job file, content from STDIN Modify job
-        script's CYLC_DIR for this host. Extract job submission method
-        and job submission command template.
-
-        Return (batch_sys_name, batch_sys_submit)
-
-        """
-        batch_sys_name = None
-        submit_opts = {}
-        mkdir_p(os.path.dirname(job_file_path))
-        job_file = open(job_file_path + ".tmp", "w")
-        while True:  # Note: "for line in sys.stdin:" may hang
-            line = sys.stdin.readline()
-            if not line:
-                sys.stdin.close()
-                break
-            if line.startswith(self.LINE_PREFIX_CYLC_DIR):
-                old_line = line
-                line = "%s'%s'\n" % (
-                    self.LINE_PREFIX_CYLC_DIR, os.environ["CYLC_DIR"])
-                if old_line != line:
-                    job_file.write(self.LINE_UPDATE_CYLC_DIR)
-            elif line.startswith(self.LINE_PREFIX_BATCH_SYS_NAME):
-                batch_sys_name = line.replace(
-                    self.LINE_PREFIX_BATCH_SYS_NAME, "").strip()
-            elif line.startswith(self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL):
-                submit_opts["batch_submit_cmd_tmpl"] = line.replace(
-                    self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL, "").strip()
-            elif line.startswith(self.LINE_PREFIX_EXECUTION_TIME_LIMIT):
-                submit_opts["execution_time_limit"] = float(line.replace(
-                    self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL, "").strip())
-            job_file.write(line)
-        job_file.close()
-        os.rename(job_file_path + ".tmp", job_file_path)
-        os.chmod(job_file_path, (
-            os.stat(job_file_path).st_mode |
-            stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))
-        return batch_sys_name, submit_opts
-
-
-BATCH_SYS_MANAGER = BatchSysManager()
diff --git a/lib/cylc/cfgspec/globalcfg.py b/lib/cylc/cfgspec/globalcfg.py
index a83f820..295f3a5 100644
--- a/lib/cylc/cfgspec/globalcfg.py
+++ b/lib/cylc/cfgspec/globalcfg.py
@@ -95,19 +95,20 @@ SPEC = {
     'documentation': {
         'files': {
             'html index': vdr(
-                vtype='string', default="$CYLC_DIR/doc/index.html"),
+                vtype='string', default="$CYLC_DIR/doc/install/index.html"),
             'pdf user guide': vdr(
-                vtype='string', default="$CYLC_DIR/doc/pdf/cug-pdf.pdf"),
+                vtype='string',
+                default="$CYLC_DIR/doc/install/cylc-user-guide.pdf"),
             'multi-page html user guide': vdr(
                 vtype='string',
-                default="$CYLC_DIR/doc/html/multi/cug-html.html"),
+                default="$CYLC_DIR/doc/install/html/multi/cug-html.html"),
             'single-page html user guide': vdr(
                 vtype='string',
-                default="$CYLC_DIR/doc/html/single/cug-html.html"),
+                default="$CYLC_DIR/doc/install/html/single/cug-html.html"),
         },
         'urls': {
             'internet homepage': vdr(
-                vtype='string', default="http://cylc.github.com/cylc/"),
+                vtype='string', default="http://cylc.github.io/cylc/"),
             'local index': vdr(vtype='string', default=None),
         },
     },
@@ -147,10 +148,10 @@ SPEC = {
                 vtype='interval_list', default=[]),
             'execution polling intervals': vdr(
                 vtype='interval_list', default=[]),
-            'remote copy template': vdr(
+            'scp command': vdr(
                 vtype='string',
                 default='scp -oBatchMode=yes -oConnectTimeout=10'),
-            'remote shell template': vdr(
+            'ssh command': vdr(
                 vtype='string',
                 default='ssh -oBatchMode=yes -oConnectTimeout=10'),
             'use login shell': vdr(vtype='boolean', default=True),
@@ -203,8 +204,8 @@ SPEC = {
                 vtype='interval_list', default=[]),
             'execution polling intervals': vdr(
                 vtype='interval_list', default=[]),
-            'remote copy template': vdr(vtype='string'),
-            'remote shell template': vdr(vtype='string'),
+            'scp command': vdr(vtype='string'),
+            'ssh command': vdr(vtype='string'),
             'use login shell': vdr(vtype='boolean', default=None),
             'cylc executable': vdr(vtype='string'),
             'global init-script': vdr(vtype='string'),
@@ -370,6 +371,14 @@ def upg(cfg, descr):
         '7.0.0',
         ['submission polling intervals'],
         ['hosts', 'localhost', 'submission polling intervals'])
+    u.deprecate(
+        '7.3.1',
+        ['hosts', '__MANY__', 'remote shell template'],
+        ['hosts', '__MANY__', 'ssh command'])
+    u.deprecate(
+        '7.3.1',
+        ['hosts', '__MANY__', 'remote copy template'],
+        ['hosts', '__MANY__', 'scp command'])
     u.upgrade()
 
 
@@ -612,7 +621,10 @@ class GlobalConfig(config):
             if host == 'localhost':
                 continue
             for item, value in cfg['hosts'][host].items():
-                newvalue = value or cfg['hosts']['localhost'][item]
+                if value is None:
+                    newvalue = cfg['hosts']['localhost'][item]
+                else:
+                    newvalue = value
                 if newvalue and 'directory' in item:
                     # replace local home dir with $HOME for evaluation on other
                     # host
diff --git a/lib/cylc/cfgspec/gscan.py b/lib/cylc/cfgspec/gscan.py
index d5301cb..d554d3a 100644
--- a/lib/cylc/cfgspec/gscan.py
+++ b/lib/cylc/cfgspec/gscan.py
@@ -29,7 +29,8 @@ USER_FILE = os.path.join(os.environ['HOME'], '.cylc', 'gscan.rc')
 
 SPEC = {
     'columns': vdr(vtype='string_list', default=['suite', 'status']),
-    'activate on startup': vdr(vtype='boolean', default=False)
+    'activate on startup': vdr(vtype='boolean', default=False),
+    'window size': vdr(vtype='integer_list', default=[300, 200]),
 }
 
 
diff --git a/lib/cylc/cfgspec/suite.py b/lib/cylc/cfgspec/suite.py
index 1582df0..073eb15 100644
--- a/lib/cylc/cfgspec/suite.py
+++ b/lib/cylc/cfgspec/suite.py
@@ -36,7 +36,7 @@ from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from cylc.network import PRIVILEGE_LEVELS
 
 
-REC_PARAM_INT_RANGE = re.compile('(\d+)\.\.(\d+)')
+REC_PARAM_INT_RANGE = re.compile('(\d+)\.\.(\d+)(?:\.\.(\d+))?')
 
 
 def _coerce_cycleinterval(value, keys, _):
@@ -141,19 +141,19 @@ def _coerce_final_cycletime(value, keys, _):
 def _coerce_parameter_list(value, keys, _):
     """Coerce parameter list."""
     value = _strip_and_unquote_list(keys, value)
-    if len(value) == 1:
-        # May be a range e.g. '1..5' (bounds inclusive)
-        try:
-            lower, upper = REC_PARAM_INT_RANGE.match(value[0]).groups()
-        except AttributeError:
-            if '.' in value[0]:
-                # Dot is illegal in node names, probably bad range syntax.
-                raise IllegalValueError("parameter", keys, value)
-        else:
-            n_dig = len(upper)
-            return [
-                str(i).zfill(n_dig) for i in range(int(lower), int(upper) + 1)]
-    return value
+    # May be an integer range with step e.g. '1..6..2' (bounds inclusive).
+    try:
+        lower, upper, step = REC_PARAM_INT_RANGE.match(value[0]).groups()
+        step = step or 1
+    except AttributeError:
+        if '.' in value[0]:
+            # Dot is illegal in node names, probably bad range syntax.
+            raise IllegalValueError("parameter", keys, value)
+        return value
+    else:
+        n_dig = len(upper)
+        return [str(i).zfill(n_dig) for i in
+                range(int(lower), int(upper) + 1, int(step))]
 
 coercers['cycletime'] = _coerce_cycletime
 coercers['cycletime_format'] = _coerce_cycletime_format
@@ -180,14 +180,19 @@ SPEC = {
         'cycle point time zone': vdr(
             vtype='cycletime_time_zone', default=None),
         'required run mode': vdr(
-            vtype='string', options=['live', 'dummy', 'simulation', '']),
+            vtype='string',
+            options=['live', 'dummy', 'dummy-local', 'simulation', '']),
         'force run mode': vdr(
-            vtype='string', options=['live', 'dummy', 'simulation', '']),
+            vtype='string',
+            options=['live', 'dummy', 'dummy-local', 'simulation', '']),
         'abort if any task fails': vdr(vtype='boolean', default=False),
         'health check interval': vdr(vtype='interval', default=None),
         'task event mail interval': vdr(vtype='interval', default=None),
         'log resolved dependencies': vdr(vtype='boolean', default=False),
         'disable automatic shutdown': vdr(vtype='boolean', default=False),
+        'simulation': {
+            'disable suite event handlers': vdr(vtype='boolean', default=True),
+        },
         'environment': {
             '__MANY__': vdr(vtype='string'),
         },
@@ -228,23 +233,20 @@ SPEC = {
             'mail to': vdr(vtype='string'),
             'mail footer': vdr(vtype='string'),
         },
-        'simulation mode': {
-            'disable suite event hooks': vdr(vtype='boolean', default=True),
-        },
-        'dummy mode': {
-            'disable suite event hooks': vdr(vtype='boolean', default=True),
-        },
         'reference test': {
             'suite shutdown event handler': vdr(
                 vtype='string', default='cylc hook check-triggering'),
             'required run mode': vdr(
-                vtype='string', options=['live', 'simulation', 'dummy', '']),
+                vtype='string',
+                options=['live', 'simulation', 'dummy-local', 'dummy', '']),
             'allow task failures': vdr(vtype='boolean', default=False),
             'expected task failures': vdr(vtype='string_list', default=[]),
             'live mode suite timeout': vdr(
                 vtype='interval', default=DurationFloat(60)),
             'dummy mode suite timeout': vdr(
                 vtype='interval', default=DurationFloat(60)),
+            'dummy-local mode suite timeout': vdr(
+                vtype='interval', default=DurationFloat(60)),
             'simulation mode suite timeout': vdr(
                 vtype='interval', default=DurationFloat(60)),
         },
@@ -305,38 +307,28 @@ SPEC = {
             'title': vdr(vtype='string', default=""),
             'description': vdr(vtype='string', default=""),
             'URL': vdr(vtype='string', default=""),
-            'init-script': vdr(vtype='string'),
-            'env-script': vdr(vtype='string'),
-            'err-script': vdr(vtype='string'),
-            'pre-script': vdr(vtype='string'),
-            'script': vdr(
-                vtype='string',
-                default='echo Dummy task; sleep $(cylc rnd 1 16)'),
-            'post-script': vdr(vtype='string'),
+            'init-script': vdr(vtype='string', default=""),
+            'env-script': vdr(vtype='string', default=""),
+            'err-script': vdr(vtype='string', default=""),
+            'pre-script': vdr(vtype='string', default=""),
+            'script': vdr(vtype='string', default=""),
+            'post-script': vdr(vtype='string', default=""),
             'extra log files': vdr(vtype='string_list', default=[]),
             'enable resurrection': vdr(vtype='boolean', default=False),
             'work sub-directory': vdr(vtype='string'),
+            'simulation': {
+                'default run length': vdr(vtype='interval', default='PT10S'),
+                'speedup factor': vdr(vtype='float', default=None),
+                'time limit buffer': vdr(vtype='interval', default='PT10S'),
+                'fail cycle points': vdr(vtype='string_list', default=[]),
+                'fail try 1 only': vdr(vtype='boolean', default=True),
+                'disable task event handlers': vdr(
+                    vtype='boolean', default=True),
+            },
             'environment filter': {
                 'include': vdr(vtype='string_list'),
                 'exclude': vdr(vtype='string_list'),
             },
-            'simulation mode': {
-                'run time range': vdr(
-                    vtype='interval_list',
-                    default=[DurationFloat(1), DurationFloat(16)]),
-                'simulate failure': vdr(vtype='boolean', default=False),
-                'disable task event hooks': vdr(vtype='boolean', default=True),
-                'disable retries': vdr(vtype='boolean', default=True),
-            },
-            'dummy mode': {
-                'script': vdr(
-                    vtype='string',
-                    default='echo Dummy task; sleep $(cylc rnd 1 16)'),
-                'disable pre-script': vdr(vtype='boolean', default=True),
-                'disable post-script': vdr(vtype='boolean', default=True),
-                'disable task event hooks': vdr(vtype='boolean', default=True),
-                'disable retries': vdr(vtype='boolean', default=True),
-            },
             'job': {
                 'batch system': vdr(vtype='string', default='background'),
                 'batch submit command template': vdr(vtype='string'),
@@ -373,21 +365,17 @@ SPEC = {
                 'reset timer': vdr(vtype='boolean', default=None),
                 'submission timeout': vdr(vtype='interval'),
 
-                'expired handler': vdr(vtype='string_list', default=[]),
-                'submitted handler': vdr(vtype='string_list', default=[]),
-                'started handler': vdr(vtype='string_list', default=[]),
-                'succeeded handler': vdr(vtype='string_list', default=[]),
-                'failed handler': vdr(vtype='string_list', default=[]),
-                'submission failed handler': vdr(
-                    vtype='string_list', default=[]),
-                'warning handler': vdr(vtype='string_list', default=[]),
-                'retry handler': vdr(vtype='string_list', default=[]),
-                'submission retry handler': vdr(
-                    vtype='string_list', default=[]),
-                'execution timeout handler': vdr(
-                    vtype='string_list', default=[]),
-                'submission timeout handler': vdr(
-                    vtype='string_list', default=[]),
+                'expired handler': vdr(vtype='string_list'),
+                'submitted handler': vdr(vtype='string_list'),
+                'started handler': vdr(vtype='string_list'),
+                'succeeded handler': vdr(vtype='string_list'),
+                'failed handler': vdr(vtype='string_list'),
+                'submission failed handler': vdr(vtype='string_list'),
+                'warning handler': vdr(vtype='string_list'),
+                'retry handler': vdr(vtype='string_list'),
+                'submission retry handler': vdr(vtype='string_list'),
+                'execution timeout handler': vdr(vtype='string_list'),
+                'submission timeout handler': vdr(vtype='string_list'),
             },
             'suite state polling': {
                 'user': vdr(vtype='string'),
@@ -498,6 +486,10 @@ def upg(cfg, descr):
         '6.11.0',
         ['runtime', '__MANY__', 'execution polling intervals'],
         ['runtime', '__MANY__', 'job', 'execution polling intervals'])
+    u.obsolete('7.2.2', ['cylc', 'dummy mode'])
+    u.obsolete('7.2.2', ['cylc', 'simulation mode'])
+    u.obsolete('7.2.2', ['runtime', '__MANY__', 'dummy mode'])
+    u.obsolete('7.2.2', ['runtime', '__MANY__', 'simulation mode'])
     u.upgrade()
 
 
diff --git a/lib/cylc/config.py b/lib/cylc/config.py
index eb2dafa..3441220 100644
--- a/lib/cylc/config.py
+++ b/lib/cylc/config.py
@@ -23,10 +23,9 @@ structures.
 
 
 from copy import deepcopy, copy
-import re
+from fnmatch import fnmatchcase
 import os
 import re
-import sys
 import traceback
 
 from cylc.c3mro import C3
@@ -34,23 +33,21 @@ from cylc.exceptions import CylcError
 from cylc.graph_parser import GraphParser
 from cylc.param_expand import NameExpander
 from cylc.cfgspec.suite import RawSuiteConfig
-from cylc.cycling.loader import (get_point, get_point_relative,
-                                 get_interval, get_interval_cls,
-                                 get_sequence, get_sequence_cls,
-                                 init_cyclers, INTEGER_CYCLING_TYPE,
-                                 ISO8601_CYCLING_TYPE)
+from cylc.cycling.loader import (
+    get_point, get_point_relative, get_interval, get_interval_cls,
+    get_sequence, get_sequence_cls, init_cyclers, INTEGER_CYCLING_TYPE,
+    ISO8601_CYCLING_TYPE)
 from cylc.cycling import IntervalParsingError
 from cylc.envvar import check_varnames
 import cylc.flags
 from cylc.graphnode import graphnode, GraphNodeError
-from cylc.message_output import MessageOutput
 from cylc.print_tree import print_tree
 from cylc.taskdef import TaskDef, TaskDefError
 from cylc.task_id import TaskID
 from cylc.task_trigger import TaskTrigger
 from cylc.wallclock import get_current_time_string
-
 from isodatetime.data import Calendar
+from isodatetime.parsers import DurationParser
 from parsec.OrderedDict import OrderedDictWithDefaults
 from parsec.util import replicate
 from cylc.suite_logging import OUT, ERR
@@ -92,13 +89,6 @@ class SuiteConfigError(Exception):
     def __str__(self):
         return repr(self.msg)
 
-
-class TaskNotDefinedError(SuiteConfigError):
-    """A named task not defined."""
-
-    def __str__(self):
-        return "Task not defined: %s" % self.msg
-
 # TODO: separate config for run and non-run purposes?
 
 
@@ -453,32 +443,32 @@ class SuiteConfig(object):
         # Parse special task cycle point offsets, and replace family names.
         if cylc.flags.verbose:
             OUT.info("Parsing [special tasks]")
-        for type in self.cfg['scheduling']['special tasks']:
-            result = copy(self.cfg['scheduling']['special tasks'][type])
+        for s_type in self.cfg['scheduling']['special tasks']:
+            result = copy(self.cfg['scheduling']['special tasks'][s_type])
             extn = ''
-            for item in self.cfg['scheduling']['special tasks'][type]:
+            for item in self.cfg['scheduling']['special tasks'][s_type]:
                 name = item
-                if type == 'external-trigger':
+                if s_type == 'external-trigger':
                     m = re.match(EXT_TRIGGER_RE, item)
                     if m is None:
                         raise SuiteConfigError(
-                            "ERROR: Illegal %s spec: %s" % (type, item)
+                            "ERROR: Illegal %s spec: %s" % (s_type, item)
                         )
                     name, ext_trigger_msg = m.groups()
                     extn = "(" + ext_trigger_msg + ")"
 
-                elif type in ['clock-trigger', 'clock-expire']:
+                elif s_type in ['clock-trigger', 'clock-expire']:
                     m = re.match(CLOCK_OFFSET_RE, item)
                     if m is None:
                         raise SuiteConfigError(
-                            "ERROR: Illegal %s spec: %s" % (type, item)
+                            "ERROR: Illegal %s spec: %s" % (s_type, item)
                         )
                     if (self.cfg['scheduling']['cycling mode'] !=
                             Calendar.MODE_GREGORIAN):
                         raise SuiteConfigError(
                             "ERROR: %s tasks require "
                             "[scheduling]cycling mode=%s" % (
-                                type, Calendar.MODE_GREGORIAN)
+                                s_type, Calendar.MODE_GREGORIAN)
                         )
                     name, offset_string = m.groups()
                     if not offset_string:
@@ -487,14 +477,14 @@ class SuiteConfig(object):
                         if offset_string.startswith("-"):
                             ERR.warning(
                                 "%s offsets are normally positive: %s" % (
-                                    type, item))
+                                    s_type, item))
                     try:
                         offset_interval = (
                             get_interval(offset_string).standardise())
                     except IntervalParsingError as exc:
                         raise SuiteConfigError(
                             "ERROR: Illegal %s spec: %s" % (
-                                type, offset_string))
+                                s_type, offset_string))
                     extn = "(" + offset_string + ")"
 
                 # Replace family names with members.
@@ -505,23 +495,29 @@ class SuiteConfig(object):
                             # (sub-family)
                             continue
                         result.append(member + extn)
-                        if type == 'clock-trigger':
+                        if s_type == 'clock-trigger':
                             self.clock_offsets[member] = offset_interval
-                        if type == 'clock-expire':
+                        if s_type == 'clock-expire':
                             self.expiration_offsets[member] = offset_interval
-                        if type == 'external-trigger':
+                        if s_type == 'external-trigger':
                             self.ext_triggers[member] = ext_trigger_msg
-                elif type == 'clock-trigger':
+                elif s_type == 'clock-trigger':
                     self.clock_offsets[name] = offset_interval
-                elif type == 'clock-expire':
+                elif s_type == 'clock-expire':
                     self.expiration_offsets[name] = offset_interval
-                elif type == 'external-trigger':
+                elif s_type == 'external-trigger':
                     self.ext_triggers[name] = self.dequote(ext_trigger_msg)
 
-            self.cfg['scheduling']['special tasks'][type] = result
+            self.cfg['scheduling']['special tasks'][s_type] = result
 
         self.collapsed_families_rc = (
             self.cfg['visualization']['collapsed families'])
+        for fam in self.collapsed_families_rc:
+            if fam not in self.runtime['first-parent descendants']:
+                raise SuiteConfigError(
+                    'ERROR [visualization]collapsed families: '
+                    '%s is not a first parent' % fam)
+
         if is_reload:
             # on suite reload retain an existing state of collapse
             # (used by the "cylc graph" viewer)
@@ -550,6 +546,11 @@ class SuiteConfig(object):
 
         self.configure_queues()
 
+        if self.run_mode in ['simulation', 'dummy', 'dummy-local']:
+            self.configure_sim_modes()
+
+        self.configure_suite_state_polling_tasks()
+
         # Warn or abort (if --strict) if naked dummy tasks (no runtime
         # section) are found in graph or queue config.
         if len(self.naked_dummy_tasks) > 0:
@@ -954,9 +955,6 @@ class SuiteConfig(object):
                 if name not in self.runtime['first-parent descendants'][p]:
                     self.runtime['first-parent descendants'][p].append(name)
 
-        # for name in self.cfg['runtime']:
-        #     print name, self.runtime['linearized ancestors'][name]
-
     def compute_inheritance(self, use_simple_method=True):
         if cylc.flags.verbose:
             OUT.info("Parsing the runtime namespace hierarchy")
@@ -1152,6 +1150,105 @@ class SuiteConfig(object):
                 log_msg += "\n+ %s: %s" % (key, ', '.join(queue['members']))
             OUT.info(log_msg)
 
+    def configure_suite_state_polling_tasks(self):
+        # Check custom script is not defined for automatic suite polling tasks.
+        for l_task in self.suite_polling_tasks:
+            try:
+                cs = self.pcfg.getcfg(sparse=True)['runtime'][l_task]['script']
+            except:
+                pass
+            else:
+                if cs:
+                    OUT.info(cs)
+                    # (allow explicit blanking of inherited script)
+                    raise SuiteConfigError(
+                        "ERROR: script cannot be defined for automatic" +
+                        " suite polling task " + l_task)
+        # Generate the automatic scripting.
+        for name, tdef in self.taskdefs.items():
+            if name not in self.suite_polling_tasks:
+                continue
+            rtc = tdef.rtconfig
+            comstr = "cylc suite-state" + \
+                     " --task=" + tdef.suite_polling_cfg['task'] + \
+                     " --point=$CYLC_TASK_CYCLE_POINT" + \
+                     " --status=" + tdef.suite_polling_cfg['status']
+            for key, fmt in [
+                    ('user', ' --%s=%s'),
+                    ('host', ' --%s=%s'),
+                    ('interval', ' --%s=%d'),
+                    ('max-polls', ' --%s=%s'),
+                    ('run-dir', ' --%s=%s'),
+                    ('template', ' --%s=%s')]:
+                if rtc['suite state polling'][key]:
+                    comstr += fmt % (key, rtc['suite state polling'][key])
+            comstr += " " + tdef.suite_polling_cfg['suite']
+            script = "echo " + comstr + "\n" + comstr
+            rtc['script'] = script
+
+    def configure_sim_modes(self):
+        # Adjust task defs for simulation mode and dummy modes.
+        for name, tdef in self.taskdefs.items():
+            # Compute simulated run time by scaling the execution limit.
+            rtc = tdef.rtconfig
+            limit = rtc['job']['execution time limit']
+            speedup = rtc['simulation']['speedup factor']
+            if limit and speedup:
+                sleep_sec = (DurationParser().parse(
+                    str(limit)).get_seconds() / speedup)
+            else:
+                sleep_sec = DurationParser().parse(
+                    str(rtc['simulation']['default run length'])
+                ).get_seconds()
+            rtc['job']['execution time limit'] = (
+                sleep_sec + DurationParser().parse(str(
+                    rtc['simulation']['time limit buffer'])
+                ).get_seconds())
+            rtc['job']['simulated run length'] = sleep_sec
+
+            # Generate dummy scripting.
+            rtc['init-script'] = ""
+            rtc['env-script'] = ""
+            rtc['pre-script'] = ""
+            rtc['post-script'] = ""
+            scr = "sleep %d" % sleep_sec
+            # Dummy message outputs.
+            for msg in rtc['outputs'].values():
+                scr += "\ncylc message '%s'" % msg
+            if rtc['simulation']['fail try 1 only']:
+                arg1 = "true"
+            else:
+                arg1 = "false"
+            arg2 = " ".join(rtc['simulation']['fail cycle points'])
+            scr += "\ncylc__job__dummy_result %s %s || exit 1" % (arg1, arg2)
+            rtc['script'] = scr
+
+            # Disable batch scheduler in dummy modes.
+            # TODO - to use batch schedulers in dummy mode we need to
+            # identify which resource directives to disable or modify.
+            # (Only execution time limit is automatic at the moment.)
+            rtc['job']['batch system'] = 'background'
+
+            # Disable environment, in case it depends on env-script.
+            rtc['environment'] = {}
+
+            if tdef.run_mode == 'dummy-local':
+                # Run all dummy tasks on the suite host.
+                rtc['remote']['host'] = None
+                rtc['remote']['owner'] = None
+
+            # Simulation mode tasks should fail in which cycle points?
+            f_pts = []
+            f_pts_orig = rtc['simulation']['fail cycle points']
+            if 'all' in f_pts_orig:
+                # None for "fail all points".
+                f_pts = None
+            else:
+                # (And [] for "fail no points".)
+                for point_str in f_pts_orig:
+                    f_pts.append(get_point(point_str).standardise())
+            rtc['simulation']['fail cycle points'] = f_pts
+
     def get_parent_lists(self):
         return self.runtime['parents']
 
@@ -1310,20 +1407,6 @@ class SuiteConfig(object):
                     else:
                         ERR.warning(msg)
 
-        # Check custom script is not defined for automatic suite polling tasks
-        for l_task in self.suite_polling_tasks:
-            try:
-                cs = self.pcfg.getcfg(sparse=True)['runtime'][l_task]['script']
-            except:
-                pass
-            else:
-                if cs:
-                    OUT.info(cs)
-                    # (allow explicit blanking of inherited script)
-                    raise SuiteConfigError(
-                        "ERROR: script cannot be defined for automatic" +
-                        " suite polling task " + l_task)
-
     def get_task_name_list(self):
         # return a list of all tasks used in the dependency graph
         return self.taskdefs.keys()
@@ -1405,12 +1488,7 @@ class SuiteConfig(object):
                 self.ns_defn_order.append(name)
 
             # check task name legality and create the taskdef
-            if name not in self.taskdefs:
-                try:
-                    self.taskdefs[name] = self.get_taskdef(name)
-                except TaskDefError as exc:
-                    ERR.error(orig_expr)
-                    raise SuiteConfigError(str(exc))
+            self.get_taskdef(name, orig_expr)
 
             if name in self.suite_polling_tasks:
                 self.taskdefs[name].suite_polling_cfg = {
@@ -1424,18 +1502,10 @@ class SuiteConfig(object):
                 else:
                     self.taskdefs[name].add_sequence(seq)
 
-            # Record custom message outputs, and generate scripting to fake
-            # their completion in dummy mode.
-            rtconfig = self.taskdefs[name].rtconfig
-            dm_scrpt = rtconfig['dummy mode']['script']
-            for msg in self.cfg['runtime'][name]['outputs'].values():
-                outp = MessageOutput(msg, base_interval)
-                if outp not in self.taskdefs[name].outputs:
-                    self.taskdefs[name].outputs.append(outp)
-                    dm_scrpt += "\nsleep 2; cylc message '%s'" % msg
-            if rtconfig['dummy mode']['script'] != dm_scrpt:
-                rtconfig['dummy mode'] = copy(rtconfig['dummy mode'])
-                rtconfig['dummy mode']['script'] = dm_scrpt
+            # Record custom message outputs.
+            for item in self.cfg['runtime'][name]['outputs'].items():
+                if (item, base_interval) not in self.taskdefs[name].outputs:
+                    self.taskdefs[name].outputs.append((item, base_interval))
 
     def generate_triggers(self, lexpression, left_nodes,
                           right, seq, suicide, base_interval):
@@ -1861,13 +1931,55 @@ class SuiteConfig(object):
                 self.generate_triggers(
                     expr, lefts, right, seq, suicide, base_interval)
 
-    def get_taskdef(self, name):
+    def find_taskdefs(self, name):
+        """Find TaskDef objects in family "name" or matching "name".
+
+        Return a list of TaskDef objects which:
+        * have names that glob matches "name".
+        * are in a family that glob matches "name".
+        """
+        ret = []
+        if name in self.taskdefs:
+            # Match a task name
+            ret.append(self.taskdefs[name])
+        else:
+            fams = self.get_first_parent_descendants()
+            # Match a family name
+            if name in fams:
+                for member in fams[name]:
+                    if member in self.taskdefs:
+                        ret.append(self.taskdefs[member])
+            else:
+                # Glob match task names
+                for key, taskdef in self.taskdefs.items():
+                    if fnmatchcase(key, name):
+                        ret.append(taskdef)
+                # Glob match family names
+                for key, members in fams.items():
+                    if fnmatchcase(key, name):
+                        for member in members:
+                            if member in self.taskdefs:
+                                ret.append(self.taskdefs[member])
+        return ret
+
+    def get_taskdef(self, name, orig_expr=None):
+        """Return an instance of TaskDef for task name."""
+        if name not in self.taskdefs:
+            try:
+                self.taskdefs[name] = self._get_taskdef(name)
+            except TaskDefError as exc:
+                if orig_expr:
+                    ERR.error(orig_expr)
+                raise SuiteConfigError(str(exc))
+        return self.taskdefs[name]
+
+    def _get_taskdef(self, name):
         """Get the dense task runtime."""
         # (TaskDefError caught above)
         try:
             rtcfg = self.cfg['runtime'][name]
         except KeyError:
-            raise TaskNotDefinedError(name)
+            raise SuiteConfigError("Task not defined: %s" % name)
         # We may want to put in some handling for cases of changing the
         # initial cycle via restart (accidentally or otherwise).
 
diff --git a/lib/cylc/cycling/__init__.py b/lib/cylc/cycling/__init__.py
index 895b786..bf794df 100644
--- a/lib/cylc/cycling/__init__.py
+++ b/lib/cylc/cycling/__init__.py
@@ -18,20 +18,26 @@
 
 """This module provides base classes for cycling data objects."""
 
+import unittest
+
+from abc import ABCMeta, abstractmethod, abstractproperty
+
 
 def parse_exclusion(expr):
     count = expr.count('!')
     if count == 0:
         return expr, None
     elif count > 1:
-        raise Exception("'%s': only one exclusion per expression "
+        raise Exception("'%s': only one set of exclusions per expression "
                         "permitted" % expr)
     else:
-        remainder, exclusion = expr.split('!')
-        if '/' in exclusion:
+        remainder, exclusions = expr.split('!')
+        if '/' in exclusions:
             raise Exception("'%s': exclusion must be at the end of the "
                             "expression" % expr)
-        return remainder.strip(), exclusion.strip()
+        exclusions = exclusions.translate(None, ' ()')
+        exclusions = exclusions.split(',')
+        return remainder.strip(), exclusions
 
 
 class CyclerTypeError(TypeError):
@@ -77,7 +83,7 @@ class SequenceDegenerateError(Exception):
 
 class PointBase(object):
 
-    """The base class for single points in a cycler sequence.
+    """The abstract base class for single points in a cycler sequence.
 
     Points should be based around a string value.
 
@@ -90,27 +96,38 @@ class PointBase(object):
     method to reprocess their value into a standard form.
 
     """
+    __metaclass__ = ABCMeta
+
+    _TYPE = None
+    _TYPE_SORT_KEY = None
 
-    TYPE = None
-    TYPE_SORT_KEY = None
+    @abstractproperty
+    def TYPE(self):
+        return self._TYPE
+
+    @abstractproperty
+    def TYPE_SORT_KEY(self):
+        return self._TYPE_SORT_KEY
 
     def __init__(self, value):
         if not isinstance(value, basestring):
             raise TypeError(type(value))
         self.value = value
 
+    @abstractmethod
     def add(self, other):
         """Add other (interval) to self, returning a point."""
-        raise NotImplementedError()
+        pass
 
     def cmp_(self, other):
         """Compare self to other point, returning a 'cmp'-like result."""
-        raise NotImplementedError()
+        pass
 
     def standardise(self):
         """Format self.value into a standard representation and check it."""
         return self
 
+    @abstractmethod
     def sub(self, other):
         """Subtract other (interval or point), returning a point or interval.
 
@@ -121,7 +138,7 @@ class PointBase(object):
          IntervalBase-derived object)
 
         """
-        raise NotImplementedError()
+        pass
 
     def __str__(self):
         # Stringify.
@@ -178,36 +195,51 @@ class IntervalBase(object):
     method to reprocess their value into a standard form.
 
     """
+    __metaclass__ = ABCMeta
+
+    _TYPE = None
+    _TYPE_SORT_KEY = None
 
-    TYPE = None
-    TYPE_SORT_KEY = None
+    @abstractproperty
+    def TYPE(self):
+        return self._TYPE
+
+    @abstractproperty
+    def TYPE_SORT_KEY(self):
+        return self._TYPE_SORT_KEY
 
     @classmethod
+    @abstractmethod
     def get_null(cls):
         """Return a null interval."""
-        raise NotImplementedError()
+        pass
 
+    @abstractmethod
     def get_inferred_child(self, string):
         """For a given string, infer the offset given my instance units."""
-        raise NotImplementedError()
+        pass
 
+    @abstractmethod
     def __abs__(self):
         # Return an interval with absolute values for all properties.
-        raise NotImplementedError()
+        pass
 
+    @abstractmethod
     def __mul__(self, factor):
         # Return an interval with all properties multiplied by factor.
-        raise NotImplementedError()
+        pass
 
+    @abstractmethod
     def __nonzero__(self):
         # Return True if the interval has any non-zero properties.
-        raise NotImplementedError()
+        pass
 
     def __init__(self, value):
         if not isinstance(value, basestring):
             raise TypeError(type(value))
         self.value = value
 
+    @abstractmethod
     def add(self, other):
         """Add other to self, returning a Point or Interval.
 
@@ -218,19 +250,21 @@ class IntervalBase(object):
          IntervalBase-derived object)
 
         """
-        raise NotImplementedError()
+        pass
 
+    @abstractmethod
     def cmp_(self, other):
         """Compare self to other (interval), returning a 'cmp'-like result."""
-        raise NotImplementedError()
+        pass
 
     def standardise(self):
         """Format self.value into a standard representation."""
         return self
 
+    @abstractmethod
     def sub(self, other):
         """Subtract other (interval) from self; return an interval."""
-        raise NotImplementedError()
+        pass
 
     def is_null(self):
         return (self == self.get_null())
@@ -266,7 +300,7 @@ class IntervalBase(object):
 
 class SequenceBase(object):
 
-    """The base class for cycler sequences.
+    """The abstract base class for cycler sequences.
 
     Subclasses should accept a sequence-specific string, a
     start context string, and a stop context string as
@@ -285,68 +319,107 @@ class SequenceBase(object):
     is equal to another (represents the same set of points).
 
     """
+    __metaclass__ = ABCMeta
+
+    _TYPE = None
+    _TYPE_SORT_KEY = None
+
+    @abstractproperty
+    def TYPE(self):
+        return self._TYPE
 
-    TYPE = None
-    TYPE_SORT_KEY = None
+    @abstractproperty
+    def TYPE_SORT_KEY(self):
+        return self._TYPE_SORT_KEY
 
     @classmethod
+    @abstractmethod  # Note: stacked decorator not strictly enforced in Py2.x
     def get_async_expr(cls, start_point=0):
         """Express a one-off sequence at the initial cycle point."""
-        raise NotImplementedError()
+        pass
 
+    @abstractmethod
     def __init__(self, sequence_string, context_start, context_stop=None):
         """Parse sequence string according to context point strings."""
         pass
 
+    @abstractmethod
     def get_interval(self):
         """Return the cycling interval of this sequence."""
-        raise NotImplementedError()
+        pass
 
+    @abstractmethod
     def get_offset(self):
         """Deprecated: return the offset used for this sequence."""
-        raise NotImplementedError()
+        pass
 
+    @abstractmethod
     def set_offset(self, i_offset):
         """Deprecated: alter state to offset the entire sequence."""
-        raise NotImplementedError()
+        pass
 
+    @abstractmethod
     def is_on_sequence(self, point):
         """Is point on-sequence, disregarding bounds?"""
-        raise NotImplementedError()
+        pass
 
     def _get_point_in_bounds(self, point):
         """Return point, or None if out of bounds."""
-        raise NotImplementedError()
+        raise NotImplementedError("Not implemented yet")
 
+    @abstractmethod
     def is_valid(self, point):
         """Is point on-sequence and in-bounds?"""
-        raise NotImplementedError()
+        pass
 
+    @abstractmethod
     def get_prev_point(self, point):
         """Return the previous point < point, or None if out of bounds."""
-        raise NotImplementedError()
+        pass
 
+    @abstractmethod
     def get_nearest_prev_point(self, point):
         """Return the largest point < some arbitrary point."""
-        raise NotImplementedError()
+        pass
 
+    @abstractmethod
     def get_next_point(self, point):
         """Return the next point > point, or None if out of bounds."""
-        raise NotImplementedError()
+        pass
 
+    @abstractmethod
     def get_next_point_on_sequence(self, point):
         """Return the next point > point assuming that point is on-sequence,
         or None if out of bounds."""
-        raise NotImplementedError()
+        pass
 
+    @abstractmethod
     def get_first_point(self, point):
         """Return the first point >= to point, or None if out of bounds."""
-        raise NotImplementedError()
+        pass
 
+    @abstractmethod
     def get_stop_point(self):
         """Return the last point in this sequence, or None if unbounded."""
-        raise NotImplementedError()
+        pass
 
+    @abstractmethod
     def __eq__(self, other):
         # Return True if other (sequence) is equal to self.
-        raise NotImplementedError()
+        pass
+
+
+class TestBaseClasses(unittest.TestCase):
+    """Test the abstract base classes cannot be instantiated on their own
+    """
+    def test_simple_abstract_class_test(self):
+        """Cannot instantiate abstract classes, they must be defined in
+        the subclasses"""
+        self.assertRaises(TypeError, SequenceBase, "sequence-string",
+                          "context_string")
+        self.assertRaises(TypeError, IntervalBase, "value")
+        self.assertRaises(TypeError, PointBase, "value")
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/lib/cylc/cycling/integer.py b/lib/cylc/cycling/integer.py
index 1714931..61f234e 100755
--- a/lib/cylc/cycling/integer.py
+++ b/lib/cylc/cycling/integer.py
@@ -263,8 +263,17 @@ class IntegerSequence(SequenceBase):
         self.i_offset = IntegerInterval('P0')
 
         matched_recurrence = False
-
-        expression, exclusion = parse_exclusion(dep_section)
+        expression, excl_points = parse_exclusion(dep_section)
+        # Create a list of multiple exclusion points, if there are any.
+        if excl_points:
+            self.exclusions = set()
+            for excl in excl_points:
+                self.exclusions.add(get_point_from_expression(
+                    excl,
+                    None,
+                    is_required=False))
+        else:
+            self.exclusions = None
 
         for rec, format_num in RECURRENCE_FORMAT_RECS:
             results = rec.match(expression)
@@ -297,12 +306,6 @@ class IntegerSequence(SequenceBase):
             stop, self.p_context_stop, is_required=end_required)
         if intv:
             self.i_step = IntegerInterval(intv)
-        if exclusion:
-            self.exclusion = get_point_from_expression(exclusion, None,
-                                                       is_required=False)
-        else:
-            self.exclusion = None
-
         if format_num == 3:
             # REPEAT/START/PERIOD
             if not intv or reps is not None and reps <= 1:
@@ -417,7 +420,7 @@ class IntegerSequence(SequenceBase):
 
     def is_on_sequence(self, point):
         """Is point on-sequence, disregarding bounds?"""
-        if self.exclusion and point == self.exclusion:
+        if self.exclusions and point in self.exclusions:
             return False
         if self.i_step:
             return int(point - self.p_start) % int(self.i_step) == 0
@@ -452,7 +455,7 @@ class IntegerSequence(SequenceBase):
         else:
             prev_point = point - self.i_step
         ret = self._get_point_in_bounds(prev_point)
-        if self.exclusion and ret == self.exclusion:
+        if self.exclusions and ret in self.exclusions:
             return self.get_prev_point(ret)
         return ret
 
@@ -468,7 +471,7 @@ class IntegerSequence(SequenceBase):
                 break
             prev_point = sequence_point
             sequence_point = self.get_next_point(sequence_point)
-        if self.exclusion and prev_point == self.exclusion:
+        if self.exclusions and prev_point in self.exclusions:
             return self.get_nearest_prev_point(prev_point)
         return prev_point
 
@@ -484,7 +487,7 @@ class IntegerSequence(SequenceBase):
         i = int(point - self.p_start) % int(self.i_step)
         next_point = point + self.i_step - IntegerInterval.from_integer(i)
         ret = self._get_point_in_bounds(next_point)
-        if self.exclusion and ret and ret == self.exclusion:
+        if self.exclusions and ret and ret in self.exclusions:
             return self.get_next_point(ret)
         return ret
 
@@ -496,7 +499,7 @@ class IntegerSequence(SequenceBase):
             return None
         next_point = point + self.i_step
         ret = self._get_point_in_bounds(next_point)
-        if self.exclusion and ret and ret == self.exclusion:
+        if self.exclusions and ret and ret in self.exclusions:
             return self.get_next_point_on_sequence(ret)
         return ret
 
@@ -509,19 +512,19 @@ class IntegerSequence(SequenceBase):
             point = self._get_point_in_bounds(point)
         else:
             point = self.get_next_point(point)
-        if self.exclusion and point == self.exclusion:
+        if self.exclusions and point in self.exclusions:
             return self.get_next_point_on_sequence(point)
         return point
 
     def get_start_point(self):
         """Return the first point in this sequence, or None."""
-        if self.exclusion and self.p_start == self.exclusion:
+        if self.exclusions and self.p_start in self.exclusions:
             return self.get_next_point_on_sequence(self.p_start)
         return self.p_start
 
     def get_stop_point(self):
         """Return the last point in this sequence, or None if unbounded."""
-        if self.exclusion and self.p_stop == self.exclusion:
+        if self.exclusions and self.p_stop in self.exclusions:
             return self.get_prev_point(self.p_stop)
         return self.p_stop
 
@@ -534,7 +537,7 @@ class IntegerSequence(SequenceBase):
             return self.i_step == other.i_step and \
                 self.p_start == other.p_start and \
                 self.p_stop == other.p_stop and \
-                self.exclusion == other.exclusion
+                self.exclusions == other.exclusions
 
 
 def init_from_cfg(cfg):
@@ -583,6 +586,43 @@ class TestIntegerSequence(unittest.TestCase):
             point = sequence.get_next_point(point)
         self.assertEqual([int(out) for out in output], [1, 2, 4, 5])
 
+    def test_multiple_exclusions_simple(self):
+        """Tests the multiple exclusion syntax for integer notation"""
+        sequence = IntegerSequence('R/P1!(2,3,7)', 1, 10)
+        output = []
+        point = sequence.get_start_point()
+        while point:
+            output.append(point)
+            point = sequence.get_next_point(point)
+        self.assertEqual([int(out) for out in output], [1, 4, 5, 6, 8, 9, 10])
+
+    def test_multiple_exclusions_extensive(self):
+        """Tests IntegerSequence methods for sequences with multi-exclusions"""
+        points = [IntegerPoint(i) for i in range(10)]
+        sequence = IntegerSequence('R/P1!(2,3,7)', 1, 10)
+        self.assertFalse(sequence.is_on_sequence(points[3]))
+        self.assertFalse(sequence.is_valid(points[3]))
+        self.assertEqual(sequence.get_prev_point(points[3]), points[1])
+        self.assertEqual(sequence.get_prev_point(points[4]), points[1])
+        self.assertEqual(sequence.get_nearest_prev_point(points[3]), points[1])
+        self.assertEqual(sequence.get_nearest_prev_point(points[4]), points[1])
+        self.assertEqual(sequence.get_next_point(points[3]), points[4])
+        self.assertEqual(sequence.get_next_point(points[2]), points[4])
+        self.assertEqual(sequence.get_next_point_on_sequence(
+            points[3]),
+            points[4])
+        self.assertEqual(sequence.get_next_point_on_sequence(
+            points[6]),
+            points[8])
+
+        sequence = IntegerSequence('R/P1!(1,3,4)', 1, 10)
+        self.assertEqual(sequence.get_first_point(points[1]), points[2])
+        self.assertEqual(sequence.get_first_point(points[0]), points[2])
+        self.assertEqual(sequence.get_start_point(), points[2])
+
+        sequence = IntegerSequence('R/P1!(8,9,10)', 1, 10)
+        self.assertEqual(sequence.get_stop_point(), points[7])
+
     def test_exclusions_extensive(self):
         """Test IntegerSequence methods for sequences with exclusions."""
         point_0 = IntegerPoint(0)
diff --git a/lib/cylc/cycling/iso8601.py b/lib/cylc/cycling/iso8601.py
index b6463ba..3ad355b 100755
--- a/lib/cylc/cycling/iso8601.py
+++ b/lib/cylc/cycling/iso8601.py
@@ -298,7 +298,6 @@ class ISO8601Sequence(SequenceBase):
 
     def __init__(self, dep_section, context_start_point=None,
                  context_end_point=None):
-
         self.dep_section = dep_section
 
         if context_start_point is None:
@@ -326,14 +325,30 @@ class ISO8601Sequence(SequenceBase):
             dump_format=SuiteSpecifics.DUMP_FORMAT,
             assumed_time_zone=SuiteSpecifics.ASSUMED_TIME_ZONE
         )
-        self.recurrence, excl_point = self.abbrev_util.parse_recurrence(
-            dep_section)
-        self.exclusion = ISO8601Point.from_nonstandard_string(
-            str(excl_point)) if excl_point else None
+
+        # Parse_recurrence returns an isodatetime TimeRecurrence object
+        # and a list of exclusion strings.
+        self.recurrence, excl_points = self.abbrev_util.parse_recurrence(
+            dep_section)  # TODO: confirm whether this should be self.dep_section
+
+        # Convert (potentially) non-standard strings to ISO8601Point objects.
+        self.exclusions = []
+        for point in excl_points:
+            exclusion = ISO8601Point.from_nonstandard_string(
+                str(point)) if point else None
+            self.exclusions.append(exclusion)
+
+        # Convert the exclusion ISO8601Points to TimePoints so they can
+        # be compared with the TimeRecurrence object later.
+        self.p_iso_exclusions = []
+        for timepoint in self.exclusions:
+            self.p_iso_exclusions.append(point_parse(str(timepoint)))
+
         self.step = ISO8601Interval(str(self.recurrence.duration))
         self.value = str(self.recurrence)
-        if self.exclusion:
-            self.value += '!' + str(self.exclusion)
+        # Concatenate the strings in exclusion list
+        if self.exclusions:
+            self.value += '!' + str(self.exclusions)
 
     def get_interval(self):
         """Return the interval between points in this sequence."""
@@ -353,14 +368,14 @@ class ISO8601Sequence(SequenceBase):
         self._cached_next_point_values = {}
         self._cached_valid_point_booleans = {}
         self._cached_recent_valid_points = []
-        self.value = str(self.recurrence) + '!' + str(self.exclusion)
-        if self.exclusion:
-            self.value += '!' + str(self.exclusion)
+        self.value = str(self.recurrence) + '!' + str(self.exclusions)
+        if self.exclusions:
+            self.value += '!' + str(self.exclusions)
 
     def is_on_sequence(self, point):
         """Return True if point is on-sequence."""
         # Iterate starting at recent valid points, for speed.
-        if self.exclusion and point == self.exclusion:
+        if self.exclusions and point in self.exclusions:
             return False
         for valid_point in reversed(self._cached_recent_valid_points):
             if valid_point == point:
@@ -399,7 +414,11 @@ class ISO8601Sequence(SequenceBase):
                 raise SequenceDegenerateError(self.recurrence,
                                               SuiteSpecifics.DUMP_FORMAT,
                                               res, point)
-            if self.exclusion and res == self.exclusion:
+            # Check if res point is in the list of exclusions
+            # If so, check the previous point by recursion.
+            # Once you have found a point that is *not* in the exclusion
+            # list, you can return it.
+            if self.exclusions and res in self.exclusions:
                 return self.get_prev_point(res)
         return res
 
@@ -408,14 +427,12 @@ class ISO8601Sequence(SequenceBase):
         if self.is_on_sequence(point):
             return self.get_prev_point(point)
         p_iso_point = point_parse(point.value)
-        p_iso_excl = None
-        if self.exclusion:
-            p_iso_excl = point_parse(self.exclusion.value)
         prev_iso_point = None
+
         for recurrence_iso_point in self.recurrence:
+            # Is recurrence point greater than arbitrary point?
             if (recurrence_iso_point > p_iso_point or
-                    (p_iso_excl and recurrence_iso_point == p_iso_excl)):
-                # Technically, >=, but we already test for this above.
+                    (recurrence_iso_point in self.p_iso_exclusions)):
                 break
             prev_iso_point = recurrence_iso_point
         if prev_iso_point is None:
@@ -426,7 +443,8 @@ class ISO8601Sequence(SequenceBase):
                 self.recurrence, SuiteSpecifics.DUMP_FORMAT,
                 nearest_point, point
             )
-        if self.exclusion and nearest_point == self.exclusion:
+        # Check all exclusions
+        if self.exclusions and nearest_point in self.exclusions:
             return self.get_prev_point(nearest_point)
         return nearest_point
 
@@ -445,7 +463,7 @@ class ISO8601Sequence(SequenceBase):
             while next_point is not None and (next_point <= point or excluded):
                 excluded = False
                 next_point = self.get_next_point_on_sequence(next_point)
-                if next_point and next_point == self.exclusion:
+                if next_point and next_point in self.exclusions:
                     excluded = True
             if next_point is not None:
                 self._check_and_cache_next_point(point, next_point)
@@ -455,7 +473,7 @@ class ISO8601Sequence(SequenceBase):
         for recurrence_iso_point in self.recurrence:
             if recurrence_iso_point > p_iso_point:
                 next_point = ISO8601Point(str(recurrence_iso_point))
-                if next_point and next_point == self.exclusion:
+                if next_point and next_point in self.exclusions:
                     continue
                 self._check_and_cache_next_point(point, next_point)
                 return next_point
@@ -494,7 +512,8 @@ class ISO8601Sequence(SequenceBase):
                     self.recurrence, SuiteSpecifics.DUMP_FORMAT,
                     point, result
                 )
-        if result and result == self.exclusion:
+        # Check it is in the exclusions list now
+        if result and result in self.exclusions:
             return self.get_next_point_on_sequence(result)
         return result
 
@@ -509,7 +528,8 @@ class ISO8601Sequence(SequenceBase):
             if recurrence_iso_point >= p_iso_point:
                 first_point_value = str(recurrence_iso_point)
                 ret = ISO8601Point(first_point_value)
-                if ret and ret == self.exclusion:
+                # Check multiple exclusions
+                if ret and ret in self.exclusions:
                     return self.get_next_point_on_sequence(ret)
                 if (len(self._cached_first_point_values) >
                         self._MAX_CACHED_POINTS):
@@ -523,7 +543,8 @@ class ISO8601Sequence(SequenceBase):
         """Return the first point in this sequence, or None."""
         for recurrence_iso_point in self.recurrence:
             point = ISO8601Point(str(recurrence_iso_point))
-            if not self.exclusion or point != self.exclusion:
+            # Check for multiple exclusions
+            if not self.exclusions or point not in self.exclusions:
                 return point
         return None
 
@@ -540,7 +561,7 @@ class ISO8601Sequence(SequenceBase):
                 prev = curr
                 curr = recurrence_iso_point
             ret = ISO8601Point(str(recurrence_iso_point))
-            if self.exclusion and ret == self.exclusion:
+            if self.exclusions and ret in self.exclusions:
                 return ISO8601Point(str(prev))
             return ret
         return None
@@ -717,6 +738,70 @@ class TestISO8601Sequence(unittest.TestCase):
         self.assertEqual(output, ['20000101T0000Z', '20000101T0100Z',
                                   '20000101T0300Z', '20000101T0400Z'])
 
+    def test_multiple_exclusions_complex1(self):
+        """Tests sequences that have multiple exclusions and a more
+        complicated format"""
+
+        # A sequence that specifies a dep start time
+        sequence = ISO8601Sequence('20000101T01Z/PT1H!20000101T02Z',
+                                   '20000101T01Z')
+
+        output = []
+        point = sequence.get_start_point()
+        count = 0
+        # We are going to make four sequence points
+        while point and count < 4:
+            output.append(point)
+            point = sequence.get_next_point(point)
+            count += 1
+        output = [str(out) for out in output]
+        # We should expect one of the hours to be excluded: T02
+        self.assertEqual(output, ['20000101T0100Z', '20000101T0300Z',
+                                  '20000101T0400Z', '20000101T0500Z'])
+
+    def test_multiple_exclusions_complex2(self):
+        """Tests sequences that have multiple exclusions and a more
+        complicated format"""
+
+        # A sequence that specifies a dep start time
+        sequence = ISO8601Sequence('20000101T01Z/PT1H!'
+                                   '(20000101T02Z,20000101T03Z)',
+                                   '20000101T00Z',
+                                   '20000101T05Z')
+
+        output = []
+        point = sequence.get_start_point()
+        count = 0
+        # We are going to make three sequence points
+        while point and count < 3:
+            output.append(point)
+            point = sequence.get_next_point(point)
+            count += 1
+        output = [str(out) for out in output]
+        # We should expect two of the hours to be excluded: T02, T03
+        self.assertEqual(output, ['20000101T0100Z', '20000101T0400Z',
+                                  '20000101T0500Z'])
+
+    def test_multiple_exclusions_simple(self):
+        """Tests generation of points for sequences with multiple exclusions
+        """
+        init(time_zone='Z')
+        sequence = ISO8601Sequence('PT1H!(20000101T02Z,20000101T03Z)',
+                                   '20000101T00Z')
+
+        output = []
+        point = sequence.get_start_point()
+        count = 0
+        # We are going to make four sequence points
+        while point and count < 4:
+            output.append(point)
+            point = sequence.get_next_point(point)
+            count += 1
+        output = [str(out) for out in output]
+        # We should expect two of the hours to be excluded: T02 and T03
+        self.assertEqual(output, ['20000101T0000Z', '20000101T0100Z',
+                                  '20000101T0400Z', '20000101T0500Z'])
+
     def test_exclusions_extensive(self):
         """Test ISO8601Sequence methods for sequences with exclusions"""
         init(time_zone='+05')
@@ -741,6 +826,49 @@ class TestISO8601Sequence(unittest.TestCase):
         self.assertEqual(sequence.get_first_point(point_0), point_1)
         self.assertEqual(sequence.get_start_point(), point_1)
 
+    def test_multiple_exclusions_extensive(self):
+        """Test ISO8601Sequence methods for sequences with multiple exclusions
+        """
+        init(time_zone='+05')
+        sequence = ISO8601Sequence('PT1H!(20000101T02Z,20000101T03Z)',
+                                   '20000101T00Z',
+                                   '20000101T06Z')
+
+        point_0 = ISO8601Point('20000101T00Z')
+        point_1 = ISO8601Point('20000101T01Z')
+        point_2 = ISO8601Point('20000101T02Z')  # First excluded point
+        point_3 = ISO8601Point('20000101T03Z')  # Second excluded point
+        point_4 = ISO8601Point('20000101T04Z')
+
+        # Check the excluded points are not on the sequence
+        self.assertFalse(sequence.is_on_sequence(point_2))
+        self.assertFalse(sequence.is_on_sequence(point_3))
+        self.assertFalse(sequence.is_valid(point_2))  # Should be excluded
+        self.assertFalse(sequence.is_valid(point_3))  # Should be excluded
+        # Check that we can correctly retrieve previous points
+        self.assertEqual(sequence.get_prev_point(point_2), point_1)
+        # Should skip two excluded points
+        self.assertEqual(sequence.get_prev_point(point_4), point_1)
+        self.assertEqual(sequence.get_nearest_prev_point(point_2), point_1)
+        self.assertEqual(sequence.get_nearest_prev_point(point_4), point_1)
+        self.assertEqual(sequence.get_next_point(point_1), point_4)
+        self.assertEqual(sequence.get_next_point(point_3), point_4)
+
+        sequence = ISO8601Sequence('PT1H!20000101T00Z', '20000101T00Z')
+        # Check that the first point is after 00.
+        self.assertEqual(sequence.get_first_point(point_0), point_1)
+        self.assertEqual(sequence.get_start_point(), point_1)
+
+        # Check a longer list of exclusions
+        # Also note you can change the format of the exclusion list
+        # (removing the parentheses)
+        sequence = ISO8601Sequence('PT1H! 20000101T02Z, 20000101T03Z,'
+                                   '20000101T04Z',
+                                   '20000101T00Z',
+                                   '20000101T06Z')
+        self.assertEqual(sequence.get_nearest_prev_point(point_3), point_1)
+        self.assertEqual(sequence.get_nearest_prev_point(point_4), point_1)
+
     def test_simple(self):
         """Run some simple tests for date-time cycling."""
         init(time_zone='Z')
diff --git a/lib/cylc/graph_parser.py b/lib/cylc/graph_parser.py
index 2cddc18..444337f 100644
--- a/lib/cylc/graph_parser.py
+++ b/lib/cylc/graph_parser.py
@@ -1,4 +1,4 @@
-#!usr/bin/env python
+#!/usr/bin/env python
 
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2017 NIWA
@@ -255,7 +255,7 @@ class GraphParser(object):
             # Auto-trigger lone nodes and initial nodes in a chain.
             for name, offset, _ in self.__class__.REC_NODES.findall(chain[0]):
                 if not offset:
-                    pairs.add(('', name))
+                    pairs.add((None, name))
             for i in range(0, len(chain) - 1):
                 pairs.add((chain[i], chain[i + 1]))
 
@@ -275,48 +275,61 @@ class GraphParser(object):
         Trigger qualifiers, but not cycle offsets, are ignored on the right to
         allow chaining.
         """
-        if self.__class__.OP_OR in right:
+        # Raise error for right-hand-side OR operators.
+        if right and self.__class__.OP_OR in right:
             raise GraphParseError("ERROR, illegal OR on RHS: %s" % right)
-        # Remove aualifiers from right-side nodes.
-        for qual in self.__class__.REC_TRIG_QUAL.findall(right):
-            right = right.replace(qual, '')
-        if self.__class__.SUICIDE_MARK in left:
+
+        # Remove qualifiers from right-side nodes.
+        if right:
+            for qual in self.__class__.REC_TRIG_QUAL.findall(right):
+                right = right.replace(qual, '')
+
+        # Raise error if suicide triggers on the left of the trigger.
+        if left and self.__class__.SUICIDE_MARK in left:
             raise GraphParseError(
                 "ERROR, suicide markers must be"
                 " on the right of a trigger: %s" % left)
+
         # Cycle point offsets are not allowed on the right side (yet).
-        if '[' in right:
+        if right and '[' in right:
             raise GraphParseError(
                 "ERROR, illegal cycle point offset on the right: %s => %s" % (
                     left, right))
+
         # Check that parentheses match.
-        if left.count("(") != left.count(")"):
+        if left and left.count("(") != left.count(")"):
             raise GraphParseError(
                 "ERROR, parenthesis mismatch in: \"" + left + "\"")
 
         # Split right side on AND.
         rights = right.split(self.__class__.OP_AND)
-        if right and not all(rights):
+        if '' in rights or right and not all(rights):
             raise GraphParseError(
                 "ERROR, null task name in graph: %s=>%s" % (left, right))
 
-        if self.__class__.OP_OR in left or '(' in left:
+        if not left or (self.__class__.OP_OR in left or '(' in left):
             # Treat conditional or bracketed expressions as a single entity.
             lefts = [left]
         else:
             # Split non-conditional left-side expressions on AND.
             lefts = left.split(self.__class__.OP_AND)
-        if left and not all(lefts):
+        if '' in lefts or left and not all(lefts):
             raise GraphParseError(
                 "ERROR, null task name in graph: %s=>%s" % (left, right))
 
         for left in lefts:
-            # Extract infomation about all nodes on the left.
-            info = self.__class__.REC_NODES.findall(left)
+            # Extract information about all nodes on the left.
+
+            if left:
+                info = self.__class__.REC_NODES.findall(left)
+                expr = left
+            else:
+                # There is no left-hand-side task.
+                info = []
+                expr = ''
 
             # Make success triggers explicit.
             n_info = []
-            expr = left
             for name, offset, trig in info:
                 if not trig:
                     trig = self.__class__.TRIG_SUCCEED
diff --git a/lib/cylc/gui/app_gcylc.py b/lib/cylc/gui/app_gcylc.py
index 6efdd01..d0b958b 100644
--- a/lib/cylc/gui/app_gcylc.py
+++ b/lib/cylc/gui/app_gcylc.py
@@ -68,11 +68,12 @@ from cylc.cfgspec.gcylc import gcfg
 from cylc.wallclock import (
     get_current_time_string, get_time_string_from_unix_time)
 from cylc.task_state import (
-    TaskState, TASK_STATUSES_ALL, TASK_STATUSES_RESTRICTED,
+    TASK_STATUSES_ALL, TASK_STATUSES_RESTRICTED,
     TASK_STATUSES_WITH_JOB_SCRIPT, TASK_STATUSES_WITH_JOB_LOGS,
     TASK_STATUSES_TRIGGERABLE, TASK_STATUSES_ACTIVE,
     TASK_STATUS_WAITING, TASK_STATUS_HELD, TASK_STATUS_READY,
     TASK_STATUS_RUNNING, TASK_STATUS_SUCCEEDED, TASK_STATUS_FAILED)
+from cylc.task_state_prop import get_status_prop
 
 
 def run_get_stdout(command, filter=False):
@@ -1144,7 +1145,7 @@ been defined for this suite""").inform()
     def startsuite(self, bt, window, coldstart_rb, warmstart_rb, restart_rb,
                    entry_point_string, stop_point_string_entry,
                    checkpoint_entry, optgroups, mode_live_rb, mode_sim_rb,
-                   mode_dum_rb, hold_cb, holdpoint_entry):
+                   mode_dum_rb, mode_dum_loc_rb, hold_cb, holdpoint_entry):
         """Call back for "Run Suite" dialog box.
 
         Build "cylc run/restart" command from dialog box options and entries,
@@ -1171,6 +1172,8 @@ been defined for this suite""").inform()
             command += ' --mode=simulation'
         elif mode_dum_rb.get_active():
             command += ' --mode=dummy'
+        elif mode_dum_loc_rb.get_active():
+            command += ' --mode=dummy-local'
 
         if method == 'restart' and checkpoint_entry.get_text():
             command += ' --checkpoint=' + checkpoint_entry.get_text()
@@ -1223,13 +1226,43 @@ been defined for this suite""").inform()
         about.run()
         about.destroy()
 
-    def view_task_descr(self, w, task_id):
+    def view_task_descr(self, w, e, task_id, *args):
         command = ("cylc show" + self.get_remote_run_opts() + " " +
                    self.cfg.suite + " " + task_id)
         foo = gcapture_tmpfile(command, self.cfg.cylc_tmpdir, 600, 400)
         self.gcapture_windows.append(foo)
         foo.run()
 
+    def view_in_editor(self, w, e, task_id, choice):
+        try:
+            task_state_summary = self.updater.full_state_summary[task_id]
+        except KeyError:
+            warning_dialog('%s is not live' % task_id, self.window).warn()
+            return False
+        if (not task_state_summary['logfiles'] and
+                not task_state_summary.get('job_hosts')):
+            warning_dialog('%s has no log files' % task_id, self.window).warn()
+        else:
+            if choice == 'job-activity.log':
+                command_opt = "--activity"
+            elif choice == 'job.status':
+                command_opt = "--status"
+            elif choice == 'job.out':
+                command_opt = "--stdout"
+            elif choice == 'job.err':
+                command_opt = "--stderr"
+            elif choice == 'job':
+                command_opt = ""
+
+            command = (
+                "cylc cat-log %s --geditor %s %s" % (
+                    command_opt, self.cfg.suite, task_id)
+            )
+
+            foo = gcapture_tmpfile(command, self.cfg.cylc_tmpdir, 400, 400)
+            self.gcapture_windows.append(foo)
+            foo.run()
+
     def view_task_info(self, w, e, task_id, choice):
         try:
             task_state_summary = self.updater.full_state_summary[task_id]
@@ -1344,16 +1377,18 @@ been defined for this suite""").inform()
                     gtk.STOCK_DIALOG_INFO, gtk.ICON_SIZE_MENU)
                 info_item.set_image(img)
                 view_menu.append(info_item)
-                info_item.connect(
-                    'activate', self.popup_requisites, task_ids[0])
+                self.connect_right_click_sub_menu(is_graph_view, info_item,
+                                                  self.popup_requisites,
+                                                  task_ids[0], None)
 
                 js0_item = gtk.ImageMenuItem('run "cylc show"')
                 img = gtk.image_new_from_stock(
                     gtk.STOCK_DIALOG_INFO, gtk.ICON_SIZE_MENU)
                 js0_item.set_image(img)
                 view_menu.append(js0_item)
-                js0_item.connect(
-                    'activate', self.view_task_descr, task_ids[0])
+                self.connect_right_click_sub_menu(is_graph_view, js0_item,
+                                                  self.view_task_descr,
+                                                  task_ids[0], None)
 
                 # PDF user guide.
                 # This method of setting a custom menu item is not supported
@@ -1363,7 +1398,48 @@ been defined for this suite""").inform()
                 # help_menu.append(cug_pdf_item)
                 # cug_pdf_item.connect('activate', self.browse, '--pdf')
 
-        # Separator.
+                # View In Editor.
+                view_editor_menu = gtk.Menu()
+                view_editor_item = gtk.ImageMenuItem("View In Editor")
+                img = gtk.image_new_from_stock(gtk.STOCK_DIALOG_INFO,
+                                               gtk.ICON_SIZE_MENU)
+                view_editor_item.set_image(img)
+                view_editor_item.set_submenu(view_editor_menu)
+                menu.append(view_editor_item)
+
+                # NOTE: we have to respond to 'button-release-event' rather
+                # than 'activate' in order for sub-menus to work in the
+                # graph-view so use connect_right_click_sub_menu instead of
+                # item.connect
+
+                for key, filename in [
+                        ('job script', 'job'),
+                        ('job activity log', 'job-activity.log'),
+                        ('job status file', 'job.status')]:
+                    item = gtk.ImageMenuItem(key)
+                    item.set_image(gtk.image_new_from_stock(
+                        gtk.STOCK_DND, gtk.ICON_SIZE_MENU))
+                    view_editor_menu.append(item)
+                    self.connect_right_click_sub_menu(is_graph_view, item,
+                                                      self.view_in_editor,
+                                                      task_ids[0], filename)
+                    item.set_sensitive(
+                        t_states[0] in TASK_STATUSES_WITH_JOB_SCRIPT)
+
+                for key, filename in [
+                        ('job stdout', 'job.out'),
+                        ('job stderr', 'job.err')]:
+                    item = gtk.ImageMenuItem(key)
+                    item.set_image(gtk.image_new_from_stock(
+                        gtk.STOCK_DND, gtk.ICON_SIZE_MENU))
+                    view_editor_menu.append(item)
+                    self.connect_right_click_sub_menu(is_graph_view, item,
+                                                      self.view_in_editor,
+                                                      task_ids[0], filename)
+                    item.set_sensitive(
+                        t_states[0] in TASK_STATUSES_WITH_JOB_LOGS)
+
+        # Separator
         menu.append(gtk.SeparatorMenuItem())
 
         # Trigger (run now).
@@ -1589,7 +1665,7 @@ been defined for this suite""").inform()
         else:
             tb.insert(tb.get_end_iter(), line)
 
-    def popup_requisites(self, w, task_id):
+    def popup_requisites(self, w, e, task_id, *args):
         """Show prerequisites of task_id in a pop up window."""
         name, point_string = TaskID.split(task_id)
         results, bad_items = self.get_comms_info(
@@ -1982,10 +2058,12 @@ shown here in the state they were in at the time of triggering.''')
         box.pack_start(gtk.Label('Mode'), True)
         mode_live_rb = gtk.RadioButton(None, "live")
         box.pack_start(mode_live_rb, True)
-        mode_sim_rb = gtk.RadioButton(mode_live_rb, "simulation")
-        box.pack_start(mode_sim_rb, True)
         mode_dum_rb = gtk.RadioButton(mode_live_rb, "dummy")
         box.pack_start(mode_dum_rb, True)
+        mode_dum_loc_rb = gtk.RadioButton(mode_live_rb, "dummy-local")
+        box.pack_start(mode_dum_loc_rb, True)
+        mode_sim_rb = gtk.RadioButton(mode_live_rb, "simulation")
+        box.pack_start(mode_sim_rb, True)
 
         mode_live_rb.set_active(True)
         vbox.pack_start(box)
@@ -2083,7 +2161,8 @@ shown here in the state they were in at the time of triggering.''')
                              warmstart_rb, restart_rb, point_string_entry,
                              stop_point_string_entry, checkpoint_entry,
                              optgroups, mode_live_rb, mode_sim_rb,
-                             mode_dum_rb, hold_cb, holdpoint_entry)
+                             mode_dum_rb, mode_dum_loc_rb, hold_cb,
+                             holdpoint_entry)
 
         help_run_button = gtk.Button("_Help Run")
         help_run_button.connect("clicked", self.command_help, "control", "run")
@@ -2251,7 +2330,7 @@ shown here in the state they were in at the time of triggering.''')
                 name,
             )
             for submit_num, job_user_at_host in sorted(
-                    job_hosts.items(), reverse=True):
+                    job_hosts.items(), reverse=True, key=lambda x: int(x[0])):
                 submit_num_str = "%02d" % int(submit_num)
                 local_job_log_dir = os.path.join(itask_log_dir, submit_num_str)
                 for filename in ["job", "job-activity.log"]:
@@ -3130,8 +3209,7 @@ This is what my suite does:..."""
                     pass
                 else:
                     icon = dotm.get_image(st)
-                    cb = gtk.CheckButton(
-                        TaskState.get_status_prop(st, 'gtk_label'))
+                    cb = gtk.CheckButton(get_status_prop(st, 'gtk_label'))
                     cb.set_active(st not in self.filter_states_excl)
                     cb.connect('toggled', self.check_task_filter_buttons)
                     tooltip = gtk.Tooltips()
diff --git a/lib/cylc/gui/gpanel.py b/lib/cylc/gui/gpanel.py
index 9a5a1a2..5250500 100755
--- a/lib/cylc/gui/gpanel.py
+++ b/lib/cylc/gui/gpanel.py
@@ -33,7 +33,8 @@ from cylc.cfgspec.gscan import gsfg
 import cylc.flags
 from cylc.gui.app_gcylc import run_get_stdout
 from cylc.gui.dot_maker import DotMaker
-from cylc.gui.scanutil import KEY_PORT, get_scan_menu, update_suites_info
+from cylc.gui.scanutil import (KEY_PORT, get_gpanel_scan_menu,
+                               update_suites_info)
 from cylc.gui.util import get_icon, setup_icons
 from cylc.network import KEY_STATES
 from cylc.network.suite_state_client import extract_group_state
@@ -158,9 +159,7 @@ class ScanPanelAppletUpdater(object):
             self._should_force_update = False
 
         # Get new information.
-        self.suite_info_map = update_suites_info(
-            self.hosts, owner_pattern=self.owner_pattern,
-            prev_results=self.suite_info_map)
+        self.suite_info_map = update_suites_info(self)
         self.last_update_time = time.time()
         if self.suite_info_map:
             self._last_running_time = None
@@ -203,17 +202,17 @@ class ScanPanelAppletUpdater(object):
 
         extra_items.append(gscan_item)
 
-        menu = get_scan_menu(suite_keys,
-                             self.theme_name, self._set_theme,
-                             self.has_stopped_suites(),
-                             self.clear_stopped_suites,
-                             self.hosts,
-                             self.set_hosts,
-                             self.update_now,
-                             self.start,
-                             program_name="cylc gpanel",
-                             extra_items=extra_items,
-                             is_stopped=self.quit)
+        menu = get_gpanel_scan_menu(suite_keys,
+                                    self.theme_name, self._set_theme,
+                                    self.has_stopped_suites(),
+                                    self.clear_stopped_suites,
+                                    self.hosts,
+                                    self.set_hosts,
+                                    self.update_now,
+                                    self.start,
+                                    program_name="cylc gpanel",
+                                    extra_items=extra_items,
+                                    is_stopped=self.quit)
         menu.popup(None, None, None, event.button, event.time)
         return False
 
@@ -226,7 +225,8 @@ class ScanPanelAppletUpdater(object):
             len(self.suite_info_map) > self.MAX_INDIVIDUAL_SUITES)
         suite_statuses = {}
         compact_suite_statuses = []
-        for key, suite_info in sorted(self.suite_info_map.items()):
+        for key, suite_info in sorted(self.suite_info_map.items(),
+                                      key=lambda details: details[0][2]):
             if KEY_STATES not in suite_info:
                 continue
             host, _, suite = key
diff --git a/lib/cylc/gui/gscan.py b/lib/cylc/gui/gscan.py
index d2a39fb..ddc8913 100644
--- a/lib/cylc/gui/gscan.py
+++ b/lib/cylc/gui/gscan.py
@@ -19,7 +19,7 @@
 
 import re
 import threading
-import time
+from time import sleep, time
 
 import gtk
 import gobject
@@ -30,9 +30,11 @@ from isodatetime.data import (
 from cylc.cfgspec.gcylc import gcfg
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from cylc.cfgspec.gscan import gsfg
+from cylc.gui.legend import ThemeLegendWindow
 from cylc.gui.dot_maker import DotMaker
 from cylc.gui.scanutil import (
-    KEY_PORT, get_scan_menu, launch_gcylc, update_suites_info)
+    KEY_PORT, get_scan_menu, launch_gcylc, update_suites_info,
+    launch_hosts_dialog, launch_about_dialog)
 from cylc.gui.util import get_icon, setup_icons, set_exception_hook_dialog
 from cylc.network import (
     KEY_GROUP, KEY_STATES, KEY_TASKS_BY_STATE, KEY_TITLE, KEY_UPDATE_TIME)
@@ -87,7 +89,6 @@ class ScanApp(object):
         self.theme_name = gcfg.get(['use theme'])
         self.theme = gcfg.get(['themes', self.theme_name])
 
-        self.dots = DotMaker(self.theme)
         suite_treemodel = gtk.TreeStore(
             str,  # group
             str,  # host
@@ -100,7 +101,7 @@ class ScanApp(object):
             str,  # states_text
             str)  # warning_text
         self._prev_tooltip_location_id = None
-        self.suite_treeview = gtk.TreeView(suite_treemodel)
+        self.treeview = gtk.TreeView(suite_treemodel)
 
         # Visibility of columns
         vis_cols = gsfg.get(["columns"])
@@ -130,7 +131,7 @@ class ScanApp(object):
             column.set_sort_column_id(col_id)
             column.set_visible(col_title.lower() in vis_cols)
             column.set_resizable(True)
-            self.suite_treeview.append_column(column)
+            self.treeview.append_column(column)
 
         # Construct the status column.
         status_column = gtk.TreeViewColumn(gsfg.COL_STATUS)
@@ -141,7 +142,7 @@ class ScanApp(object):
         status_column.pack_start(cell_text_cycle, expand=False)
         status_column.set_cell_data_func(
             cell_text_cycle, self._set_cell_text_cycle, self.CYCLE_COLUMN)
-        self.suite_treeview.append_column(status_column)
+        self.treeview.append_column(status_column)
 
         # Warning icon.
         warn_icon = gtk.CellRendererPixbuf()
@@ -166,23 +167,17 @@ class ScanApp(object):
             status_column.set_cell_data_func(
                 cell_pixbuf_state, self._set_cell_pixbuf_state, i)
 
-        self.suite_treeview.show()
-        if hasattr(self.suite_treeview, "set_has_tooltip"):
-            self.suite_treeview.set_has_tooltip(True)
+        self.treeview.show()
+        if hasattr(self.treeview, "set_has_tooltip"):
+            self.treeview.set_has_tooltip(True)
             try:
-                self.suite_treeview.connect('query-tooltip',
-                                            self._on_query_tooltip)
+                self.treeview.connect('query-tooltip',
+                                      self._on_query_tooltip)
             except TypeError:
                 # Lower PyGTK version.
                 pass
-        self.suite_treeview.connect("button-press-event",
-                                    self._on_button_press_event)
-        scrolled_window = gtk.ScrolledWindow()
-        scrolled_window.set_policy(gtk.POLICY_AUTOMATIC,
-                                   gtk.POLICY_AUTOMATIC)
-        scrolled_window.add(self.suite_treeview)
-        scrolled_window.show()
-        self.vbox.pack_start(scrolled_window, expand=True, fill=True)
+        self.treeview.connect("button-press-event",
+                              self._on_button_press_event)
 
         patterns = {"name": None, "owner": None}
         for label, items in [
@@ -195,19 +190,245 @@ class ScanApp(object):
                     raise ValueError("Invalid %s pattern: %s" % (label, items))
 
         self.updater = ScanAppUpdater(
-            self.window, self.hosts, suite_treemodel, self.suite_treeview,
+            self.window, self.hosts, suite_treemodel, self.treeview,
             comms_timeout=comms_timeout, poll_interval=poll_interval,
             group_column_id=self.GROUP_COLUMN,
             name_pattern=patterns["name"], owner_pattern=patterns["owner"])
+
         self.updater.start()
+
+        self.dot_size = gcfg.get(['dot icon size'])
+        self._set_dots()
+
+        self.create_menubar()
+
+        accelgroup = gtk.AccelGroup()
+        self.window.add_accel_group(accelgroup)
+        key, modifier = gtk.accelerator_parse('<Alt>m')
+        accelgroup.connect_group(
+            key, modifier, gtk.ACCEL_VISIBLE, self._toggle_hide_menu_bar)
+
+        self.create_tool_bar()
+
+        self.menu_hbox = gtk.HBox()
+        self.menu_hbox.pack_start(self.menu_bar, expand=True, fill=True)
+        self.menu_hbox.pack_start(self.tool_bar, expand=True, fill=True)
+        self.menu_hbox.show_all()
+        self.menu_hbox.hide_all()
+
+        scrolled_window = gtk.ScrolledWindow()
+        scrolled_window.set_policy(gtk.POLICY_AUTOMATIC,
+                                   gtk.POLICY_AUTOMATIC)
+        scrolled_window.add(self.treeview)
+        scrolled_window.show()
+
+        self.vbox.pack_start(self.menu_hbox, expand=False)
+        self.vbox.pack_start(scrolled_window, expand=True, fill=True)
+
         self.window.add(self.vbox)
         self.window.connect("destroy", self._on_destroy_event)
-        self.window.set_default_size(300, 150)
-        self.suite_treeview.grab_focus()
+        wsize = gsfg.get(['window size'])
+        self.window.set_default_size(*wsize)
+        self.treeview.grab_focus()
         self.window.show()
 
+        self.theme_legend_window = None
         self.warning_icon_shown = []
 
+    def popup_theme_legend(self, widget=None):
+        """Popup a theme legend window."""
+        if self.theme_legend_window is None:
+            self.theme_legend_window = ThemeLegendWindow(
+                self.window, self.theme)
+            self.theme_legend_window.connect(
+                "destroy", self.destroy_theme_legend)
+        else:
+            self.theme_legend_window.present()
+
+    def update_theme_legend(self):
+        """Update the theme legend window, if it exists."""
+        if self.theme_legend_window is not None:
+            self.theme_legend_window.update(self.theme)
+
+    def destroy_theme_legend(self, widget):
+        """Handle a destroy of the theme legend window."""
+        self.theme_legend_window = None
+
+    def create_menubar(self):
+        """Create the main menu."""
+        self.menu_bar = gtk.MenuBar()
+
+        file_menu = gtk.Menu()
+        file_menu_root = gtk.MenuItem('_File')
+        file_menu_root.set_submenu(file_menu)
+
+        exit_item = gtk.ImageMenuItem('E_xit')
+        img = gtk.image_new_from_stock(gtk.STOCK_QUIT, gtk.ICON_SIZE_MENU)
+        exit_item.set_image(img)
+        exit_item.show()
+        exit_item.connect("activate", self._on_destroy_event)
+        file_menu.append(exit_item)
+
+        view_menu = gtk.Menu()
+        view_menu_root = gtk.MenuItem('_View')
+        view_menu_root.set_submenu(view_menu)
+
+        col_item = gtk.ImageMenuItem("_Columns...")
+        img = gtk.image_new_from_stock(gtk.STOCK_INDEX, gtk.ICON_SIZE_MENU)
+        col_item.set_image(img)
+        col_item.show()
+        col_menu = gtk.Menu()
+        for column_index, column in enumerate(self.treeview.get_columns()):
+            name = column.get_title()
+            is_visible = column.get_visible()
+            column_item = gtk.CheckMenuItem(name.replace("_", "__"))
+            column_item._connect_args = column_index
+            column_item.set_active(is_visible)
+            column_item.connect("toggled", self._on_toggle_column_visible)
+            column_item.show()
+            col_menu.append(column_item)
+
+        col_item.set_submenu(col_menu)
+        col_item.show_all()
+        view_menu.append(col_item)
+
+        view_menu.append(gtk.SeparatorMenuItem())
+
+        # Construct theme chooser items (same as cylc.gui.app_main).
+        theme_item = gtk.ImageMenuItem('Theme...')
+        img = gtk.image_new_from_stock(
+            gtk.STOCK_SELECT_COLOR, gtk.ICON_SIZE_MENU)
+        theme_item.set_image(img)
+        thememenu = gtk.Menu()
+        theme_item.set_submenu(thememenu)
+        theme_item.show()
+
+        theme_items = {}
+        theme = "default"
+        theme_items[theme] = gtk.RadioMenuItem(label=theme)
+        thememenu.append(theme_items[theme])
+        theme_items[theme].theme_name = theme
+        for theme in gcfg.get(['themes']):
+            if theme == "default":
+                continue
+            theme_items[theme] = gtk.RadioMenuItem(
+                group=theme_items['default'], label=theme)
+            thememenu.append(theme_items[theme])
+            theme_items[theme].theme_name = theme
+
+        # set_active then connect, to avoid causing an unnecessary toggle now.
+        theme_items[self.theme_name].set_active(True)
+        for theme in gcfg.get(['themes']):
+            theme_items[theme].show()
+            theme_items[theme].connect(
+                'toggled',
+                lambda i: (i.get_active() and self._set_theme(i.theme_name)))
+
+        view_menu.append(theme_item)
+
+        theme_legend_item = gtk.ImageMenuItem("Show task state key")
+        img = gtk.image_new_from_stock(
+            gtk.STOCK_SELECT_COLOR, gtk.ICON_SIZE_MENU)
+        theme_legend_item.set_image(img)
+        theme_legend_item.show()
+        theme_legend_item.connect("activate", self.popup_theme_legend)
+        view_menu.append(theme_legend_item)
+
+        view_menu.append(gtk.SeparatorMenuItem())
+
+        # Construct a configure scanned hosts item.
+        hosts_item = gtk.ImageMenuItem("Configure Hosts")
+        img = gtk.image_new_from_stock(
+            gtk.STOCK_PREFERENCES, gtk.ICON_SIZE_MENU)
+        hosts_item.set_image(img)
+        hosts_item.show()
+        hosts_item.connect(
+            "button-press-event",
+            lambda b, e: launch_hosts_dialog(
+                self.hosts, self.updater.set_hosts))
+        view_menu.append(hosts_item)
+
+        sep_item = gtk.SeparatorMenuItem()
+        sep_item.show()
+
+        help_menu = gtk.Menu()
+        help_menu_root = gtk.MenuItem('_Help')
+        help_menu_root.set_submenu(help_menu)
+
+        self.menu_bar.append(file_menu_root)
+        self.menu_bar.append(view_menu_root)
+        self.menu_bar.append(help_menu_root)
+
+        # Construct an about dialog item.
+        info_item = gtk.ImageMenuItem("About")
+        img = gtk.image_new_from_stock(gtk.STOCK_ABOUT, gtk.ICON_SIZE_MENU)
+        info_item.set_image(img)
+        info_item.show()
+        info_item.connect(
+            "button-press-event",
+            lambda b, e: launch_about_dialog("cylc gscan", self.hosts)
+        )
+        help_menu.append(info_item)
+
+        self.menu_bar.show_all()
+
+    def _set_dots(self):
+        self.dots = DotMaker(self.theme, size=self.dot_size)
+
+    def create_tool_bar(self):
+        """Create the tool bar for the GUI."""
+        self.tool_bar = gtk.Toolbar()
+
+        update_now_button = gtk.ToolButton(
+            icon_widget=gtk.image_new_from_stock(
+                gtk.STOCK_REFRESH, gtk.ICON_SIZE_SMALL_TOOLBAR))
+        update_now_button.set_label("Update")
+        tooltip = gtk.Tooltips()
+        tooltip.enable()
+        tooltip.set_tip(update_now_button, "Update now")
+        update_now_button.connect("clicked",
+                                  self.updater.update_now)
+
+        clear_stopped_button = gtk.ToolButton(
+            icon_widget=gtk.image_new_from_stock(
+                gtk.STOCK_CLEAR, gtk.ICON_SIZE_SMALL_TOOLBAR))
+        clear_stopped_button.set_label("Clear")
+        tooltip = gtk.Tooltips()
+        tooltip.enable()
+        tooltip.set_tip(clear_stopped_button, "Clear stopped suites")
+        clear_stopped_button.connect("clicked",
+                                     self.updater.clear_stopped_suites)
+
+        expand_button = gtk.ToolButton(
+            icon_widget=gtk.image_new_from_stock(
+                gtk.STOCK_ADD, gtk.ICON_SIZE_SMALL_TOOLBAR))
+        expand_button.set_label("Expand all")
+        tooltip = gtk.Tooltips()
+        tooltip.enable()
+        tooltip.set_tip(expand_button, "Expand all rows")
+        expand_button.connect(
+            "clicked", lambda e: self.treeview.expand_all())
+
+        collapse_button = gtk.ToolButton(
+            icon_widget=gtk.image_new_from_stock(
+                gtk.STOCK_REMOVE, gtk.ICON_SIZE_SMALL_TOOLBAR))
+        collapse_button.set_label("Expand all")
+        tooltip = gtk.Tooltips()
+        tooltip.enable()
+        tooltip.set_tip(collapse_button, "Collapse all rows")
+        collapse_button.connect(
+            "clicked", lambda e: self.treeview.collapse_all())
+
+        self.tool_bar.insert(update_now_button, 0)
+        self.tool_bar.insert(clear_stopped_button, 0)
+        self.tool_bar.insert(collapse_button, 0)
+        self.tool_bar.insert(expand_button, 0)
+        separator = gtk.SeparatorToolItem()
+        separator.set_expand(True)
+        self.tool_bar.insert(separator, 0)
+
+        self.tool_bar.show_all()
+
     def _on_button_press_event(self, treeview, event):
         """Tree view button press callback."""
         x = int(event.x)
@@ -251,68 +472,71 @@ class ScanApp(object):
             path = pth[0]
 
             iter_ = treemodel.get_iter(path)
+
             host, owner, suite = treemodel.get(
                 iter_, self.HOST_COLUMN, self.OWNER_COLUMN, self.SUITE_COLUMN)
-            if suite is None:
+            if suite is not None:
+                suite_keys.append((host, owner, suite))
+
+            elif suite is None:
                 # On an expanded cycle point row, so get from parent.
-                host, owner, suite = treemodel.get(
-                    treemodel.iter_parent(iter_),
-                    self.HOST_COLUMN, self.OWNER_COLUMN, self.SUITE_COLUMN)
-            suite_keys.append((host, owner, suite))
+                try:
+                    host, owner, suite = treemodel.get(
+                        treemodel.iter_parent(iter_),
+                        self.HOST_COLUMN, self.OWNER_COLUMN, self.SUITE_COLUMN)
+                    suite_keys.append((host, owner, suite))
+
+                except:
+                    # Now iterate over the children instead.
+                    # We need to iterate over the children as there can be more
+                    # than one suite in a group of suites.
+                    # Get a TreeIter pointing to the first child of parent iter
+                    suite_iter = treemodel.iter_children(iter_)
+
+                    # Iterate over the children until the end is reached
+                    while suite_iter is not None:
+                        host, owner, suite = treemodel.get(suite_iter,
+                                                           self.HOST_COLUMN,
+                                                           self.OWNER_COLUMN,
+                                                           self.SUITE_COLUMN)
+                        suite_keys.append((host, owner, suite))
+                        # Advance to the next pointer in the treemodel
+                        suite_iter = treemodel.iter_next(suite_iter)
 
         if event.type == gtk.gdk._2BUTTON_PRESS:
             if suite_keys:
                 launch_gcylc(suite_keys[0])
             return False
 
-        view_item = gtk.ImageMenuItem("View Column...")
-        img = gtk.image_new_from_stock(gtk.STOCK_INDEX, gtk.ICON_SIZE_MENU)
-        view_item.set_image(img)
-        view_item.show()
-        view_menu = gtk.Menu()
-        view_item.set_submenu(view_menu)
-        for column_index, column in enumerate(treeview.get_columns()):
-            name = column.get_title()
-            is_visible = column.get_visible()
-            column_item = gtk.CheckMenuItem(name.replace("_", "__"))
-            column_item._connect_args = (column_index, is_visible)
-            column_item.set_active(is_visible)
-            column_item.connect("toggled", self._on_toggle_column_visible)
-            column_item.show()
-            view_menu.append(column_item)
-
-        menu = get_scan_menu(
-            suite_keys,
-            self.theme_name,
-            self._set_theme,
-            self.updater.has_stopped_suites(),
-            self.updater.clear_stopped_suites,
-            self.hosts,
-            self.updater.set_hosts,
-            self.updater.update_now,
-            self.updater.start,
-            program_name="cylc gscan",
-            extra_items=[view_item],
-        )
+        menu = get_scan_menu(suite_keys, self._toggle_hide_menu_bar)
         menu.popup(None, None, None, event.button, event.time)
         return False
 
     def _on_destroy_event(self, _):
         """Callback on destroy of main window."""
-        self.updater.quit = True
-        gtk.main_quit()
+        try:
+            self.updater.quit = True
+            gtk.main_quit()
+        except RuntimeError:
+            pass
         return False
 
+    def _toggle_hide_menu_bar(self, *_):
+        if self.menu_hbox.get_property("visible"):
+            self.menu_hbox.hide_all()
+        else:
+            self.menu_hbox.show_all()
+
     def _on_query_tooltip(self, _, x, y, kbd_ctx, tooltip):
         """Handle a tooltip creation request."""
-        tip_context = self.suite_treeview.get_tooltip_context(x, y, kbd_ctx)
+        tip_context = self.treeview.get_tooltip_context(x, y, kbd_ctx)
         if tip_context is None:
             self._prev_tooltip_location_id = None
             return False
-        x, y = self.suite_treeview.convert_widget_to_bin_window_coords(x, y)
+        x, y = self.treeview.convert_widget_to_bin_window_coords(x, y)
         path, column, cell_x, _ = (
-            self.suite_treeview.get_path_at_pos(x, y))
-        model = self.suite_treeview.get_model()
+            self.treeview.get_path_at_pos(x, y))
+        model = self.treeview.get_model()
         iter_ = model.get_iter(path)
         parent_iter = model.iter_parent(iter_)
         if parent_iter is None or parent_iter and model.iter_has_child(iter_):
@@ -420,8 +644,9 @@ class ScanApp(object):
 
     def _on_toggle_column_visible(self, menu_item):
         """Toggle column visibility callback."""
-        column_index, is_visible = menu_item._connect_args
-        column = self.suite_treeview.get_columns()[column_index]
+        column_index = menu_item._connect_args
+        column = self.treeview.get_columns()[column_index]
+        is_visible = column.get_visible()
         column.set_visible(not is_visible)
         self.updater.update()
         return False
@@ -504,8 +729,7 @@ class ScanApp(object):
         suite_update_time = model.get_value(iter_, self.UPDATE_TIME_COLUMN)
         time_point = timepoint_from_epoch(suite_update_time)
         time_point.set_time_zone_to_local()
-        current_time = time.time()
-        current_point = timepoint_from_epoch(current_time)
+        current_point = timepoint_from_epoch(time())
         if str(time_point).split("T")[0] == str(current_point).split("T")[0]:
             time_string = str(time_point).split("T")[1]
         else:
@@ -525,7 +749,9 @@ class ScanApp(object):
         """Set GUI theme."""
         self.theme_name = new_theme_name
         self.theme = gcfg.get(['themes', self.theme_name])
-        self.dots = DotMaker(self.theme)
+        self._set_dots()
+        self.updater.update()
+        self.update_theme_legend()
 
     @staticmethod
     def _set_tooltip(widget, text):
@@ -555,7 +781,7 @@ class ScanAppUpdater(threading.Thread):
         self._should_force_update = False
         self.quit = False
         self.suite_treemodel = suite_treemodel
-        self.suite_treeview = suite_treeview
+        self.treeview = suite_treeview
         self.group_column_id = group_column_id
         self.tasks_by_state = {}
         self.warning_times = {}
@@ -577,16 +803,16 @@ class ScanAppUpdater(threading.Thread):
         """Expand a row if it matches rose_ids suite and host."""
         point_string_name_tuple = model.get(row_iter, 0, 1)
         if point_string_name_tuple in row_ids:
-            self.suite_treeview.expand_to_path(rpath)
+            self.treeview.expand_to_path(rpath)
         return False
 
     def _get_user_expanded_row_ids(self):
         """Return a list of user-expanded row point_strings and names."""
         names = []
-        model = self.suite_treeview.get_model()
+        model = self.treeview.get_model()
         if model is None or model.get_iter_first() is None:
             return names
-        self.suite_treeview.map_expanded_rows(self._add_expanded_row, names)
+        self.treeview.map_expanded_rows(self._add_expanded_row, names)
         return names
 
     def _get_warnings(self, key):
@@ -604,7 +830,7 @@ class ScanAppUpdater(threading.Thread):
         warnings.sort()
         return warnings[-5:]
 
-    def clear_stopped_suites(self):
+    def clear_stopped_suites(self, _=None):
         """Clear stopped suite information that may have built up."""
         for key, result in self.suite_info_map.copy().items():
             if KEY_PORT not in result:
@@ -613,7 +839,7 @@ class ScanAppUpdater(threading.Thread):
 
     def clear_warnings(self, host, owner, suite):
         """Marks all presently issued warnings for a suite as read."""
-        self.warning_times[(host, owner, suite)] = time.time()
+        self.warning_times[(host, owner, suite)] = time()
 
     def get_last_n_tasks(self, host, owner, suite, task_state, point_string):
         """Returns a list of the last 'n' tasks with the provided state for
@@ -659,22 +885,20 @@ class ScanAppUpdater(threading.Thread):
         while not self.quit:
             time_for_update = (
                 self.last_update_time is None or
-                time.time() >= self.last_update_time + self.poll_interval
+                time() >= self.last_update_time + self.poll_interval
             )
             if not self._should_force_update and not time_for_update:
-                time.sleep(1)
+                sleep(1)
                 continue
             if self._should_force_update:
                 self._should_force_update = False
             title = self.window.get_title()
             gobject.idle_add(self.window.set_title, title + " (updating)")
-            self.suite_info_map = update_suites_info(
-                self.hosts, self.comms_timeout, self.owner_pattern,
-                self.name_pattern, self.suite_info_map)
-            self.last_update_time = time.time()
+            self.suite_info_map = update_suites_info(self)
+            self.last_update_time = time()
             gobject.idle_add(self.window.set_title, title)
             gobject.idle_add(self.update)
-            time.sleep(1)
+            sleep(1)
 
     def set_hosts(self, new_hosts):
         """Set new hosts."""
@@ -692,8 +916,9 @@ class ScanAppUpdater(threading.Thread):
         group_iters = {}
         for key, suite_info in sorted(self.suite_info_map.items()):
             host, owner, suite = key
-            suite_updated_time = suite_info.get(
-                KEY_UPDATE_TIME, int(time.time()))
+            suite_updated_time = suite_info.get(KEY_UPDATE_TIME)
+            if suite_updated_time is None:
+                suite_updated_time = int(time())
             title = suite_info.get(KEY_TITLE)
             group = suite_info.get(KEY_GROUP)
 
@@ -704,7 +929,7 @@ class ScanAppUpdater(threading.Thread):
 
             # Build up and assign group iters across the various suites
             if (group_iters.get(group) is None and
-                    self.suite_treeview.get_column(
+                    self.treeview.get_column(
                         self.group_column_id).get_visible()):
                 states_text = ""
                 for state, number in sorted(group_counts[group].items()):
@@ -779,6 +1004,6 @@ class ScanAppUpdater(threading.Thread):
                 states_text += '%s %d ' % (state, number)
         return states_text.rstrip()
 
-    def update_now(self):
+    def update_now(self, _=None):
         """Force an update as soon as possible."""
         self._should_force_update = True
diff --git a/lib/cylc/gui/scanutil.py b/lib/cylc/gui/scanutil.py
index 13f577f..34beddf 100644
--- a/lib/cylc/gui/scanutil.py
+++ b/lib/cylc/gui/scanutil.py
@@ -40,14 +40,14 @@ DURATION_EXPIRE_STOPPED = 600.0
 KEY_PORT = "port"
 
 
-def get_scan_menu(suite_keys,
-                  theme_name, set_theme_func,
-                  has_stopped_suites, clear_stopped_suites_func,
-                  scanned_hosts, change_hosts_func,
-                  update_now_func, start_func,
-                  program_name, extra_items=None, owner=None,
-                  is_stopped=False):
-    """Return a right click menu for scan GUIs.
+def get_gpanel_scan_menu(
+        suite_keys, theme_name, set_theme_func, has_stopped_suites,
+        clear_stopped_suites_func, scanned_hosts, change_hosts_func,
+        update_now_func, start_func, program_name, extra_items=None,
+        owner=None, is_stopped=False):
+    """Return a right click menu for the gpanel GUI.
+
+    TODO this used to be for gscan too; simplify now it's only for gpanel?
 
     suite_keys should be a list of (host, owner, suite) tuples (if any).
     theme_name should be the name of the current theme.
@@ -87,8 +87,8 @@ def get_scan_menu(suite_keys,
     for host, owner, suite in suite_keys:
         gcylc_item = gtk.ImageMenuItem("Launch gcylc: %s - %s@%s" % (
             suite.replace('_', '__'), owner, host))
-        img = gtk.image_new_from_stock("gcylc", gtk.ICON_SIZE_MENU)
-        gcylc_item.set_image(img)
+        img_gcylc = gtk.image_new_from_stock("gcylc", gtk.ICON_SIZE_MENU)
+        gcylc_item.set_image(img_gcylc)
         gcylc_item._connect_args = (host, owner, suite)
         gcylc_item.connect(
             "button-press-event",
@@ -107,6 +107,59 @@ def get_scan_menu(suite_keys,
         sep_item.show()
         menu.append(sep_item)
 
+    # Construct a cylc stop item to stop a suite
+    if len(suite_keys) > 1:
+        stoptask_item = gtk.ImageMenuItem('Stop all')
+    else:
+        stoptask_item = gtk.ImageMenuItem('Stop')
+
+    img_stop = gtk.image_new_from_stock(gtk.STOCK_MEDIA_STOP,
+                                        gtk.ICON_SIZE_MENU)
+    stoptask_item.set_image(img_stop)
+    stoptask_item._connect_args = suite_keys, 'stop'
+    stoptask_item.connect("button-press-event",
+                          lambda b, e: call_cylc_command(b._connect_args[0],
+                                                         b._connect_args[1]))
+    stoptask_item.show()
+    menu.append(stoptask_item)
+
+    # Construct a cylc hold item to hold (pause) a suite
+    if len(suite_keys) > 1:
+        holdtask_item = gtk.ImageMenuItem('Hold all')
+    else:
+        holdtask_item = gtk.ImageMenuItem('Hold')
+
+    img_hold = gtk.image_new_from_stock(gtk.STOCK_MEDIA_PAUSE,
+                                        gtk.ICON_SIZE_MENU)
+    holdtask_item.set_image(img_hold)
+    holdtask_item._connect_args = suite_keys, 'hold'
+    holdtask_item.connect("button-press-event",
+                          lambda b, e: call_cylc_command(b._connect_args[0],
+                                                         b._connect_args[1]))
+    menu.append(holdtask_item)
+    holdtask_item.show()
+
+    # Construct a cylc release item to release a paused/stopped suite
+    if len(suite_keys) > 1:
+        unstoptask_item = gtk.ImageMenuItem('Release all')
+    else:
+        unstoptask_item = gtk.ImageMenuItem('Release')
+
+    img_release = gtk.image_new_from_stock(gtk.STOCK_MEDIA_PLAY,
+                                           gtk.ICON_SIZE_MENU)
+    unstoptask_item.set_image(img_release)
+    unstoptask_item._connect_args = suite_keys, 'release'
+    unstoptask_item.connect("button-press-event",
+                            lambda b, e: call_cylc_command(b._connect_args[0],
+                                                           b._connect_args[1]))
+    unstoptask_item.show()
+    menu.append(unstoptask_item)
+
+    # Add another separator
+    sep_item = gtk.SeparatorMenuItem()
+    sep_item.show()
+    menu.append(sep_item)
+
     # Construct theme chooser items (same as cylc.gui.app_main).
     theme_item = gtk.ImageMenuItem('Theme')
     img = gtk.image_new_from_stock(gtk.STOCK_SELECT_COLOR, gtk.ICON_SIZE_MENU)
@@ -177,7 +230,7 @@ def get_scan_menu(suite_keys,
     hosts_item.show()
     hosts_item.connect(
         "button-press-event",
-        lambda b, e: _launch_hosts_dialog(scanned_hosts, change_hosts_func))
+        lambda b, e: launch_hosts_dialog(scanned_hosts, change_hosts_func))
     menu.append(hosts_item)
 
     sep_item = gtk.SeparatorMenuItem()
@@ -191,13 +244,125 @@ def get_scan_menu(suite_keys,
     info_item.show()
     info_item.connect(
         "button-press-event",
-        lambda b, e: _launch_about_dialog(program_name, scanned_hosts)
+        lambda b, e: launch_about_dialog(program_name, scanned_hosts)
     )
     menu.append(info_item)
     return menu
 
 
-def _launch_about_dialog(program_name, hosts):
+def get_scan_menu(suite_keys, toggle_hide_menu_bar):
+    """Return a right click menu for the gscan GUI.
+
+    suite_keys should be a list of (host, owner, suite) tuples (if any).
+    toggle_hide_menu_bar - function to show/hide main menu bar
+
+    """
+    def _add_main_menu_item(menu):
+        sep_item = gtk.SeparatorMenuItem()
+        sep_item.show()
+        menu.append(sep_item)
+        main_menu_item = gtk.ImageMenuItem("toggle main menu (<Alt>m)")
+        img = gtk.image_new_from_stock(gtk.STOCK_INDEX, gtk.ICON_SIZE_MENU)
+        main_menu_item.set_image(img)
+        main_menu_item.connect("button-press-event",
+                               lambda b, e: toggle_hide_menu_bar())
+        main_menu_item.show()
+        menu.append(main_menu_item)
+
+    menu = gtk.Menu()
+
+    if not suite_keys:
+        null_item = gtk.ImageMenuItem("Click on a suite or group")
+        img = gtk.image_new_from_stock(
+            gtk.STOCK_DIALOG_WARNING, gtk.ICON_SIZE_MENU)
+        null_item.set_image(img)
+        null_item.show()
+        menu.append(null_item)
+        _add_main_menu_item(menu)
+        return menu
+
+    # Construct gcylc launcher items for each relevant suite.
+    for host, owner, suite in suite_keys:
+        gcylc_item = gtk.ImageMenuItem("Launch gcylc: %s - %s@%s" % (
+            suite.replace('_', '__'), owner, host))
+        img_gcylc = gtk.image_new_from_stock("gcylc", gtk.ICON_SIZE_MENU)
+        gcylc_item.set_image(img_gcylc)
+        gcylc_item._connect_args = (host, owner, suite)
+        gcylc_item.connect(
+            "button-press-event",
+            lambda b, e: launch_gcylc(b._connect_args))
+        gcylc_item.show()
+        menu.append(gcylc_item)
+
+    sep_item = gtk.SeparatorMenuItem()
+    sep_item.show()
+    menu.append(sep_item)
+
+    # Construct a cylc stop item to stop a suite
+    if len(suite_keys) > 1:
+        stoptask_item = gtk.ImageMenuItem('Stop all...')
+    else:
+        stoptask_item = gtk.ImageMenuItem('Stop...')
+
+    stop_menu = gtk.Menu()
+    stoptask_item.set_submenu(stop_menu)
+    img_stop = gtk.image_new_from_stock(gtk.STOCK_MEDIA_STOP,
+                                        gtk.ICON_SIZE_MENU)
+    stoptask_item.set_image(img_stop)
+
+    for stop_type in ['', '--kill', '--now', '--now --now']:
+        item = gtk.ImageMenuItem('stop %s' % stop_type)
+        img_stop = gtk.image_new_from_stock(gtk.STOCK_MEDIA_STOP,
+                                            gtk.ICON_SIZE_MENU)
+        item.set_image(img_stop)
+        stop_menu.append(item)
+        item._connect_args = suite_keys, 'stop %s' % stop_type
+        item.connect(
+            "button-press-event",
+            lambda b, e: call_cylc_command(b._connect_args[0],
+                                           b._connect_args[1]))
+        item.show()
+
+    stoptask_item.show()
+    menu.append(stoptask_item)
+
+    # Construct a cylc hold item to hold (pause) a suite
+    if len(suite_keys) > 1:
+        holdtask_item = gtk.ImageMenuItem('Hold all')
+    else:
+        holdtask_item = gtk.ImageMenuItem('Hold')
+
+    img_hold = gtk.image_new_from_stock(gtk.STOCK_MEDIA_PAUSE,
+                                        gtk.ICON_SIZE_MENU)
+    holdtask_item.set_image(img_hold)
+    holdtask_item._connect_args = suite_keys, 'hold'
+    holdtask_item.connect("button-press-event",
+                          lambda b, e: call_cylc_command(b._connect_args[0],
+                                                         b._connect_args[1]))
+    menu.append(holdtask_item)
+    holdtask_item.show()
+
+    # Construct a cylc release item to release a paused/stopped suite
+    if len(suite_keys) > 1:
+        unstoptask_item = gtk.ImageMenuItem('Release all')
+    else:
+        unstoptask_item = gtk.ImageMenuItem('Release')
+
+    img_release = gtk.image_new_from_stock(gtk.STOCK_MEDIA_PLAY,
+                                           gtk.ICON_SIZE_MENU)
+    unstoptask_item.set_image(img_release)
+    unstoptask_item._connect_args = suite_keys, 'release'
+    unstoptask_item.connect("button-press-event",
+                            lambda b, e: call_cylc_command(b._connect_args[0],
+                                                           b._connect_args[1]))
+    unstoptask_item.show()
+    menu.append(unstoptask_item)
+    _add_main_menu_item(menu)
+
+    return menu
+
+
+def launch_about_dialog(program_name, hosts):
     """Launch a modified version of the app_main.py About dialog."""
     hosts_text = "Hosts monitored: " + ", ".join(hosts)
     comments_text = hosts_text
@@ -216,7 +381,7 @@ def _launch_about_dialog(program_name, hosts):
     about.destroy()
 
 
-def _launch_hosts_dialog(existing_hosts, change_hosts_func):
+def launch_hosts_dialog(existing_hosts, change_hosts_func):
     """Launch a dialog for configuring the suite hosts to scan.
 
     Arguments:
@@ -249,12 +414,8 @@ def _launch_hosts_dialog(existing_hosts, change_hosts_func):
     dialog.destroy()
 
 
-def launch_gcylc(key):
-    """Launch gcylc for a given suite and host."""
-    host, owner, suite = key
-    args = ["--host=" + host, "--user=" + owner, suite]
-
-    # Get version of suite
+def get_suite_version(args):
+    """Gets the suite version given the host, owner, and suite arguments"""
     f_null = open(os.devnull, "w")
     if cylc.flags.debug:
         stderr = sys.stderr
@@ -266,6 +427,18 @@ def launch_gcylc(key):
     suite_version = proc.communicate()[0].strip()
     proc.wait()
 
+    return suite_version
+
+
+def launch_gcylc(key):
+    """Launch gcylc for a given suite and host."""
+    host, owner, suite = key
+    args = ["--host=" + host, "--user=" + owner, suite]
+
+    # Get version of suite (logic extracted into get_suite_version())
+    f_null = open(os.devnull, "w")
+    suite_version = get_suite_version(args)
+
     # Run correct version of "cylc gui", provided that "admin/cylc-wrapper" is
     # installed.
     env = None
@@ -283,42 +456,100 @@ def launch_gcylc(key):
         Popen(["nohup"] + command, env=env, stdout=stdout, stderr=stderr)
 
 
-def update_suites_info(
-        hosts=None, timeout=None, owner_pattern=None, name_pattern=None,
-        prev_results=None):
+def call_cylc_command(keys, command_id):
+    """Calls one of the Cylc commands (such as 'stop', 'hold', etc...).
+
+    Will accept either a single tuple for a key, or a list of keys.
+    See the examples below. If you pass it a list of keys, it will
+    iterate and call the command_id on each suite (key) it is given.
+
+    Args:
+        keys (tuple or list of tuples): Key(s) of (host, owner, suite)
+        command_id (str): A string giving the Cylc command.
+
+    Example:
+        call_cylc_command(keys, "stop")
+        call_cylc_command((host, owner, suite), "hold")
+        call_cylc_command([(host, owner, suite),
+                           (host, owner, suite),
+                           (host, owner, suite)], "hold")
+    """
+
+    if not isinstance(keys, list):
+        keys = [keys]
+
+    for key in keys:
+        host, owner, suite = key
+        args = ["--host=" + host, "--user=" + owner, suite]
+
+        # Get version of suite
+        f_null = open(os.devnull, "w")
+        suite_version = get_suite_version(args)
+
+        env = None
+        if suite_version != CYLC_VERSION:
+            env = dict(os.environ)
+            env["CYLC_VERSION"] = suite_version
+        command = ["cylc"] + command_id.split() + args
+
+        if cylc.flags.debug:
+            stdout = sys.stdout
+            stderr = sys.stderr
+            Popen(command, env=env, stdout=stdout, stderr=stderr)
+        else:
+            stdout = f_null
+            stderr = stdout
+            Popen(["nohup"] + command, env=env, stdout=stdout, stderr=stderr)
+
+
+def update_suites_info(updater):
     """Return mapping of suite info by host, owner and suite name.
 
-    hosts - hosts to scan, or the default set in the site/user global.rc
-    timeout - communication timeout
-    owner_pattern - return only suites with owners matching this compiled re
-    name_pattern - return only suites with names matching this compiled re
-    prev_results - previous results returned by this function
+    updater - gscan or gpanel updater.
 
     Return a dict of the form: {(host, owner, name): suite_info, ...}
 
     where each "suite_info" is a dict with keys:
         KEY_GROUP - group name of suite
         KEY_OWNER - suite owner name
-        KEY_PORT - suite port, for runninig suites only
+        KEY_PORT - suite port, for running suites only
         KEY_STATES - suite state
         KEY_STATES:cycle - states by cycle
         KEY_TASKS_BY_STATE - tasks by state
         KEY_TITLE - suite title
         KEY_UPDATE_TIME - last update time of suite
     """
+    # Compulsory attributes from updater
+    # hosts - hosts to scan, or the default set in the site/user global.rc
+    # owner_pattern - return only suites with owners matching this compiled re
+    # prev_results - previous results returned by this function
+    hosts = updater.hosts
+    owner_pattern = updater.owner_pattern
+    prev_results = updater.suite_info_map
+    # Optional attributes from updater
+    # timeout - communication timeout
+    timeout = getattr(updater, "comms_timeout", None)
+    # name_pattern - return only suites with names matching this compiled re
+    name_pattern = getattr(updater, "name_pattern", None)
+    # Scan
     results = {}
-    for host, port, result in scan_all(hosts=hosts, timeout=timeout):
+    for host, port, result in scan_all(
+            hosts=hosts, timeout=timeout, updater=updater):
+        if updater.quit:
+            break
         if (name_pattern and not name_pattern.match(result[KEY_NAME]) or
                 owner_pattern and not owner_pattern.match(result[KEY_OWNER])):
             continue
         try:
             result[KEY_PORT] = port
-            result[KEY_UPDATE_TIME] = int(float(result[KEY_UPDATE_TIME]))
             results[(host, result[KEY_OWNER], result[KEY_NAME])] = result
+            result[KEY_UPDATE_TIME] = int(float(result[KEY_UPDATE_TIME]))
         except (KeyError, TypeError, ValueError):
             pass
     expire_threshold = time() - DURATION_EXPIRE_STOPPED
     for (host, owner, name), prev_result in prev_results.items():
+        if updater.quit:
+            break
         if ((host, owner, name) in results or
                 host not in hosts or
                 owner_pattern and not owner_pattern.match(owner) or
diff --git a/lib/cylc/gui/tailer.py b/lib/cylc/gui/tailer.py
index 056888c..c5b5618 100644
--- a/lib/cylc/gui/tailer.py
+++ b/lib/cylc/gui/tailer.py
@@ -79,8 +79,7 @@ class Tailer(threading.Thread):
                 owner, host = user_at_host.split("@", 1)
             else:
                 owner, host = (None, user_at_host)
-            ssh = str(GLOBAL_CFG.get_host_item(
-                "remote shell template", host, owner))
+            ssh = str(GLOBAL_CFG.get_host_item("ssh command", host, owner))
             command = shlex.split(ssh) + ["-n", user_at_host]
             cmd_tmpl = str(GLOBAL_CFG.get_host_item(
                 "remote tail command template", host, owner))
diff --git a/lib/cylc/gui/updater.py b/lib/cylc/gui/updater.py
index ee19093..3bce52c 100644
--- a/lib/cylc/gui/updater.py
+++ b/lib/cylc/gui/updater.py
@@ -30,7 +30,7 @@ from cylc.dump import get_stop_state_summary
 from cylc.gui.cat_state import cat_state
 from cylc.network import ConnectionError, ConnectionDeniedError
 from cylc.network.suite_state_client import (
-    StateSummaryClient, SuiteStillInitialisingError, get_suite_status_string,
+    StateSummaryClient, get_suite_status_string,
     SUITE_STATUS_NOT_CONNECTED, SUITE_STATUS_CONNECTED,
     SUITE_STATUS_INITIALISING, SUITE_STATUS_STOPPED, SUITE_STATUS_STOPPING
 )
@@ -136,7 +136,7 @@ class Updater(threading.Thread):
         self.cfg = app.cfg
         self.info_bar = app.info_bar
 
-        self._summary_update_time = None
+        self.summary_update_time = None
         self.err_log_lines = []
         self._err_num_log_lines = 10
         self.err_log_size = 0
@@ -195,7 +195,16 @@ class Updater(threading.Thread):
         try:
             self.daemon_version = self.suite_info_client.get_info(
                 'get_cylc_version')
-        except (ConnectionError) as exc:
+        except ConnectionDeniedError as exc:
+            if cylc.flags.debug:
+                traceback.print_exc()
+            if not self.connect_fail_warned:
+                self.connect_fail_warned = True
+                gobject.idle_add(
+                    self.warn,
+                    "ERROR: %s\n\nIncorrect suite passphrase?" % exc)
+            return
+        except ConnectionError as exc:
             # Failed to (re)connect
             # Suite not running, starting up or just stopped.
             if cylc.flags.debug:
@@ -220,15 +229,6 @@ class Updater(threading.Thread):
                 self.info_bar.set_update_time,
                 update_time_str, self.info_bar.DISCONNECTED_TEXT)
             return
-        except ConnectionDeniedError as exc:
-            if cylc.flags.debug:
-                traceback.print_exc()
-            if not self.connect_fail_warned:
-                self.connect_fail_warned = True
-                gobject.idle_add(
-                    self.warn,
-                    "ERROR: %s\n\nIncorrect suite passphrase?" % exc)
-            return
         except Exception as exc:
             if cylc.flags.debug:
                 traceback.print_exc()
@@ -291,28 +291,26 @@ class Updater(threading.Thread):
 
     def retrieve_summary_update_time(self):
         """Retrieve suite summary update time; return True if changed."""
-        summary_update_time = float(
-            self.state_summary_client.get_suite_state_summary_update_time()
-        )
-        if (summary_update_time is None or
-                self._summary_update_time is None or
-                summary_update_time != self._summary_update_time):
-            self._summary_update_time = summary_update_time
-            return True
-        return False
+        prev_summary_update_time = self.summary_update_time
+        self.summary_update_time = (
+            self.state_summary_client.get_suite_state_summary_update_time())
+        if self.summary_update_time is None:
+            self.set_status(SUITE_STATUS_INITIALISING)
+        else:
+            self.summary_update_time = float(self.summary_update_time)
+        return prev_summary_update_time != self.summary_update_time
 
     def retrieve_state_summaries(self):
         """Retrieve suite summary."""
-        ret = self.state_summary_client.get_suite_state_summary()
         glbl, states, fam_states = (
             self.state_summary_client.get_suite_state_summary())
-        self.ancestors = self.suite_info_client.get_info(
-            'get_first_parent_ancestors')
-        self.ancestors_pruned = self.suite_info_client.get_info(
-            'get_first_parent_ancestors', pruned=True)
-        self.descendants = self.suite_info_client.get_info(
-            'get_first_parent_descendants')
-        self.all_families = self.suite_info_client.get_info('get_all_families')
+
+        (self.ancestors, self.ancestors_pruned, self.descendants,
+            self.all_families) = self.suite_info_client.get_info(
+                {'function': 'get_first_parent_ancestors'},
+                {'function': 'get_first_parent_ancestors', 'pruned': True},
+                {'function': 'get_first_parent_descendants'},
+                {'function': 'get_all_families'})
 
         self.mode = glbl['run_mode']
 
@@ -341,7 +339,7 @@ class Updater(threading.Thread):
         self.connected = False
         self.set_status(SUITE_STATUS_STOPPED)
         self.connect_schd.start()
-        self._summary_update_time = None
+        self.summary_update_time = None
         self.state_summary = {}
         self.full_state_summary = {}
         self.fam_state_summary = {}
@@ -401,18 +399,8 @@ class Updater(threading.Thread):
         try:
             err_log_changed = self.retrieve_err_log()
             summaries_changed = self.retrieve_summary_update_time()
-            if summaries_changed:
+            if self.summary_update_time is not None and summaries_changed:
                 self.retrieve_state_summaries()
-        except SuiteStillInitialisingError:
-            # Connection achieved but state summary data not available yet.
-            if cylc.flags.debug:
-                print >> sys.stderr, "  connected, suite initializing ..."
-            self.set_status(SUITE_STATUS_INITIALISING)
-            if self.info_bar.prog_bar_can_start():
-                gobject.idle_add(
-                    self.info_bar.prog_bar_start, SUITE_STATUS_INITIALISING)
-                self.info_bar.set_state([])
-            return False
         except Exception as exc:
             if self.status == SUITE_STATUS_STOPPING:
                 # Expected stop: prevent the reconnection warning dialog.
@@ -427,23 +415,17 @@ class Updater(threading.Thread):
         else:
             # Got suite data.
             self.version_mismatch_warned = False
-            if (self.status == SUITE_STATUS_STOPPING and
-                    self.info_bar.prog_bar_can_start()):
-                gobject.idle_add(
-                    self.info_bar.prog_bar_start, self.status)
-            if (self.is_reloading and
-                    self.info_bar.prog_bar_can_start()):
-                gobject.idle_add(
-                    self.info_bar.prog_bar_start, "reloading")
-            if (self.info_bar.prog_bar_active() and
-                    not self.is_reloading and
-                    self.status not in [SUITE_STATUS_STOPPING,
-                                        SUITE_STATUS_INITIALISING]):
+            status_str = None
+            if self.status in [SUITE_STATUS_INITIALISING,
+                               SUITE_STATUS_STOPPING]:
+                status_str = self.status
+            elif self.is_reloading:
+                status_str = "reloading"
+            if status_str is None:
                 gobject.idle_add(self.info_bar.prog_bar_stop)
-            if summaries_changed or err_log_changed:
-                return True
-            else:
-                return False
+            elif self.info_bar.prog_bar_can_start():
+                gobject.idle_add(self.info_bar.prog_bar_start, status_str)
+            return summaries_changed or err_log_changed
 
     def filter_by_name(self, states):
         """Filter by name string."""
@@ -509,8 +491,7 @@ class Updater(threading.Thread):
         self.info_bar.set_mode(self.mode)
         self.info_bar.set_update_time(self.update_time_str)
         self.info_bar.set_status(self.status)
-        self.info_bar.set_log("\n".join(self.err_log_lines),
-                              self.err_log_size)
+        self.info_bar.set_log("\n".join(self.err_log_lines), self.err_log_size)
         return False
 
     def stop(self):
diff --git a/lib/cylc/gui/updater_dot.py b/lib/cylc/gui/updater_dot.py
index c2e82f0..8edb1a3 100644
--- a/lib/cylc/gui/updater_dot.py
+++ b/lib/cylc/gui/updater_dot.py
@@ -78,6 +78,7 @@ class DotUpdater(threading.Thread):
         self.task_list = []
         self.family_tree = {}
         self.expanded_rows = []
+        self.selected_rows = []
 
         # generate task state icons
         dotm = DotMaker(theme, size=dot_size)
@@ -303,9 +304,47 @@ class DotUpdater(threading.Thread):
                 self.expanded_rows.append(self.led_treestore.get_value(
                     self.led_treestore.get_iter(row), 0))
 
+    def _get_selected_rows(self):
+        """Make a note of currently selected rows.
+
+        Populates self.selected_rows with the value of the first column of all
+        selected rows.
+
+        """
+        self.selected_rows = []
+        _, selected_paths = self.led_treeview.get_selection(
+        ).get_selected_rows()
+        model = self.led_treeview.get_model()
+        for path in selected_paths:
+            self.selected_rows.append(model.get_value(model.get_iter(path), 0))
+
+    @staticmethod
+    def _reselect_row(model, _, iter_, (selection, selected_rows,)):
+        """Select rows if they are referenced by selected_rows.
+
+        If the value of the first column of a row matches a value in
+        `selected_rows` then `selection` will be updated to include this row.
+
+        Warning: This method has not been tested with multiple selection.
+
+        """
+        if model.get_value(iter_, 0) in selected_rows:
+            selection.select_iter(iter_)
+
+    def _set_selected_rows(self):
+        """Re-Selects previously selected rows where possible.
+
+        Uses self.selected_rows to determine which rows to select.
+
+        """
+        selection = self.led_treeview.get_selection()
+        selection.unselect_all()
+        model = self.led_treeview.get_model()
+        model.foreach(self._reselect_row, (selection, self.selected_rows,))
+
     def ledview_widgets(self):
-        # Make note of expanded rows.
-        self._get_expanded_rows()
+        self._get_expanded_rows()  # Make a note of expanded rows.
+        self._get_selected_rows()  # Make a note of selected rows.
 
         if not self.should_transpose_view:
             types = [str] + [gtk.gdk.Pixbuf] * len(self.point_strings)
@@ -460,6 +499,11 @@ class DotUpdater(threading.Thread):
             self._update_gui_transpose(tasks_by_point_string, state_summary)
 
         self.led_treeview.columns_autosize()
+
+        if self.is_transposed == self.should_transpose_view:
+            # Only select rows if we have not changed view mode.
+            self._set_selected_rows()
+
         return False
 
     def _update_gui_transpose(self, tasks_by_point_string, state_summary):
diff --git a/lib/cylc/gui/updater_graph.py b/lib/cylc/gui/updater_graph.py
index ef442b6..c1428cd 100644
--- a/lib/cylc/gui/updater_graph.py
+++ b/lib/cylc/gui/updater_graph.py
@@ -100,10 +100,6 @@ class GraphUpdater(threading.Thread):
         # empty graphw object:
         self.graphw = CGraphPlain(self.cfg.suite)
 
-        # TODO - handle failure to get a remote proxy in reconnect()
-
-        self.graph_warned = {}
-
         # lists of nodes to newly group or ungroup (not of all currently
         # grouped and ungrouped nodes - still held server side)
         self.group = []
diff --git a/lib/cylc/job.sh b/lib/cylc/job.sh
index 7aa4ed8..f7d1a5b 100644
--- a/lib/cylc/job.sh
+++ b/lib/cylc/job.sh
@@ -53,8 +53,7 @@ cylc__job__main() {
     for signal_name in ${CYLC_VACATION_SIGNALS:-}; do
         trap "cylc__job__trap_vacation ${signal_name}" "${signal_name}"
     done
-    set -u
-    set -o pipefail
+    set -euo pipefail
     # Export CYLC_ suite and task environment variables
     cylc__job__inst__cylc_env
     # Write task job self-identify
@@ -200,3 +199,52 @@ cylc__job__trap_vacation() {
         "Task job script vacated by signal ${trapped_signal}" || true
     exit 1
 }
+
+###############################################################################
+# Handle dummy job cycle point specific success or failure.
+# Globals:
+#   CYLC_TASK_TRY_NUMBER
+#   CYLC_TASK_CYCLE_POINT
+#   CYLC_CYCLING_MODE
+# Arguments:
+#   Fail try 1 only (T/F)
+#   Fail cycle points:
+#     'all' - fail all cycle points
+#     'P1 P2 P3 ...' - fail these cycle points
+# Returns:
+#   0 - dummy job succeed
+#   1 - dummy job fail
+cylc__job__dummy_result() {
+    typeset fail_try_1_only="$1"; shift
+    typeset fail_cycle_points="$@"
+    typeset fail_this_point=false
+    if [[ "${fail_cycle_points}" == *all* ]]; then
+        # Fail all points.
+        fail_this_point=true
+    else
+        # Fail some or no points.
+        if [[ "${CYLC_CYCLING_MODE}" == "integer" ]]; then
+            for POINT in ${fail_cycle_points}; do
+                if ((CYLC_TASK_CYCLE_POINT == POINT)); then
+                    fail_this_point=true
+                    break
+                fi
+            done
+        else
+            for POINT in ${fail_cycle_points}; do
+                if $(cylc cyclepoint --equal="$POINT"); then
+                    fail_this_point=true
+                    break
+                fi
+            done
+        fi
+    fi
+    if ! $fail_this_point || \
+            ($fail_try_1_only && ((CYLC_TASK_TRY_NUMBER > 1)) ); then
+        echo "(dummy job succeed)"
+        return 0
+    else
+        >&2 echo "(dummy job fail)"
+        return 1
+    fi
+}
diff --git a/lib/cylc/job_file.py b/lib/cylc/job_file.py
index 5d72e24..dc5ca41 100644
--- a/lib/cylc/job_file.py
+++ b/lib/cylc/job_file.py
@@ -22,26 +22,18 @@ import re
 import stat
 from subprocess import Popen, PIPE
 
-from cylc.batch_sys_manager import BATCH_SYS_MANAGER
+from cylc.batch_sys_manager import BatchSysManager
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 import cylc.flags
 
 
-class JobFile(object):
+class JobFileWriter(object):
 
     """Write task job files."""
 
-    _INSTANCE = None
-
-    @classmethod
-    def get_inst(cls):
-        """Return a unique instance of this class."""
-        if cls._INSTANCE is None:
-            cls._INSTANCE = cls()
-        return cls._INSTANCE
-
     def __init__(self):
         self.suite_env = {}
+        self.batch_sys_mgr = BatchSysManager()
 
     def set_suite_env(self, suite_env):
         """Configure suite environment for all job files."""
@@ -76,28 +68,47 @@ class JobFile(object):
                 self._write_environment_2(handle, job_conf)
                 self._write_script(handle, job_conf)
                 self._write_epilogue(handle, job_conf)
-            # check syntax
+        except IOError as exc:
+            # Remove temporary file
             try:
-                proc = Popen([job_conf['shell'], '-n', tmp_name], stderr=PIPE)
-            except OSError as exc:
-                if exc.filename is None:
-                    exc.filename = job_conf['shell']
-                raise
-            else:
-                if proc.wait():
-                    raise RuntimeError(proc.communicate()[1])
-            # make it executable
-            mode = (
-                os.stat(tmp_name).st_mode |
-                stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
-            os.chmod(tmp_name, mode)
-            os.rename(tmp_name, local_job_file_path)
-        finally:
-            # don't leave behind any bad file
+                os.unlink(tmp_name)
+            except OSError:
+                pass
+            raise exc
+        # check syntax
+        try:
+            proc = Popen([job_conf['shell'], '-n', tmp_name], stderr=PIPE)
+        except OSError as exc:
+            # Popen has a bad habit of not telling you anything if it fails
+            # to run the executable.
+            if exc.filename is None:
+                exc.filename = job_conf['shell']
+            # Remove temporary file
             try:
                 os.unlink(tmp_name)
             except OSError:
                 pass
+            raise exc
+        else:
+            if proc.wait():
+                # This will leave behind the temporary file,
+                # which is useful for debugging syntax errors, etc.
+                raise RuntimeError(proc.communicate()[1])
+        # Make job file executable
+        mode = (
+            os.stat(tmp_name).st_mode |
+            stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
+        os.chmod(tmp_name, mode)
+        os.rename(tmp_name, local_job_file_path)
+
+    @staticmethod
+    def _check_script_value(value):
+        """Return True if script has any executable statements."""
+        for line in value.splitlines():
+            line = line.strip()
+            if line and not line.startswith("#"):
+                return True
+        return False
 
     @staticmethod
     def _get_derived_host_item(job_conf, key):
@@ -119,40 +130,38 @@ class JobFile(object):
         for prefix, value in [
                 ("# Suite: ", job_conf['suite_name']),
                 ("# Task: ", job_conf['task_id']),
-                (BATCH_SYS_MANAGER.LINE_PREFIX_JOB_LOG_DIR, job_conf['job_d']),
-                (BATCH_SYS_MANAGER.LINE_PREFIX_BATCH_SYS_NAME,
+                (BatchSysManager.LINE_PREFIX_JOB_LOG_DIR, job_conf['job_d']),
+                (BatchSysManager.LINE_PREFIX_BATCH_SYS_NAME,
                  job_conf['batch_system_name']),
-                (BATCH_SYS_MANAGER.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL,
+                (BatchSysManager.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL,
                  job_conf['batch_submit_command_template']),
-                (BATCH_SYS_MANAGER.LINE_PREFIX_EXECUTION_TIME_LIMIT,
+                (BatchSysManager.LINE_PREFIX_EXECUTION_TIME_LIMIT,
                  job_conf['execution_time_limit'])]:
             if value:
                 handle.write("\n%s%s" % (prefix, value))
 
-    @staticmethod
-    def _write_directives(handle, job_conf):
+    def _write_directives(self, handle, job_conf):
         """Job directives."""
-        lines = BATCH_SYS_MANAGER.format_directives(job_conf)
+        lines = self.batch_sys_mgr.format_directives(job_conf)
         if lines:
             handle.write('\n\n# DIRECTIVES:')
             for line in lines:
                 handle.write('\n' + line)
 
-    @classmethod
-    def _write_prelude(cls, handle, job_conf):
+    def _write_prelude(self, handle, job_conf):
         """Job script prelude."""
         # Environment variables for prelude
         handle.write("\nexport CYLC_DIR='%s'" % (os.environ['CYLC_DIR']))
         if cylc.flags.debug:
             handle.write("\nexport CYLC_DEBUG='true'")
-        for key in ['CYLC_VERSION'] + cls._get_host_item(
+        for key in ['CYLC_VERSION'] + self._get_host_item(
                 job_conf, 'copyable environment variables'):
             if key in os.environ:
                 handle.write("\nexport %s='%s'" % (key, os.environ[key]))
         # Variables for traps
         handle.write("\nCYLC_FAIL_SIGNALS='%s'" % " ".join(
-            BATCH_SYS_MANAGER.get_fail_signals(job_conf)))
-        vacation_signals_str = BATCH_SYS_MANAGER.get_vacation_signal(job_conf)
+            self.batch_sys_mgr.get_fail_signals(job_conf)))
+        vacation_signals_str = self.batch_sys_mgr.get_vacation_signal(job_conf)
         if vacation_signals_str:
             handle.write("\nCYLC_VACATION_SIGNALS='%s'" % vacation_signals_str)
 
@@ -265,21 +274,21 @@ class JobFile(object):
         """Global Init-script."""
         global_init_script = cls._get_host_item(
             job_conf, 'global init-script')
-        if global_init_script:
+        if cls._check_script_value(global_init_script):
             handle.write("\n\ncylc__job__inst__global_init_script() {")
             handle.write("\n# GLOBAL-INIT-SCRIPT:\n")
             handle.write(global_init_script)
             handle.write("\n}")
 
-    @staticmethod
-    def _write_script(handle, job_conf):
+    @classmethod
+    def _write_script(cls, handle, job_conf):
         """Write (*-)script in functions.
 
         init-script, env-script, err-script, pre-script, script, post-script
         """
         for prefix in ['init-', 'env-', 'err-', 'pre-', '', 'post-']:
             value = job_conf[prefix + 'script']
-            if value:
+            if cls._check_script_value(value):
                 handle.write("\n\ncylc__job__inst__%sscript() {" % (
                     prefix.replace("-", "_")))
                 handle.write("\n# %sSCRIPT:\n%s" % (
@@ -291,4 +300,4 @@ class JobFile(object):
         """Write epilogue."""
         handle.write('\n\n. "${CYLC_DIR}/lib/cylc/job.sh"\ncylc__job__main')
         handle.write("\n\n%s%s\n" % (
-            BATCH_SYS_MANAGER.LINE_PREFIX_EOF, job_conf['job_d']))
+            BatchSysManager.LINE_PREFIX_EOF, job_conf['job_d']))
diff --git a/lib/cylc/job_host.py b/lib/cylc/job_host.py
deleted file mode 100644
index 3346d3d..0000000
--- a/lib/cylc/job_host.py
+++ /dev/null
@@ -1,207 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2017 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""Manage a remote job host."""
-
-import os
-from pipes import quote
-from subprocess import Popen, PIPE
-import shlex
-from time import sleep, time
-from uuid import uuid4
-
-from cylc.cfgspec.globalcfg import GLOBAL_CFG
-from cylc.owner import USER
-from cylc.suite_logging import ERR, LOG
-from cylc.suite_srv_files_mgr import SuiteSrvFilesManager
-
-
-class RemoteJobHostInitError(Exception):
-    """Cannot initialise suite run directory of remote job host."""
-
-    MSG_INIT = "%s: initialisation did not complete:\n"  # %s user_at_host
-    MSG_TIDY = "%s: clean up did not complete:\n"  # %s user_at_host
-
-    def __str__(self):
-        msg, user_at_host, cmd_str, ret_code, out, err = self.args
-        ret = (msg + "COMMAND FAILED (%d): %s\n") % (
-            user_at_host, ret_code, cmd_str)
-        for label, item in ("STDOUT", out), ("STDERR", err):
-            if item:
-                for line in item.splitlines(True):  # keep newline chars
-                    ret += "COMMAND %s: %s" % (label, line)
-        return ret
-
-
-class RemoteJobHostManager(object):
-    """Manage a remote job host."""
-
-    _INSTANCE = None
-
-    @classmethod
-    def get_inst(cls):
-        """Return a singleton instance of this class."""
-        if cls._INSTANCE is None:
-            cls._INSTANCE = cls()
-        return cls._INSTANCE
-
-    def __init__(self):
-        self.initialised = {}  # {(user, host): should_unlink, ...}
-        self.single_task_mode = False
-        self.suite_srv_files_mgr = SuiteSrvFilesManager()
-
-    def init_suite_run_dir(self, reg, host, owner):
-        """Initialise suite run dir on a user at host.
-
-        Create SUITE_RUN_DIR/log/job/ if necessary.
-        Install suite contact environment file.
-        Install suite python modules.
-
-        Raise RemoteJobHostInitError if initialisation cannot complete.
-
-        """
-        if host is None:
-            host = 'localhost'
-        if ((host, owner) in [('localhost', None), ('localhost', USER)] or
-                (host, owner) in self.initialised or self.single_task_mode):
-            return
-        user_at_host = host
-        if owner:
-            user_at_host = owner + '@' + host
-
-        r_suite_run_dir = GLOBAL_CFG.get_derived_host_item(
-            reg, 'suite run directory', host, owner)
-        r_log_job_dir = GLOBAL_CFG.get_derived_host_item(
-            reg, 'suite job log directory', host, owner)
-        r_suite_srv_dir = os.path.join(
-            r_suite_run_dir, self.suite_srv_files_mgr.DIR_BASE_SRV)
-
-        # Create a UUID file in the service directory.
-        # If remote host has the file in its service directory, we can assume
-        # that the remote host has a shared file system with the suite host.
-        ssh_tmpl = GLOBAL_CFG.get_host_item(
-            'remote shell template', host, owner)
-        uuid_str = str(uuid4())
-        uuid_fname = os.path.join(
-            self.suite_srv_files_mgr.get_suite_srv_dir(reg), uuid_str)
-        try:
-            open(uuid_fname, 'wb').close()
-            proc = Popen(
-                shlex.split(ssh_tmpl) + [
-                    '-n', user_at_host,
-                    'test', '-e', os.path.join(r_suite_srv_dir, uuid_str)],
-                stdout=PIPE, stderr=PIPE)
-            if proc.wait() == 0:
-                # Initialised, but no need to tidy up
-                self.initialised[(host, owner)] = False
-                return
-        finally:
-            try:
-                os.unlink(uuid_fname)
-            except OSError:
-                pass
-
-        cmds = []
-        # Command to create suite directory structure on remote host.
-        cmds.append(shlex.split(ssh_tmpl) + [
-            '-n', user_at_host,
-            'mkdir', '-p',
-            r_suite_run_dir, r_log_job_dir, r_suite_srv_dir])
-        # Command to copy contact and authentication files to remote host.
-        # Note: no need to do this if task communication method is "poll".
-        should_unlink = GLOBAL_CFG.get_host_item(
-            'task communication method', host, owner) != "poll"
-        if should_unlink:
-            scp_tmpl = GLOBAL_CFG.get_host_item(
-                'remote copy template', host, owner)
-            cmds.append(shlex.split(scp_tmpl) + [
-                '-p',
-                self.suite_srv_files_mgr.get_contact_file(reg),
-                self.suite_srv_files_mgr.get_auth_item(
-                    self.suite_srv_files_mgr.FILE_BASE_PASSPHRASE, reg),
-                self.suite_srv_files_mgr.get_auth_item(
-                    self.suite_srv_files_mgr.FILE_BASE_SSL_CERT, reg),
-                user_at_host + ':' + r_suite_srv_dir + '/'])
-        # Command to copy python library to remote host.
-        suite_run_py = os.path.join(
-            GLOBAL_CFG.get_derived_host_item(reg, 'suite run directory'),
-            'python')
-        if os.path.isdir(suite_run_py):
-            cmds.append(shlex.split(scp_tmpl) + [
-                '-pr',
-                suite_run_py, user_at_host + ':' + r_suite_run_dir + '/'])
-        # Run commands in sequence.
-        for cmd in cmds:
-            proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
-            out, err = proc.communicate()
-            if proc.wait():
-                raise RemoteJobHostInitError(
-                    RemoteJobHostInitError.MSG_INIT,
-                    user_at_host, ' '.join([quote(item) for item in cmd]),
-                    proc.returncode, out, err)
-        self.initialised[(host, owner)] = should_unlink
-        LOG.info('Initialised %s:%s' % (user_at_host, r_suite_run_dir))
-
-    def unlink_suite_contact_files(self, reg):
-        """Remove suite contact files from initialised hosts.
-
-        This is called on shutdown, so we don't want anything to hang.
-        Terminate any incomplete SSH commands after 10 seconds.
-        """
-        # Issue all SSH commands in parallel
-        procs = {}
-        for (host, owner), should_unlink in self.initialised.items():
-            if not should_unlink:
-                continue
-            user_at_host = host
-            if owner:
-                user_at_host = owner + '@' + host
-            ssh_tmpl = GLOBAL_CFG.get_host_item(
-                'remote shell template', host, owner)
-            r_suite_contact_file = os.path.join(
-                GLOBAL_CFG.get_derived_host_item(
-                    reg, 'suite run directory', host, owner),
-                SuiteSrvFilesManager.DIR_BASE_SRV,
-                SuiteSrvFilesManager.FILE_BASE_CONTACT)
-            cmd = shlex.split(ssh_tmpl) + [
-                '-n', user_at_host, 'rm', '-f', r_suite_contact_file]
-            procs[user_at_host] = (cmd, Popen(cmd, stdout=PIPE, stderr=PIPE))
-        # Wait for commands to complete for a max of 10 seconds
-        timeout = time() + 10.0
-        while procs and time() < timeout:
-            for user_at_host, (cmd, proc) in procs.copy().items():
-                if proc.poll() is None:
-                    continue
-                del procs[user_at_host]
-                out, err = proc.communicate()
-                if proc.wait():
-                    ERR.warning(RemoteJobHostInitError(
-                        RemoteJobHostInitError.MSG_TIDY,
-                        user_at_host, ' '.join([quote(item) for item in cmd]),
-                        proc.returncode, out, err))
-        # Terminate any remaining commands
-        for user_at_host, (cmd, proc) in procs.items():
-            try:
-                proc.terminate()
-            except OSError:
-                pass
-            out, err = proc.communicate()
-            if proc.wait():
-                ERR.warning(RemoteJobHostInitError(
-                    RemoteJobHostInitError.MSG_TIDY,
-                    user_at_host, ' '.join([quote(item) for item in cmd]),
-                    proc.returncode, out, err))
diff --git a/lib/cylc/message_output.py b/lib/cylc/message_output.py
deleted file mode 100644
index cc560ef..0000000
--- a/lib/cylc/message_output.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2017 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import re
-from cycling.loader import get_interval, get_interval_cls
-from task_trigger import get_message_offset
-
-
-class MessageOutput(object):
-    """
-    A task message output.
-
-    Used to generate an output string for a message trigger at a cycle point.
-
-    TODO - these can be plain strings once the deprecated cycle point offset
-    placeholders are removed from cylc (see GitHub #1761).
-
-    """
-
-    def __init__(self, msg, base_interval=None):
-        self.msg = msg
-        self.msg_offset = get_message_offset(msg, base_interval)
-
-    def get_string(self, point):
-        """Return the message string for this cycle point.
-
-        Placeholders are replaced with the actual cycle point offset.
-
-        """
-        new_point = point
-        if self.msg_offset:
-            new_point = point + self.msg_offset
-        return re.sub('\[.*\]', str(new_point), self.msg)
-
-    def __eq__(self, other):
-        return self.msg == other.msg and self.msg_offset == other.msg_offset
diff --git a/lib/cylc/mp_pool.py b/lib/cylc/mp_pool.py
index f5ced20..0e2f494 100644
--- a/lib/cylc/mp_pool.py
+++ b/lib/cylc/mp_pool.py
@@ -29,7 +29,6 @@ Some notes:
   (early versions of this module gave a choice of process or thread).
 """
 
-import fileinput
 import logging
 import multiprocessing
 from pipes import quote
@@ -39,7 +38,6 @@ from tempfile import TemporaryFile
 import time
 import traceback
 
-from cylc.batch_sys_manager import BATCH_SYS_MANAGER
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 import cylc.flags
 from cylc.suite_logging import LOG, OUT
@@ -152,20 +150,6 @@ class SuiteProcPool(object):
     # Shared memory flag.
     STOP_JOB_SUBMISSION = multiprocessing.Value('i', 0)
 
-    _INSTANCE = None
-
-    @classmethod
-    def get_inst(cls, pool_size=None):
-        """Return a singleton instance.
-
-        On 1st call, instantiate the singleton. The argument "pool_size" is
-        only relevant on 1st call.
-
-        """
-        if cls._INSTANCE is None:
-            cls._INSTANCE = cls(pool_size)
-        return cls._INSTANCE
-
     def __init__(self, pool_size=None):
         self.pool_size = (
             pool_size or
@@ -173,8 +157,7 @@ class SuiteProcPool(object):
             multiprocessing.cpu_count())
         # (The Pool class defaults to cpu_count anyway, but does not
         # expose the result via its public interface).
-        self.log = LOG
-        self.log.debug(
+        LOG.debug(
             "Initializing process pool, size %d" % self.pool_size)
         self.pool = multiprocessing.Pool(processes=self.pool_size)
         self.results = {}
@@ -182,18 +165,19 @@ class SuiteProcPool(object):
     def close(self):
         """Close the pool to new commands."""
         if not (self.is_dead() or self.is_closed()):
-            self.log.debug("Closing process pool")
+            LOG.debug("Closing process pool")
             self.pool.close()
 
     def handle_results_async(self):
         """Pass any available results to their associated callback."""
-        for result_id, item in self.results.items():
-            result, callback = item
+        for key, (result, callback, callback_args) in self.results.items():
             if result.ready():
-                self.results.pop(result_id)
+                self.results.pop(key)
                 value = result.get()
                 if callable(callback):
-                    callback(value)
+                    if not callback_args:
+                        callback_args = []
+                    callback(value, *callback_args)
 
     def is_closed(self):
         """Is the pool closed?"""
@@ -210,20 +194,20 @@ class SuiteProcPool(object):
 
     def join(self):
         """Join after workers have exited. Close or terminate first."""
-        self.log.debug("Joining process pool")
+        LOG.debug("Joining process pool")
         self.pool.join()
 
-    def put_command(self, ctx, callback):
+    def put_command(self, ctx, callback, callback_args=None):
         """Queue a new shell command to execute."""
         try:
             result = self.pool.apply_async(_run_command, [ctx])
         except AssertionError as exc:
-            self.log.warning("%s\n  %s\n %s" % (
+            LOG.warning("%s\n  %s\n %s" % (
                 str(exc),
                 "Rejecting command (pool closed)",
                 ctx.cmd))
         else:
-            self.results[id(result)] = (result, callback)
+            self.results[id(result)] = (result, callback, callback_args)
 
     @staticmethod
     def run_command(ctx):
@@ -238,17 +222,18 @@ class SuiteProcPool(object):
     def terminate(self):
         """Kill all worker processes immediately."""
         if not self.is_dead():
-            self.log.debug("Terminating process pool")
+            LOG.debug("Terminating process pool")
             self.pool.terminate()
 
 
 def main():
     """Manual test playground."""
 
-    LOG.setLevel(logging.INFO)  # or logging.DEBUG
+    log = logging.getLogger(LOG)
+    log.setLevel(logging.INFO)  # or logging.DEBUG
     handler = logging.StreamHandler(sys.stdout)
     handler.setLevel(logging.DEBUG)
-    LOG.addHandler(handler)
+    log.addHandler(handler)
 
     def print_result(result):
         """Print result"""
@@ -258,7 +243,7 @@ def main():
             LOG.info('FAILED> ' + result['CMD'])
             LOG.info(result['ERR'].strip())
 
-    pool = mp_pool(3)
+    pool = SuiteProcPool(3)
 
     for i in range(3):
         com = "sleep 5 && echo Hello from JOB " + str(i)
diff --git a/lib/cylc/network/__init__.py b/lib/cylc/network/__init__.py
index bfe2a36..c5f541c 100644
--- a/lib/cylc/network/__init__.py
+++ b/lib/cylc/network/__init__.py
@@ -132,7 +132,7 @@ def check_access_priv(server_obj, required_privilege_level):
             priv_level, required_privilege_level,
             user, host, prog_name, uuid
         )
-        getLogger("log").warn(err)
+        getLogger("log").warning(err)
         # Raise an exception to be sent back to the client.
         raise Exception(err)
 
diff --git a/lib/cylc/network/https/base_client.py b/lib/cylc/network/https/base_client.py
index 97e1fe6..f2cfcfc 100644
--- a/lib/cylc/network/https/base_client.py
+++ b/lib/cylc/network/https/base_client.py
@@ -22,9 +22,6 @@ import sys
 from uuid import uuid4
 import warnings
 
-# Ignore incorrect SSL certificate warning from urllib3 via requests.
-warnings.filterwarnings("ignore", "Certificate has no `subjectAltName`")
-
 import cylc.flags
 from cylc.network import (
     ConnectionError, ConnectionDeniedError, ConnectionInfoError,
@@ -67,8 +64,33 @@ class BaseCommsClient(object):
         self.server_cert = None
         self.auth = None
 
-    def call_server_func(self, category, fname, **fargs):
-        """Call server_object.fname(*fargs, **fargs)."""
+    def _compile_url(self, category, func_dict, host):
+        payload = func_dict.pop("payload", None)
+        method = func_dict.pop("method", self.METHOD)
+        function = func_dict.pop("function", None)
+        url = 'https://%s:%s/%s/%s' % (host, self.port, category, function)
+        # If there are any parameters left in the dict after popping,
+        # append them to the url.
+        if func_dict:
+            import urllib
+            params = urllib.urlencode(func_dict, doseq=True)
+            url += "?" + params
+        request = {"url": url, "payload": payload, "method": method}
+        return request
+
+    def call_server_func(self, category, *func_dicts, **fargs):
+        """func_dict is a dictionary of command names (fnames)
+        and arguments to that command"""
+        # Deal with the case of one func_dict/function name passed
+        # by converting them to the generic case: a dictionary of
+        # a single function and its function arguments.
+        if isinstance(func_dicts[0], str):
+            function = func_dicts[0]
+            func_dict = {"function": function}
+            func_dict.update(fargs)
+        else:
+            func_dict = None
+
         if self.host is None and self.port is not None:
             self.host = get_hostname()
         try:
@@ -76,19 +98,23 @@ class BaseCommsClient(object):
         except (IOError, ValueError, SuiteServiceFileError):
             raise ConnectionInfoError(self.suite)
         handle_proxies()
-        payload = fargs.pop("payload", None)
-        method = fargs.pop("method", self.METHOD)
         host = self.host
         if host == "localhost":
             host = get_hostname().split(".")[0]
-        url = 'https://%s:%s/%s/%s' % (host, self.port, category, fname)
-        if fargs:
-            import urllib
-            params = urllib.urlencode(fargs, doseq=True)
-            url += "?" + params
-        return self._get_data_from_url(url, payload, method=method)
 
-    def _get_data_from_url(self, url, json_data, method=None):
+        http_request_items = []
+        try:
+            # dictionary containing: url, payload, method
+            http_request_item = self._compile_url(category, func_dict, host)
+            http_request_items.append(http_request_item)
+        except (IndexError, ValueError, AttributeError):
+            for f_dict in func_dicts:
+                http_request_item = self._compile_url(category, f_dict, host)
+                http_request_items.append(http_request_item)
+        # returns a list of http returns from the requests
+        return self._get_data_from_url(http_request_items)
+
+    def _get_data_from_url(self, http_request_items):
         requests_ok = True
         try:
             import requests
@@ -99,150 +125,177 @@ class BaseCommsClient(object):
             if version < [2, 4, 2]:
                 requests_ok = False
         if requests_ok:
-            return self._get_data_from_url_with_requests(
-                url, json_data, method=method)
-        return self._get_data_from_url_with_urllib2(
-            url, json_data, method=method)
+            return self._get_data_from_url_with_requests(http_request_items)
+        return self._get_data_from_url_with_urllib2(http_request_items)
 
-    def _get_data_from_url_with_requests(self, url, json_data, method=None):
+    def _get_data_from_url_with_requests(self, http_request_items):
         import requests
+        from requests.packages.urllib3.exceptions import InsecureRequestWarning
+        warnings.simplefilter("ignore", InsecureRequestWarning)
         username, password = self._get_auth()
         auth = requests.auth.HTTPDigestAuth(username, password)
         if not hasattr(self, "session"):
             self.session = requests.Session()
-        if method is None:
-            method = self.METHOD
-        if method == self.METHOD_POST:
-            session_method = self.session.post
-        else:
-            session_method = self.session.get
-        try:
-            ret = session_method(
-                url,
-                json=json_data,
-                verify=self._get_verify(),
-                proxies={},
-                headers=self._get_headers(),
-                auth=auth,
-                timeout=self.timeout
-            )
-        except requests.exceptions.SSLError as exc:
-            if "unknown protocol" in str(exc) and url.startswith("https:"):
-                # Server is using http rather than https, for some reason.
-                sys.stderr.write(WARNING_NO_HTTPS_SUPPORT.format(exc))
-                return self._get_data_from_url_with_requests(
-                    url.replace("https:", "http:", 1), json_data)
-            if cylc.flags.debug:
-                import traceback
-                traceback.print_exc()
-            raise ConnectionError(url, exc)
-        except requests.exceptions.Timeout as exc:
-            if cylc.flags.debug:
-                import traceback
-                traceback.print_exc()
-            raise ConnectionTimeout(url, exc)
-        except requests.exceptions.RequestException as exc:
-            if cylc.flags.debug:
-                import traceback
-                traceback.print_exc()
-            raise ConnectionError(url, exc)
-        if ret.status_code == 401:
-            raise ConnectionDeniedError(url, self.prog_name,
-                                        self.ACCESS_DESCRIPTION)
-        if ret.status_code >= 400:
-            from cylc.network.https.util import get_exception_from_html
-            exception_text = get_exception_from_html(ret.text)
-            if exception_text:
-                sys.stderr.write(exception_text)
+
+        http_return_items = []
+        for http_request_item in http_request_items:
+            method = http_request_item['method']
+            url = http_request_item['url']
+            json_data = http_request_item['payload']
+            if method is None:
+                method = self.METHOD
+            if method == self.METHOD_POST:
+                session_method = self.session.post
             else:
-                sys.stderr.write(ret.text)
-        try:
-            ret.raise_for_status()
-        except requests.exceptions.HTTPError as exc:
-            if cylc.flags.debug:
-                import traceback
-                traceback.print_exc()
-            raise ConnectionError(url, exc)
-        if self.auth and self.auth[1] != NO_PASSPHRASE:
-            self.srv_files_mgr.cache_passphrase(
-                self.suite, self.owner, self.host, self.auth[1])
-        try:
-            return ret.json()
-        except ValueError:
-            return ret.text
+                session_method = self.session.get
+            try:
+                ret = session_method(
+                    url,
+                    json=json_data,
+                    verify=self._get_verify(),
+                    proxies={},
+                    headers=self._get_headers(),
+                    auth=auth,
+                    timeout=self.timeout
+                )
+            except requests.exceptions.SSLError as exc:
+                if "unknown protocol" in str(exc) and url.startswith("https:"):
+                    # Server is using http rather than https, for some reason.
+                    sys.stderr.write(WARNING_NO_HTTPS_SUPPORT.format(exc))
+                    for item in http_request_items:
+                        item['url'] = item['url'].replace("https:", "http:", 1)
+                    return self._get_data_from_url_with_requests(
+                        http_request_items)
+                if cylc.flags.debug:
+                    import traceback
+                    traceback.print_exc()
+                raise ConnectionError(url, exc)
+            except requests.exceptions.Timeout as exc:
+                if cylc.flags.debug:
+                    import traceback
+                    traceback.print_exc()
+                raise ConnectionTimeout(url, exc)
+            except requests.exceptions.RequestException as exc:
+                if cylc.flags.debug:
+                    import traceback
+                    traceback.print_exc()
+                raise ConnectionError(url, exc)
+            if ret.status_code == 401:
+                raise ConnectionDeniedError(url, self.prog_name,
+                                            self.ACCESS_DESCRIPTION)
+            if ret.status_code >= 400:
+                from cylc.network.https.util import get_exception_from_html
+                exception_text = get_exception_from_html(ret.text)
+                if exception_text:
+                    sys.stderr.write(exception_text)
+                else:
+                    sys.stderr.write(ret.text)
+            try:
+                ret.raise_for_status()
+            except requests.exceptions.HTTPError as exc:
+                if cylc.flags.debug:
+                    import traceback
+                    traceback.print_exc()
+                raise ConnectionError(url, exc)
+            if self.auth and self.auth[1] != NO_PASSPHRASE:
+                self.srv_files_mgr.cache_passphrase(
+                    self.suite, self.owner, self.host, self.auth[1])
+            try:
+                ret = ret.json()
+                http_return_items.append(ret)
+            except ValueError:
+                ret = ret.text
+                http_return_items.append(ret)
+        # Return a single http return or a list of them if multiple
+        return (http_return_items if len(http_return_items) > 1
+                else http_return_items[0])
 
-    def _get_data_from_url_with_urllib2(self, url, json_data, method=None):
+    def _get_data_from_url_with_urllib2(self, http_request_items):
         import json
         import urllib2
         import ssl
         if hasattr(ssl, '_create_unverified_context'):
             ssl._create_default_https_context = ssl._create_unverified_context
-        if method is None:
-            method = self.METHOD
-        orig_json_data = json_data
-        username, password = self._get_auth()
-        auth_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
-        auth_manager.add_password(None, url, username, password)
-        auth = urllib2.HTTPDigestAuthHandler(auth_manager)
-        opener = urllib2.build_opener(auth, urllib2.HTTPSHandler())
-        headers_list = self._get_headers().items()
-        if json_data:
-            json_data = json.dumps(json_data)
-            headers_list.append(('Accept', 'application/json'))
-            json_headers = {'Content-Type': 'application/json',
-                            'Content-Length': len(json_data)}
-        else:
-            json_data = None
-            json_headers = {'Content-Length': 0}
-        opener.addheaders = headers_list
-        req = urllib2.Request(url, json_data, json_headers)
-
-        # This is an unpleasant monkey patch, but there isn't an alternative.
-        # urllib2 uses POST if there is a data payload, but that is not the
-        # correct criterion. The difference is basically that POST changes
-        # server state and GET doesn't.
-        req.get_method = lambda: method
-        try:
-            response = opener.open(req, timeout=self.timeout)
-        except urllib2.URLError as exc:
-            if "unknown protocol" in str(exc) and url.startswith("https:"):
-                # Server is using http rather than https, for some reason.
-                sys.stderr.write(WARNING_NO_HTTPS_SUPPORT.format(exc))
-                return self._get_data_from_url_with_urllib2(
-                    url.replace("https:", "http:", 1), orig_json_data)
-            if cylc.flags.debug:
-                import traceback
-                traceback.print_exc()
-            if "timed out" in str(exc):
-                raise ConnectionTimeout(url, exc)
+
+        http_return_items = []
+        for http_request_item in http_request_items:
+            method = http_request_item['method']
+            url = http_request_item['url']
+            json_data = http_request_item['payload']
+            if method is None:
+                method = self.METHOD
+            orig_json_data = json_data
+            username, password = self._get_auth()
+            auth_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
+            auth_manager.add_password(None, url, username, password)
+            auth = urllib2.HTTPDigestAuthHandler(auth_manager)
+            opener = urllib2.build_opener(auth, urllib2.HTTPSHandler())
+            headers_list = self._get_headers().items()
+            if json_data:
+                json_data = json.dumps(json_data)
+                headers_list.append(('Accept', 'application/json'))
+                json_headers = {'Content-Type': 'application/json',
+                                'Content-Length': len(json_data)}
             else:
+                json_data = None
+                json_headers = {'Content-Length': 0}
+            opener.addheaders = headers_list
+            req = urllib2.Request(url, json_data, json_headers)
+
+            # This is an unpleasant monkey patch, but there isn't an
+            # alternative. urllib2 uses POST if there is a data payload
+            # but that is not the correct criterion.
+            # The difference is basically that POST changes
+            # server state and GET doesn't.
+            req.get_method = lambda: method
+            try:
+                response = opener.open(req, timeout=self.timeout)
+            except urllib2.URLError as exc:
+                if "unknown protocol" in str(exc) and url.startswith("https:"):
+                    # Server is using http rather than https, for some reason.
+                    sys.stderr.write(WARNING_NO_HTTPS_SUPPORT.format(exc))
+                    for item in http_request_items:
+                        item['url'] = item['url'].replace("https:", "http:", 1)
+                    return self._get_data_from_url_with_urllib2(
+                        http_request_items)
+                if cylc.flags.debug:
+                    import traceback
+                    traceback.print_exc()
+                if "timed out" in str(exc):
+                    raise ConnectionTimeout(url, exc)
+                else:
+                    raise ConnectionError(url, exc)
+            except Exception as exc:
+                if cylc.flags.debug:
+                    import traceback
+                    traceback.print_exc()
                 raise ConnectionError(url, exc)
-        except Exception as exc:
-            if cylc.flags.debug:
-                import traceback
-                traceback.print_exc()
-            raise ConnectionError(url, exc)
-
-        if response.getcode() == 401:
-            raise ConnectionDeniedError(url, self.prog_name,
-                                        self.ACCESS_DESCRIPTION)
-        response_text = response.read()
-        if response.getcode() >= 400:
-            from cylc.network.https.util import get_exception_from_html
-            exception_text = get_exception_from_html(response_text)
-            if exception_text:
-                sys.stderr.write(exception_text)
-            else:
-                sys.stderr.write(response_text)
-            raise ConnectionError(url,
-                                  "%s HTTP return code" % response.getcode())
-        if self.auth and self.auth[1] != NO_PASSPHRASE:
-            self.srv_files_mgr.cache_passphrase(
-                self.suite, self.owner, self.host, self.auth[1])
-        try:
-            return json.loads(response_text)
-        except ValueError:
-            return response_text
+
+            if response.getcode() == 401:
+                raise ConnectionDeniedError(url, self.prog_name,
+                                            self.ACCESS_DESCRIPTION)
+            response_text = response.read()
+            if response.getcode() >= 400:
+                from cylc.network.https.util import get_exception_from_html
+                exception_text = get_exception_from_html(response_text)
+                if exception_text:
+                    sys.stderr.write(exception_text)
+                else:
+                    sys.stderr.write(response_text)
+                raise ConnectionError(
+                    url,
+                    "%s HTTP return code" % response.getcode())
+            if self.auth and self.auth[1] != NO_PASSPHRASE:
+                self.srv_files_mgr.cache_passphrase(
+                    self.suite, self.owner, self.host, self.auth[1])
+
+            try:
+                http_return_items.append(json.loads(response_text))
+            except ValueError:
+                http_return_items.append(response_text)
+        # Return a single http return or a list of them if multiple
+        return (http_return_items if len(http_return_items) > 1
+                else http_return_items[0])
 
     def _get_auth(self):
         """Return a user/password Digest Auth."""
@@ -323,3 +376,60 @@ class BaseCommsClientAnon(BaseCommsClient):
     def _get_verify(self):
         """Other suites' certificates may not be accessible."""
         return False
+
+
+if __name__ == '__main__':
+    import unittest
+
+    class TestBaseCommsClient(unittest.TestCase):
+        """Unit testing class to test the methods in BaseCommsClient
+        """
+        def test_url_compiler(self):
+            """Tests that the url parser works for a single url and command"""
+            category = 'info'  # Could be any from cylc/network/__init__.py
+            host = "localhost"
+            func_dict = {"function": "test_command",
+                         "apples": "False",
+                         "oranges": "True",
+                         "method": "GET",
+                         "payload": "None"}
+
+            myCommsClient = BaseCommsClient("test-suite", port=80)
+            request = myCommsClient._compile_url(category, func_dict, host)
+            test_url = ('https://localhost:80/info/test_command'
+                        '?apples=False&oranges=True')
+
+            self.assertEqual(request['url'], test_url)
+            self.assertEqual(request['payload'], "None")
+            self.assertEqual(request['method'], "GET")
+
+        def test_get_data_from_url_single(self):
+            """Test the get data from _get_data_from_url() function"""
+            myCommsClient = BaseCommsClient("dummy-suite")
+            url = "http://httpbin.org/get"
+            payload = None
+            method = "GET"
+            request = [{"url": url, "payload": payload, "method": method}]
+            ret = myCommsClient._get_data_from_url(request)
+            self.assertEqual(ret['url'], "http://httpbin.org/get")
+
+        def test_get_data_from_url_multiple(self):
+            """Tests that the _get_data_from_url() method can
+            handle multiple requests in call to the method."""
+            myCommsClient = BaseCommsClient("dummy-suite")
+            payload = None
+            method = "GET"
+            request1 = {"url": "http://httpbin.org/get#1",
+                        "payload": payload, "method": method}
+            request2 = {"url": "http://httpbin.org/get#2",
+                        "payload": payload, "method": method}
+            request3 = {"url": "http://httpbin.org/get#3",
+                        "payload": payload, "method": method}
+
+            rets = myCommsClient._get_data_from_url([request1,
+                                                     request2, request3])
+
+            for i in range(2):
+                self.assertEqual(rets[i]['url'], "http://httpbin.org/get")
+
+    unittest.main()
diff --git a/lib/cylc/network/https/client_reporter.py b/lib/cylc/network/https/client_reporter.py
index e8b9a60..f20d668 100644
--- a/lib/cylc/network/https/client_reporter.py
+++ b/lib/cylc/network/https/client_reporter.py
@@ -110,14 +110,14 @@ class CommsClientReporter(object):
             (auth_user, prog_name, user, host, uuid,
              priv_level) = get_client_info()
         except Exception:
-            LOG.warn(
+            LOG.warning(
                 self.__class__.LOG_CONNECT_DENIED_TMPL % (
                     "unknown", "unknown", "unknown", "unknown")
             )
             return
         connection_denied = get_client_connection_denied()
         if connection_denied:
-            LOG.warn(
+            LOG.warning(
                 self.__class__.LOG_CONNECT_DENIED_TMPL % (
                     user, host, prog_name, uuid)
             )
@@ -131,7 +131,7 @@ class CommsClientReporter(object):
                 caller.user, caller.host, caller.prog_name, caller.uuid))
         try:
             del self.clients[caller.uuid]
-        except:
+        except KeyError:
             # Already forgotten.
             pass
         self._housekeep()
@@ -139,11 +139,13 @@ class CommsClientReporter(object):
     def _housekeep(self):
         """Forget inactive clients."""
 
-        for uuid in self.clients.keys():
-            dtime = self.clients[uuid]
+        for uuid, dtime in self.clients.copy().items():
             if (self._total_seconds(datetime.datetime.utcnow() - dtime) >
                     self.__class__.CLIENT_FORGET_SEC):
-                del self.clients[uuid]
+                try:
+                    del self.clients[uuid]
+                except KeyError:
+                    pass
                 LOG.debug(
                     self.__class__.LOG_FORGET_TMPL % uuid)
 
diff --git a/lib/cylc/network/https/port_scan.py b/lib/cylc/network/https/port_scan.py
index af3367c..480a400 100644
--- a/lib/cylc/network/https/port_scan.py
+++ b/lib/cylc/network/https/port_scan.py
@@ -26,7 +26,6 @@ from uuid import uuid4
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 import cylc.flags
 from cylc.network import ConnectionError, ConnectionTimeout
-from cylc.network.https.suite_state_client import SuiteStillInitialisingError
 from cylc.network.https.suite_identifier_client import (
     SuiteIdClientAnon, SuiteIdClient)
 from cylc.suite_srv_files_mgr import (
@@ -41,67 +40,72 @@ MSG_TIMEOUT = "TIMEOUT"
 SLEEP_INTERVAL = 0.01
 
 
-def _scan1_impl(conn, timeout, my_uuid):
-    """Connect to host:port to get suite identify."""
+def _scan_worker(conn, timeout, my_uuid):
+    """Port scan worker."""
     srv_files_mgr = SuiteSrvFilesManager()
     while True:
-        if not conn.poll(SLEEP_INTERVAL):
-            continue
-        item = conn.recv()
-        if item == MSG_QUIT:
-            break
-        host, port = item
-        host_anon = host
-        if is_remote_host(host):
-            host_anon = get_host_ip_by_name(host)  # IP reduces DNS traffic
-        client = SuiteIdClientAnon(
-            None, host=host_anon, port=port, my_uuid=my_uuid, timeout=timeout)
         try:
-            result = client.identify()
-        except ConnectionTimeout as exc:
-            conn.send((host, port, MSG_TIMEOUT))
-        except (ConnectionError, SuiteStillInitialisingError) as exc:
-            conn.send((host, port, None))
-        else:
-            owner = result.get('owner')
-            name = result.get('name')
-            states = result.get('states', None)
-            if cylc.flags.debug:
-                print >> sys.stderr, '   suite:', name, owner
-            if states is None:
-                # This suite keeps its state info private.
-                # Try again with the passphrase if I have it.
-                try:
-                    pphrase = srv_files_mgr.get_auth_item(
-                        srv_files_mgr.FILE_BASE_PASSPHRASE, name, owner, host,
-                        content=True)
-                except SuiteServiceFileError:
-                    pass
-                else:
-                    if pphrase:
-                        client = SuiteIdClient(
-                            name, owner=owner, host=host, port=port,
-                            my_uuid=my_uuid, timeout=timeout)
-                        try:
-                            result = client.identify()
-                        except SuiteStillInitialisingError as exc:
-                            if cylc.flags.debug:
-                                print >> sys.stderr, (
-                                    '    (connected with passphrase,' +
-                                    ' suite initialising)')
-                        except ConnectionError as exc:
-                            # Nope (private suite, wrong passphrase).
-                            if cylc.flags.debug:
-                                print >> sys.stderr, '    (wrong passphrase)'
-                        else:
-                            if cylc.flags.debug:
-                                print >> sys.stderr, (
-                                    '    (got states with passphrase)')
-            conn.send((host, port, result))
+            if not conn.poll(SLEEP_INTERVAL):
+                continue
+            item = conn.recv()
+            if item == MSG_QUIT:
+                break
+            conn.send(_scan_item(timeout, my_uuid, srv_files_mgr, item))
+        except KeyboardInterrupt:
+            break
     conn.close()
 
 
-def scan_all(hosts=None, timeout=None):
+def _scan_item(timeout, my_uuid, srv_files_mgr, item):
+    """Connect to item host:port (item) to get suite identify."""
+    host, port = item
+    host_anon = host
+    if is_remote_host(host):
+        host_anon = get_host_ip_by_name(host)  # IP reduces DNS traffic
+    client = SuiteIdClientAnon(
+        None, host=host_anon, port=port, my_uuid=my_uuid,
+        timeout=timeout)
+    try:
+        result = client.identify()
+    except ConnectionTimeout as exc:
+        return (host, port, MSG_TIMEOUT)
+    except ConnectionError as exc:
+        return (host, port, None)
+    else:
+        owner = result.get('owner')
+        name = result.get('name')
+        states = result.get('states', None)
+        if cylc.flags.debug:
+            print >> sys.stderr, '   suite:', name, owner
+        if states is None:
+            # This suite keeps its state info private.
+            # Try again with the passphrase if I have it.
+            try:
+                pphrase = srv_files_mgr.get_auth_item(
+                    srv_files_mgr.FILE_BASE_PASSPHRASE,
+                    name, owner, host, content=True)
+            except SuiteServiceFileError:
+                pass
+            else:
+                if pphrase:
+                    client = SuiteIdClient(
+                        name, owner=owner, host=host, port=port,
+                        my_uuid=my_uuid, timeout=timeout)
+                    try:
+                        result = client.identify()
+                    except ConnectionError as exc:
+                        # Nope (private suite, wrong passphrase).
+                        if cylc.flags.debug:
+                            print >> sys.stderr, (
+                                '    (wrong passphrase)')
+                    else:
+                        if cylc.flags.debug:
+                            print >> sys.stderr, (
+                                '    (got states with passphrase)')
+        return (host, port, result)
+
+
+def scan_all(hosts=None, timeout=None, updater=None):
     """Scan all hosts."""
     try:
         timeout = float(timeout)
@@ -132,67 +136,75 @@ def scan_all(hosts=None, timeout=None):
             todo_set.add((host, port))
     proc_items = []
     results = []
-    while todo_set or proc_items:
-        no_action = True
-        # Get results back from child processes where possible
-        busy_proc_items = []
-        while proc_items:
-            proc, my_conn, terminate_time = proc_items.pop()
-            if my_conn.poll():
-                host, port, result = my_conn.recv()
-                if result is None:
-                    # Can't connect, ignore
-                    wait_set.remove((host, port))
-                elif result == MSG_TIMEOUT:
-                    # Connection timeout, leave in "wait_set"
-                    pass
+    try:
+        while todo_set or proc_items:
+            no_action = True
+            # Get results back from child processes where possible
+            busy_proc_items = []
+            while proc_items:
+                if updater and updater.quit:
+                    raise KeyboardInterrupt()
+                proc, my_conn, terminate_time = proc_items.pop()
+                if my_conn.poll():
+                    host, port, result = my_conn.recv()
+                    if result is None:
+                        # Can't connect, ignore
+                        wait_set.remove((host, port))
+                    elif result == MSG_TIMEOUT:
+                        # Connection timeout, leave in "wait_set"
+                        pass
+                    else:
+                        # Connection success
+                        results.append((host, port, result))
+                        wait_set.remove((host, port))
+                    if todo_set:
+                        # Immediately give the child process something to do
+                        host, port = todo_set.pop()
+                        wait_set.add((host, port))
+                        my_conn.send((host, port))
+                        busy_proc_items.append(
+                            (proc, my_conn, time() + INACTIVITY_TIMEOUT))
+                    else:
+                        # Or quit if there is nothing left to do
+                        my_conn.send(MSG_QUIT)
+                        my_conn.close()
+                        proc.join()
+                    no_action = False
+                elif time() > terminate_time:
+                    # Terminate child process if it is taking too long
+                    proc.terminate()
+                    proc.join()
+                    no_action = False
                 else:
-                    # Connection success
-                    results.append((host, port, result))
-                    wait_set.remove((host, port))
-                if todo_set:
-                    # Immediately give the child process something to do
+                    busy_proc_items.append((proc, my_conn, terminate_time))
+            proc_items += busy_proc_items
+            # Create some child processes where necessary
+            while len(proc_items) < max_procs and todo_set:
+                if updater and updater.quit:
+                    raise KeyboardInterrupt()
+                my_conn, conn = Pipe()
+                try:
+                    proc = Process(
+                        target=_scan_worker, args=(conn, timeout, my_uuid))
+                except OSError:
+                    # Die if unable to start any worker process.
+                    # OK to wait and see if any worker process already running.
+                    if not proc_items:
+                        raise
+                    if cylc.flags.debug:
+                        traceback.print_exc()
+                else:
+                    proc.start()
                     host, port = todo_set.pop()
                     wait_set.add((host, port))
                     my_conn.send((host, port))
-                    busy_proc_items.append(
+                    proc_items.append(
                         (proc, my_conn, time() + INACTIVITY_TIMEOUT))
-                else:
-                    # Or quit if there is nothing left to do
-                    my_conn.send(MSG_QUIT)
-                    my_conn.close()
-                    proc.join()
-                no_action = False
-            elif time() > terminate_time:
-                # Terminate child process if it is taking too long
-                proc.terminate()
-                proc.join()
-                no_action = False
-            else:
-                busy_proc_items.append((proc, my_conn, terminate_time))
-        proc_items += busy_proc_items
-        # Create some child processes where necessary
-        while len(proc_items) < max_procs and todo_set:
-            my_conn, conn = Pipe()
-            try:
-                proc = Process(
-                    target=_scan1_impl, args=(conn, timeout, my_uuid))
-            except OSError:
-                # Die if unable to start any worker process.
-                # OK to wait and see if any worker process already running.
-                if not proc_items:
-                    raise
-                if cylc.flags.debug:
-                    traceback.print_exc()
-            else:
-                proc.start()
-                host, port = todo_set.pop()
-                wait_set.add((host, port))
-                my_conn.send((host, port))
-                proc_items.append((proc, my_conn, time() + INACTIVITY_TIMEOUT))
-                no_action = False
-        if no_action:
-            sleep(SLEEP_INTERVAL)
+                    no_action = False
+            if no_action:
+                sleep(SLEEP_INTERVAL)
+    except KeyboardInterrupt:
+        return []
     # Report host:port with no results
     if wait_set:
         print >> sys.stderr, (
diff --git a/lib/cylc/network/https/suite_broadcast_server.py b/lib/cylc/network/https/suite_broadcast_server.py
index 2066f8b..5293d47 100644
--- a/lib/cylc/network/https/suite_broadcast_server.py
+++ b/lib/cylc/network/https/suite_broadcast_server.py
@@ -15,22 +15,22 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""Server-side suite broadcast interface."""
 
-import json
 import re
 import threading
 
 from cylc.broadcast_report import (
+    CHANGE_FMT, CHANGE_PREFIX_SET,
     get_broadcast_change_iter,
     get_broadcast_change_report,
     get_broadcast_bad_options_report)
 from cylc.cycling.loader import get_point, standardise_point_string
 from cylc.wallclock import get_current_time_string
-from cylc.network import COMMS_BCAST_OBJ_NAME
 from cylc.network.https.base_server import BaseCommsServer
 from cylc.network.https.util import unicode_encode
 from cylc.network import check_access_priv
-from cylc.suite_logging import LOG
+from cylc.suite_logging import LOG, OUT
 from cylc.task_id import TaskID
 from cylc.rundb import CylcSuiteDAO
 
@@ -65,7 +65,6 @@ class BroadcastServer(BaseCommsServer):
 
     def __init__(self, linearized_ancestors):
         super(BroadcastServer, self).__init__()
-        self.log = LOG
         self.settings = {}
         self.db_inserts_map = {
             self.TABLE_BROADCAST_EVENTS: [],
@@ -148,7 +147,7 @@ class BroadcastServer(BaseCommsServer):
                     bad_point = False
                     try:
                         point_string = standardise_point_string(point_string)
-                    except Exception as exc:
+                    except Exception:
                         if point_string != '*':
                             bad_point_strings.append(point_string)
                             bad_point = True
@@ -168,7 +167,7 @@ class BroadcastServer(BaseCommsServer):
 
         # Log the broadcast
         self._append_db_queue(modified_settings)
-        self.log.info(get_broadcast_change_report(modified_settings))
+        LOG.info(get_broadcast_change_report(modified_settings))
 
         bad_options = {}
         if bad_point_strings:
@@ -286,10 +285,10 @@ class BroadcastServer(BaseCommsServer):
 
         # Log the broadcast
         self._append_db_queue(modified_settings, is_cancel=True)
-        self.log.info(
+        LOG.info(
             get_broadcast_change_report(modified_settings, is_cancel=True))
         if bad_options:
-            self.log.error(get_broadcast_bad_options_report(bad_options))
+            LOG.error(get_broadcast_bad_options_report(bad_options))
 
         return (modified_settings, bad_options)
 
@@ -317,12 +316,16 @@ class BroadcastServer(BaseCommsServer):
                             keys_list.append(keys + [key])
         return keys_list
 
-    def load_state(self, point, namespace, key, value):
+    def load_db_broadcast_states(self, row_idx, row):
         """Load broadcast variables from runtime DB broadcast states row."""
+        if row_idx == 0:
+            OUT.info("LOADING broadcast states")
+        point, namespace, key, value = row
         sections = []
-        if "]" in key:
-            sections = self.REC_SECTION.findall(key)
-            key = key.rsplit(r"]", 1)[-1]
+        cur_key = key
+        if "]" in cur_key:
+            sections = self.REC_SECTION.findall(cur_key)
+            cur_key = cur_key.rsplit(r"]", 1)[-1]
         with self.lock:
             self.settings.setdefault(point, {})
             self.settings[point].setdefault(namespace, {})
@@ -330,7 +333,13 @@ class BroadcastServer(BaseCommsServer):
             for section in sections:
                 dict_.setdefault(section, {})
                 dict_ = dict_[section]
-            dict_[key] = value
+            dict_[cur_key] = value
+        OUT.info(CHANGE_FMT.strip() % {
+            "change": CHANGE_PREFIX_SET,
+            "point": point,
+            "namespace": namespace,
+            "key": key,
+            "value": value})
 
     @classmethod
     def _get_bad_options(
diff --git a/lib/cylc/network/https/suite_command_server.py b/lib/cylc/network/https/suite_command_server.py
index 96e9e1d..c5e83a6 100644
--- a/lib/cylc/network/https/suite_command_server.py
+++ b/lib/cylc/network/https/suite_command_server.py
@@ -17,11 +17,8 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import ast
-import sys
-import os
 from Queue import Queue
 
-import cylc.flags
 from cylc.network.https.base_server import BaseCommsServer
 from cylc.network import check_access_priv
 
@@ -121,10 +118,14 @@ class SuiteCommandServer(BaseCommsServer):
 
     @cherrypy.expose
     @cherrypy.tools.json_out()
-    def reset_task_states(self, items, state=None):
+    def reset_task_states(self, items, state=None, outputs=None):
         if not isinstance(items, list):
             items = [items]
-        return self._put("reset_task_states", (items,), {"state": state})
+        if outputs and not isinstance(outputs, list):
+            outputs = [outputs]
+        return self._put(
+            "reset_task_states",
+            (items,), {"state": state, "outputs": outputs})
 
     @cherrypy.expose
     @cherrypy.tools.json_out()
diff --git a/lib/cylc/network/https/suite_identifier_server.py b/lib/cylc/network/https/suite_identifier_server.py
index 298b3a7..f1dccb8 100644
--- a/lib/cylc/network/https/suite_identifier_server.py
+++ b/lib/cylc/network/https/suite_identifier_server.py
@@ -23,7 +23,6 @@
 import cherrypy
 
 from cylc.config import SuiteConfig
-import cylc.flags
 from cylc.network import (
     KEY_DESCRIPTION, KEY_GROUP, KEY_NAME, KEY_OWNER, KEY_STATES,
     KEY_TASKS_BY_STATE, KEY_TITLE, KEY_UPDATE_TIME)
@@ -64,7 +63,7 @@ class SuiteIdServer(BaseCommsServer):
             result[KEY_GROUP] = config.cfg[KEY_GROUP]
         if access_priv_ok(self, "state-totals"):
             summary_server = StateSummaryServer.get_inst()
-            result[KEY_STATES] = summary_server.get_state_totals()
             result[KEY_UPDATE_TIME] = summary_server.get_summary_update_time()
+            result[KEY_STATES] = summary_server.get_state_totals()
             result[KEY_TASKS_BY_STATE] = summary_server.get_tasks_by_state()
         return result
diff --git a/lib/cylc/network/https/suite_info_client.py b/lib/cylc/network/https/suite_info_client.py
index 0af8e7a..bcd81d4 100644
--- a/lib/cylc/network/https/suite_info_client.py
+++ b/lib/cylc/network/https/suite_info_client.py
@@ -26,8 +26,8 @@ class SuiteInfoClient(BaseCommsClient):
 
     METHOD = BaseCommsClient.METHOD_GET
 
-    def get_info(self, command, **arg_dict):
-        return self.call_server_func(COMMS_INFO_OBJ_NAME, command, **arg_dict)
+    def get_info(self, *command, **arg_dict):
+        return self.call_server_func(COMMS_INFO_OBJ_NAME, *command, **arg_dict)
 
 
 class SuiteInfoClientAnon(BaseCommsClientAnon):
@@ -35,5 +35,5 @@ class SuiteInfoClientAnon(BaseCommsClientAnon):
 
     METHOD = BaseCommsClient.METHOD_GET
 
-    def get_info(self, command, **arg_dict):
-        return self.call_server_func(COMMS_INFO_OBJ_NAME, command, **arg_dict)
+    def get_info(self, *command, **arg_dict):
+        return self.call_server_func(COMMS_INFO_OBJ_NAME, *command, **arg_dict)
diff --git a/lib/cylc/network/https/suite_log_server.py b/lib/cylc/network/https/suite_log_server.py
index a08e916..ffd94e1 100644
--- a/lib/cylc/network/https/suite_log_server.py
+++ b/lib/cylc/network/https/suite_log_server.py
@@ -15,13 +15,13 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""Server-side suite log interface."""
 
+import cherrypy
 import os
+
 from cylc.network.https.base_server import BaseCommsServer
 from cylc.network import check_access_priv
-from cylc.suite_logging import SuiteLog
-
-import cherrypy
 
 
 class SuiteLogServer(BaseCommsServer):
@@ -30,7 +30,7 @@ class SuiteLogServer(BaseCommsServer):
     def __init__(self, log):
         super(SuiteLogServer, self).__init__()
         self.log = log
-        self.err_file = log.get_log_path(SuiteLog.ERR)
+        self.err_file = log.get_log_path(log.ERR)
 
     def _get_err_has_changed(self, prev_err_size):
         """Return True if the file has changed size compared to prev_size."""
@@ -42,10 +42,16 @@ class SuiteLogServer(BaseCommsServer):
         try:
             size = os.path.getsize(self.err_file)
         except (IOError, OSError) as exc:
-            self.log.warn("Could not read suite err log file: %s" % exc)
+            self._warn_read_err(exc)
             return 0
         return size
 
+    def _warn_read_err(self, exc):
+        """Issue warning on failure to read/stat the ERR log file."""
+        my_log = self.log.get_log(self.log.LOG)
+        if my_log is not None:
+            my_log.warning("Could not read suite err log file: %s" % exc)
+
     @cherrypy.expose
     @cherrypy.tools.json_out()
     def get_err_content(self, prev_size, max_lines):
@@ -57,13 +63,13 @@ class SuiteLogServer(BaseCommsServer):
         if not self._get_err_has_changed(prev_size):
             return [], prev_size
         try:
-            f = open(self.err_file, "r")
-            f.seek(prev_size)
-            new_content = f.read()
-            f.close()
+            handle = open(self.err_file, "r")
+            handle.seek(prev_size)
+            new_content = handle.read()
+            handle.close()
             size = self._get_err_size()
-        except (IOError, OSError) as e:
-            self.log.warning("Could not read suite err log file: %s" % e)
+        except (IOError, OSError) as exc:
+            self._warn_read_err(exc)
             return "", prev_size
         new_content_lines = new_content.splitlines()[-max_lines:]
         return "\n".join(new_content_lines), size
diff --git a/lib/cylc/network/https/suite_state_client.py b/lib/cylc/network/https/suite_state_client.py
index ec03012..000df8d 100644
--- a/lib/cylc/network/https/suite_state_client.py
+++ b/lib/cylc/network/https/suite_state_client.py
@@ -69,16 +69,6 @@ def get_suite_status_string(paused, stopping, will_pause_at, will_stop_at):
         return SUITE_STATUS_RUNNING
 
 
-class SuiteStillInitialisingError(Exception):
-    """Exception raised if a summary is requested before the first update.
-
-    This can happen if client connects during start-up for large suites.
-
-    """
-    def __str__(self):
-        return "Suite initializing..."
-
-
 class StateSummaryClient(BaseCommsClient):
     """Client-side suite state summary interface."""
 
diff --git a/lib/cylc/network/https/suite_state_server.py b/lib/cylc/network/https/suite_state_server.py
index 2220e2e..a75116e 100644
--- a/lib/cylc/network/https/suite_state_server.py
+++ b/lib/cylc/network/https/suite_state_server.py
@@ -16,7 +16,8 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import time
+import cherrypy
+from time import time
 
 import cylc.flags
 from cylc.task_id import TaskID
@@ -24,14 +25,10 @@ from cylc.wallclock import TIME_ZONE_LOCAL_INFO, TIME_ZONE_UTC_INFO
 from cylc.config import SuiteConfig
 from cylc.network.https.base_server import BaseCommsServer
 from cylc.network.https.suite_state_client import (
-    get_suite_status_string, SuiteStillInitialisingError,
-    extract_group_state
-)
+    get_suite_status_string, extract_group_state)
 from cylc.network import check_access_priv
 from cylc.task_state import TASK_STATUS_RUNAHEAD
 
-import cherrypy
-
 
 class StateSummaryServer(BaseCommsServer):
     """Server-side suite state summary interface."""
@@ -52,8 +49,7 @@ class StateSummaryServer(BaseCommsServer):
         self.global_summary = {}
         self.family_summary = {}
         self.run_mode = run_mode
-        self.first_update_completed = False
-        self._summary_update_time = None
+        self.summary_update_time = None
 
         self.state_count_totals = {}
         self.state_count_cycles = {}
@@ -61,6 +57,7 @@ class StateSummaryServer(BaseCommsServer):
     def update(self, tasks, tasks_rh, min_point, max_point, max_point_rh,
                paused, will_pause_at, stopping, will_stop_at, ns_defn_order,
                reloading):
+        self.summary_update_time = time()
         global_summary = {}
         family_summary = {}
 
@@ -136,7 +133,7 @@ class StateSummaryServer(BaseCommsServer):
             global_summary['daemon time zone info'] = TIME_ZONE_UTC_INFO
         else:
             global_summary['daemon time zone info'] = TIME_ZONE_LOCAL_INFO
-        global_summary['last_updated'] = time.time()
+        global_summary['last_updated'] = self.summary_update_time
         global_summary['run_mode'] = self.run_mode
         global_summary['states'] = all_states
         global_summary['namespace definition order'] = ns_defn_order
@@ -147,13 +144,10 @@ class StateSummaryServer(BaseCommsServer):
         global_summary['status_string'] = get_suite_status_string(
             paused, stopping, will_pause_at, will_stop_at)
 
-        self._summary_update_time = time.time()
-
         # Replace the originals (atomic update, for access from other threads).
         self.task_summary = task_summary
         self.global_summary = global_summary
         self.family_summary = family_summary
-        self.first_update_completed = True
         self.state_count_totals = state_count_totals
         self.state_count_cycles = state_count_cycles
 
@@ -196,8 +190,6 @@ class StateSummaryServer(BaseCommsServer):
         """Return the global, task, and family summary data structures."""
         check_access_priv(self, 'full-read')
         self.report('get_state_summary')
-        if not self.first_update_completed:
-            raise SuiteStillInitialisingError()
         return (self.global_summary, self.task_summary, self.family_summary)
 
     @cherrypy.expose
@@ -206,9 +198,7 @@ class StateSummaryServer(BaseCommsServer):
         """Return the last time the summaries were changed (Unix time)."""
         check_access_priv(self, 'state-totals')
         self.report('get_state_summary_update_time')
-        if not self.first_update_completed:
-            raise SuiteStillInitialisingError()
-        return self._summary_update_time
+        return self.summary_update_time
 
     @cherrypy.expose
     @cherrypy.tools.json_out()
diff --git a/lib/cylc/network/suite_state_client.py b/lib/cylc/network/suite_state_client.py
index abebf66..77b930a 100644
--- a/lib/cylc/network/suite_state_client.py
+++ b/lib/cylc/network/suite_state_client.py
@@ -23,7 +23,7 @@ if METHOD == "https":
     from cylc.network.https.suite_state_client import (
         StateSummaryClient, extract_group_state,
         get_id_summary, SUITE_STATUS_SPLIT_REC,
-        get_suite_status_string, SuiteStillInitialisingError,
+        get_suite_status_string,
         SUITE_STATUS_NOT_CONNECTED, SUITE_STATUS_CONNECTED,
         SUITE_STATUS_INITIALISING, SUITE_STATUS_STOPPED, SUITE_STATUS_STOPPING,
         SUITE_STATUS_STOPPED_WITH
diff --git a/lib/cylc/param_expand.py b/lib/cylc/param_expand.py
index 2d74a0f..a47c8b6 100755
--- a/lib/cylc/param_expand.py
+++ b/lib/cylc/param_expand.py
@@ -199,7 +199,11 @@ class NameExpander(object):
         if not param_list:
             # Inner loop.
             current_values = copy(spec_vals)
-            results.append((str_tmpl % current_values, current_values))
+            try:
+                results.append((str_tmpl % current_values, current_values))
+            except KeyError as exc:
+                raise ParamExpandError('ERROR: parameter %s is not '
+                                       'defined.' % str(exc.args[0]))
         else:
             for param_val in param_list[0][1]:
                 spec_vals[param_list[0][0]] = param_val
@@ -342,7 +346,11 @@ class GraphExpander(object):
                         param_values[pname] = offval
                 for pname in param_values:
                     tmpl += self.param_tmpl_cfg[pname]
-                repl = tmpl % param_values
+                try:
+                    repl = tmpl % param_values
+                except KeyError as exc:
+                    raise ParamExpandError('ERROR: parameter %s is not '
+                                           'defined.' % str(exc.args[0]))
                 line = re.sub('<' + p_group + '>', repl, line)
                 # Remove out-of-range nodes to first arrow.
                 line = re.sub('^.*--<REMOVE>--.*?=>\s*?', '', line)
@@ -529,6 +537,18 @@ class TestParamExpand(unittest.TestCase):
                           self.graph_expander.expand,
                           'foo<i=4,j><i,j>')
 
+    def test_template_fail_missing_param(self):
+        """Test a template string specifying a non-existent parameter."""
+        kvals = [str(k) for k in range(2)]
+        params_map = {'k': kvals}
+        templates = {'k': '_%(z)s'}
+        self.assertRaises(ParamExpandError,
+                          NameExpander((params_map, templates,)).expand,
+                          'foo<k>')
+        self.assertRaises(ParamExpandError,
+                          GraphExpander((params_map, templates,)).expand,
+                          'foo<k>')
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/lib/cylc/profiling/git.py b/lib/cylc/profiling/git.py
index 9775236..5c89304 100644
--- a/lib/cylc/profiling/git.py
+++ b/lib/cylc/profiling/git.py
@@ -64,8 +64,12 @@ def checkout(branch, delete_pyc=False):
 
 def get_commit_date(commit):
     """Returns the commit date (in unix time) of the profided commit."""
-    return int(Popen(['git', 'show', '-s', '--format=%at', commit],
-                     stdout=PIPE).communicate()[0].split()[-1])
+    try:
+        return int(Popen(['git', 'show', '-s', '--format=%at', commit],
+                         stdout=PIPE, stderr=PIPE
+                         ).communicate()[0].split()[-1])
+    except IndexError:
+        get_commit_date(commit.split('-')[0])
 
 
 def order_versions_by_date(versions):
diff --git a/lib/cylc/profiling/profile.py b/lib/cylc/profiling/profile.py
index 15bd365..88c2b3d 100644
--- a/lib/cylc/profiling/profile.py
+++ b/lib/cylc/profiling/profile.py
@@ -33,9 +33,14 @@ from .analysis import extract_results
 from .git import (checkout, describe, GitCheckoutError,)
 
 
-# Environment for executing cylc commands in.
-CYLC_ENV = os.environ.copy()
-CYLC_ENV['CYLC_CONF_PATH'] = ''
+def cylc_env(cylc_conf_path=''):
+    """Provide an environment for executing cylc commands in."""
+    cylc_env = os.environ.copy()
+    cylc_env['CYLC_CONF_PATH'] = cylc_conf_path
+    return cylc_env
+
+
+CLEAN_ENV = cylc_env()
 
 
 class SuiteFailedException(Exception):
@@ -64,7 +69,7 @@ class ProfilingKilledException(SuiteFailedException):
 
 def cylc_major_version():
     """Return the first character of the cylc version e.g. '7'."""
-    return Popen(['cylc', '--version'], env=CYLC_ENV, stdout=PIPE
+    return Popen(['cylc', '--version'], env=CLEAN_ENV, stdout=PIPE
                  ).communicate()[0].strip()[0]
 
 
@@ -72,7 +77,7 @@ def register_suite(reg, sdir):
     """Registers the suite located in sdir with the registration name reg."""
     cmd = ['cylc', 'register', reg, sdir]
     print '$ ' + ' '.join(cmd)
-    if not subprocess_call(cmd, stdout=PIPE, env=CYLC_ENV):
+    if not subprocess_call(cmd, stdout=PIPE, env=CLEAN_ENV):
         return True
     print '\tFailed'
     return False
@@ -82,7 +87,7 @@ def unregister_suite(reg):
     """Unregisters the suite reg."""
     cmd = ['cylc', 'unregister', reg]
     print '$ ' + ' '.join(cmd)
-    subprocess_call(cmd, stdout=PIPE, env=CYLC_ENV)
+    subprocess_call(cmd, stdout=PIPE, env=CLEAN_ENV)
 
 
 def purge_suite(reg):
@@ -96,7 +101,8 @@ def purge_suite(reg):
         return True
 
 
-def run_suite(reg, options, out_file, profile_modes, mode='live'):
+def run_suite(reg, options, out_file, profile_modes, mode='live',
+              conf_path=''):
     """Runs cylc run / cylc validate on the provided suite with the requested
     profiling options.
 
@@ -114,6 +120,7 @@ def run_suite(reg, options, out_file, profile_modes, mode='live'):
 
     """
     cmds = []
+    env = cylc_env(cylc_conf_path=conf_path)
 
     # Cylc profiling, echo command start time.
     if PROFILE_MODE_CYLC in profile_modes:
@@ -149,7 +156,7 @@ def run_suite(reg, options, out_file, profile_modes, mode='live'):
         tmp = ['-s namespaces=root']
         namespaces = Popen(
             ['cylc', 'list', reg] + jinja2_params + tmp, stdout=PIPE,
-            env=CYLC_ENV).communicate()[0].split() + ['root']
+            env=env).communicate()[0].split() + ['root']
         jinja2_params.append(
             '-s namespaces={0}'.format(','.join(namespaces)))
     cmds.extend(jinja2_params)
@@ -179,13 +186,13 @@ def run_suite(reg, options, out_file, profile_modes, mode='live'):
     print '$ ' + ' '.join(cmds)
     try:
         proc = Popen(' '.join(cmds), shell=True, stderr=open(time_err, 'w+'),
-                     stdout=open(startup_file, 'w+'), env=CYLC_ENV)
+                     stdout=open(startup_file, 'w+'), env=env)
         if proc.wait():
             raise SuiteFailedException(run_cmds, cmd_out, cmd_err)
     except KeyboardInterrupt:
         kill_cmd = ['cylc', 'stop', '--kill', reg]
         print '$ ' + ' '.join(kill_cmd)
-        subprocess_call(kill_cmd, env=CYLC_ENV)
+        subprocess_call(kill_cmd, env=env)
         raise ProfilingKilledException(run_cmds, cmd_out, cmd_err)
 
     # Return cylc stderr if present.
@@ -222,7 +229,8 @@ def run_experiment(exp):
                 run['options'] + ['cylc_compat_mode=%s' % cylc_maj_version],
                 out_file,
                 profile_modes,
-                exp.get('mode', 'live'))
+                exp.get('mode', 'live'),
+                conf_path=run.get('globalrc', ''))
             # Handle errors.
             if err_file:
                 print >> sys.stderr, ('WARNING: non-empty suite error log: ' +
diff --git a/lib/cylc/remote.py b/lib/cylc/remote.py
index 7c4de3c..d155bba 100644
--- a/lib/cylc/remote.py
+++ b/lib/cylc/remote.py
@@ -73,7 +73,7 @@ class remrun(object):
                 is_remote_user(self.owner) or is_remote_host(self.host))
 
     def execute(self, force_required=False, env=None, path=None,
-                dry_run=False):
+                dry_run=False, forward_x11=False):
         """Execute command on remote host.
 
         Returns False if remote re-invocation is not needed, True if it is
@@ -88,20 +88,20 @@ class remrun(object):
 
         name = os.path.basename(self.argv[0])[5:]  # /path/to/cylc-foo => foo
 
-        user_at_host = ''
+        # Build the remote command
+        command = shlex.split(GLOBAL_CFG.get_host_item(
+            "ssh command", self.host, self.owner))
+        if forward_x11:
+            command.append("-Y")
+
+        user_at_host = ""
         if self.owner:
-            user_at_host = self.owner + '@'
+            user_at_host = self.owner + "@"
         if self.host:
             user_at_host += self.host
         else:
-            user_at_host += 'localhost'
-
-        # Build the remote command
-
-        # ssh command and options (X forwarding)
-        ssh_tmpl = str(GLOBAL_CFG.get_host_item(
-            "remote shell template", self.host, self.owner))
-        command = shlex.split(ssh_tmpl) + ["-Y", user_at_host]
+            user_at_host += "localhost"
+        command.append(user_at_host)
 
         # Use bash -l?
         ssh_login_shell = self.ssh_login_shell
diff --git a/lib/cylc/rundb.py b/lib/cylc/rundb.py
index f49e9ae..672ed6e 100644
--- a/lib/cylc/rundb.py
+++ b/lib/cylc/rundb.py
@@ -173,6 +173,7 @@ class CylcSuiteDAO(object):
     TABLE_BROADCAST_EVENTS = "broadcast_events"
     TABLE_BROADCAST_STATES = "broadcast_states"
     TABLE_BROADCAST_STATES_CHECKPOINTS = "broadcast_states_checkpoints"
+    TABLE_INHERITANCE = "inheritance"
     TABLE_SUITE_PARAMS = "suite_params"
     TABLE_SUITE_PARAMS_CHECKPOINTS = "suite_params_checkpoints"
     TABLE_SUITE_TEMPLATE_VARS = "suite_template_vars"
@@ -211,6 +212,10 @@ class CylcSuiteDAO(object):
             ["time"],
             ["event"],
         ],
+        TABLE_INHERITANCE: [
+            ["namespace", {"is_primary_key": True}],
+            ["inheritance"],
+        ],
         TABLE_SUITE_PARAMS: [
             ["key", {"is_primary_key": True}],
             ["value"],
diff --git a/lib/cylc/scheduler.py b/lib/cylc/scheduler.py
index 8bda930..af22404 100644
--- a/lib/cylc/scheduler.py
+++ b/lib/cylc/scheduler.py
@@ -18,16 +18,12 @@
 """Cylc scheduler server."""
 
 from copy import deepcopy
-import logging
+from logging import DEBUG
 import os
-import pickle
-from pipes import quote
-from Queue import Empty
-import shlex
-from shutil import copy, copytree, rmtree
-from subprocess import call, Popen, PIPE
+import Queue
+from shutil import copytree, rmtree
+from subprocess import Popen, PIPE
 import sys
-from tempfile import mkstemp
 from time import sleep, time
 import traceback
 
@@ -35,25 +31,19 @@ import isodatetime.data
 import isodatetime.parsers
 from parsec.util import printcfg
 
-from cylc.broadcast_report import (
-    CHANGE_FMT as BROADCAST_LOAD_FMT,
-    CHANGE_PREFIX_SET as BROADCAST_LOAD_PREFIX)
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
-from cylc.config import SuiteConfig, TaskNotDefinedError
+from cylc.config import SuiteConfig
 from cylc.cycling import PointParsingError
 from cylc.cycling.loader import get_point, standardise_point_string
 from cylc.daemonize import daemonize
 from cylc.exceptions import CylcError
 import cylc.flags
-from cylc.get_task_proxy import get_task_proxy
-from cylc.job_file import JobFile
-from cylc.job_host import RemoteJobHostManager, RemoteJobHostInitError
 from cylc.log_diagnosis import LogSpec
-from cylc.mp_pool import SuiteProcContext, SuiteProcPool
+from cylc.mp_pool import SuiteProcPool
 from cylc.network import (
     COMMS_SUITEID_OBJ_NAME, COMMS_STATE_OBJ_NAME,
     COMMS_CMD_OBJ_NAME, COMMS_BCAST_OBJ_NAME, COMMS_EXT_TRIG_OBJ_NAME,
-    COMMS_INFO_OBJ_NAME, COMMS_LOG_OBJ_NAME)
+    COMMS_INFO_OBJ_NAME, COMMS_LOG_OBJ_NAME, COMMS_TASK_MESSAGE_OBJ_NAME)
 from cylc.network.ext_trigger_server import ExtTriggerServer
 from cylc.network.daemon import CommsDaemon
 from cylc.network.suite_broadcast_server import BroadcastServer
@@ -62,24 +52,21 @@ from cylc.network.suite_identifier_server import SuiteIdServer
 from cylc.network.suite_info_server import SuiteInfoServer
 from cylc.network.suite_log_server import SuiteLogServer
 from cylc.network.suite_state_server import StateSummaryServer
+from cylc.network.task_msg_server import TaskMessageServer
 from cylc.owner import USER
-from cylc.suite_host import is_remote_host
-from cylc.suite_srv_files_mgr import (
-    SuiteSrvFilesManager, SuiteServiceFileError)
-from cylc.rundb import CylcSuiteDAO
+from cylc.suite_db_mgr import SuiteDatabaseManager
+from cylc.suite_events import (
+    SuiteEventContext, SuiteEventError, SuiteEventHandler)
 from cylc.suite_host import get_suite_host
 from cylc.suite_logging import SuiteLog, OUT, ERR, LOG
+from cylc.suite_srv_files_mgr import (
+    SuiteSrvFilesManager, SuiteServiceFileError)
 from cylc.taskdef import TaskDef
 from cylc.task_id import TaskID
+from cylc.task_job_mgr import TaskJobManager, RemoteJobHostInitError
 from cylc.task_pool import TaskPool
-from cylc.task_proxy import (
-    TaskProxy, TaskProxySequenceBoundsError, TaskActionTimer)
-from cylc.task_state import (
-    TASK_STATUS_HELD, TASK_STATUS_WAITING,
-    TASK_STATUS_QUEUED, TASK_STATUS_READY, TASK_STATUS_SUBMITTED,
-    TASK_STATUS_SUBMIT_FAILED, TASK_STATUS_SUBMIT_RETRYING,
-    TASK_STATUS_RUNNING, TASK_STATUS_SUCCEEDED, TASK_STATUS_FAILED,
-    TASK_STATUS_RETRYING)
+from cylc.task_proxy import TaskProxy, TaskProxySequenceBoundsError
+from cylc.task_state import TASK_STATUSES_ACTIVE, TASK_STATUS_FAILED
 from cylc.templatevars import load_template_vars
 from cylc.version import CYLC_VERSION
 from cylc.wallclock import (
@@ -100,20 +87,17 @@ class SchedulerStop(CylcError):
 class Scheduler(object):
     """Cylc scheduler server."""
 
-    EVENT_STARTUP = 'startup'
-    EVENT_SHUTDOWN = 'shutdown'
-    EVENT_TIMEOUT = 'timeout'
-    EVENT_INACTIVITY_TIMEOUT = 'inactivity'
-    EVENT_STALLED = 'stalled'
+    EVENT_STARTUP = SuiteEventHandler.EVENT_STARTUP
+    EVENT_SHUTDOWN = SuiteEventHandler.EVENT_SHUTDOWN
+    EVENT_TIMEOUT = SuiteEventHandler.EVENT_TIMEOUT
+    EVENT_INACTIVITY_TIMEOUT = SuiteEventHandler.EVENT_INACTIVITY_TIMEOUT
+    EVENT_STALLED = SuiteEventHandler.EVENT_STALLED
 
     # Intervals in seconds
     INTERVAL_MAIN_LOOP = 1.0
     INTERVAL_STOP_KILL = 10.0
     INTERVAL_STOP_PROCESS_POOL_EMPTY = 0.5
 
-    SUITE_EVENT_HANDLER = 'suite-event-handler'
-    SUITE_EVENT_MAIL = 'suite-event-mail'
-
     START_MESSAGE_PREFIX = 'Suite starting: '
     START_MESSAGE_TMPL = (
         START_MESSAGE_PREFIX + 'server=%(host)s:%(port)s pid=%(pid)s')
@@ -169,32 +153,27 @@ class Scheduler(object):
 
         self.run_mode = self.options.run_mode
 
-        # For persistence of reference test settings across reloads:
-        self.reference_test_mode = self.options.reftest
-        self.gen_reference_log = self.options.genref
-
         self.owner = USER
         self.host = get_suite_host()
         self.port = None
 
         self.is_stalled = False
 
-        self.graph_warned = {}
-
-        self.task_event_handler_env = {}
         self.contact_data = None
 
         self.do_process_tasks = False
-        self.do_update_state_summary = True
 
         # initialize some items in case of early shutdown
         # (required in the shutdown() method)
         self.suite_state = None
         self.command_queue = None
         self.pool = None
-        self.request_handler = None
+        self.proc_pool = None
+        self.task_job_mgr = None
+        self.task_events_mgr = None
+        self.suite_event_handler = None
+        self.message_queue = None
         self.comms_daemon = None
-        self.info_interface = None
 
         self._profile_amounts = {}
         self._profile_update_times = {}
@@ -219,14 +198,13 @@ class Scheduler(object):
         self.time_next_kill = None
         self.already_timed_out = False
 
-        self.pri_dao = None
-        self.pub_dao = None
+        self.suite_db_mgr = SuiteDatabaseManager(
+            self.suite_srv_files_mgr.get_suite_srv_dir(self.suite),  # pri_d
+            os.path.join(self.suite_run_dir, 'log'))                 # pub_d
 
         self.suite_log = None
-        self.log = LOG
 
         self.ref_test_allowed_failures = []
-        self.next_task_event_mail_time = None
 
     def start(self):
         """Start the server."""
@@ -235,7 +213,7 @@ class Scheduler(object):
         GLOBAL_CFG.create_cylc_run_tree(self.suite)
 
         if self.is_restart:
-            self._start_db_upgrade()
+            self.suite_db_mgr.restart_upgrade()
 
         try:
             if not self.options.no_detach and not cylc.flags.debug:
@@ -243,10 +221,11 @@ class Scheduler(object):
 
             slog = SuiteLog.get_inst(self.suite)
             if cylc.flags.debug:
-                slog.pimp(logging.DEBUG)
+                slog.pimp(DEBUG)
             else:
                 slog.pimp()
 
+            self.proc_pool = SuiteProcPool()
             self.configure_comms_daemon()
             self.configure()
             self.profiler.start()
@@ -316,70 +295,9 @@ conditions; see `cylc conditions`.
         for i in range(len(logo_lines)):
             print logo_lines[i], ('{0: ^%s}' % lmax).format(license_lines[i])
 
-    def _start_db_upgrade(self):
-        """Vacuum/upgrade runtime DB on restart."""
-        pri_db_path = os.path.join(
-            self.suite_srv_files_mgr.get_suite_srv_dir(self.suite),
-            CylcSuiteDAO.DB_FILE_BASE_NAME)
-
-        # Backward compat, upgrade database with state file if necessary
-        old_pri_db_path = os.path.join(
-            self.suite_run_dir, 'state', CylcSuiteDAO.OLD_DB_FILE_BASE_NAME)
-        old_pri_db_path_611 = os.path.join(
-            self.suite_run_dir, CylcSuiteDAO.OLD_DB_FILE_BASE_NAME_611[0])
-        old_state_file_path = os.path.join(
-            self.suite_run_dir, "state", "state")
-        if (os.path.exists(old_pri_db_path) and
-                os.path.exists(old_state_file_path) and
-                not os.path.exists(pri_db_path)):
-            # Upgrade pre-6.11.X runtime database + state file
-            copy(old_pri_db_path, pri_db_path)
-            pri_dao = CylcSuiteDAO(pri_db_path)
-            pri_dao.upgrade_with_state_file(old_state_file_path)
-            target = os.path.join(self.suite_run_dir, "state.tar.gz")
-            cmd = ["tar", "-C", self.suite_run_dir, "-czf", target, "state"]
-            if call(cmd) == 0:
-                rmtree(
-                    os.path.join(self.suite_run_dir, "state"),
-                    ignore_errors=True)
-            else:
-                try:
-                    os.unlink(os.path.join(self.suite_run_dir, "state.tar.gz"))
-                except OSError:
-                    pass
-                ERR.error("cannot tar-gzip + remove old state/ directory")
-            # Remove old files as well
-            try:
-                os.unlink(os.path.join(self.suite_run_dir, "cylc-suite-env"))
-            except OSError:
-                pass
-        elif os.path.exists(old_pri_db_path_611):
-            # Upgrade 6.11.X runtime database
-            os.rename(old_pri_db_path_611, pri_db_path)
-            pri_dao = CylcSuiteDAO(pri_db_path)
-            pri_dao.upgrade_from_611()
-            # Remove old files as well
-            for name in [
-                    CylcSuiteDAO.OLD_DB_FILE_BASE_NAME_611[1],
-                    "cylc-suite-env"]:
-                try:
-                    os.unlink(os.path.join(self.suite_run_dir, name))
-                except OSError:
-                    pass
-        else:
-            pri_dao = CylcSuiteDAO(pri_db_path)
-
-        # Vacuum the primary/private database file
-        OUT.info("Vacuuming the suite db ...")
-        pri_dao.vacuum()
-        OUT.info("...done")
-        pri_dao.close()
-
     def configure(self):
         """Configure suite daemon."""
         self.profiler.log_memory("scheduler.py: start configure")
-        SuiteProcPool.get_inst()
-
         self.profiler.log_memory("scheduler.py: before configure_suite")
         self.configure_suite()
         self.profiler.log_memory("scheduler.py: after configure_suite")
@@ -390,22 +308,34 @@ conditions; see `cylc conditions`.
                 raise SchedulerError(
                     'ERROR: this suite requires the %s run mode' % reqmode)
 
-        if self.gen_reference_log or self.reference_test_mode:
+        self.suite_event_handler = SuiteEventHandler(self.proc_pool)
+        self.task_job_mgr = TaskJobManager(
+            self.suite, self.proc_pool, self.suite_db_mgr,
+            self.suite_srv_files_mgr)
+        self.task_events_mgr = self.task_job_mgr.task_events_mgr
+        self.task_events_mgr.mail_interval = self._get_cylc_conf(
+            "task event mail interval")
+        self.task_events_mgr.mail_footer = self._get_events_conf("mail footer")
+        self.task_events_mgr.suite_url = self.config.cfg['URL']
+        if self.options.genref or self.options.reftest:
             self.configure_reftest()
 
-        self.log.info(self.START_MESSAGE_TMPL % {
+        LOG.info(self.START_MESSAGE_TMPL % {
             'host': self.host, 'port': self.port, 'pid': os.getpid()})
         # Note that the following lines must be present at the top of
         # the suite log file for use in reference test runs:
-        self.log.info('Run mode: ' + self.run_mode)
-        self.log.info('Initial point: ' + str(self.initial_point))
+        LOG.info('Run mode: ' + self.run_mode)
+        LOG.info('Initial point: ' + str(self.initial_point))
         if self.start_point != self.initial_point:
-            self.log.info('Start point: ' + str(self.start_point))
-        self.log.info('Final point: ' + str(self.final_point))
+            LOG.info('Start point: ' + str(self.start_point))
+        LOG.info('Final point: ' + str(self.final_point))
 
         self.pool = TaskPool(
-            self.suite, self.pri_dao, self.pub_dao, self.final_point,
-            self.comms_daemon, self.log, self.run_mode)
+            self.config, self.final_point, self.suite_db_mgr,
+            self.task_events_mgr)
+        self.message_queue = TaskMessageServer(self.suite)
+        self.comms_daemon.connect(
+            self.message_queue, COMMS_TASK_MESSAGE_OBJ_NAME)
 
         self.profiler.log_memory("scheduler.py: before load_tasks")
         if self.is_restart:
@@ -414,11 +344,13 @@ conditions; see `cylc conditions`.
             self.load_tasks_for_run()
         self.profiler.log_memory("scheduler.py: after load_tasks")
 
-        self.pool.put_rundb_suite_params(
+        self.suite_db_mgr.put_suite_params(
+            self.run_mode,
             self.initial_point,
             self.final_point,
+            self.pool.is_held,
             self.config.cfg['cylc']['cycle point format'])
-        self.pool.put_rundb_suite_template_vars(self.template_vars)
+        self.suite_db_mgr.put_suite_template_vars(self.template_vars)
         self.configure_suite_environment()
 
         # Copy local python modules from source to run directory
@@ -438,10 +370,6 @@ conditions; see `cylc conditions`.
         self.already_timed_out = False
         self.set_suite_timer()
 
-        # self.nudge_timer_start = None
-        # self.nudge_timer_on = False
-        # self.auto_nudge_interval = 5  # seconds
-
         self.already_inactive = False
         if self._get_events_conf(self.EVENT_INACTIVITY_TIMEOUT):
             self.set_suite_inactivity_timer()
@@ -452,9 +380,9 @@ conditions; see `cylc conditions`.
         """Load tasks for a new run."""
         if self.start_point is not None:
             if self.options.warm:
-                self.log.info('Warm Start %s' % self.start_point)
+                LOG.info('Warm Start %s' % self.start_point)
             else:
-                self.log.info('Cold Start %s' % self.start_point)
+                LOG.info('Cold Start %s' % self.start_point)
 
         task_list = self.filter_initial_task_list(
             self.config.get_task_name_list())
@@ -464,50 +392,37 @@ conditions; see `cylc conditions`.
                 # No start cycle point at which to load cycling tasks.
                 continue
             try:
-                itask = get_task_proxy(
-                    name, self.start_point, is_startup=True,
-                    message_queue=self.pool.message_queue)
+                self.pool.add_to_runahead_pool(TaskProxy(
+                    self.config.get_taskdef(name), self.start_point,
+                    is_startup=True))
             except TaskProxySequenceBoundsError as exc:
-                self.log.debug(str(exc))
+                LOG.debug(str(exc))
                 continue
-            # Load task.
-            self.pool.add_to_runahead_pool(itask)
 
     def load_tasks_for_restart(self):
         """Load tasks for restart."""
-        self.pri_dao.select_suite_params(
+        self.suite_db_mgr.pri_dao.select_suite_params(
             self._load_suite_params, self.options.checkpoint)
-        self.pri_dao.select_broadcast_states(
-            self._load_broadcast_states, self.options.checkpoint)
-        self.pri_dao.select_task_job_run_times(self._load_task_run_times)
-        self.pri_dao.select_task_pool_for_restart(
-            self._load_task_pool, self.options.checkpoint)
-        self.pri_dao.select_task_action_timers(self._load_task_action_timers)
+        self.suite_db_mgr.pri_dao.select_broadcast_states(
+            BroadcastServer.get_inst().load_db_broadcast_states,
+            self.options.checkpoint)
+        self.suite_db_mgr.pri_dao.select_task_job_run_times(
+            self._load_task_run_times)
+        self.suite_db_mgr.pri_dao.select_task_pool_for_restart(
+            self.pool.load_db_task_pool_for_restart, self.options.checkpoint)
+        self.suite_db_mgr.pri_dao.select_task_action_timers(
+            self.pool.load_db_task_action_timers)
         # Re-initialise run directory for user at host for each submitted and
         # running tasks.
         # Note: tasks should all be in the runahead pool at this point.
         for itask in self.pool.get_rh_tasks():
-            if itask.state.status in [
-                    TASK_STATUS_SUBMITTED, TASK_STATUS_RUNNING]:
+            if itask.state.status in TASK_STATUSES_ACTIVE:
                 try:
-                    RemoteJobHostManager.get_inst().init_suite_run_dir(
+                    self.task_job_mgr.init_host(
                         self.suite, itask.task_host, itask.task_owner)
                 except RemoteJobHostInitError as exc:
-                    self.log.error(str(exc))
-        self.pool.poll_task_jobs()
-
-    def _load_broadcast_states(self, row_idx, row):
-        """Load a setting in the previous broadcast states."""
-        if row_idx == 0:
-            OUT.info("LOADING broadcast states")
-        point, namespace, key, value = row
-        BroadcastServer.get_inst().load_state(point, namespace, key, value)
-        OUT.info(BROADCAST_LOAD_FMT.strip() % {
-            "change": BROADCAST_LOAD_PREFIX,
-            "point": point,
-            "namespace": namespace,
-            "key": key,
-            "value": value})
+                    LOG.error(str(exc))
+        self.command_poll_tasks()
 
     def _load_suite_params(self, row_idx, row):
         """Load previous initial/final cycle point."""
@@ -563,119 +478,23 @@ conditions; see `cylc conditions`.
         except (KeyError, ValueError, AttributeError):
             return
 
-    def _load_task_pool(self, row_idx, row):
-        """Load a task from previous task pool.
-
-        The state of task prerequisites (satisfied or not) and outputs
-        (completed or not) is determined by the recorded TASK_STATUS:
-
-        TASK_STATUS_WAITING    - prerequisites and outputs unsatisified
-        TASK_STATUS_HELD       - ditto (only waiting tasks can be held)
-        TASK_STATUS_QUEUED     - prereqs satisfied, outputs not completed
-                                 (only tasks ready to run can get queued)
-        TASK_STATUS_READY      - ditto
-        TASK_STATUS_SUBMITTED  - ditto (but see *)
-        TASK_STATUS_SUBMIT_RETRYING - ditto
-        TASK_STATUS_RUNNING    - ditto (but see *)
-        TASK_STATUS_FAILED     - ditto (tasks must run in order to fail)
-        TASK_STATUS_RETRYING   - ditto (tasks must fail in order to retry)
-        TASK_STATUS_SUCCEEDED  - prerequisites satisfied, outputs completed
-
-        (*) tasks reloaded with TASK_STATUS_SUBMITTED or TASK_STATUS_RUNNING
-        are polled to determine what their true status is.
-        """
-        if row_idx == 0:
-            OUT.info("LOADING task proxies")
-        (cycle, name, spawned, status, hold_swap, submit_num, try_num,
-         user_at_host) = row
-        try:
-            itask = get_task_proxy(
-                name,
-                get_point(cycle),
-                status=status,
-                hold_swap=hold_swap,
-                has_spawned=bool(spawned),
-                submit_num=submit_num,
-                is_reload_or_restart=True,
-                message_queue=self.pool.message_queue)
-        except TaskNotDefinedError as exc:
-            if cylc.flags.debug:
-                ERR.error(traceback.format_exc())
-            else:
-                ERR.error(str(exc))
-            ERR.warning((
-                "ignoring task %s from the suite run database file\n"
-                "(the task definition has probably been deleted from the "
-                "suite).") % name)
-        except Exception:
-            ERR.error(traceback.format_exc())
-            ERR.error("could not load task %s" % name)
-        else:
-            if status in (TASK_STATUS_SUBMITTED, TASK_STATUS_RUNNING):
-                itask.state.set_prerequisites_all_satisfied()
-                # update the task proxy with user at host
-                try:
-                    itask.task_owner, itask.task_host = user_at_host.split(
-                        "@", 1)
-                except ValueError:
-                    itask.task_owner = None
-                    itask.task_host = user_at_host
-
-            elif status in (TASK_STATUS_SUBMIT_FAILED, TASK_STATUS_FAILED):
-                itask.state.set_prerequisites_all_satisfied()
-
-            elif status in (TASK_STATUS_QUEUED, TASK_STATUS_READY):
-                itask.state.set_prerequisites_all_satisfied()
-                # reset to waiting as these had not been submitted yet.
-                itask.state.set_state(TASK_STATUS_WAITING)
-
-            elif status in (TASK_STATUS_SUBMIT_RETRYING, TASK_STATUS_RETRYING):
-                itask.state.set_prerequisites_all_satisfied()
-
-            elif status == TASK_STATUS_SUCCEEDED:
-                itask.state.set_prerequisites_all_satisfied()
-                # TODO - just poll for outputs in the job status file.
-                itask.state.outputs.set_all_completed()
-
-            if user_at_host:
-                itask.summary['job_hosts'][int(submit_num)] = user_at_host
-            if hold_swap:
-                OUT.info("+ %s.%s %s (%s)" % (name, cycle, status, hold_swap))
-            else:
-                OUT.info("+ %s.%s %s" % (name, cycle, status))
-            self.pool.add_to_runahead_pool(itask)
-
-    def _load_task_action_timers(self, row_idx, row):
-        """Load a task action timer, e.g. event handlers, retry states."""
-        if row_idx == 0:
-            OUT.info("LOADING task action timers")
-        (
-            cycle, name, ctx_key_pickle, ctx_pickle, delays_pickle, num, delay,
-            timeout,
-        ) = row
-        id_ = TaskID.get(name, cycle)
-        itask = self.pool.get_task_by_id(id_)
-        if itask is None:
-            ERR.warning("%(id)s: task not found, skip" % {"id": id_})
-            return
-        ctx_key = "?"
-        try:
-            ctx_key = pickle.loads(str(ctx_key_pickle))
-            ctx = pickle.loads(str(ctx_pickle))
-            delays = pickle.loads(str(delays_pickle))
-            if ctx_key and ctx_key[0] in ["poll_timers", "try_timers"]:
-                getattr(itask, ctx_key[0])[ctx_key[1]] = TaskActionTimer(
-                    ctx, delays, num, delay, timeout)
-            else:
-                itask.event_handler_try_timers[ctx_key] = TaskActionTimer(
-                    ctx, delays, num, delay, timeout)
-        except (EOFError, TypeError, LookupError, ValueError):
-            ERR.warning(
-                "%(id)s: skip action timer %(ctx_key)s" %
-                {"id": id_, "ctx_key": ctx_key})
-            ERR.warning(traceback.format_exc())
-            return
-        OUT.info("+ %s.%s %s" % (name, cycle, ctx_key))
+    def process_queued_task_messages(self):
+        """Handle incoming task messages for each task proxy."""
+        queue = self.message_queue.get_queue()
+        task_id_messages = {}
+        while queue.qsize():
+            try:
+                task_id, priority, message = queue.get(block=False)
+            except Queue.Empty:
+                break
+            queue.task_done()
+            task_id_messages.setdefault(task_id, [])
+            task_id_messages[task_id].append((priority, message))
+        for itask in self.pool.get_tasks():
+            if itask.identity in task_id_messages:
+                for priority, message in task_id_messages[itask.identity]:
+                    self.task_events_mgr.process_message(
+                        itask, priority, message, is_incoming=True)
 
     def process_command_queue(self):
         """Process queued commands."""
@@ -689,7 +508,7 @@ conditions; see `cylc conditions`.
         while True:
             try:
                 name, args, kwargs = queue.get(False)
-            except Empty:
+            except Queue.Empty:
                 break
             args_string = ', '.join([str(a) for a in args])
             cmdstr = name + '(' + args_string
@@ -703,21 +522,21 @@ conditions; see `cylc conditions`.
                 n_warnings = getattr(self, "command_%s" % name)(
                     *args, **kwargs)
             except SchedulerStop:
-                self.log.info('Command succeeded: ' + cmdstr)
+                LOG.info('Command succeeded: ' + cmdstr)
                 raise
             except Exception as exc:
                 # Don't let a bad command bring the suite down.
-                self.log.warning(traceback.format_exc())
-                self.log.warning(str(exc))
-                self.log.warning('Command failed: ' + cmdstr)
+                LOG.warning(traceback.format_exc())
+                LOG.warning(str(exc))
+                LOG.warning('Command failed: ' + cmdstr)
             else:
                 if n_warnings:
-                    self.log.info(
+                    LOG.info(
                         'Command succeeded with %s warning(s): %s' %
                         (n_warnings, cmdstr))
                 else:
-                    self.log.info('Command succeeded: ' + cmdstr)
-                self.do_update_state_summary = True
+                    LOG.info('Command succeeded: ' + cmdstr)
+                cylc.flags.iflag = True
                 if name in self.PROC_CMDS:
                     self.do_process_tasks = True
             queue.task_done()
@@ -768,8 +587,9 @@ conditions; see `cylc conditions`.
 
     def info_get_task_jobfile_path(self, task_id):
         """Return task job file path."""
-        task_id = self.get_standardised_taskid(task_id)
-        return self.pool.get_task_jobfile_path(task_id)
+        name, point = TaskID.split(task_id)
+        return self.task_events_mgr.get_task_job_log(
+            self.suite, point, name, tail=self.task_job_mgr.JOB_FILE_BASE)
 
     def info_get_suite_info(self):
         """Return a dict containing the suite title and description."""
@@ -833,8 +653,7 @@ conditions; see `cylc conditions`.
 
     def _set_stop(self, stop_mode=None):
         """Set shutdown mode."""
-        SuiteProcPool.get_inst().stop_job_submission()
-        TaskProxy.stop_sim_mode_job_submission = True
+        self.proc_pool.stop_job_submission()
         if stop_mode is None:
             stop_mode = TaskPool.STOP_REQUEST_CLEAN
         self.stop_mode = stop_mode
@@ -870,13 +689,24 @@ conditions; see `cylc conditions`.
         """Release tasks."""
         return self.pool.release_tasks(items)
 
-    def command_poll_tasks(self, items):
+    def command_poll_tasks(self, items=None):
         """Poll all tasks or a task/family if options are provided."""
-        return self.pool.poll_task_jobs(items)
+        if self.run_mode == 'simulation':
+            return
+        itasks, bad_items = self.pool.filter_task_proxies(items)
+        self.task_job_mgr.poll_task_jobs(self.suite, itasks, items is not None)
+        return len(bad_items)
 
-    def command_kill_tasks(self, items):
+    def command_kill_tasks(self, items=None):
         """Kill all tasks or a task/family if options are provided."""
-        return self.pool.kill_task_jobs(items)
+        itasks, bad_items = self.pool.filter_task_proxies(items)
+        if self.run_mode == 'simulation':
+            for itask in itasks:
+                if itask.state.status in TASK_STATUSES_ACTIVE:
+                    itask.state.reset_state(TASK_STATUS_FAILED)
+            return len(bad_items)
+        self.task_job_mgr.kill_task_jobs(self.suite, itasks, items is not None)
+        return len(bad_items)
 
     def command_release_suite(self):
         """Release all task proxies in the suite."""
@@ -894,13 +724,13 @@ conditions; see `cylc conditions`.
         """Hold tasks AFTER this point (itask.point > point)."""
         point = self.get_standardised_point(point_string)
         self.hold_suite(point)
-        self.log.info(
+        LOG.info(
             "The suite will pause when all tasks have passed %s" % point)
 
     def command_set_verbosity(self, lvl):
         """Remove suite verbosity."""
-        self.log.logger.setLevel(lvl)
-        cylc.flags.debug = (lvl == logging.DEBUG)
+        LOG.logger.setLevel(lvl)
+        cylc.flags.debug = (lvl == DEBUG)
         return True, 'OK'
 
     def command_remove_cycle(self, point_string, spawn=False):
@@ -922,10 +752,13 @@ conditions; see `cylc conditions`.
 
     def command_reload_suite(self):
         """Reload suite configuration."""
-        self.log.info("Reloading the suite definition.")
+        LOG.info("Reloading the suite definition.")
         old_tasks = set(self.config.get_task_name_list())
         self.configure_suite(reconfigure=True)
-        self.pool.reconfigure(self.final_point)
+        self.pool.reconfigure(self.config, self.final_point)
+        self.task_events_mgr.mail_interval = self._get_cylc_conf(
+            "task event mail interval")
+        self.task_events_mgr.mail_footer = self._get_events_conf("mail footer")
 
         # Log tasks that have been added by the reload, removed tasks are
         # logged by the TaskPool.
@@ -934,13 +767,15 @@ conditions; see `cylc conditions`.
             LOG.warning("Added task: '%s'" % (task,))
 
         self.configure_suite_environment()
-        if self.gen_reference_log or self.reference_test_mode:
+        if self.options.genref or self.options.reftest:
             self.configure_reftest(recon=True)
-        self.pool.put_rundb_suite_params(
+        self.suite_db_mgr.put_suite_params(
+            self.run_mode,
             self.initial_point,
             self.final_point,
+            self.pool.is_held,
             self.config.cfg['cylc']['cycle point format'])
-        self.do_update_state_summary = True
+        cylc.flags.iflag = True
 
     def command_set_runahead(self, interval=None):
         """Set runahead limit."""
@@ -1076,21 +911,17 @@ conditions; see `cylc conditions`.
         """Load and process the suite definition."""
 
         if reconfigure:
-            self.pri_dao.take_checkpoints(
-                "reload-init", other_daos=[self.pub_dao])
+            self.suite_db_mgr.checkpoint("reload-init")
         elif self.is_restart:
             # This logic handles the lack of initial cycle point in "suite.rc".
             # Things that can't change on suite reload.
-            pri_db_path = os.path.join(
-                self.suite_srv_files_mgr.get_suite_srv_dir(self.suite),
-                CylcSuiteDAO.DB_FILE_BASE_NAME)
-            self.pri_dao = CylcSuiteDAO(pri_db_path)
-            self.pri_dao.select_suite_params(self._load_initial_cycle_point)
-            self.pri_dao.select_suite_template_vars(self._load_template_vars)
+            pri_dao = self.suite_db_mgr.get_pri_dao()
+            pri_dao.select_suite_params(self._load_initial_cycle_point)
+            pri_dao.select_suite_template_vars(self._load_template_vars)
             # Take checkpoint and commit immediately so that checkpoint can be
             # copied to the public database.
-            self.pri_dao.take_checkpoints("restart")
-            self.pri_dao.execute_queued_items()
+            pri_dao.take_checkpoints("restart")
+            pri_dao.execute_queued_items()
 
         self.load_suiterc(reconfigure)
 
@@ -1113,43 +944,11 @@ conditions; see `cylc conditions`.
             self.run_mode = self.config.run_mode
 
         if reconfigure:
+            # Things that can't change on suite reload.
             BroadcastServer.get_inst().linearized_ancestors = (
                 self.config.get_linearized_ancestors())
         else:
-            # Things that can't change on suite reload.
-            pri_db_path = os.path.join(
-                self.suite_srv_files_mgr.get_suite_srv_dir(self.suite),
-                CylcSuiteDAO.DB_FILE_BASE_NAME)
-            pub_db_path = os.path.join(
-                self.suite_run_dir, 'log', CylcSuiteDAO.DB_FILE_BASE_NAME)
-            if not self.is_restart:
-                # Remove database created by previous runs
-                try:
-                    os.unlink(pri_db_path)
-                except OSError:
-                    # Just in case the path is a directory!
-                    rmtree(pri_db_path, ignore_errors=True)
-            # Ensure that:
-            # * public database is in sync with private database
-            # * private database file is private
-            self.pri_dao = CylcSuiteDAO(pri_db_path)
-            os.chmod(pri_db_path, 0600)
-            self.pub_dao = CylcSuiteDAO(pub_db_path, is_public=True)
-            self._copy_pri_db_to_pub_db()
-            pub_db_path_symlink = os.path.join(
-                self.suite_run_dir, CylcSuiteDAO.OLD_DB_FILE_BASE_NAME)
-            try:
-                orig_source = os.readlink(pub_db_path_symlink)
-            except OSError:
-                orig_source = None
-            source = os.path.join('log', CylcSuiteDAO.DB_FILE_BASE_NAME)
-            if orig_source != source:
-                try:
-                    os.unlink(pub_db_path_symlink)
-                except OSError:
-                    pass
-                os.symlink(source, pub_db_path_symlink)
-
+            self.suite_db_mgr.on_suite_start(self.is_restart)
             if self.config.cfg['scheduling']['hold after point']:
                 self.pool_hold_point = get_point(
                     self.config.cfg['scheduling']['hold after point'])
@@ -1179,8 +978,8 @@ conditions; see `cylc conditions`.
                 attr = getattr(self, attr_name)
                 if callable(attr) and attr_name.startswith('info_'):
                     info_commands[attr_name.replace('info_', '')] = attr
-            self.info_interface = SuiteInfoServer(info_commands)
-            self.comms_daemon.connect(self.info_interface, COMMS_INFO_OBJ_NAME)
+            self.comms_daemon.connect(
+                SuiteInfoServer(info_commands), COMMS_INFO_OBJ_NAME)
 
             self.suite_log = SuiteLog.get_inst(self.suite)
             log_interface = SuiteLogServer(self.suite_log)
@@ -1189,10 +988,12 @@ conditions; see `cylc conditions`.
             self.suite_state = StateSummaryServer.get_inst(self.run_mode)
             self.comms_daemon.connect(self.suite_state, COMMS_STATE_OBJ_NAME)
 
+        self.suite_db_mgr.put_runtime_inheritance(self.config)
+
     def configure_suite_environment(self):
         """Configure suite environment."""
         # Pass static cylc and suite variables to job script generation code
-        JobFile.get_inst().set_suite_env({
+        self.task_job_mgr.job_file_writer.set_suite_env({
             'CYLC_UTC': str(cylc.flags.utc),
             'CYLC_DEBUG': str(cylc.flags.debug),
             'CYLC_VERBOSE': str(cylc.flags.verbose),
@@ -1203,7 +1004,7 @@ conditions; see `cylc conditions`.
         })
 
         # Make suite vars available to [cylc][environment]:
-        for var, val in JobFile.get_inst().suite_env.items():
+        for var, val in self.task_job_mgr.job_file_writer.suite_env.items():
             os.environ[var] = val
         # Set local values of variables that are potenitally task-specific
         # due to different directory paths on different task hosts. These
@@ -1223,23 +1024,20 @@ conditions; see `cylc conditions`.
         cenv = self.config.cfg['cylc']['environment'].copy()
         for var, val in cenv.items():
             cenv[var] = os.path.expandvars(val)
-        # path to suite bin directory for suite and task event handlers
+        # path to suite bin directory for suite and event handlers
         cenv['PATH'] = os.pathsep.join([
             os.path.join(self.suite_dir, 'bin'), os.environ['PATH']])
 
-        # Make [cylc][environment] available to task event handlers in worker
-        # processes,
-        self.task_event_handler_env = cenv
         # and to suite event handlers in this process.
         for var, val in cenv.items():
             os.environ[var] = val
 
     def configure_reftest(self, recon=False):
         """Configure the reference test."""
-        if self.gen_reference_log:
+        if self.options.genref:
             self.config.cfg['cylc']['log resolved dependencies'] = True
 
-        elif self.reference_test_mode:
+        elif self.options.reftest:
             rtc = self.config.cfg['cylc']['reference test']
             req = rtc['required run mode']
             if req and req != self.run_mode:
@@ -1273,125 +1071,28 @@ conditions; see `cylc conditions`.
                 timeout)
             self.config.cfg['cylc']['events']['reset timer'] = False
 
-    def run_event_handlers(self, event, message):
-        """Run a suite event handler."""
-        # Run suite event hooks in simulation and dummy mode ONLY if enabled
-        for mode_name in ['simulation', 'dummy']:
-            key = mode_name + ' mode'
-            if (self.run_mode == mode_name and
-                    self.config.cfg['cylc'][key]['disable suite event hooks']):
-                return
-        self._run_event_mail(event, message)
-        self._run_event_custom_handlers(event, message)
-
-    def _run_event_mail(self, event, message):
-        """Helper for "run_event_handlers", do mail notification."""
-        if event in self._get_events_conf('mail events', []):
-            # SMTP server
-            env = dict(os.environ)
-            mail_smtp = self._get_events_conf('mail smtp')
-            if mail_smtp:
-                env['smtp'] = mail_smtp
-            subject = '[suite %(event)s] %(suite)s' % {
-                'suite': self.suite, 'event': event}
-            stdin_str = ''
-            for name, value in [
-                    ('suite event', event),
-                    ('reason', message),
-                    ('suite', self.suite),
-                    ('host', self.host),
-                    ('port', self.port),
-                    ('owner', self.owner)]:
-                if value:
-                    stdin_str += '%s: %s\n' % (name, value)
-            mail_footer_tmpl = self._get_events_conf('mail footer')
-            if mail_footer_tmpl:
-                stdin_str += (mail_footer_tmpl + '\n') % {
-                    'host': self.host,
-                    'port': self.port,
-                    'owner': self.owner,
-                    'suite': self.suite}
-            ctx = SuiteProcContext(
-                (self.SUITE_EVENT_HANDLER, event),
-                [
-                    'mail',
-                    '-s', subject,
-                    '-r', self._get_events_conf(
-                        'mail from', 'notifications@' + get_suite_host()),
-                    self._get_events_conf('mail to', USER),
-                ],
-                env=env,
-                stdin_str=stdin_str)
-            if SuiteProcPool.get_inst().is_closed():
-                # Run command in foreground if process pool is closed
-                SuiteProcPool.get_inst().run_command(ctx)
-                self._run_event_handlers_callback(ctx)
-            else:
-                # Run command using process pool otherwise
-                SuiteProcPool.get_inst().put_command(
-                    ctx, self._run_event_mail_callback)
-
-    def _run_event_custom_handlers(self, event, message):
-        """Helper for "run_event_handlers", custom event handlers."""
-        # Look for event handlers
-        # 1. Handlers for specific event
-        # 2. General handlers
-        handlers = self._get_events_conf('%s handler' % event)
-        if (not handlers and
-                event in self._get_events_conf('handler events', [])):
-            handlers = self._get_events_conf('handlers')
-        if not handlers:
-            return
+    def run_event_handlers(self, event, reason):
+        """Run a suite event handler.
 
-        for i, handler in enumerate(handlers):
-            cmd_key = ('%s-%02d' % (self.SUITE_EVENT_HANDLER, i), event)
-            # Handler command may be a string for substitution
-            cmd = handler % {
-                'event': quote(event),
-                'suite': quote(self.suite),
-                'message': quote(message),
-            }
-            if cmd == handler:
-                # Nothing substituted, assume classic interface
-                cmd = "%s '%s' '%s' '%s'" % (
-                    handler, event, self.suite, message)
-            ctx = SuiteProcContext(
-                cmd_key, cmd, env=dict(os.environ), shell=True)
-            abort_on_error = self._get_events_conf(
-                'abort if %s handler fails' % event)
-            if abort_on_error or SuiteProcPool.get_inst().is_closed():
-                # Run command in foreground if abort on failure is set or if
-                # process pool is closed
-                SuiteProcPool.get_inst().run_command(ctx)
-                self._run_event_handlers_callback(
-                    ctx, abort_on_error=abort_on_error)
-            else:
-                # Run command using process pool otherwise
-                SuiteProcPool.get_inst().put_command(
-                    ctx, self._run_event_handlers_callback)
-
-    def _run_event_handlers_callback(self, ctx, abort_on_error=False):
-        """Callback on completion of a suite event handler."""
-        if ctx.ret_code:
-            self.log.warning(str(ctx))
-            ERR.error('%s EVENT HANDLER FAILED' % ctx.cmd_key[1])
-            if (ctx.cmd_key[1] == self.EVENT_SHUTDOWN and
-                    self.reference_test_mode):
+        Run suite event hooks in simulation and dummy mode ONLY if enabled.
+        """
+        try:
+            if (self.run_mode in ['simulation', 'dummy'] and
+                self.config.cfg['cylc']['simulation'][
+                    'disable suite event handlers']):
+                return
+        except KeyError:
+            pass
+        try:
+            self.suite_event_handler.handle(self.config, SuiteEventContext(
+                event, reason, self.suite, self.owner, self.host, self.port))
+        except SuiteEventError as exc:
+            if event == self.EVENT_SHUTDOWN and self.options.reftest:
                 ERR.error('SUITE REFERENCE TEST FAILED')
-            if abort_on_error:
-                raise SchedulerError(ctx.err)
-        else:
-            self.log.info(str(ctx))
-            if (ctx.cmd_key[1] == self.EVENT_SHUTDOWN and
-                    self.reference_test_mode):
-                OUT.info('SUITE REFERENCE TEST PASSED\n')
-
-    def _run_event_mail_callback(self, ctx):
-        """Callback the mail command for notification of a suite event."""
-        if ctx.ret_code:
-            self.log.warning(str(ctx))
+            raise SchedulerError(exc.args[0])
         else:
-            self.log.info(str(ctx))
+            if event == self.EVENT_SHUTDOWN and self.options.reftest:
+                OUT.info('SUITE REFERENCE TEST PASSED')
 
     def run(self):
         """Main loop."""
@@ -1399,15 +1100,15 @@ conditions; see `cylc conditions`.
             self.hold_suite(self.pool_hold_point)
 
         if self.options.start_held:
-            self.log.info("Held on start-up (no tasks will be submitted)")
+            LOG.info("Held on start-up (no tasks will be submitted)")
             self.hold_suite()
 
         self.run_event_handlers(self.EVENT_STARTUP, 'suite starting')
 
         self.profiler.log_memory("scheduler.py: begin run while loop")
-        proc_pool = SuiteProcPool.get_inst()
 
         time_next_fs_check = None
+        cylc.flags.iflag = True
 
         if self.options.profile_mode:
             previous_profile_point = 0
@@ -1422,12 +1123,13 @@ conditions; see `cylc conditions`.
 
             if self.pool.do_reload:
                 self.pool.reload_taskdefs()
-                self.do_update_state_summary = True
+                self.suite_db_mgr.checkpoint("reload-done")
+                cylc.flags.iflag = True
 
             self.process_command_queue()
             if self.pool.release_runahead_tasks():
-                self.do_update_state_summary = True
-            proc_pool.handle_results_async()
+                cylc.flags.iflag = True
+            self.proc_pool.handle_results_async()
 
             # External triggers must be matched now. If any are matched pflag
             # is set to tell process_tasks() that task processing is required.
@@ -1437,66 +1139,70 @@ conditions; see `cylc conditions`.
             # require renegotiation of dependencies, etc.
             if self.process_tasks():
                 if cylc.flags.debug:
-                    self.log.debug("BEGIN TASK PROCESSING")
+                    LOG.debug("BEGIN TASK PROCESSING")
                     time0 = time()
 
                 self.pool.match_dependencies()
-                if self.stop_mode is None and self.pool.submit_tasks():
-                    self.do_update_state_summary = True
+                if self.stop_mode is None:
+                    itasks = self.pool.get_ready_tasks()
+                    if itasks:
+                        cylc.flags.iflag = True
+                    if self.config.cfg['cylc']['log resolved dependencies']:
+                        for itask in itasks:
+                            if itask.local_job_file_path:
+                                continue
+                            deps = itask.state.get_resolved_dependencies()
+                            LOG.info('triggered off %s' % deps, itask=itask)
+                    self.task_job_mgr.submit_task_jobs(
+                        self.suite, itasks, self.run_mode == 'simulation')
                 for meth in [
                         self.pool.spawn_all_tasks,
                         self.pool.remove_spent_tasks,
                         self.pool.remove_suiciding_tasks]:
                     if meth():
-                        self.do_update_state_summary = True
+                        cylc.flags.iflag = True
 
                 BroadcastServer.get_inst().expire(self.pool.get_min_point())
 
                 if cylc.flags.debug:
-                    self.log.debug(
+                    LOG.debug(
                         "END TASK PROCESSING (took %s seconds)" %
                         (time() - time0))
 
-            self.pool.process_queued_task_messages()
-            self.process_queued_task_event_handlers()
+            self.process_queued_task_messages()
             self.process_command_queue()
-            has_changes = cylc.flags.iflag or self.do_update_state_summary
-            if has_changes:
-                self.pool.put_rundb_task_pool()
-                self.update_state_summary()
+            self.task_events_mgr.process_events(self)
+            self.suite_db_mgr.put_task_event_timers(self.task_events_mgr)
+            has_changes = cylc.flags.iflag
+            if cylc.flags.iflag:
+                self.suite_db_mgr.put_task_pool(self.pool)
+                self.update_state_summary()  # Will reset cylc.flags.iflag
             try:
-                self.pool.process_queued_db_ops()
+                self.suite_db_mgr.process_queued_ops()
             except OSError as err:
+                if cylc.flags.debug:
+                    ERR.debug(traceback.format_exc())
                 raise SchedulerError(str(err))
             # If public database is stuck, blast it away by copying the content
             # of the private database into it.
-            if self.pub_dao.n_tries >= self.pub_dao.MAX_TRIES:
-                try:
-                    self._copy_pri_db_to_pub_db()
-                except (IOError, OSError) as exc:
-                    # Something has to be very wrong here, so stop the suite
-                    raise SchedulerError(str(exc))
-                else:
-                    # No longer stuck
-                    self.log.warning(
-                        "%(pub_db_name)s: recovered from %(pri_db_name)s" % {
-                            "pub_db_name": self.pub_dao.db_file_name,
-                            "pri_db_name": self.pri_dao.db_file_name})
-                    self.pub_dao.n_tries = 0
-
+            try:
+                self.suite_db_mgr.recover_pub_from_pri()
+            except (IOError, OSError) as exc:
+                # Something has to be very wrong here, so stop the suite
+                raise SchedulerError(str(exc))
             self.check_suite_timer()
             if self._get_events_conf(self.EVENT_INACTIVITY_TIMEOUT):
                 self.check_suite_inactive()
             # check submission and execution timeout and polling timers
             if self.run_mode != 'simulation':
-                self.pool.check_task_timers()
+                self.task_job_mgr.check_task_jobs(self.suite, self.pool)
 
             # Does the suite need to shutdown on task failure?
             if (self.config.cfg['cylc']['abort if any task fails'] and
                     self.pool.any_task_failed()):
                 # Task failure + abort if any task fails
                 self._set_stop(TaskPool.STOP_AUTO_ON_TASK_FAILURE)
-            elif self.reference_test_mode and self.ref_test_allowed_failures:
+            elif self.options.reftest and self.ref_test_allowed_failures:
                 # In reference test mode and unexpected failures occured
                 bad_tasks = []
                 for itask in self.pool.get_failed_tasks():
@@ -1518,20 +1224,20 @@ conditions; see `cylc conditions`.
             # Is the suite ready to shut down now?
             if self.pool.can_stop(self.stop_mode):
                 self.update_state_summary()
-                proc_pool.close()
+                self.proc_pool.close()
                 if self.stop_mode != TaskPool.STOP_REQUEST_NOW_NOW:
                     # Wait for process pool to complete,
                     # unless --now --now is requested
                     stop_process_pool_empty_msg = (
                         "Waiting for the command process pool to empty" +
                         " for shutdown")
-                    while not proc_pool.is_dead():
+                    while not self.proc_pool.is_dead():
                         sleep(self.INTERVAL_STOP_PROCESS_POOL_EMPTY)
                         if stop_process_pool_empty_msg:
-                            self.log.info(stop_process_pool_empty_msg)
+                            LOG.info(stop_process_pool_empty_msg)
                             OUT.info(stop_process_pool_empty_msg)
                             stop_process_pool_empty_msg = None
-                        proc_pool.handle_results_async()
+                        self.proc_pool.handle_results_async()
                         self.process_command_queue()
                 if self.options.profile_mode:
                     self.profiler.log_memory(
@@ -1543,8 +1249,8 @@ conditions; see `cylc conditions`.
                     raise SchedulerStop(self.stop_mode)
             elif (self.time_next_kill is not None and
                     time() > self.time_next_kill):
-                self.pool.poll_task_jobs()
-                self.pool.kill_task_jobs()
+                self.command_poll_tasks()
+                self.command_kill_tasks()
                 self.time_next_kill = time() + self.INTERVAL_STOP_KILL
 
             # Suite health checks
@@ -1596,7 +1302,6 @@ conditions; see `cylc conditions`.
             self.will_stop_at(), self.config.ns_defn_order,
             self.pool.do_reload)
         cylc.flags.iflag = False
-        self.do_update_state_summary = False
         self.is_stalled = False
         if self.suite_timer_active:
             self.suite_timer_active = False
@@ -1617,7 +1322,7 @@ conditions; see `cylc conditions`.
                 get_seconds_as_interval_string(
                     self._get_events_conf(self.EVENT_TIMEOUT))
             )
-            self.log.warning(message)
+            LOG.warning(message)
             self.run_event_handlers(self.EVENT_TIMEOUT, message)
             if self._get_events_conf('abort on timeout'):
                 raise SchedulerError('Abort on suite timeout is set')
@@ -1631,7 +1336,7 @@ conditions; see `cylc conditions`.
             message = 'suite timed out after inactivity for %s' % (
                 get_seconds_as_interval_string(
                     self._get_events_conf(self.EVENT_INACTIVITY_TIMEOUT)))
-            self.log.warning(message)
+            LOG.warning(message)
             self.run_event_handlers(self.EVENT_INACTIVITY_TIMEOUT, message)
             if self._get_events_conf('abort on inactivity'):
                 raise SchedulerError('Abort on suite inactivity is set')
@@ -1643,7 +1348,7 @@ conditions; see `cylc conditions`.
         self.is_stalled = self.pool.is_stalled()
         if self.is_stalled:
             message = 'suite stalled'
-            self.log.warning(message)
+            LOG.warning(message)
             self.run_event_handlers(self.EVENT_STALLED, message)
             self.pool.report_stalled_task_deps()
             if self._get_events_conf('abort on stalled'):
@@ -1670,257 +1375,16 @@ conditions; see `cylc conditions`.
                     self._get_events_conf('reset inactivity timer')):
                 self.set_suite_inactivity_timer()
 
+        self.pool.set_expired_tasks()
         if self.pool.waiting_tasks_ready():
             process = True
 
-        if self.run_mode == 'simulation' and self.pool.sim_time_check():
+        if self.run_mode == 'simulation' and self.pool.sim_time_check(
+                self.message_queue):
             process = True
 
-        # if not process:
-        #    # If we neglect to set cylc.flags.pflag on some event that
-        #    # makes re-negotiation of dependencies necessary then if
-        #    # that event ever happens in isolation the suite could stall
-        #    # unless manually nudged ("cylc nudge SUITE").  If this
-        #    # happens turn on debug logging to see what happens
-        #    # immediately before the stall,
-        #    # then set cylc.flags.pflag = True in
-        #    # the corresponding code section. Alternatively,
-        #    # for an undiagnosed stall you can uncomment this section to
-        #    # stimulate task processing every few seconds even during
-        #    # lulls in activity.  THIS SHOULD NOT BE NECESSARY, HOWEVER.
-        #    if not self.nudge_timer_on:
-        #        self.nudge_timer_start = now()
-        #        self.nudge_timer_on = True
-        #    else:
-        #        timeout = self.nudge_timer_start + \
-        #              datetime.timedelta(seconds=self.auto_nudge_interval)
-        #      if now() > timeout:
-        #          process = True
-        #          self.nudge_timer_on = False
-
         return process
 
-    def process_queued_task_event_handlers(self):
-        """Process task event handlers."""
-        ctx_groups = {}
-        env = None
-        now = time()
-        for itask in self.pool.get_tasks():
-            for key, try_timer in itask.event_handler_try_timers.items():
-                # This should not happen, ignore for now.
-                if try_timer.ctx is None:
-                    del itask.event_handler_try_timers[key]
-                    continue
-                if try_timer.is_waiting:
-                    continue
-                # Set timer if timeout is None.
-                if not try_timer.is_timeout_set():
-                    if try_timer.next() is None:
-                        itask.log(logging.WARNING, "%s failed" % str(key))
-                        del itask.event_handler_try_timers[key]
-                        continue
-                    # Report retries and delayed 1st try
-                    tmpl = None
-                    if try_timer.num > 1:
-                        tmpl = "%s failed, retrying in %s (after %s)"
-                    elif try_timer.delay:
-                        tmpl = "%s will run after %s (after %s)"
-                    if tmpl:
-                        itask.log(logging.DEBUG, tmpl % (
-                            str(key),
-                            try_timer.delay_as_seconds(),
-                            try_timer.timeout_as_str()))
-                # Ready to run?
-                if not try_timer.is_delay_done() or (
-                    # Avoid flooding user's mail box with mail notification.
-                    # Group together as many notifications as possible within a
-                    # given interval.
-                    try_timer.ctx.ctx_type == TaskProxy.EVENT_MAIL and
-                    not self.stop_mode and
-                    self.next_task_event_mail_time is not None and
-                    self.next_task_event_mail_time > now
-                ):
-                    continue
-
-                try_timer.set_waiting()
-                if try_timer.ctx.ctx_type == TaskProxy.CUSTOM_EVENT_HANDLER:
-                    # Run custom event handlers on their own
-                    if env is None:
-                        env = dict(os.environ)
-                        if self.task_event_handler_env:
-                            env.update(self.task_event_handler_env)
-                    SuiteProcPool.get_inst().put_command(
-                        SuiteProcContext(
-                            key, try_timer.ctx.cmd, env=env, shell=True,
-                        ),
-                        itask.custom_event_handler_callback)
-                else:
-                    # Group together built-in event handlers, where possible
-                    if try_timer.ctx not in ctx_groups:
-                        ctx_groups[try_timer.ctx] = []
-                    # "itask.submit_num" may have moved on at this point
-                    key1, submit_num = key
-                    ctx_groups[try_timer.ctx].append(
-                        (key1, str(itask.point), itask.tdef.name, submit_num))
-
-        next_task_event_mail_time = (
-            now + self._get_cylc_conf("task event mail interval"))
-        for ctx, id_keys in ctx_groups.items():
-            if ctx.ctx_type == TaskProxy.EVENT_MAIL:
-                # Set next_task_event_mail_time if any mail sent
-                self.next_task_event_mail_time = next_task_event_mail_time
-                self._process_task_event_email(ctx, id_keys)
-            elif ctx.ctx_type == TaskProxy.JOB_LOGS_RETRIEVE:
-                self._process_task_job_logs_retrieval(ctx, id_keys)
-
-    def _process_task_event_email(self, ctx, id_keys):
-        """Process event notification, by email."""
-        if len(id_keys) == 1:
-            # 1 event from 1 task
-            (_, event), point, name, submit_num = id_keys[0]
-            subject = "[%s/%s/%02d %s] %s" % (
-                point, name, submit_num, event, self.suite)
-        else:
-            event_set = set([id_key[0][1] for id_key in id_keys])
-            if len(event_set) == 1:
-                # 1 event from n tasks
-                subject = "[%d tasks %s] %s" % (
-                    len(id_keys), event_set.pop(), self.suite)
-            else:
-                # n events from n tasks
-                subject = "[%d task events] %s" % (len(id_keys), self.suite)
-        cmd = ["mail", "-s", subject]
-        # From: and To:
-        cmd.append("-r")
-        cmd.append(ctx.mail_from)
-        cmd.append(ctx.mail_to)
-        # STDIN for mail, tasks
-        stdin_str = ""
-        for id_key in sorted(id_keys):
-            (_, event), point, name, submit_num = id_key
-            stdin_str += "%s: %s/%s/%02d\n" % (event, point, name, submit_num)
-        # STDIN for mail, event info + suite detail
-        stdin_str += "\n"
-        for name, value in [
-                ('suite', self.suite),
-                ("host", self.host),
-                ("port", self.port),
-                ("owner", self.owner)]:
-            if value:
-                stdin_str += "%s: %s\n" % (name, value)
-        mail_footer_tmpl = self._get_events_conf("mail footer")
-        if mail_footer_tmpl:
-            stdin_str += (mail_footer_tmpl + "\n") % {
-                "host": self.host,
-                "port": self.port,
-                "owner": self.owner,
-                "suite": self.suite}
-        # SMTP server
-        env = dict(os.environ)
-        mail_smtp = ctx.mail_smtp
-        if mail_smtp:
-            env["smtp"] = mail_smtp
-        SuiteProcPool.get_inst().put_command(
-            SuiteProcContext(
-                ctx, cmd, env=env, stdin_str=stdin_str, id_keys=id_keys,
-            ),
-            self._task_event_email_callback)
-
-    def _task_event_email_callback(self, ctx):
-        """Call back when email notification command exits."""
-        tasks = {}
-        for itask in self.pool.get_tasks():
-            if itask.point is not None and itask.submit_num:
-                tasks[(str(itask.point), itask.tdef.name)] = itask
-        for id_key in ctx.cmd_kwargs["id_keys"]:
-            key1, point, name, submit_num = id_key
-            try:
-                itask = tasks[(point, name)]
-                try_timers = itask.event_handler_try_timers
-                if ctx.ret_code == 0:
-                    del try_timers[(key1, submit_num)]
-                    log_ctx = SuiteProcContext((key1, submit_num), None)
-                    log_ctx.ret_code = 0
-                    itask.command_log(log_ctx)
-                else:
-                    try_timers[(key1, submit_num)].unset_waiting()
-            except KeyError:
-                if cylc.flags.debug:
-                    ERR.debug(traceback.format_exc())
-
-    def _process_task_job_logs_retrieval(self, ctx, id_keys):
-        """Process retrieval of task job logs from remote user at host."""
-        if ctx.user_at_host and "@" in ctx.user_at_host:
-            s_user, s_host = ctx.user_at_host.split("@", 1)
-        else:
-            s_user, s_host = (None, ctx.user_at_host)
-        ssh_tmpl = str(GLOBAL_CFG.get_host_item(
-            "remote shell template", s_host, s_user))
-        rsync_str = str(GLOBAL_CFG.get_host_item(
-            "retrieve job logs command", s_host, s_user))
-
-        cmd = shlex.split(rsync_str) + ["--rsh=" + ssh_tmpl]
-        if cylc.flags.debug:
-            cmd.append("-v")
-        if ctx.max_size:
-            cmd.append("--max-size=%s" % (ctx.max_size,))
-        # Includes and excludes
-        includes = set()
-        for _, point, name, submit_num in id_keys:
-            # Include relevant directories, all levels needed
-            includes.add("/%s" % (point))
-            includes.add("/%s/%s" % (point, name))
-            includes.add("/%s/%s/%02d" % (point, name, submit_num))
-            includes.add("/%s/%s/%02d/**" % (point, name, submit_num))
-        cmd += ["--include=%s" % (include) for include in sorted(includes)]
-        cmd.append("--exclude=/**")  # exclude everything else
-        # Remote source
-        cmd.append(ctx.user_at_host + ":" + GLOBAL_CFG.get_derived_host_item(
-            self.suite, "suite job log directory", s_host, s_user) + "/")
-        # Local target
-        cmd.append(GLOBAL_CFG.get_derived_host_item(
-            self.suite, "suite job log directory") + "/")
-        SuiteProcPool.get_inst().put_command(
-            SuiteProcContext(ctx, cmd, env=dict(os.environ), id_keys=id_keys),
-            self._task_job_logs_retrieval_callback)
-
-    def _task_job_logs_retrieval_callback(self, ctx):
-        """Call back when log job retrieval completes."""
-        tasks = {}
-        for itask in self.pool.get_tasks():
-            if itask.point is not None and itask.submit_num:
-                tasks[(str(itask.point), itask.tdef.name)] = itask
-        for id_key in ctx.cmd_kwargs["id_keys"]:
-            key1, point, name, submit_num = id_key
-            try:
-                itask = tasks[(point, name)]
-                try_timers = itask.event_handler_try_timers
-                # All completed jobs are expected to have a "job.out".
-                names = ["job.out"]
-                # Failed jobs are expected to have a "job.err".
-                if itask.state.status != TASK_STATUS_SUCCEEDED:
-                    names.append("job.err")
-                name_oks = {}
-                for name in names:
-                    name_oks[name] = os.path.exists(itask.get_job_log_path(
-                        itask.HEAD_MODE_LOCAL, submit_num, name))
-                # All expected paths must exist to record a good attempt
-                log_ctx = SuiteProcContext((key1, submit_num), None)
-                if all(name_oks.values()):
-                    log_ctx.ret_code = 0
-                    del try_timers[(key1, submit_num)]
-                else:
-                    log_ctx.ret_code = 1
-                    log_ctx.err = "File(s) not retrieved:"
-                    for name, exist_ok in sorted(name_oks.items()):
-                        if not exist_ok:
-                            log_ctx.err += " %s" % name
-                    try_timers[(key1, submit_num)].unset_waiting()
-                itask.command_log(log_ctx)
-            except KeyError:
-                if cylc.flags.debug:
-                    ERR.debug(traceback.format_exc())
-
     def shutdown(self, reason=None):
         """Shutdown the suite."""
         msg = "Suite shutting down"
@@ -1935,10 +1399,9 @@ conditions; see `cylc conditions`.
 
         # The getattr() calls and if tests below are used in case the
         # suite is not fully configured before the shutdown is called.
-        if getattr(self, "log", None) is not None:
-            self.log.info(msg)
+        LOG.info(msg)
 
-        if self.gen_reference_log:
+        if self.options.genref:
             try:
                 handle = open(
                     os.path.join(self.config.fdir, 'reference.log'), 'wb')
@@ -1952,25 +1415,25 @@ conditions; see `cylc conditions`.
         if self.pool is not None:
             self.pool.warn_stop_orphans()
             try:
-                self.pool.put_rundb_task_pool()
-                self.pool.process_queued_db_ops()
+                self.suite_db_mgr.put_task_event_timers(self.task_events_mgr)
+                self.suite_db_mgr.put_task_pool(self.pool)
+                self.suite_db_mgr.process_queued_ops()
             except Exception as exc:
                 ERR.error(str(exc))
 
-        proc_pool = SuiteProcPool.get_inst()
-        if proc_pool:
-            if not proc_pool.is_dead():
+        if self.proc_pool:
+            if not self.proc_pool.is_dead():
                 # e.g. KeyboardInterrupt
-                proc_pool.terminate()
-            proc_pool.join()
-            proc_pool.handle_results_async()
+                self.proc_pool.terminate()
+            self.proc_pool.join()
+            self.proc_pool.handle_results_async()
 
         if self.comms_daemon:
             ifaces = [self.command_queue,
                       SuiteIdServer.get_inst(), StateSummaryServer.get_inst(),
                       ExtTriggerServer.get_inst(), BroadcastServer.get_inst()]
             if self.pool is not None:
-                ifaces.append(self.pool.message_queue)
+                ifaces.append(self.message_queue)
             for iface in ifaces:
                 try:
                     self.comms_daemon.disconnect(iface)
@@ -1990,12 +1453,11 @@ conditions; see `cylc conditions`.
             except OSError as exc:
                 ERR.warning("failed to remove suite contact file: %s\n%s\n" % (
                     fname, exc))
-        RemoteJobHostManager.get_inst().unlink_suite_contact_files(self.suite)
+            if self.task_job_mgr:
+                self.task_job_mgr.unlink_hosts_contacts(self.suite)
 
         # disconnect from suite-db, stop db queue
-        if getattr(self, "db", None) is not None:
-            self.pri_dao.close()
-            self.pub_dao.close()
+        self.suite_db_mgr.on_suite_shutdown()
 
         if getattr(self, "config", None) is not None:
             # run shutdown handlers
@@ -2007,12 +1469,12 @@ conditions; see `cylc conditions`.
         """Set stop point."""
         stop_point = get_point(stop_point_string)
         self.stop_point = stop_point
-        self.log.info("Setting stop cycle point: %s" % stop_point_string)
+        LOG.info("Setting stop cycle point: %s" % stop_point_string)
         self.pool.set_stop_point(self.stop_point)
 
     def set_stop_clock(self, unix_time, date_time_string):
         """Set stop clock time."""
-        self.log.info("Setting stop clock time: %s (unix time: %s)" % (
+        LOG.info("Setting stop clock time: %s (unix time: %s)" % (
             date_time_string, unix_time))
         self.stop_clock_time = unix_time
         self.stop_clock_time_string = date_time_string
@@ -2022,34 +1484,38 @@ conditions; see `cylc conditions`.
         name = TaskID.split(task_id)[0]
         if name in self.config.get_task_name_list():
             task_id = self.get_standardised_taskid(task_id)
-            self.log.info("Setting stop task: " + task_id)
+            LOG.info("Setting stop task: " + task_id)
             self.stop_task = task_id
         else:
-            self.log.warning(
-                "Requested stop task name does not exist: %s" % name)
+            LOG.warning("Requested stop task name does not exist: %s" % name)
 
     def stop_task_done(self):
         """Return True if stop task has succeeded."""
-        id_ = self.stop_task
-        if (id_ is None or not self.pool.task_succeeded(id_)):
+        if self.stop_task and self.pool.task_succeeded(self.stop_task):
+            LOG.info("Stop task %s finished" % self.stop_task)
+            return True
+        else:
             return False
-        self.log.info("Stop task " + id_ + " finished")
-        return True
 
     def hold_suite(self, point=None):
         """Hold all tasks in suite."""
         if point is None:
             self.pool.hold_all_tasks()
+            sdm = self.suite_db_mgr
+            sdm.db_inserts_map[sdm.TABLE_SUITE_PARAMS].append(
+                {"key": "is_held", "value": 1})
         else:
-            self.log.info("Setting suite hold cycle point: " + str(point))
+            LOG.info("Setting suite hold cycle point: " + str(point))
             self.pool.set_hold_point(point)
 
     def release_suite(self):
         """Release (un-hold) all tasks in suite."""
         if self.pool.is_held:
-            self.log.info("RELEASE: new tasks will be queued when ready")
+            LOG.info("RELEASE: new tasks will be queued when ready")
         self.pool.set_hold_point(None)
         self.pool.release_all_tasks()
+        sdm = self.suite_db_mgr
+        sdm.db_deletes_map[sdm.TABLE_SUITE_PARAMS].append({"key": "is_held"})
 
     def will_stop_at(self):
         """Return stop point, if set."""
@@ -2085,11 +1551,20 @@ conditions; see `cylc conditions`.
 
     def command_dry_run_tasks(self, items):
         """Dry-run tasks, e.g. edit run."""
-        return self.pool.dry_run_task(items)
+        itasks, bad_items = self.pool.filter_task_proxies(items)
+        n_warnings = len(bad_items)
+        if len(itasks) > 1:
+            LOG.warning("Unique task match not found: %s" % items)
+            return n_warnings + 1
+        if self.task_job_mgr.prep_submit_task_jobs(
+                self.suite, [itasks[0]], dry_run=True):
+            return n_warnings
+        else:
+            return n_warnings + 1
 
-    def command_reset_task_states(self, items, state=None):
+    def command_reset_task_states(self, items, state=None, outputs=None):
         """Reset the state of tasks."""
-        return self.pool.reset_task_states(items, state)
+        return self.pool.reset_task_states(items, state, outputs)
 
     def command_spawn_tasks(self, items):
         """Force spawn task successors."""
@@ -2097,8 +1572,7 @@ conditions; see `cylc conditions`.
 
     def command_take_checkpoints(self, items):
         """Insert current task_pool, etc to checkpoints tables."""
-        return self.pri_dao.take_checkpoints(
-            items[0], other_daos=[self.pub_dao])
+        return self.suite_db_mgr.checkpoint(items[0])
 
     def filter_initial_task_list(self, inlist):
         """Return list of initial tasks after applying a filter."""
@@ -2124,35 +1598,12 @@ conditions; see `cylc conditions`.
                     self.stop_clock_time
                 )
             )
-            self.log.info("Wall clock stop time reached: " + str(time_point))
+            LOG.info("Wall clock stop time reached: %s" % time_point)
             self.stop_clock_time = None
             return True
         else:
             return False
 
-    def _copy_pri_db_to_pub_db(self):
-        """Copy content of primary database file to public database file.
-
-        Use temporary file to ensure that we do not end up with a partial file.
-
-        """
-        temp_pub_db_file_name = None
-        self.pub_dao.close()
-        try:
-            self.pub_dao.conn = None  # reset connection
-            open(self.pub_dao.db_file_name, "a").close()  # touch
-            st_mode = os.stat(self.pub_dao.db_file_name).st_mode
-            temp_pub_db_file_name = mkstemp(
-                prefix=self.pub_dao.DB_FILE_BASE_NAME,
-                dir=os.path.dirname(self.pub_dao.db_file_name))[1]
-            copy(self.pri_dao.db_file_name, temp_pub_db_file_name)
-            os.rename(temp_pub_db_file_name, self.pub_dao.db_file_name)
-            os.chmod(self.pub_dao.db_file_name, st_mode)
-        except (IOError, OSError):
-            if temp_pub_db_file_name:
-                os.unlink(temp_pub_db_file_name)
-            raise
-
     def _update_profile_info(self, category, amount, amount_format="%s"):
         """Update the 1, 5, 15 minute dt averages for a given category."""
         now = time()
@@ -2178,7 +1629,7 @@ conditions; see `cylc conditions`.
             averages[minute_num] = sum(minute_amounts) / len(minute_amounts)
             output_text += (" %d: " + amount_format) % (
                 minute_num, averages[minute_num])
-        self.log.info(output_text)
+        LOG.info(output_text)
 
     def _update_cpu_usage(self):
         """Obtain CPU usage statistics."""
@@ -2186,7 +1637,7 @@ conditions; see `cylc conditions`.
         try:
             cpu_frac = float(proc.communicate()[0])
         except (TypeError, OSError, IOError, ValueError) as exc:
-            self.log.warning("Cannot get CPU % statistics: %s" % exc)
+            LOG.warning("Cannot get CPU % statistics: %s" % exc)
             return
         self._update_profile_info("CPU %", cpu_frac, amount_format="%.1f")
 
@@ -2204,14 +1655,5 @@ conditions; see `cylc conditions`.
 
     def _get_events_conf(self, key, default=None):
         """Return a named [cylc][[events]] configuration."""
-        for getter in [
-                self.config.cfg['cylc']['events'],
-                GLOBAL_CFG.get(['cylc', 'events'])]:
-            try:
-                value = getter[key]
-            except KeyError:
-                pass
-            else:
-                if value is not None:
-                    return value
-        return default
+        return self.suite_event_handler.get_events_conf(
+            self.config, key, default)
diff --git a/lib/cylc/scheduler_cli.py b/lib/cylc/scheduler_cli.py
index 76a3bfd..d6f393c 100644
--- a/lib/cylc/scheduler_cli.py
+++ b/lib/cylc/scheduler_cli.py
@@ -144,9 +144,9 @@ def parse_commandline(is_restart):
 
     parser.add_option(
         "-m", "--mode",
-        help="Run mode: live, simulation, or dummy; default is live.",
+        help="Run mode: live, dummy, dummy-local, simulation (default live).",
         metavar="STRING", action="store", default='live', dest="run_mode",
-        choices=["live", "dummy", "simulation"])
+        choices=["live", "dummy", "dummy-local", "simulation"])
 
     parser.add_option(
         "--reference-log",
diff --git a/lib/cylc/suite_db_mgr.py b/lib/cylc/suite_db_mgr.py
new file mode 100644
index 0000000..b448d83
--- /dev/null
+++ b/lib/cylc/suite_db_mgr.py
@@ -0,0 +1,392 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2017 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""Manage the suite runtime private and public databases.
+
+This module provides the logic to:
+* Create or initialise database file on start up.
+* Queue database operations.
+* Encapsulate logic relevant to database operations.
+* Recover public run database file lock.
+* Manage existing run database files on restart.
+"""
+
+import os
+import pickle
+from shutil import copy, rmtree
+from subprocess import call
+from tempfile import mkstemp
+
+from cylc.network.suite_broadcast_server import BroadcastServer
+from cylc.rundb import CylcSuiteDAO
+from cylc.suite_logging import ERR, LOG, OUT
+from cylc.wallclock import get_current_time_string
+
+
+class SuiteDatabaseManager(object):
+    """Manage the suite runtime private and public databases."""
+
+    TABLE_CHECKPOINT_ID = CylcSuiteDAO.TABLE_CHECKPOINT_ID
+    TABLE_INHERITANCE = CylcSuiteDAO.TABLE_INHERITANCE
+    TABLE_SUITE_PARAMS = CylcSuiteDAO.TABLE_SUITE_PARAMS
+    TABLE_SUITE_TEMPLATE_VARS = CylcSuiteDAO.TABLE_SUITE_TEMPLATE_VARS
+    TABLE_TASK_ACTION_TIMERS = CylcSuiteDAO.TABLE_TASK_ACTION_TIMERS
+    TABLE_TASK_POOL = CylcSuiteDAO.TABLE_TASK_POOL
+    TABLE_TASK_STATES = CylcSuiteDAO.TABLE_TASK_STATES
+
+    def __init__(self, pri_d=None, pub_d=None):
+        self.pri_path = None
+        if pri_d:
+            self.pri_path = os.path.join(pri_d, CylcSuiteDAO.DB_FILE_BASE_NAME)
+        self.pub_path = None
+        if pub_d:
+            self.pub_path = os.path.join(pub_d, CylcSuiteDAO.DB_FILE_BASE_NAME)
+        self.pri_dao = None
+        self.pub_dao = None
+
+        self.db_deletes_map = {
+            self.TABLE_SUITE_PARAMS: [],
+            self.TABLE_TASK_POOL: [],
+            self.TABLE_TASK_ACTION_TIMERS: []}
+        self.db_inserts_map = {
+            self.TABLE_INHERITANCE: [],
+            self.TABLE_SUITE_PARAMS: [],
+            self.TABLE_SUITE_TEMPLATE_VARS: [],
+            self.TABLE_CHECKPOINT_ID: [],
+            self.TABLE_TASK_POOL: [],
+            self.TABLE_TASK_ACTION_TIMERS: []}
+        self.db_updates_map = {}
+
+    def checkpoint(self, name):
+        """Checkpoint the task pool, etc."""
+        return self.pri_dao.take_checkpoints(name, other_daos=[self.pub_dao])
+
+    def copy_pri_to_pub(self):
+        """Copy content of primary database file to public database file.
+
+        Use temporary file to ensure that we do not end up with a partial file.
+
+        """
+        temp_pub_db_file_name = None
+        self.pub_dao.close()
+        try:
+            self.pub_dao.conn = None  # reset connection
+            open(self.pub_dao.db_file_name, "a").close()  # touch
+            st_mode = os.stat(self.pub_dao.db_file_name).st_mode
+            temp_pub_db_file_name = mkstemp(
+                prefix=self.pub_dao.DB_FILE_BASE_NAME,
+                dir=os.path.dirname(self.pub_dao.db_file_name))[1]
+            copy(self.pri_dao.db_file_name, temp_pub_db_file_name)
+            os.rename(temp_pub_db_file_name, self.pub_dao.db_file_name)
+            os.chmod(self.pub_dao.db_file_name, st_mode)
+        except (IOError, OSError):
+            if temp_pub_db_file_name:
+                os.unlink(temp_pub_db_file_name)
+            raise
+
+    def get_pri_dao(self):
+        """Return the primary DAO."""
+        return CylcSuiteDAO(self.pri_path)
+
+    def on_suite_start(self, is_restart):
+        """Initialise data access objects.
+
+        Ensure that:
+        * private database file is private
+        * public database is in sync with private database
+        """
+        if not is_restart:
+            try:
+                os.unlink(self.pri_path)
+            except OSError:
+                # Just in case the path is a directory!
+                rmtree(self.pri_path, ignore_errors=True)
+        self.pri_dao = self.get_pri_dao()
+        os.chmod(self.pri_path, 0600)
+        self.pub_dao = CylcSuiteDAO(self.pub_path, is_public=True)
+        self.copy_pri_to_pub()
+        pub_db_path_symlink = os.path.join(
+            os.path.dirname(os.path.dirname(self.pub_path)),
+            CylcSuiteDAO.OLD_DB_FILE_BASE_NAME)
+        try:
+            orig_source = os.readlink(pub_db_path_symlink)
+        except OSError:
+            orig_source = None
+        source = os.path.join('log', CylcSuiteDAO.DB_FILE_BASE_NAME)
+        if orig_source != source:
+            try:
+                os.unlink(pub_db_path_symlink)
+            except OSError:
+                pass
+            os.symlink(source, pub_db_path_symlink)
+
+    def on_suite_shutdown(self):
+        """Close data access objects."""
+        if self.pri_dao:
+            self.pri_dao.close()
+            self.pri_dao = None
+        if self.pub_dao:
+            self.pub_dao.close()
+            self.pub_dao = None
+
+    def process_queued_ops(self):
+        """Apply queued delete/insert/update statements to the databases."""
+        # Record suite parameters and tasks in pool
+        # Record any broadcast settings to be dumped out
+        for obj in self, BroadcastServer.get_inst():
+            if any(obj.db_deletes_map.values()):
+                for table_name, db_deletes in sorted(
+                        obj.db_deletes_map.items()):
+                    while db_deletes:
+                        where_args = db_deletes.pop(0)
+                        self.pri_dao.add_delete_item(table_name, where_args)
+                        self.pub_dao.add_delete_item(table_name, where_args)
+            if any(obj.db_inserts_map.values()):
+                for table_name, db_inserts in sorted(
+                        obj.db_inserts_map.items()):
+                    while db_inserts:
+                        db_insert = db_inserts.pop(0)
+                        self.pri_dao.add_insert_item(table_name, db_insert)
+                        self.pub_dao.add_insert_item(table_name, db_insert)
+            if (hasattr(obj, 'db_updates_map') and
+                    any(obj.db_updates_map.values())):
+                for table_name, db_updates in sorted(
+                        obj.db_updates_map.items()):
+                    while db_updates:
+                        set_args, where_args = db_updates.pop(0)
+                        self.pri_dao.add_update_item(
+                            table_name, set_args, where_args)
+                        self.pub_dao.add_update_item(
+                            table_name, set_args, where_args)
+
+        # Previously, we used a separate thread for database writes. This has
+        # now been removed. For the private database, there is no real
+        # advantage in using a separate thread as it needs to be always in sync
+        # with what is current. For the public database, which does not need to
+        # be fully in sync, there is some advantage of using a separate
+        # thread/process, if writing to it becomes a bottleneck. At the moment,
+        # there is no evidence that this is a bottleneck, so it is better to
+        # keep the logic simple.
+        self.pri_dao.execute_queued_items()
+        self.pub_dao.execute_queued_items()
+
+    def put_runtime_inheritance(self, config):
+        """Put task/family inheritance in runtime database."""
+        for namespace in config.cfg['runtime']:
+            value = ' '.join(config.runtime['linearized ancestors'][namespace])
+            self.db_inserts_map[self.TABLE_INHERITANCE].append({
+                "namespace": namespace,
+                "inheritance": value})
+
+    def put_suite_params(
+            self, run_mode, initial_point, final_point, is_held,
+            cycle_point_format=None):
+        """Put run mode, initial/final cycle point in runtime database.
+
+        This method queues the relevant insert statements.
+        """
+        self.db_inserts_map[self.TABLE_SUITE_PARAMS].extend([
+            {"key": "run_mode", "value": run_mode},
+            {"key": "initial_point", "value": str(initial_point)},
+            {"key": "final_point", "value": str(final_point)},
+        ])
+        if cycle_point_format:
+            self.db_inserts_map[self.TABLE_SUITE_PARAMS].append(
+                {"key": "cycle_point_format", "value": str(cycle_point_format)}
+            )
+        if is_held:
+            self.db_inserts_map[self.TABLE_SUITE_PARAMS].append(
+                {"key": "is_held", "value": 1})
+
+    def put_suite_template_vars(self, template_vars):
+        """Put template_vars in runtime database.
+
+        This method queues the relevant insert statements.
+        """
+        for key, value in template_vars.items():
+            self.db_inserts_map[self.TABLE_SUITE_TEMPLATE_VARS].append(
+                {"key": key, "value": value})
+
+    def put_task_event_timers(self, task_events_mgr):
+        """Put statements to update the task_action_timers table."""
+        if task_events_mgr.event_timers:
+            self.db_deletes_map[self.TABLE_TASK_ACTION_TIMERS].append({})
+            for key, timer in task_events_mgr.event_timers.items():
+                key1, point, name, submit_num = key
+                self.db_inserts_map[self.TABLE_TASK_ACTION_TIMERS].append({
+                    "name": name,
+                    "cycle": point,
+                    "ctx_key_pickle": pickle.dumps((key1, submit_num,)),
+                    "ctx_pickle": pickle.dumps(timer.ctx),
+                    "delays_pickle": pickle.dumps(timer.delays),
+                    "num": timer.num,
+                    "delay": timer.delay,
+                    "timeout": timer.timeout})
+
+    def put_task_pool(self, pool):
+        """Put statements to update the task_pool table in runtime database.
+
+        Update the task_pool table and the task_action_timers table.
+        Queue delete (everything) statements to wipe the tables, and queue the
+        relevant insert statements for the current tasks in the pool.
+        """
+        self.db_deletes_map[self.TABLE_TASK_POOL].append({})
+        for itask in pool.get_all_tasks():
+            self.db_inserts_map[self.TABLE_TASK_POOL].append({
+                "name": itask.tdef.name,
+                "cycle": str(itask.point),
+                "spawned": int(itask.has_spawned),
+                "status": itask.state.status,
+                "hold_swap": itask.state.hold_swap})
+            for ctx_key_0 in ["poll_timers", "try_timers"]:
+                for ctx_key_1, timer in getattr(itask, ctx_key_0).items():
+                    if timer is None:
+                        continue
+                    self.db_inserts_map[self.TABLE_TASK_ACTION_TIMERS].append({
+                        "name": itask.tdef.name,
+                        "cycle": str(itask.point),
+                        "ctx_key_pickle": pickle.dumps((ctx_key_0, ctx_key_1)),
+                        "ctx_pickle": pickle.dumps(timer.ctx),
+                        "delays_pickle": pickle.dumps(timer.delays),
+                        "num": timer.num,
+                        "delay": timer.delay,
+                        "timeout": timer.timeout})
+            if itask.state.time_updated:
+                set_args = {
+                    "time_updated": itask.state.time_updated,
+                    "submit_num": itask.submit_num,
+                    "try_num": itask.get_try_num(),
+                    "status": itask.state.status}
+                where_args = {
+                    "cycle": str(itask.point),
+                    "name": itask.tdef.name,
+                }
+                self.db_updates_map.setdefault(self.TABLE_TASK_STATES, [])
+                self.db_updates_map[self.TABLE_TASK_STATES].append(
+                    (set_args, where_args))
+                itask.state.time_updated = None
+
+        self.db_inserts_map[self.TABLE_CHECKPOINT_ID].append({
+            # id = -1 for latest
+            "id": CylcSuiteDAO.CHECKPOINT_LATEST_ID,
+            "time": get_current_time_string(),
+            "event": CylcSuiteDAO.CHECKPOINT_LATEST_EVENT})
+
+    def put_insert_task_events(self, itask, args):
+        """Put INSERT statement for task_events table."""
+        self._put_insert_task_x(CylcSuiteDAO.TABLE_TASK_EVENTS, itask, args)
+
+    def put_insert_task_jobs(self, itask, args):
+        """Put INSERT statement for task_jobs table."""
+        self._put_insert_task_x(CylcSuiteDAO.TABLE_TASK_JOBS, itask, args)
+
+    def put_insert_task_states(self, itask, args):
+        """Put INSERT statement for task_states table."""
+        self._put_insert_task_x(CylcSuiteDAO.TABLE_TASK_STATES, itask, args)
+
+    def _put_insert_task_x(self, table_name, itask, args):
+        """Put INSERT statement for a task_* table."""
+        args.update({
+            "name": itask.tdef.name,
+            "cycle": str(itask.point)})
+        if "submit_num" not in args:
+            args["submit_num"] = itask.submit_num
+        self.db_inserts_map.setdefault(table_name, [])
+        self.db_inserts_map[table_name].append(args)
+
+    def put_update_task_jobs(self, itask, set_args):
+        """Put UPDATE statement for task_jobs table."""
+        self._put_update_task_x(
+            CylcSuiteDAO.TABLE_TASK_JOBS, itask, set_args)
+
+    def put_update_task_states(self, itask, set_args):
+        """Put UPDATE statement for task_states table."""
+        self._put_update_task_x(
+            CylcSuiteDAO.TABLE_TASK_STATES, itask, set_args)
+
+    def _put_update_task_x(self, table_name, itask, set_args):
+        """Put UPDATE statement for a task_* table."""
+        where_args = {
+            "cycle": str(itask.point),
+            "name": itask.tdef.name}
+        if "submit_num" not in set_args:
+            where_args["submit_num"] = itask.submit_num
+        self.db_updates_map.setdefault(table_name, [])
+        self.db_updates_map[table_name].append((set_args, where_args))
+
+    def recover_pub_from_pri(self):
+        """Recover public database from private database."""
+        if self.pub_dao.n_tries >= self.pub_dao.MAX_TRIES:
+            self.copy_pri_to_pub()
+            LOG.warning(
+                "%(pub_db_name)s: recovered from %(pri_db_name)s" % {
+                    "pub_db_name": self.pub_dao.db_file_name,
+                    "pri_db_name": self.pri_dao.db_file_name})
+            self.pub_dao.n_tries = 0
+
+    def restart_upgrade(self):
+        """Vacuum/upgrade runtime DB on restart."""
+        # Backward compat, upgrade database with state file if necessary
+        suite_run_d = os.path.dirname(os.path.dirname(self.pub_path))
+        old_pri_db_path = os.path.join(
+            suite_run_d, 'state', CylcSuiteDAO.OLD_DB_FILE_BASE_NAME)
+        old_pri_db_path_611 = os.path.join(
+            suite_run_d, CylcSuiteDAO.OLD_DB_FILE_BASE_NAME_611[0])
+        old_state_file_path = os.path.join(suite_run_d, "state", "state")
+        if (os.path.exists(old_pri_db_path) and
+                os.path.exists(old_state_file_path) and
+                not os.path.exists(self.pri_path)):
+            # Upgrade pre-6.11.X runtime database + state file
+            copy(old_pri_db_path, self.pri_path)
+            pri_dao = self.get_pri_dao()
+            pri_dao.upgrade_with_state_file(old_state_file_path)
+            target = os.path.join(suite_run_d, "state.tar.gz")
+            cmd = ["tar", "-C", suite_run_d, "-czf", target, "state"]
+            if call(cmd) == 0:
+                rmtree(os.path.join(suite_run_d, "state"), ignore_errors=True)
+            else:
+                try:
+                    os.unlink(os.path.join(suite_run_d, "state.tar.gz"))
+                except OSError:
+                    pass
+                ERR.error("cannot tar-gzip + remove old state/ directory")
+            # Remove old files as well
+            try:
+                os.unlink(os.path.join(suite_run_d, "cylc-suite-env"))
+            except OSError:
+                pass
+        elif os.path.exists(old_pri_db_path_611):
+            # Upgrade 6.11.X runtime database
+            os.rename(old_pri_db_path_611, self.pri_path)
+            pri_dao = self.get_pri_dao()
+            pri_dao.upgrade_from_611()
+            # Remove old files as well
+            for name in [
+                    CylcSuiteDAO.OLD_DB_FILE_BASE_NAME_611[1],
+                    "cylc-suite-env"]:
+                try:
+                    os.unlink(os.path.join(suite_run_d, name))
+                except OSError:
+                    pass
+        else:
+            pri_dao = self.get_pri_dao()
+
+        # Vacuum the primary/private database file
+        OUT.info("Vacuuming the suite db ...")
+        pri_dao.vacuum()
+        OUT.info("...done")
+        pri_dao.close()
diff --git a/lib/cylc/suite_events.py b/lib/cylc/suite_events.py
new file mode 100644
index 0000000..b61a26b
--- /dev/null
+++ b/lib/cylc/suite_events.py
@@ -0,0 +1,182 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2017 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""Suite event handler."""
+
+from collections import namedtuple
+import os
+from pipes import quote
+
+from cylc.cfgspec.globalcfg import GLOBAL_CFG
+from cylc.mp_pool import SuiteProcContext
+from cylc.owner import USER
+from cylc.suite_host import get_suite_host
+from cylc.suite_logging import OUT, ERR, LOG
+
+
+class SuiteEventError(Exception):
+    """Suite event error."""
+    pass
+
+
+SuiteEventContext = namedtuple(
+    "SuiteEventContext",
+    ["event", "reason", "suite", "owner", "host", "port"])
+
+
+class SuiteEventHandler(object):
+    """Suite event handler."""
+
+    EVENT_STARTUP = 'startup'
+    EVENT_SHUTDOWN = 'shutdown'
+    EVENT_TIMEOUT = 'timeout'
+    EVENT_INACTIVITY_TIMEOUT = 'inactivity'
+    EVENT_STALLED = 'stalled'
+
+    SUITE_EVENT_HANDLER = 'suite-event-handler'
+    SUITE_EVENT_MAIL = 'suite-event-mail'
+
+    def __init__(self, proc_pool):
+        self.proc_pool = proc_pool
+
+    @staticmethod
+    def get_events_conf(config, key, default=None):
+        """Return a named [cylc][[events]] configuration."""
+        for getter in [
+                config.cfg['cylc']['events'],
+                GLOBAL_CFG.get(['cylc', 'events'])]:
+            try:
+                value = getter[key]
+            except KeyError:
+                pass
+            else:
+                if value is not None:
+                    return value
+        return default
+
+    def handle(self, config, ctx):
+        """Handle a suite event."""
+        self._run_event_mail(config, ctx)
+        self._run_event_custom_handlers(config, ctx)
+
+    def _run_event_mail(self, config, ctx):
+        """Helper for "run_event_handlers", do mail notification."""
+        if ctx.event in self.get_events_conf(config, 'mail events', []):
+            # SMTP server
+            env = dict(os.environ)
+            mail_smtp = self.get_events_conf(config, 'mail smtp')
+            if mail_smtp:
+                env['smtp'] = mail_smtp
+            subject = '[suite %(event)s] %(suite)s' % {
+                'suite': ctx.suite, 'event': ctx.event}
+            stdin_str = ''
+            for name, value in [
+                    ('suite event', ctx.event),
+                    ('reason', ctx.reason),
+                    ('suite', ctx.suite),
+                    ('host', ctx.host),
+                    ('port', ctx.port),
+                    ('owner', ctx.owner)]:
+                if value:
+                    stdin_str += '%s: %s\n' % (name, value)
+            mail_footer_tmpl = self.get_events_conf(config, 'mail footer')
+            if mail_footer_tmpl:
+                stdin_str += (mail_footer_tmpl + '\n') % {
+                    'host': ctx.host,
+                    'port': ctx.port,
+                    'owner': ctx.owner,
+                    'suite': ctx.suite}
+            proc_ctx = SuiteProcContext(
+                (self.SUITE_EVENT_HANDLER, ctx.event),
+                [
+                    'mail',
+                    '-s', subject,
+                    '-r', self.get_events_conf(
+                        config,
+                        'mail from', 'notifications@' + get_suite_host()),
+                    self.get_events_conf(config, 'mail to', USER),
+                ],
+                env=env,
+                stdin_str=stdin_str)
+            if self.proc_pool.is_closed():
+                # Run command in foreground if process pool is closed
+                self.proc_pool.run_command(proc_ctx)
+                self._run_event_handlers_callback(proc_ctx)
+            else:
+                # Run command using process pool otherwise
+                self.proc_pool.put_command(
+                    proc_ctx, self._run_event_mail_callback)
+
+    def _run_event_custom_handlers(self, config, ctx):
+        """Helper for "run_event_handlers", custom event handlers."""
+        # Look for event handlers
+        # 1. Handlers for specific event
+        # 2. General handlers
+        handlers = self.get_events_conf(config, '%s handler' % ctx.event)
+        if not handlers and (
+                ctx.event in
+                self.get_events_conf(config, 'handler events', [])):
+            handlers = self.get_events_conf(config, 'handlers')
+        if not handlers:
+            return
+
+        for i, handler in enumerate(handlers):
+            cmd_key = ('%s-%02d' % (self.SUITE_EVENT_HANDLER, i), ctx.event)
+            # Handler command may be a string for substitution
+            cmd = handler % {
+                'event': quote(ctx.event),
+                'suite': quote(ctx.suite),
+                'message': quote(ctx.reason),
+                'suite_url': quote(config.cfg['URL']),
+            }
+            if cmd == handler:
+                # Nothing substituted, assume classic interface
+                cmd = "%s '%s' '%s' '%s'" % (
+                    handler, ctx.event, ctx.suite, ctx.reason)
+            proc_ctx = SuiteProcContext(
+                cmd_key, cmd, env=dict(os.environ), shell=True)
+            abort_on_error = self.get_events_conf(
+                config, 'abort if %s handler fails' % ctx.event)
+            if abort_on_error or self.proc_pool.is_closed():
+                # Run command in foreground if abort on failure is set or if
+                # process pool is closed
+                self.proc_pool.run_command(proc_ctx)
+                self._run_event_handlers_callback(
+                    proc_ctx, abort_on_error=abort_on_error)
+            else:
+                # Run command using process pool otherwise
+                self.proc_pool.put_command(
+                    proc_ctx, self._run_event_handlers_callback)
+
+    def _run_event_handlers_callback(self, proc_ctx, abort_on_error=False):
+        """Callback on completion of a suite event handler."""
+        if proc_ctx.ret_code:
+            msg = '%s EVENT HANDLER FAILED' % proc_ctx.cmd_key[1]
+            LOG.error(str(proc_ctx))
+            ERR.error(msg)
+            if abort_on_error:
+                raise SuiteEventError(msg)
+        else:
+            LOG.info(str(proc_ctx))
+
+    @staticmethod
+    def _run_event_mail_callback(proc_ctx):
+        """Callback the mail command for notification of a suite event."""
+        if proc_ctx.ret_code:
+            LOG.warning(str(proc_ctx))
+        else:
+            LOG.info(str(proc_ctx))
diff --git a/lib/cylc/suite_logging.py b/lib/cylc/suite_logging.py
index 47588e8..d082408 100644
--- a/lib/cylc/suite_logging.py
+++ b/lib/cylc/suite_logging.py
@@ -439,6 +439,16 @@ class STDLogger(object):
         self.logger = logging.getLogger(log)
 
     def log(self, level, *args, **kwargs):
+        try:
+            itask = kwargs.pop("itask")
+        except KeyError:
+            pass
+        else:
+            try:
+                args = ("[%s] -%s" % (itask.identity, args[0]),) + args[1:]
+            except AttributeError:
+                args = ("[%s] -%s" % (itask, args[0]),) + args[1:]
+            args = tuple(args)
         if self.logger.handlers:
             # If this logger has file handlers write out to it.
             self.logger.log(level, *args, **kwargs)
diff --git a/lib/cylc/suite_srv_files_mgr.py b/lib/cylc/suite_srv_files_mgr.py
index 199e70c..ba80905 100644
--- a/lib/cylc/suite_srv_files_mgr.py
+++ b/lib/cylc/suite_srv_files_mgr.py
@@ -116,9 +116,8 @@ class SuiteSrvFilesManager(object):
         if is_remote_host(old_host):
             import shlex
             from cylc.cfgspec.globalcfg import GLOBAL_CFG
-            ssh_tmpl = str(GLOBAL_CFG.get_host_item(
-                "remote shell template", old_host))
-            cmd = shlex.split(ssh_tmpl) + ["-n", old_host] + cmd
+            ssh_str = str(GLOBAL_CFG.get_host_item("ssh command", old_host))
+            cmd = shlex.split(ssh_str) + ["-n", old_host] + cmd
         from subprocess import Popen, PIPE
         from time import sleep, time
         proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
@@ -495,6 +494,7 @@ To see if %(suite)s is running on '%(host)s:%(port)s':
         cert_obj.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)  # 10 years.
         cert_obj.set_issuer(cert_obj.get_subject())
         cert_obj.set_pubkey(pkey_obj)
+        cert_obj.set_serial_number(1)
         cert_obj.add_extensions([ext])
         cert_obj.sign(pkey_obj, 'sha256')
         self._dump_item(
@@ -609,7 +609,7 @@ To see if %(suite)s is running on '%(host)s:%(port)s':
         }
         import shlex
         command = shlex.split(
-            GLOBAL_CFG.get_host_item('remote shell template', host, owner))
+            GLOBAL_CFG.get_host_item('ssh command', host, owner))
         command += ['-n', owner + '@' + host, script]
         from subprocess import Popen, PIPE
         try:
diff --git a/lib/cylc/task_action_timer.py b/lib/cylc/task_action_timer.py
new file mode 100644
index 0000000..8b871a3
--- /dev/null
+++ b/lib/cylc/task_action_timer.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2017 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Timer for task actions."""
+
+from time import time
+
+from cylc.wallclock import (
+    get_seconds_as_interval_string, get_time_string_from_unix_time)
+
+
+class TaskActionTimer(object):
+    """A timer with delays for task actions.
+
+    Attributes:
+        ctx: opaque context object identifying the action (hashable).
+        delays: list of delays (floats, seconds) to walk through on retries.
+        num: index of the next delay to use (number of tries so far).
+        delay: the currently active delay in seconds, or None.
+        timeout: absolute unix time when the current delay expires, or None.
+        is_waiting: True while the action is in flight (timer suspended).
+    """
+
+    # Memory optimization - constrain possible attributes to this list.
+    __slots__ = ["ctx", "delays", "num", "delay", "timeout", "is_waiting"]
+
+    def __init__(self, ctx=None, delays=None, num=0, delay=None, timeout=None):
+        self.ctx = ctx
+        self.delays = None
+        self.set_delays(delays)
+        self.num = int(num)
+        # Normalise delay/timeout to floats; None means "not set".
+        if delay is not None:
+            delay = float(delay)
+        self.delay = delay
+        if timeout is not None:
+            timeout = float(timeout)
+        self.timeout = timeout
+        self.is_waiting = False
+
+    def delay_as_seconds(self):
+        """Return the delay as PTnS, where n is number of seconds."""
+        return get_seconds_as_interval_string(self.delay)
+
+    def is_delay_done(self, now=None):
+        """Return True if the current delay has elapsed (timeout passed).
+
+        Always False when no timeout is set.  "now" defaults to time().
+        """
+        if self.timeout is None:
+            return False
+        if now is None:
+            now = time()
+        return now > self.timeout
+
+    def is_timeout_set(self):
+        """Return True if timeout is set."""
+        return self.timeout is not None
+
+    def next(self, no_exhaust=False):
+        """Return the next retry delay.
+
+        When delay list has no more item:
+        * Return None if no_exhaust is False
+        * Return the final delay if no_exhaust is True.
+
+        A non-None result also sets self.timeout and advances self.num.
+        """
+        try:
+            self.delay = self.delays[self.num]
+        except IndexError:
+            if not no_exhaust:
+                self.delay = None
+        if self.delay is not None:
+            self.timeout = time() + self.delay
+            self.num += 1
+        return self.delay
+
+    def reset(self):
+        """Reset num, delay, timeout and is_waiting."""
+        self.num = 0
+        self.delay = None
+        self.timeout = None
+        self.is_waiting = False
+
+    def set_delays(self, delays=None):
+        """Set delays, ensuring that the values are floats.
+
+        None means a single zero delay (i.e. act immediately, once).
+        """
+        if delays is None:
+            self.delays = [float(0)]
+        else:
+            self.delays = [float(delay) for delay in delays]
+
+    def set_waiting(self):
+        """Set waiting flag, while waiting for action to complete."""
+        self.delay = None
+        self.is_waiting = True
+        self.timeout = None
+
+    def unset_waiting(self):
+        """Unset waiting flag after an action has completed."""
+        self.is_waiting = False
+
+    def timeout_as_str(self):
+        """Return the timeout as an ISO8601 date-time string."""
+        return get_time_string_from_unix_time(self.timeout)
diff --git a/lib/cylc/task_events_mgr.py b/lib/cylc/task_events_mgr.py
new file mode 100644
index 0000000..27cfd36
--- /dev/null
+++ b/lib/cylc/task_events_mgr.py
@@ -0,0 +1,845 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2017 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""Task events manager.
+
+This module provides logic to:
+* Manage task messages (incoming or internal).
+* Set up retries on task job failures (submission or execution).
+* Generate task event handlers.
+  * Retrieval of log files for completed remote jobs.
+  * Email notification.
+  * Custom event handlers.
+* Manage invoking and retrying of task event handlers.
+"""
+
+from collections import namedtuple
+from logging import getLevelName, CRITICAL, ERROR, WARNING, INFO, DEBUG
+import os
+from pipes import quote
+import re
+import shlex
+from time import time
+import traceback
+
+from parsec.config import ItemNotFoundError
+
+from cylc.cfgspec.globalcfg import GLOBAL_CFG
+import cylc.flags
+from cylc.mp_pool import SuiteProcContext
+from cylc.network.suite_broadcast_server import BroadcastServer
+from cylc.owner import USER
+from cylc.suite_logging import ERR, LOG
+from cylc.suite_host import get_suite_host
+from cylc.task_action_timer import TaskActionTimer
+from cylc.task_message import TaskMessage
+from cylc.task_state import (
+    TASK_STATUSES_ACTIVE, TASK_STATUS_READY, TASK_STATUS_SUBMITTED,
+    TASK_STATUS_SUBMIT_RETRYING, TASK_STATUS_SUBMIT_FAILED,
+    TASK_STATUS_RUNNING, TASK_STATUS_RETRYING, TASK_STATUS_FAILED,
+    TASK_STATUS_SUCCEEDED)
+from cylc.task_outputs import (
+    TASK_OUTPUT_SUBMITTED, TASK_OUTPUT_STARTED, TASK_OUTPUT_SUCCEEDED,
+    TASK_OUTPUT_FAILED)
+from cylc.wallclock import (
+    get_current_time_string,
+    get_unix_time_from_time_string,
+    RE_DATE_TIME_FORMAT_EXTENDED)
+
+
+# Context for a user-defined (custom) task event handler command.
+CustomTaskEventHandlerContext = namedtuple(
+    "CustomTaskEventHandlerContext",
+    ["key", "ctx_type", "cmd"])
+
+
+# Context for an email notification of a task event.
+TaskEventMailContext = namedtuple(
+    "TaskEventMailContext",
+    ["key", "ctx_type", "mail_from", "mail_to", "mail_smtp"])
+
+
+# Context for retrieval of job logs from a remote user@host.
+TaskJobLogsRetrieveContext = namedtuple(
+    "TaskJobLogsRetrieveContext",
+    ["key", "ctx_type", "user_at_host", "max_size"])
+
+
+class TaskEventsManager(object):
+    """Task events manager.
+
+    This class does the following:
+    * Manage task messages (incoming or otherwise).
+    * Set up task (submission) retries on job (submission) failures.
+    * Generate and manage task event handlers.
+    """
+
+    # Event names; several are shared with the task output constants.
+    EVENT_FAILED = TASK_OUTPUT_FAILED
+    EVENT_RETRY = "retry"
+    EVENT_STARTED = TASK_OUTPUT_STARTED
+    EVENT_SUBMITTED = TASK_OUTPUT_SUBMITTED
+    EVENT_SUBMIT_FAILED = "submission failed"
+    EVENT_SUBMIT_RETRY = "submission retry"
+    EVENT_SUCCEEDED = TASK_OUTPUT_SUCCEEDED
+    # Handler type keys, used in handler context tuples and log file names.
+    HANDLER_CUSTOM = "event-handler"
+    HANDLER_MAIL = "event-mail"
+    HANDLER_JOB_LOGS_RETRIEVE = "job-logs-retrieve"
+    INCOMING_FLAG = ">"
+    # Map message priority names to logging module levels.
+    LEVELS = {
+        "INFO": INFO,
+        "NORMAL": INFO,
+        "WARNING": WARNING,
+        "ERROR": ERROR,
+        "CRITICAL": CRITICAL,
+        "DEBUG": DEBUG,
+    }
+    NN = "NN"
+    POLLED_INDICATOR = "(polled)"
+    # Matches "<message> at <ISO8601 time>".
+    # NOTE(review): non-raw string; "\A"/"\Z" pass through as literal escapes
+    # in Python 2 but a raw string would be cleaner - do not change in-patch.
+    RE_MESSAGE_TIME = re.compile(
+        '\A(.+) at (' + RE_DATE_TIME_FORMAT_EXTENDED + ')\Z')
+
+    def __init__(self, suite, proc_pool, suite_db_mgr):
+        # Suite name, process pool and DB manager are owned by the scheduler.
+        self.suite = suite
+        self.suite_url = None
+        self.proc_pool = proc_pool
+        self.suite_db_mgr = suite_db_mgr
+        # Mail notification batching: interval (seconds) and optional footer.
+        self.mail_interval = 0.0
+        self.mail_footer = None
+        self.next_mail_time = None
+        # {(key1, point, name, submit_num): TaskActionTimer, ...}
+        self.event_timers = {}
+
+    @staticmethod
+    def get_host_conf(itask, key, default=None, skey="remote"):
+        """Return a host setting from suite then global configuration.
+
+        Precedence: broadcast override, task runtime config, then the global
+        per-host configuration; "default" if none defines the key.
+        """
+        overrides = BroadcastServer.get_inst().get(itask.identity)
+        if skey in overrides and overrides[skey].get(key) is not None:
+            return overrides[skey][key]
+        elif itask.tdef.rtconfig[skey].get(key) is not None:
+            return itask.tdef.rtconfig[skey][key]
+        else:
+            try:
+                return GLOBAL_CFG.get_host_item(
+                    key, itask.task_host, itask.task_owner)
+            except (KeyError, ItemNotFoundError):
+                pass
+        return default
+
+    def get_task_job_activity_log(
+            self, suite, point, name, submit_num=None):
+        """Shorthand for get_task_job_log(..., tail="job-activity.log")."""
+        return self.get_task_job_log(
+            suite, point, name, submit_num, "job-activity.log")
+
+    def get_task_job_log(
+            self, suite, point, name, submit_num=None, tail=None):
+        """Return the job log path.
+
+        Path is <suite job log dir>/<point>/<name>/<submit_num>[/<tail>];
+        submit_num defaults to the "NN" (latest) symlink.
+        """
+        args = [
+            GLOBAL_CFG.get_derived_host_item(suite, "suite job log directory"),
+            self.get_task_job_id(point, name, submit_num)]
+        if tail:
+            args.append(tail)
+        return os.path.join(*args)
+
+    def get_task_job_id(self, point, name, submit_num=None):
+        """Return the relative job id path "<point>/<name>/<submit_num>".
+
+        submit_num is zero-padded to 2 digits; a non-integer (e.g. None)
+        falls back to the "NN" latest-submit symlink.
+        """
+        try:
+            submit_num = "%02d" % submit_num
+        except TypeError:
+            submit_num = self.NN
+        return os.path.join(str(point), name, submit_num)
+
+    def log_task_job_activity(self, ctx, suite, point, name, submit_num=NN):
+        """Log an activity for a task job.
+
+        Append str(ctx) to the job's "job-activity.log"; also echo it to the
+        suite log (ERROR on non-zero ret_code, else DEBUG).  A write failure
+        is only a warning - activity logging is best-effort.
+        """
+        ctx_str = str(ctx)
+        if not ctx_str:
+            return
+        if isinstance(ctx.cmd_key, tuple):  # An event handler
+            submit_num = ctx.cmd_key[-1]
+        job_activity_log = self.get_task_job_activity_log(
+            suite, point, name, submit_num)
+        try:
+            with open(job_activity_log, "ab") as handle:
+                handle.write(ctx_str + '\n')
+        except IOError as exc:
+            LOG.warning("%s: write failed\n%s" % (job_activity_log, exc))
+        if ctx.cmd and ctx.ret_code:
+            LOG.error(ctx_str)
+        elif ctx.cmd:
+            LOG.debug(ctx_str)
+
+    def process_events(self, schd_ctx):
+        """Process task events that were created by "setup_event_handlers".
+
+        schd_ctx is an instance of "Scheduler" in "cylc.scheduler".
+
+        Walk self.event_timers: drop exhausted timers, run due custom
+        handlers individually, and batch due mail/log-retrieval handlers by
+        shared context before dispatching them.
+        """
+        ctx_groups = {}
+        now = time()
+        # Iterate over a copy: entries may be deleted during the loop.
+        for id_key, timer in self.event_timers.copy().items():
+            key1, point, name, submit_num = id_key
+            if timer.is_waiting:
+                continue
+            # Set timer if timeout is None.
+            if not timer.is_timeout_set():
+                if timer.next() is None:
+                    LOG.warning("%s/%s/%02d %s failed" % (
+                        point, name, submit_num, key1))
+                    del self.event_timers[id_key]
+                    continue
+                # Report retries and delayed 1st try
+                tmpl = None
+                if timer.num > 1:
+                    tmpl = "%s/%s/%02d %s failed, retrying in %s (after %s)"
+                elif timer.delay:
+                    tmpl = "%s/%s/%02d %s will run after %s (after %s)"
+                if tmpl:
+                    LOG.debug(tmpl % (
+                        point, name, submit_num, key1,
+                        timer.delay_as_seconds(),
+                        timer.timeout_as_str()))
+            # Ready to run?
+            if not timer.is_delay_done() or (
+                # Avoid flooding user's mail box with mail notification.
+                # Group together as many notifications as possible within a
+                # given interval.
+                timer.ctx.ctx_type == self.HANDLER_MAIL and
+                not schd_ctx.stop_mode and
+                self.next_mail_time is not None and
+                self.next_mail_time > now
+            ):
+                continue
+
+            timer.set_waiting()
+            if timer.ctx.ctx_type == self.HANDLER_CUSTOM:
+                # Run custom event handlers on their own
+                self.proc_pool.put_command(
+                    SuiteProcContext(
+                        (key1, submit_num),
+                        timer.ctx.cmd, env=os.environ, shell=True,
+                    ),
+                    self._custom_handler_callback, [schd_ctx, id_key])
+            else:
+                # Group together built-in event handlers, where possible
+                if timer.ctx not in ctx_groups:
+                    ctx_groups[timer.ctx] = []
+                ctx_groups[timer.ctx].append(id_key)
+
+        next_mail_time = now + self.mail_interval
+        for ctx, id_keys in ctx_groups.items():
+            if ctx.ctx_type == self.HANDLER_MAIL:
+                # Set next_mail_time if any mail sent
+                self.next_mail_time = next_mail_time
+                self._process_event_email(schd_ctx, ctx, id_keys)
+            elif ctx.ctx_type == self.HANDLER_JOB_LOGS_RETRIEVE:
+                self._process_job_logs_retrieval(schd_ctx, ctx, id_keys)
+
+    def process_message(self, itask, priority, message, poll_event_time=None,
+                        is_incoming=False):
+        """Parse an incoming task message and update task state.
+
+        Incoming is e.g. "succeeded at <TIME>".
+
+        Correctly handle late (out of order) message which would otherwise set
+        the state backward in the natural order of events.
+
+        Arguments:
+            itask: the task proxy the message is about.
+            priority: message priority name, looked up in self.LEVELS.
+            message: the message text, possibly with an "at TIME" suffix.
+            poll_event_time: event time if the message came from a poll
+                (also marks the message as polled).
+            is_incoming: True for messages received from the network.
+        """
+        is_polled = poll_event_time is not None
+
+        # Log incoming messages with '>' to distinguish non-message log entries
+        message_flag = ""
+        if is_incoming:
+            message_flag = self.INCOMING_FLAG
+        log_message = '(current:%s)%s %s' % (
+            itask.state.status, message_flag, message)
+        if poll_event_time is not None:
+            log_message += ' %s' % self.POLLED_INDICATOR
+        LOG.log(self.LEVELS.get(priority, INFO), log_message, itask=itask)
+
+        # Strip the "at TIME" suffix.
+        event_time = poll_event_time
+        if not event_time:
+            match = self.RE_MESSAGE_TIME.match(message)
+            if match:
+                message, event_time = match.groups()
+        if not event_time:
+            event_time = get_current_time_string()
+
+        # always update the suite state summary for latest message
+        itask.summary['latest_message'] = message
+        if is_polled:
+            itask.summary['latest_message'] += " %s" % self.POLLED_INDICATOR
+        cylc.flags.iflag = True
+
+        # Failed tasks do not send messages unless declared resurrectable
+        if itask.state.status == TASK_STATUS_FAILED:
+            if itask.tdef.rtconfig['enable resurrection']:
+                LOG.warning(
+                    'message received while in the failed state:' +
+                    ' I am returning from the dead!',
+                    itask=itask)
+            else:
+                LOG.warning(
+                    'message rejected while in the failed state:\n  %s' %
+                    message,
+                    itask=itask)
+                return
+
+        # Check registered outputs.
+        if itask.state.outputs.exists(message):
+            if not itask.state.outputs.is_completed(message):
+                cylc.flags.pflag = True
+                itask.state.outputs.set_completed(message)
+                self._db_events_insert(itask, "output completed", message)
+            elif not is_polled:
+                # This output has already been reported complete. Not an error
+                # condition - maybe the network was down for a bit. Ok for
+                # polling as multiple polls *should* produce the same result.
+                LOG.warning(
+                    "Unexpected output (already completed):\n  %s" % message,
+                    itask=itask)
+
+        if is_polled and itask.state.status not in TASK_STATUSES_ACTIVE:
+            # A poll result can come in after a task finishes.
+            LOG.warning(
+                "Ignoring late poll result: task is not active",
+                itask=itask)
+            return
+
+        if priority == TaskMessage.WARNING:
+            self.setup_event_handlers(itask, "warning", message)
+
+        # Dispatch on message content, guarded by current state so that a
+        # late message cannot move the state backward.
+        if (message == TASK_OUTPUT_STARTED and
+                itask.state.status in [
+                    TASK_STATUS_READY, TASK_STATUS_SUBMITTED,
+                    TASK_STATUS_SUBMIT_FAILED]):
+            self._process_message_started(itask, event_time)
+        elif (message == TASK_OUTPUT_SUCCEEDED and
+                itask.state.status in [
+                    TASK_STATUS_READY, TASK_STATUS_SUBMITTED,
+                    TASK_STATUS_SUBMIT_FAILED, TASK_STATUS_RUNNING,
+                    TASK_STATUS_FAILED]):
+            self._process_message_succeeded(itask, event_time, is_polled)
+        elif (message == TASK_OUTPUT_FAILED and
+                itask.state.status in [
+                    TASK_STATUS_READY, TASK_STATUS_SUBMITTED,
+                    TASK_STATUS_SUBMIT_FAILED, TASK_STATUS_RUNNING]):
+            # (submit- states in case of very fast submission and execution).
+            self._process_message_failed(itask, event_time)
+        elif message == self.EVENT_SUBMIT_FAILED:
+            self._process_message_submit_failed(itask, event_time)
+        elif message == TASK_OUTPUT_SUBMITTED:
+            self._process_message_submitted(itask, event_time)
+        elif message.startswith(TaskMessage.FAIL_MESSAGE_PREFIX):
+            # capture and record signals sent to task proxy
+            self._db_events_insert(itask, "signaled", message)
+            signal = message.replace(TaskMessage.FAIL_MESSAGE_PREFIX, "")
+            self.suite_db_mgr.put_update_task_jobs(itask, {
+                "run_signal": signal})
+        elif message.startswith(TaskMessage.VACATION_MESSAGE_PREFIX):
+            cylc.flags.pflag = True
+            itask.state.reset_state(TASK_STATUS_SUBMITTED)
+            self._db_events_insert(itask, "vacated", message)
+            itask.set_event_time('started')  # reset
+            if TASK_STATUS_SUBMIT_RETRYING in itask.try_timers:
+                itask.try_timers[TASK_STATUS_SUBMIT_RETRYING].num = 0
+            itask.job_vacated = True
+            try:
+                itask.timeout_timers[TASK_STATUS_SUBMITTED] = (
+                    itask.summary['submitted_time'] +
+                    float(self._get_events_conf(itask, 'submission timeout')))
+            except (TypeError, ValueError):
+                # No usable submitted_time or no timeout configured.
+                itask.timeout_timers[TASK_STATUS_SUBMITTED] = None
+        else:
+            # Unhandled messages. These include:
+            #  * general non-output/progress messages
+            #  * poll messages that repeat previous results
+            # Note that all messages are logged already at the top.
+            LOG.debug(
+                '(current: %s) unhandled: %s' % (itask.state.status, message),
+                itask=itask)
+            if priority in [CRITICAL, ERROR, WARNING, INFO, DEBUG]:
+                priority = getLevelName(priority)
+            self._db_events_insert(
+                itask, ("message %s" % str(priority).lower()), message)
+
+    def setup_event_handlers(self, itask, event, message):
+        """Set up handlers for a task event.
+
+        Record the event in the DB and queue job log retrieval, email and
+        custom handlers as configured.  No-op except in 'live' run mode.
+        """
+        if itask.tdef.run_mode != 'live':
+            return
+        msg = ""
+        # Only record a message that adds information beyond the event name.
+        if message != "job %s" % event:
+            msg = message
+        self._db_events_insert(itask, event, msg)
+        self._setup_job_logs_retrieval(itask, event)
+        self._setup_event_mail(itask, event)
+        self._setup_custom_event_handlers(itask, event, message)
+
+    def set_poll_time(self, itask, now=None):
+        """Set the next task execution/submission poll time.
+
+        If now is set, set the timer only if the previous delay is done.
+        Return the next delay, or None if the task has no poll timer for its
+        current status (or the previous delay is not yet done).
+        """
+        key = itask.state.status
+        timer = itask.poll_timers.get(key)
+        if timer is None:
+            return
+        if now is not None and not timer.is_delay_done(now):
+            return
+        if timer.num is None:
+            timer.num = 0
+        # no_exhaust: keep re-using the final delay indefinitely.
+        delay = timer.next(no_exhaust=True)
+        if delay is not None:
+            LOG.info(
+                'next job poll in %s (after %s)' % (
+                    timer.delay_as_seconds(), timer.timeout_as_str()),
+                itask=itask)
+        return delay
+
+    def _custom_handler_callback(self, ctx, schd_ctx, id_key):
+        """Callback when a custom event handler is done.
+
+        On success remove the event timer; otherwise clear its waiting flag
+        so "process_events" can retry it.
+        """
+        _, point, name, submit_num = id_key
+        self.log_task_job_activity(
+            ctx, schd_ctx.suite, point, name, submit_num)
+        if ctx.ret_code == 0:
+            del self.event_timers[id_key]
+        else:
+            self.event_timers[id_key].unset_waiting()
+
+    def _db_events_insert(self, itask, event="", message=""):
+        """Record an event to the DB (task_events table, timestamped now)."""
+        self.suite_db_mgr.put_insert_task_events(itask, {
+            "time": get_current_time_string(),
+            "event": event,
+            "message": message})
+
+    def _process_event_email(self, schd_ctx, ctx, id_keys):
+        """Process event notification, by email.
+
+        Build one "mail" invocation covering all the (possibly batched)
+        task events in id_keys; the message body is passed on STDIN and the
+        command is dispatched via the process pool.
+        """
+        if len(id_keys) == 1:
+            # 1 event from 1 task
+            (_, event), point, name, submit_num = id_keys[0]
+            subject = "[%s/%s/%02d %s] %s" % (
+                point, name, submit_num, event, schd_ctx.suite)
+        else:
+            event_set = set([id_key[0][1] for id_key in id_keys])
+            if len(event_set) == 1:
+                # 1 event from n tasks
+                subject = "[%d tasks %s] %s" % (
+                    len(id_keys), event_set.pop(), schd_ctx.suite)
+            else:
+                # n events from n tasks
+                subject = "[%d task events] %s" % (
+                    len(id_keys), schd_ctx.suite)
+        cmd = ["mail", "-s", subject]
+        # From: and To:
+        cmd.append("-r")
+        cmd.append(ctx.mail_from)
+        cmd.append(ctx.mail_to)
+        # STDIN for mail, tasks
+        stdin_str = ""
+        for id_key in sorted(id_keys):
+            (_, event), point, name, submit_num = id_key
+            stdin_str += "%s: %s/%s/%02d\n" % (event, point, name, submit_num)
+        # STDIN for mail, event info + suite detail
+        stdin_str += "\n"
+        for label, value in [
+                ('suite', schd_ctx.suite),
+                ("host", schd_ctx.host),
+                ("port", schd_ctx.port),
+                ("owner", schd_ctx.owner)]:
+            if value:
+                stdin_str += "%s: %s\n" % (label, value)
+        if self.mail_footer:
+            stdin_str += (self.mail_footer + "\n") % {
+                "host": schd_ctx.host,
+                "port": schd_ctx.port,
+                "owner": schd_ctx.owner,
+                "suite": schd_ctx.suite}
+        # SMTP server
+        env = dict(os.environ)
+        mail_smtp = ctx.mail_smtp
+        if mail_smtp:
+            env["smtp"] = mail_smtp
+        self.proc_pool.put_command(
+            SuiteProcContext(
+                ctx, cmd, env=env, stdin_str=stdin_str, id_keys=id_keys,
+            ),
+            self._event_email_callback, [schd_ctx])
+
+    def _event_email_callback(self, proc_ctx, schd_ctx):
+        """Call back when email notification command exits.
+
+        On success, drop each covered event timer and record a good activity
+        log entry; on failure, clear the waiting flags so the timers retry.
+        """
+        for id_key in proc_ctx.cmd_kwargs["id_keys"]:
+            key1, point, name, submit_num = id_key
+            try:
+                if proc_ctx.ret_code == 0:
+                    del self.event_timers[id_key]
+                    log_ctx = SuiteProcContext((key1, submit_num), None)
+                    log_ctx.ret_code = 0
+                    self.log_task_job_activity(
+                        log_ctx, schd_ctx.suite, point, name, submit_num)
+                else:
+                    self.event_timers[id_key].unset_waiting()
+            except KeyError:
+                # Timer already removed (e.g. by a concurrent callback).
+                if cylc.flags.debug:
+                    ERR.debug(traceback.format_exc())
+
+    @staticmethod
+    def _get_events_conf(itask, key, default=None):
+        """Return an events setting from suite then global configuration.
+
+        Precedence: broadcast "events" override, task runtime "events"
+        section, then global "task events"; "default" if none defines key.
+        """
+        for getter in [
+                BroadcastServer.get_inst().get(itask.identity).get("events"),
+                itask.tdef.rtconfig["events"],
+                GLOBAL_CFG.get()["task events"]]:
+            try:
+                value = getter.get(key)
+            except (AttributeError, ItemNotFoundError, KeyError):
+                # getter may be None or lack the key entirely.
+                pass
+            else:
+                if value is not None:
+                    return value
+        return default
+
+    def _process_job_logs_retrieval(self, schd_ctx, ctx, id_keys):
+        """Process retrieval of task job logs from remote user at host.
+
+        Build a single rsync command (over the configured ssh) with
+        include filters for every job in id_keys, and dispatch it via the
+        process pool.
+        """
+        if ctx.user_at_host and "@" in ctx.user_at_host:
+            s_user, s_host = ctx.user_at_host.split("@", 1)
+        else:
+            s_user, s_host = (None, ctx.user_at_host)
+        ssh_str = str(GLOBAL_CFG.get_host_item("ssh command", s_host, s_user))
+        rsync_str = str(GLOBAL_CFG.get_host_item(
+            "retrieve job logs command", s_host, s_user))
+
+        cmd = shlex.split(rsync_str) + ["--rsh=" + ssh_str]
+        if cylc.flags.debug:
+            cmd.append("-v")
+        if ctx.max_size:
+            cmd.append("--max-size=%s" % (ctx.max_size,))
+        # Includes and excludes
+        includes = set()
+        for _, point, name, submit_num in id_keys:
+            # Include relevant directories, all levels needed
+            # (rsync include rules require every parent directory listed).
+            includes.add("/%s" % (point))
+            includes.add("/%s/%s" % (point, name))
+            includes.add("/%s/%s/%02d" % (point, name, submit_num))
+            includes.add("/%s/%s/%02d/**" % (point, name, submit_num))
+        cmd += ["--include=%s" % (include) for include in sorted(includes)]
+        cmd.append("--exclude=/**")  # exclude everything else
+        # Remote source
+        cmd.append(ctx.user_at_host + ":" + GLOBAL_CFG.get_derived_host_item(
+            schd_ctx.suite, "suite job log directory", s_host, s_user) + "/")
+        # Local target
+        cmd.append(GLOBAL_CFG.get_derived_host_item(
+            schd_ctx.suite, "suite job log directory") + "/")
+        self.proc_pool.put_command(
+            SuiteProcContext(ctx, cmd, env=dict(os.environ), id_keys=id_keys),
+            self._job_logs_retrieval_callback, [schd_ctx])
+
+    def _job_logs_retrieval_callback(self, proc_ctx, schd_ctx):
+        """Call back when log job retrieval completes.
+
+        A retrieval only counts as successful for a job if all expected log
+        files now exist locally; otherwise its timer is released for retry.
+        """
+        for id_key in proc_ctx.cmd_kwargs["id_keys"]:
+            key1, point, name, submit_num = id_key
+            try:
+                # All completed jobs are expected to have a "job.out".
+                fnames = ["job.out"]
+                try:
+                    # NOTE(review): substring test - any event name contained
+                    # in 'succeeded' matches; equality may have been intended.
+                    if key1[1] not in 'succeeded':
+                        fnames.append("job.err")
+                except TypeError:
+                    pass
+                fname_oks = {}
+                for fname in fnames:
+                    fname_oks[fname] = os.path.exists(self.get_task_job_log(
+                        schd_ctx.suite, point, name, submit_num, fname))
+                # All expected paths must exist to record a good attempt
+                log_ctx = SuiteProcContext((key1, submit_num), None)
+                if all(fname_oks.values()):
+                    log_ctx.ret_code = 0
+                    del self.event_timers[id_key]
+                else:
+                    log_ctx.ret_code = 1
+                    log_ctx.err = "File(s) not retrieved:"
+                    for fname, exist_ok in sorted(fname_oks.items()):
+                        if not exist_ok:
+                            log_ctx.err += " %s" % fname
+                    self.event_timers[id_key].unset_waiting()
+                self.log_task_job_activity(
+                    log_ctx, schd_ctx.suite, point, name, submit_num)
+            except KeyError:
+                # Timer already removed (e.g. by a concurrent callback).
+                if cylc.flags.debug:
+                    ERR.debug(traceback.format_exc())
+
+    def _process_message_failed(self, itask, event_time):
+        """Helper for process_message, handle a failed message.
+
+        If a retry delay is available the task goes to "retrying";
+        otherwise it is a definitive failure.
+        """
+        if event_time is None:
+            event_time = get_current_time_string()
+        itask.set_event_time('finished', event_time)
+        self.suite_db_mgr.put_update_task_jobs(itask, {
+            "run_status": 1,
+            "time_run_exit": itask.summary['finished_time_string'],
+        })
+        if (TASK_STATUS_RETRYING not in itask.try_timers or
+                itask.try_timers[TASK_STATUS_RETRYING].next() is None):
+            # No retry lined up: definitive failure.
+            # Note the TASK_STATUS_FAILED output is only added if needed.
+            cylc.flags.pflag = True
+            itask.state.reset_state(TASK_STATUS_FAILED)
+            self.setup_event_handlers(itask, "failed", 'job failed')
+        else:
+            # There is a retry lined up
+            timeout_str = (
+                itask.try_timers[TASK_STATUS_RETRYING].timeout_as_str())
+            delay_msg = "retrying in %s" % (
+                itask.try_timers[TASK_STATUS_RETRYING].delay_as_seconds())
+            msg = "failed, %s (after %s)" % (delay_msg, timeout_str)
+            LOG.info("job(%02d) %s" % (itask.submit_num, msg), itask=itask)
+            itask.summary['latest_message'] = msg
+            self.setup_event_handlers(
+                itask, "retry", "job failed, %s" % delay_msg)
+            itask.state.reset_state(TASK_STATUS_RETRYING)
+
+    def _process_message_started(self, itask, event_time):
+        """Helper for process_message, handle a started message."""
+        if itask.job_vacated:
+            itask.job_vacated = False
+            LOG.warning("Vacated job restarted", itask=itask)
+        cylc.flags.pflag = True
+        itask.state.reset_state(TASK_STATUS_RUNNING)
+        itask.set_event_time('started', event_time)
+        self.suite_db_mgr.put_update_task_jobs(itask, {
+            "time_run": itask.summary['started_time_string']})
+        # Prefer the job's own execution time limit over the suite-level
+        # "execution timeout" event setting.
+        if itask.summary['execution_time_limit']:
+            execution_timeout = itask.summary['execution_time_limit']
+        else:
+            execution_timeout = self._get_events_conf(
+                itask, 'execution timeout')
+        # A None/unparsable timeout disables the running-state timer.
+        try:
+            itask.timeout_timers[TASK_STATUS_RUNNING] = (
+                itask.summary['started_time'] + float(execution_timeout))
+        except (TypeError, ValueError):
+            itask.timeout_timers[TASK_STATUS_RUNNING] = None
+
+        # submission was successful so reset submission try number
+        if TASK_STATUS_SUBMIT_RETRYING in itask.try_timers:
+            itask.try_timers[TASK_STATUS_SUBMIT_RETRYING].num = 0
+        self.setup_event_handlers(itask, 'started', 'job started')
+        self.set_poll_time(itask)
+
+    def _process_message_succeeded(self, itask, event_time, is_polled=False):
+        """Helper for process_message, handle a succeeded message.
+
+        is_polled: True if the success was detected by polling rather than
+        a message from the job; in that case missing submitted/started
+        outputs are not assumed complete here.
+        """
+        cylc.flags.pflag = True
+        itask.set_event_time('finished', event_time)
+        self.suite_db_mgr.put_update_task_jobs(itask, {
+            "run_status": 0,
+            "time_run_exit": itask.summary['finished_time_string'],
+        })
+        # Update mean elapsed time only on task succeeded.
+        if itask.summary['started_time'] is not None:
+            itask.tdef.elapsed_times.append(
+                itask.summary['finished_time'] -
+                itask.summary['started_time'])
+        if not itask.state.outputs.all_completed():
+            err = "Succeeded with unreported outputs:"
+            for msg in itask.state.outputs.get_not_completed():
+                err += "\n  " + msg
+            LOG.warning(err, itask=itask)
+            if not is_polled:
+                # A succeeded task MUST have submitted and started.
+                # TODO - just poll for outputs in the job status file?
+                for output in [TASK_OUTPUT_SUBMITTED, TASK_OUTPUT_STARTED]:
+                    if not itask.state.outputs.is_completed(output):
+                        LOG.warning(
+                            "Assuming output completed:  \n %s" % output,
+                            itask=itask)
+        itask.state.reset_state(TASK_STATUS_SUCCEEDED)
+        self.setup_event_handlers(itask, "succeeded", "job succeeded")
+
+    def _process_message_submit_failed(self, itask, event_time):
+        """Helper for process_message, handle a submit-failed message."""
+        LOG.error(self.EVENT_SUBMIT_FAILED, itask=itask)
+        if event_time is None:
+            event_time = get_current_time_string()
+        # NOTE(review): "time_submit_exit" records get_current_time_string()
+        # rather than the event_time argument, unlike
+        # _process_message_submitted which converts event_time -- confirm
+        # whether this difference is intentional.
+        self.suite_db_mgr.put_update_task_jobs(itask, {
+            "time_submit_exit": get_current_time_string(),
+            "submit_status": 1,
+        })
+        # A failed submission has no batch system job id to keep.
+        try:
+            del itask.summary['submit_method_id']
+        except KeyError:
+            pass
+        if (TASK_STATUS_SUBMIT_RETRYING not in itask.try_timers or
+                itask.try_timers[TASK_STATUS_SUBMIT_RETRYING].next() is None):
+            # No submission retry lined up: definitive failure.
+            itask.set_event_time('finished', event_time)
+            cylc.flags.pflag = True
+            # See github #476.
+            self.setup_event_handlers(
+                itask, self.EVENT_SUBMIT_FAILED,
+                'job %s' % self.EVENT_SUBMIT_FAILED)
+            itask.state.reset_state(TASK_STATUS_SUBMIT_FAILED)
+        else:
+            # There is a submission retry lined up.
+            timer = itask.try_timers[TASK_STATUS_SUBMIT_RETRYING]
+            timeout_str = timer.timeout_as_str()
+            delay_msg = "submit-retrying in %s" % timer.delay_as_seconds()
+            msg = "%s, %s (after %s)" % (
+                self.EVENT_SUBMIT_FAILED, delay_msg, timeout_str)
+            LOG.info("job(%02d) %s" % (itask.submit_num, msg), itask=itask)
+            itask.summary['latest_message'] = msg
+            self.setup_event_handlers(
+                itask, self.EVENT_SUBMIT_RETRY,
+                "job %s, %s" % (self.EVENT_SUBMIT_FAILED, delay_msg))
+            itask.state.reset_state(TASK_STATUS_SUBMIT_RETRYING)
+
+    def _process_message_submitted(self, itask, event_time):
+        """Helper for process_message, handle a submit-succeeded message."""
+        if itask.summary.get('submit_method_id') is not None:
+            LOG.info(
+                'submit_method_id=%s' % itask.summary['submit_method_id'],
+                itask=itask)
+        self.suite_db_mgr.put_update_task_jobs(itask, {
+            "time_submit_exit": get_unix_time_from_time_string(event_time),
+            "submit_status": 0,
+            "batch_sys_job_id": itask.summary.get('submit_method_id')})
+
+        if itask.tdef.run_mode == 'simulation':
+            # Simulate job execution at this point.
+            itask.set_event_time('started', event_time)
+            itask.state.reset_state(TASK_STATUS_RUNNING)
+            itask.state.outputs.set_completed(TASK_OUTPUT_STARTED)
+            return
+
+        itask.set_event_time('submitted', event_time)
+        # Clear any stale started/finished times from a previous submit.
+        itask.set_event_time('started')
+        itask.set_event_time('finished')
+        itask.summary['latest_message'] = TASK_OUTPUT_SUBMITTED
+        self.setup_event_handlers(
+            itask, TASK_OUTPUT_SUBMITTED, 'job submitted')
+
+        cylc.flags.pflag = True
+        if itask.state.status == TASK_STATUS_READY:
+            # On rare occasions, the submit command of a batch system has sent
+            # the job to its server, and the server has started the job before
+            # the job submit command returns.  In that case the task is
+            # already past "ready" and must not be reset to "submitted".
+            itask.state.reset_state(TASK_STATUS_SUBMITTED)
+            try:
+                itask.timeout_timers[TASK_STATUS_SUBMITTED] = (
+                    itask.summary['submitted_time'] +
+                    float(self._get_events_conf(itask, 'submission timeout')))
+            except (TypeError, ValueError):
+                itask.timeout_timers[TASK_STATUS_SUBMITTED] = None
+            self.set_poll_time(itask)
+
+    def _setup_job_logs_retrieval(self, itask, event):
+        """Set up remote job logs retrieval.
+
+        For a task with a job completion event, i.e. succeeded, failed,
+        (execution) retry.  No-op for local jobs, for non-completion
+        events, when retrieval is disabled, or when a timer for this
+        (event, point, name, submit_num) key already exists.
+        """
+        id_key = (
+            (self.HANDLER_JOB_LOGS_RETRIEVE, event),
+            str(itask.point), itask.tdef.name, itask.submit_num)
+        if itask.task_owner:
+            user_at_host = itask.task_owner + "@" + itask.task_host
+        else:
+            user_at_host = itask.task_host
+        events = (self.EVENT_FAILED, self.EVENT_RETRY, self.EVENT_SUCCEEDED)
+        if (event not in events or
+                user_at_host in [USER + '@localhost', 'localhost'] or
+                not self.get_host_conf(itask, "retrieve job logs") or
+                id_key in self.event_timers):
+            return
+        retry_delays = self.get_host_conf(
+            itask, "retrieve job logs retry delays")
+        if not retry_delays:
+            # A single immediate attempt, no retries.
+            retry_delays = [0]
+        self.event_timers[id_key] = TaskActionTimer(
+            TaskJobLogsRetrieveContext(
+                self.HANDLER_JOB_LOGS_RETRIEVE,  # key
+                self.HANDLER_JOB_LOGS_RETRIEVE,  # ctx_type
+                user_at_host,
+                self.get_host_conf(itask, "retrieve job logs max size"),
+            ),
+            retry_delays)
+
+    def _setup_event_mail(self, itask, event):
+        """Set up task event notification, by email.
+
+        No-op if a timer already exists for this key or the event is not
+        in the configured "mail events" list.
+        """
+        id_key = (
+            (self.HANDLER_MAIL, event),
+            str(itask.point), itask.tdef.name, itask.submit_num)
+        if (id_key in self.event_timers or
+                event not in self._get_events_conf(itask, "mail events", [])):
+            return
+        retry_delays = self._get_events_conf(itask, "mail retry delays")
+        if not retry_delays:
+            # A single immediate attempt, no retries.
+            retry_delays = [0]
+        self.event_timers[id_key] = TaskActionTimer(
+            TaskEventMailContext(
+                self.HANDLER_MAIL,  # key
+                self.HANDLER_MAIL,  # ctx_type
+                self._get_events_conf(  # mail_from
+                    itask,
+                    "mail from",
+                    "notifications@" + get_suite_host(),
+                ),
+                self._get_events_conf(itask, "mail to", USER),  # mail_to
+                self._get_events_conf(itask, "mail smtp"),  # mail_smtp
+            ),
+            retry_delays)
+
+    def _setup_custom_event_handlers(self, itask, event, message):
+        """Set up custom task event handlers.
+
+        A specific "<event> handler" setting wins; otherwise fall back to
+        the generic "handlers" list when the event is in "handler events".
+        """
+        handlers = self._get_events_conf(itask, event + ' handler')
+        if (handlers is None and
+                event in self._get_events_conf(itask, 'handler events', [])):
+            handlers = self._get_events_conf(itask, 'handlers')
+        if handlers is None:
+            return
+        retry_delays = self._get_events_conf(
+            itask,
+            'handler retry delays',
+            self.get_host_conf(itask, "task event handler retry delays"))
+        if not retry_delays:
+            retry_delays = [0]
+        # There can be multiple custom event handlers
+        for i, handler in enumerate(handlers):
+            key1 = ("%s-%02d" % (self.HANDLER_CUSTOM, i), event)
+            id_key = (
+                key1, str(itask.point), itask.tdef.name, itask.submit_num)
+            if id_key in self.event_timers:
+                continue
+            # Custom event handler can be a command template string
+            # or a command that takes 4 arguments (classic interface)
+            # String fields are shell-quoted; submit_num is an int and is
+            # interpolated as-is.
+            cmd = handler % {
+                "event": quote(event),
+                "suite": quote(self.suite),
+                "point": quote(str(itask.point)),
+                "name": quote(itask.tdef.name),
+                "submit_num": itask.submit_num,
+                "id": quote(itask.identity),
+                "message": quote(message),
+                "task_url": quote(itask.tdef.rtconfig['URL']),
+                "suite_url": quote(self.suite_url),
+            }
+            if cmd == handler:
+                # Nothing substituted, assume classic interface
+                cmd = "%s '%s' '%s' '%s' '%s'" % (
+                    handler, event, self.suite, itask.identity, message)
+            LOG.debug("Queueing %s handler: %s" % (event, cmd), itask=itask)
+            self.event_timers[id_key] = (
+                TaskActionTimer(
+                    CustomTaskEventHandlerContext(
+                        key1,
+                        self.HANDLER_CUSTOM,
+                        cmd,
+                    ),
+                    retry_delays))
diff --git a/lib/cylc/task_job_mgr.py b/lib/cylc/task_job_mgr.py
new file mode 100644
index 0000000..d1b7b9a
--- /dev/null
+++ b/lib/cylc/task_job_mgr.py
@@ -0,0 +1,948 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2017 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""Manage task jobs.
+
+This module provides logic to:
+* Set up the directory structure on remote job hosts.
+  * Copy suite service files to remote job hosts for communication clients.
+  * Clean up of service files on suite shutdown.
+* Prepare task job files.
+* Prepare task jobs submission, and manage the callbacks.
+* Prepare task jobs poll/kill, and manage the callbacks.
+"""
+
+from logging import CRITICAL, INFO, WARNING
+import os
+from pipes import quote
+import shlex
+from shutil import rmtree
+from subprocess import Popen, PIPE
+from time import time
+import traceback
+from uuid import uuid4
+
+from parsec.util import pdeepcopy, poverride
+
+from cylc.batch_sys_manager import BatchSysManager
+from cylc.cfgspec.globalcfg import GLOBAL_CFG
+from cylc.envvar import expandvars
+import cylc.flags
+from cylc.host_select import get_task_host
+from cylc.job_file import JobFileWriter
+from cylc.mkdir_p import mkdir_p
+from cylc.mp_pool import SuiteProcPool, SuiteProcContext
+from cylc.network.suite_broadcast_server import BroadcastServer
+from cylc.owner import is_remote_user, USER
+from cylc.suite_host import is_remote_host
+from cylc.suite_logging import ERR, LOG
+from cylc.task_events_mgr import TaskEventsManager
+from cylc.task_message import TaskMessage
+from cylc.task_outputs import (
+    TASK_OUTPUT_SUBMITTED, TASK_OUTPUT_STARTED, TASK_OUTPUT_SUCCEEDED,
+    TASK_OUTPUT_FAILED)
+from cylc.task_action_timer import TaskActionTimer
+from cylc.task_state import (
+    TASK_STATUSES_ACTIVE, TASK_STATUS_READY, TASK_STATUS_SUBMITTED,
+    TASK_STATUS_RUNNING, TASK_STATUS_SUBMIT_RETRYING, TASK_STATUS_RETRYING)
+from cylc.wallclock import (
+    get_current_time_string, get_seconds_as_interval_string)
+
+
+class RemoteJobHostInitError(Exception):
+    """Cannot initialise suite run directory of remote job host."""
+
+    MSG_INIT = "%s: initialisation did not complete:\n"  # %s user_at_host
+    MSG_TIDY = "%s: clean up did not complete:\n"  # %s user_at_host
+
+    def __str__(self):
+        # args are: (msg_template, user_at_host, cmd_str, ret_code, out, err)
+        msg, user_at_host, cmd_str, ret_code, out, err = self.args
+        ret = (msg + "COMMAND FAILED (%d): %s\n") % (
+            user_at_host, ret_code, cmd_str)
+        for label, item in ("STDOUT", out), ("STDERR", err):
+            if item:
+                for line in item.splitlines(True):  # keep newline chars
+                    ret += "COMMAND %s: %s" % (label, line)
+        return ret
+
+
+class TaskJobManager(object):
+    """Manage task job submit, poll and kill.
+
+    This class provides logic to:
+    * Submit task jobs.
+    * Poll task jobs.
+    * Kill task jobs.
+    * Set up the directory structure on job hosts.
+    * Install suite communicate client files on job hosts.
+    * Remove suite contact files on job hosts.
+    """
+
+    JOB_FILE_BASE = BatchSysManager.JOB_FILE_BASE
+    JOBS_KILL = "jobs-kill"
+    JOBS_POLL = "jobs-poll"
+    JOBS_SUBMIT = SuiteProcPool.JOBS_SUBMIT
+    KEY_EXECUTE_TIME_LIMIT = "execution_time_limit"
+
+    def __init__(
+            self, suite, proc_pool, suite_db_mgr, suite_srv_files_mgr):
+        self.suite = suite
+        self.proc_pool = proc_pool
+        self.suite_db_mgr = suite_db_mgr
+        self.task_events_mgr = TaskEventsManager(
+            suite, proc_pool, suite_db_mgr)
+        self.job_file_writer = JobFileWriter()
+        self.suite_srv_files_mgr = suite_srv_files_mgr
+        # {(user, host): should_unlink, ...} -- hosts already initialised,
+        # and whether their contact files need removal on shutdown.
+        self.init_host_map = {}
+        self.single_task_mode = False
+
+    def check_task_jobs(self, suite, task_pool):
+        """Check submission and execution timeout and polling timers.
+
+        Poll tasks that have timed out and/or have reached next polling time.
+        """
+        now = time()
+        poll_tasks = set()
+        # NOTE(review): set_poll_time is used for its truthiness here --
+        # presumably returns True when the task's next poll is due; confirm.
+        for itask in task_pool.get_tasks():
+            if (self._check_timeout(itask, now) or
+                    self.task_events_mgr.set_poll_time(itask, now)):
+                poll_tasks.add(itask)
+        self.poll_task_jobs(suite, poll_tasks)
+
+    def init_host(self, reg, host, owner):
+        """Initialise suite run dir on a user at host.
+
+        Create SUITE_RUN_DIR/log/job/ if necessary.
+        Install suite contact environment file.
+        Install suite python modules.
+
+        Raise RemoteJobHostInitError if initialisation cannot complete.
+
+        """
+        if host is None:
+            host = 'localhost'
+        # No-op for the suite host itself, already-initialised targets,
+        # and single task mode.
+        if ((host, owner) in [('localhost', None), ('localhost', USER)] or
+                (host, owner) in self.init_host_map or self.single_task_mode):
+            return
+        user_at_host = host
+        if owner:
+            user_at_host = owner + '@' + host
+
+        r_suite_run_dir = GLOBAL_CFG.get_derived_host_item(
+            reg, 'suite run directory', host, owner)
+        r_log_job_dir = GLOBAL_CFG.get_derived_host_item(
+            reg, 'suite job log directory', host, owner)
+        r_suite_srv_dir = os.path.join(
+            r_suite_run_dir, self.suite_srv_files_mgr.DIR_BASE_SRV)
+
+        # Create a UUID file in the service directory.
+        # If remote host has the file in its service directory, we can assume
+        # that the remote host has a shared file system with the suite host.
+        ssh_tmpl = GLOBAL_CFG.get_host_item('ssh command', host, owner)
+        uuid_str = str(uuid4())
+        uuid_fname = os.path.join(
+            self.suite_srv_files_mgr.get_suite_srv_dir(reg), uuid_str)
+        try:
+            open(uuid_fname, 'wb').close()
+            proc = Popen(
+                shlex.split(ssh_tmpl) + [
+                    '-n', user_at_host,
+                    'test', '-e', os.path.join(r_suite_srv_dir, uuid_str)],
+                stdout=PIPE, stderr=PIPE)
+            if proc.wait() == 0:
+                # Initialised, but no need to tidy up
+                self.init_host_map[(host, owner)] = False
+                return
+        finally:
+            # Always remove the local UUID probe file.
+            try:
+                os.unlink(uuid_fname)
+            except OSError:
+                pass
+
+        cmds = []
+        # Command to create suite directory structure on remote host.
+        cmds.append(shlex.split(ssh_tmpl) + [
+            '-n', user_at_host,
+            'mkdir', '-p',
+            r_suite_run_dir, r_log_job_dir, r_suite_srv_dir])
+        # Command to copy contact and authentication files to remote host.
+        # Note: no need to do this if task communication method is "poll".
+        should_unlink = GLOBAL_CFG.get_host_item(
+            'task communication method', host, owner) != "poll"
+        if should_unlink:
+            scp_tmpl = GLOBAL_CFG.get_host_item('scp command', host, owner)
+            cmds.append(shlex.split(scp_tmpl) + [
+                '-p',
+                self.suite_srv_files_mgr.get_contact_file(reg),
+                self.suite_srv_files_mgr.get_auth_item(
+                    self.suite_srv_files_mgr.FILE_BASE_PASSPHRASE, reg),
+                self.suite_srv_files_mgr.get_auth_item(
+                    self.suite_srv_files_mgr.FILE_BASE_SSL_CERT, reg),
+                user_at_host + ':' + r_suite_srv_dir + '/'])
+        # Command to copy python library to remote host.
+        suite_run_py = os.path.join(
+            GLOBAL_CFG.get_derived_host_item(reg, 'suite run directory'),
+            'python')
+        if os.path.isdir(suite_run_py):
+            # NOTE(review): scp_tmpl is only bound inside the should_unlink
+            # branch above; if "task communication method" is "poll" and a
+            # python/ dir exists this raises NameError -- confirm upstream.
+            cmds.append(shlex.split(scp_tmpl) + [
+                '-pr',
+                suite_run_py, user_at_host + ':' + r_suite_run_dir + '/'])
+        # Run commands in sequence.
+        for cmd in cmds:
+            proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
+            out, err = proc.communicate()
+            if proc.wait():
+                raise RemoteJobHostInitError(
+                    RemoteJobHostInitError.MSG_INIT,
+                    user_at_host, ' '.join([quote(item) for item in cmd]),
+                    proc.returncode, out, err)
+        self.init_host_map[(host, owner)] = should_unlink
+        LOG.info('Initialised %s:%s' % (user_at_host, r_suite_run_dir))
+
+    def kill_task_jobs(self, suite, itasks, warn_skips=False):
+        """Kill jobs of active tasks, and hold the tasks.
+
+        Only tasks in an active state are killed; with warn_skips=True a
+        warning is logged for each task that is skipped.
+
+        """
+        active_itasks = []
+        for itask in itasks:
+            if itask.state.status in TASK_STATUSES_ACTIVE:
+                itask.state.set_held()
+                active_itasks.append(itask)
+            elif warn_skips:  # and not active
+                LOG.warning(
+                    '%s: skip kill, task not killable' % itask.identity)
+        self._run_job_cmd(
+            self.JOBS_KILL, suite, active_itasks,
+            self._kill_task_jobs_callback)
+
+    def poll_task_jobs(self, suite, itasks, warn_skips=False):
+        """Poll jobs of specified tasks.
+
+        Only tasks in an active state are polled; with warn_skips=True a
+        warning is logged for each task that is skipped.
+        """
+        active_itasks = []
+        for itask in itasks:
+            if itask.state.status in TASK_STATUSES_ACTIVE:
+                active_itasks.append(itask)
+            elif warn_skips:  # and not active
+                LOG.warning(
+                    '%s: skip poll, task not pollable' % itask.identity)
+        self._run_job_cmd(
+            self.JOBS_POLL, suite, active_itasks,
+            self._poll_task_jobs_callback)
+
+    def prep_submit_task_jobs(self, suite, itasks, dry_run=False):
+        """Prepare task jobs for submit.
+
+        Return the list of successfully prepared tasks.
+        NOTE(review): returns None (not []) when itasks is empty -- callers
+        appear to test truthiness only, so this is benign but inconsistent.
+        """
+        if not itasks:
+            return
+        prepared_tasks = []
+        for itask in itasks:
+            if self._prep_submit_task_job(suite, itask, dry_run) is not None:
+                prepared_tasks.append(itask)
+        return prepared_tasks
+
+    def submit_task_jobs(self, suite, itasks, is_simulation=False):
+        """Prepare and submit task jobs.
+
+        Tasks are grouped by (host, owner) so that each "cylc jobs-submit"
+        invocation targets a single authority; remote targets get job files
+        streamed over STDIN (--remote-mode).
+        """
+        if is_simulation:
+            return self._simulation_submit_task_jobs(itasks)
+
+        # Prepare tasks for job submission
+        prepared_tasks = self.prep_submit_task_jobs(suite, itasks)
+        if not prepared_tasks:
+            return
+
+        # Submit task jobs
+        auth_itasks = {}
+        for itask in prepared_tasks:
+            # The job file is now (about to be) used: reset the file write flag
+            # so that subsequent manual retrigger will generate a new job file.
+            itask.local_job_file_path = None
+            itask.state.reset_state(TASK_STATUS_READY)
+            if (itask.task_host, itask.task_owner) not in auth_itasks:
+                auth_itasks[(itask.task_host, itask.task_owner)] = []
+            auth_itasks[(itask.task_host, itask.task_owner)].append(itask)
+        for auth, itasks in sorted(auth_itasks.items()):
+            cmd = ["cylc", self.JOBS_SUBMIT]
+            if cylc.flags.debug:
+                cmd.append("--debug")
+            host, owner = auth
+            remote_mode = False
+            kwargs = {}
+            # Only pass --host/--user when they differ from the suite host
+            # and the current user respectively.
+            for key, value, test_func in [
+                    ('host', host, is_remote_host),
+                    ('user', owner, is_remote_user)]:
+                if test_func(value):
+                    cmd.append('--%s=%s' % (key, value))
+                    remote_mode = True
+                    kwargs[key] = value
+            if remote_mode:
+                cmd.append('--remote-mode')
+            # "--" ends option parsing before positional arguments.
+            cmd.append("--")
+            cmd.append(GLOBAL_CFG.get_derived_host_item(
+                suite, 'suite job log directory', host, owner))
+            stdin_file_paths = []
+            job_log_dirs = []
+            for itask in sorted(itasks, key=lambda itask: itask.identity):
+                if remote_mode:
+                    stdin_file_paths.append(
+                        self.task_events_mgr.get_task_job_log(
+                            suite, itask.point, itask.tdef.name,
+                            itask.submit_num, self.JOB_FILE_BASE))
+                job_log_dirs.append(self.task_events_mgr.get_task_job_id(
+                    itask.point, itask.tdef.name, itask.submit_num))
+            cmd += job_log_dirs
+            self.proc_pool.put_command(
+                SuiteProcContext(
+                    self.JOBS_SUBMIT,
+                    cmd,
+                    stdin_file_paths=stdin_file_paths,
+                    job_log_dirs=job_log_dirs,
+                    **kwargs
+                ),
+                self._submit_task_jobs_callback, [suite, itasks])
+
+    def unlink_hosts_contacts(self, reg):
+        """Remove suite contact files from initialised hosts.
+
+        This is called on shutdown, so we don't want anything to hang.
+        Terminate any incomplete SSH commands after 10 seconds.
+        """
+        # Issue all SSH commands in parallel
+        procs = {}
+        for (host, owner), should_unlink in self.init_host_map.items():
+            if not should_unlink:
+                continue
+            user_at_host = host
+            if owner:
+                user_at_host = owner + '@' + host
+            ssh_tmpl = GLOBAL_CFG.get_host_item('ssh command', host, owner)
+            r_suite_contact_file = os.path.join(
+                GLOBAL_CFG.get_derived_host_item(
+                    reg, 'suite run directory', host, owner),
+                self.suite_srv_files_mgr.DIR_BASE_SRV,
+                self.suite_srv_files_mgr.FILE_BASE_CONTACT)
+            cmd = shlex.split(ssh_tmpl) + [
+                '-n', user_at_host, 'rm', '-f', r_suite_contact_file]
+            procs[user_at_host] = (cmd, Popen(cmd, stdout=PIPE, stderr=PIPE))
+        # Wait for commands to complete for a max of 10 seconds
+        # NOTE(review): this loop busy-polls without sleeping -- harmless at
+        # shutdown, but worth a short sleep if CPU use matters.
+        timeout = time() + 10.0
+        while procs and time() < timeout:
+            for user_at_host, (cmd, proc) in procs.copy().items():
+                if proc.poll() is None:
+                    continue
+                del procs[user_at_host]
+                out, err = proc.communicate()
+                if proc.wait():
+                    ERR.warning(RemoteJobHostInitError(
+                        RemoteJobHostInitError.MSG_TIDY,
+                        user_at_host, ' '.join([quote(item) for item in cmd]),
+                        proc.returncode, out, err))
+        # Terminate any remaining commands
+        for user_at_host, (cmd, proc) in procs.items():
+            try:
+                proc.terminate()
+            except OSError:
+                pass
+            out, err = proc.communicate()
+            if proc.wait():
+                ERR.warning(RemoteJobHostInitError(
+                    RemoteJobHostInitError.MSG_TIDY,
+                    user_at_host, ' '.join([quote(item) for item in cmd]),
+                    proc.returncode, out, err))
+
+    def _check_timeout(self, itask, now):
+        """Check/handle submission/execution timeouts.
+
+        Return True if the task should be polled now.  Returns None
+        (falsy) implicitly when the task has no relevant timeout timer.
+        """
+        if itask.state.status == TASK_STATUS_RUNNING:
+            timer = itask.poll_timers.get(self.KEY_EXECUTE_TIME_LIMIT)
+            if timer is not None:
+                if not timer.is_timeout_set():
+                    timer.next()
+                if not timer.is_delay_done():
+                    # Don't poll
+                    return False
+                if timer.next() is not None:
+                    # Poll now, and more retries lined up
+                    return True
+                # No more retry lined up, can issue execution timeout event
+        if itask.state.status in itask.timeout_timers:
+            timeout = itask.timeout_timers[itask.state.status]
+            if timeout is None or now <= timeout:
+                return False
+            # Timed out: clear the timer so the event fires only once.
+            itask.timeout_timers[itask.state.status] = None
+            if itask.state.status == TASK_STATUS_RUNNING:
+                msg = 'job started %s ago, but has not finished' % (
+                    get_seconds_as_interval_string(
+                        timeout - itask.summary['started_time']))
+                event = 'execution timeout'
+            else:  # if itask.state.status == TASK_STATUS_SUBMITTED:
+                msg = 'job submitted %s ago, but has not started' % (
+                    get_seconds_as_interval_string(
+                        timeout - itask.summary['submitted_time']))
+                event = 'submission timeout'
+            LOG.warning(msg, itask=itask)
+            self.task_events_mgr.setup_event_handlers(itask, event, msg)
+            return True
+
+    def _create_job_log_path(self, suite, itask):
+        """Create job log directory for a task job, etc.
+
+        Create local job directory, and NN symbolic link.
+        If NN => 01, remove numbered directories with submit numbers greater
+        than 01.
+        Return a string in the form "POINT/NAME/SUBMIT_NUM".
+
+        NOTE(review): despite the docstring above, every return path in
+        this method returns None -- confirm whether callers use the value.
+        """
+        job_file_dir = self.task_events_mgr.get_task_job_log(
+            suite, itask.point, itask.tdef.name, itask.submit_num)
+        task_log_dir = os.path.dirname(job_file_dir)
+        if itask.submit_num == 1:
+            # First submit: clear out stale numbered dirs from earlier runs.
+            try:
+                names = os.listdir(task_log_dir)
+            except OSError:
+                pass
+            else:
+                for name in names:
+                    if name not in ["01", self.task_events_mgr.NN]:
+                        rmtree(
+                            os.path.join(task_log_dir, name),
+                            ignore_errors=True)
+        else:
+            rmtree(job_file_dir, ignore_errors=True)
+
+        mkdir_p(job_file_dir)
+        # Point the NN symlink at the current submit number directory.
+        target = os.path.join(task_log_dir, self.task_events_mgr.NN)
+        source = os.path.basename(job_file_dir)
+        try:
+            prev_source = os.readlink(target)
+        except OSError:
+            prev_source = None
+        if prev_source == source:
+            return
+        try:
+            if prev_source:
+                os.unlink(target)
+            os.symlink(source, target)
+        except OSError as exc:
+            # Ensure the error names the symlink for diagnosis.
+            if not exc.filename:
+                exc.filename = target
+            raise exc
+
+    @staticmethod
+    def _get_job_scripts(itask, rtconfig):
+        """Return pre-script, script, post-script for a job.
+
+        For inter-suite dependence, replace the job script with an
+        automatic "cylc suite-state" polling command.
+        """
+        script = rtconfig['script']
+        pre_script = rtconfig['pre-script']
+        post_script = rtconfig['post-script']
+        if itask.tdef.suite_polling_cfg:
+            # Automatic suite state polling script
+            comstr = "cylc suite-state " + \
+                     " --task=" + itask.tdef.suite_polling_cfg['task'] + \
+                     " --point=" + str(itask.point) + \
+                     " --status=" + itask.tdef.suite_polling_cfg['status']
+            if cylc.flags.debug:
+                comstr += ' --debug'
+            for key, fmt in [
+                    ('user', ' --%s=%s'),
+                    ('host', ' --%s=%s'),
+                    ('interval', ' --%s=%d'),
+                    ('max-polls', ' --%s=%s'),
+                    ('run-dir', ' --%s=%s'),
+                    ('template', ' --%s=%s')]:
+                if rtconfig['suite state polling'][key]:
+                    comstr += fmt % (key, rtconfig['suite state polling'][key])
+            comstr += " " + itask.tdef.suite_polling_cfg['suite']
+            # Echo the command before running it, for the job log.
+            script = "echo " + comstr + "\n" + comstr
+        return pre_script, script, post_script
+
+    def _job_cmd_out_callback(self, suite, itask, cmd_ctx, line):
+        """Callback on job command STDOUT/STDERR.
+
+        Prefix each line with the remote authority (if any) and append it
+        to the task's job activity log.
+        """
+        if cmd_ctx.cmd_kwargs.get("host") and cmd_ctx.cmd_kwargs.get("user"):
+            user_at_host = "(%(user)s@%(host)s) " % cmd_ctx.cmd_kwargs
+        elif cmd_ctx.cmd_kwargs.get("host"):
+            user_at_host = "(%(host)s) " % cmd_ctx.cmd_kwargs
+        elif cmd_ctx.cmd_kwargs.get("user"):
+            user_at_host = "(%(user)s at localhost) " % cmd_ctx.cmd_kwargs
+        else:
+            user_at_host = ""
+        # Lines may be "timestamp|key|content"; drop the middle field.
+        try:
+            timestamp, _, content = line.split("|")
+        except ValueError:
+            pass
+        else:
+            line = "%s %s" % (timestamp, content)
+        job_activity_log = self.task_events_mgr.get_task_job_activity_log(
+            suite, itask.point, itask.tdef.name)
+        try:
+            with open(job_activity_log, "ab") as handle:
+                if not line.endswith("\n"):
+                    line += "\n"
+                handle.write(user_at_host + line)
+        except IOError as exc:
+            # Best effort: log the failure but don't raise.
+            LOG.warning("%s: write failed\n%s" % (job_activity_log, exc))
+
+    def _kill_task_jobs_callback(self, ctx, suite, itasks):
+        """Callback when kill tasks command exits."""
+        self._manip_task_jobs_callback(
+            ctx,
+            suite,
+            itasks,
+            self._kill_task_job_callback,
+            {BatchSysManager.OUT_PREFIX_COMMAND: self._job_cmd_out_callback})
+
+    def _kill_task_job_callback(self, suite, itask, cmd_ctx, line):
+        """Helper for _kill_task_jobs_callback, on one task job."""
+        ctx = SuiteProcContext(self.JOBS_KILL, None)
+        ctx.out = line
+        try:
+            ctx.timestamp, _, ctx.ret_code = line.split("|", 2)
+        except ValueError:
+            ctx.ret_code = 1
+            ctx.cmd = cmd_ctx.cmd  # print original command on failure
+        else:
+            ctx.ret_code = int(ctx.ret_code)
+            if ctx.ret_code:
+                ctx.cmd = cmd_ctx.cmd  # print original command on failure
+        self.task_events_mgr.log_task_job_activity(
+            ctx, suite, itask.point, itask.tdef.name)
+        log_lvl = INFO
+        log_msg = 'killed'
+        if ctx.ret_code:  # non-zero exit status
+            log_lvl = WARNING
+            log_msg = 'kill failed'
+            itask.state.kill_failed = True
+        elif itask.state.status == TASK_STATUS_SUBMITTED:
+            self.task_events_mgr.process_message(
+                itask, CRITICAL, "%s at %s" % (
+                    self.task_events_mgr.EVENT_SUBMIT_FAILED, ctx.timestamp))
+            cylc.flags.iflag = True
+        elif itask.state.status == TASK_STATUS_RUNNING:
+            self.task_events_mgr.process_message(
+                itask, CRITICAL, TASK_OUTPUT_FAILED)
+            cylc.flags.iflag = True
+        else:
+            log_lvl = WARNING
+            log_msg = (
+                'ignoring job kill result, unexpected task state: %s' %
+                itask.state.status)
+        itask.summary['latest_message'] = log_msg
+        LOG.log(log_lvl, "[%s] -job(%02d) %s" % (
+            itask.identity, itask.submit_num, log_msg))
+
+    @staticmethod
+    def _manip_task_jobs_callback(
+            ctx, suite, itasks, summary_callback, more_callbacks=None):
+        """Callback when submit/poll/kill tasks command exits."""
+        if ctx.ret_code:
+            LOG.error(ctx)
+        else:
+            LOG.debug(ctx)
+        tasks = {}
+        # Note for "kill": It is possible for a job to trigger its trap and
+        # report back to the suite before this logic is called. If so, the
+        # task will no longer be TASK_STATUS_SUBMITTED or TASK_STATUS_RUNNING,
+        # and its output line will be ignored here.
+        for itask in itasks:
+            if itask.point is not None and itask.submit_num:
+                submit_num = "%02d" % (itask.submit_num)
+                tasks[(str(itask.point), itask.tdef.name, submit_num)] = itask
+        handlers = [(BatchSysManager.OUT_PREFIX_SUMMARY, summary_callback)]
+        if more_callbacks:
+            for prefix, callback in more_callbacks.items():
+                handlers.append((prefix, callback))
+        out = ctx.out
+        if not out:
+            out = ""
+            # Something is very wrong here
+            # Fallback to use "job_log_dirs" list to report the problem
+            job_log_dirs = ctx.cmd_kwargs.get("job_log_dirs", [])
+            for job_log_dir in job_log_dirs:
+                point, name, submit_num = job_log_dir.split(os.sep, 2)
+                itask = tasks[(point, name, submit_num)]
+                out += (BatchSysManager.OUT_PREFIX_SUMMARY +
+                        "|".join([ctx.timestamp, job_log_dir, "1"]) + "\n")
+        for line in out.splitlines(True):
+            for prefix, callback in handlers:
+                if line.startswith(prefix):
+                    line = line[len(prefix):].strip()
+                    try:
+                        path = line.split("|", 2)[1]  # timestamp, path, status
+                        point, name, submit_num = path.split(os.sep, 2)
+                        itask = tasks[(point, name, submit_num)]
+                        callback(suite, itask, ctx, line)
+                    except (KeyError, ValueError):
+                        if cylc.flags.debug:
+                            LOG.warning('Unhandled %s output: %s' % (
+                                ctx.cmd_key, line))
+                            LOG.warning(traceback.format_exc())
+
+    def _poll_task_jobs_callback(self, ctx, suite, itasks):
+        """Callback when poll tasks command exits."""
+        self._manip_task_jobs_callback(
+            ctx,
+            suite,
+            itasks,
+            self._poll_task_job_callback,
+            {BatchSysManager.OUT_PREFIX_MESSAGE:
+             self._poll_task_job_message_callback})
+
+    def _poll_task_job_callback(self, suite, itask, cmd_ctx, line):
+        """Helper for _poll_task_jobs_callback, on one task job."""
+        ctx = SuiteProcContext(self.JOBS_POLL, None)
+        ctx.out = line
+        ctx.ret_code = 0
+
+        items = line.split("|")
+        # See cylc.batch_sys_manager.JobPollContext
+        try:
+            (
+                batch_sys_exit_polled, run_status, run_signal,
+                time_submit_exit, time_run, time_run_exit
+            ) = items[4:10]
+        except IndexError:
+            itask.summary['latest_message'] = 'poll failed'
+            cylc.flags.iflag = True
+            ctx.cmd = cmd_ctx.cmd  # print original command on failure
+            return
+        finally:
+            self.task_events_mgr.log_task_job_activity(
+                ctx, suite, itask.point, itask.tdef.name)
+        if run_status == "1" and run_signal in ["ERR", "EXIT"]:
+            # Failed normally
+            self.task_events_mgr.process_message(
+                itask, INFO, TASK_OUTPUT_FAILED, time_run_exit)
+        elif run_status == "1" and batch_sys_exit_polled == "1":
+            # Failed by a signal, and no longer in batch system
+            self.task_events_mgr.process_message(
+                itask, INFO, TASK_OUTPUT_FAILED, time_run_exit)
+            self.task_events_mgr.process_message(
+                itask, INFO, TaskMessage.FAIL_MESSAGE_PREFIX + run_signal,
+                time_run_exit)
+        elif run_status == "1":
+            # The job has terminated, but is still managed by batch system.
+            # Some batch system may restart a job in this state, so don't
+            # mark as failed yet.
+            self.task_events_mgr.process_message(
+                itask, INFO, TASK_OUTPUT_STARTED, time_run)
+        elif run_status == "0":
+            # The job succeeded
+            self.task_events_mgr.process_message(
+                itask, INFO, TASK_OUTPUT_SUCCEEDED, time_run_exit)
+        elif time_run and batch_sys_exit_polled == "1":
+            # The job has terminated without executing the error trap
+            self.task_events_mgr.process_message(
+                itask, INFO, TASK_OUTPUT_FAILED, "")
+        elif time_run:
+            # The job has started, and is still managed by batch system
+            self.task_events_mgr.process_message(
+                itask, INFO, TASK_OUTPUT_STARTED, time_run)
+        elif batch_sys_exit_polled == "1":
+            # The job never ran, and no longer in batch system
+            self.task_events_mgr.process_message(
+                itask, INFO, self.task_events_mgr.EVENT_SUBMIT_FAILED,
+                time_submit_exit)
+        else:
+            # The job never ran, and is in batch system
+            self.task_events_mgr.process_message(
+                itask, INFO, TASK_STATUS_SUBMITTED, time_submit_exit)
+
+    def _poll_task_job_message_callback(self, suite, itask, cmd_ctx, line):
+        """Helper for _poll_task_jobs_callback, on message of one task job."""
+        ctx = SuiteProcContext(self.JOBS_POLL, None)
+        ctx.out = line
+        try:
+            event_time, priority, message = line.split("|")[2:5]
+        except ValueError:
+            ctx.ret_code = 1
+            ctx.cmd = cmd_ctx.cmd  # print original command on failure
+        else:
+            ctx.ret_code = 0
+            self.task_events_mgr.process_message(
+                itask, priority, message, event_time)
+        self.task_events_mgr.log_task_job_activity(
+            ctx, suite, itask.point, itask.tdef.name)
+
+    def _run_job_cmd(self, cmd_key, suite, itasks, callback):
+        """Run job commands, e.g. poll, kill, etc.
+
+        Group itasks with their user@host.
+        Put a job command for each user@host to the multiprocess pool.
+
+        """
+        if not itasks:
+            return
+        auth_itasks = {}
+        for itask in itasks:
+            if (itask.task_host, itask.task_owner) not in auth_itasks:
+                auth_itasks[(itask.task_host, itask.task_owner)] = []
+            auth_itasks[(itask.task_host, itask.task_owner)].append(itask)
+        for (host, owner), itasks in sorted(auth_itasks.items()):
+            cmd = ["cylc", cmd_key]
+            if cylc.flags.debug:
+                cmd.append("--debug")
+            try:
+                if is_remote_host(host):
+                    cmd.append("--host=%s" % (host))
+            except IOError:
+                # Bad host, run the command any way, command will fail and
+                # callback will deal with it
+                cmd.append("--host=%s" % (host))
+            if is_remote_user(owner):
+                cmd.append("--user=%s" % (owner))
+            cmd.append("--")
+            cmd.append(GLOBAL_CFG.get_derived_host_item(
+                suite, "suite job log directory", host, owner))
+            job_log_dirs = []
+            for itask in sorted(itasks, key=lambda itask: itask.identity):
+                job_log_dirs.append(self.task_events_mgr.get_task_job_id(
+                    itask.point, itask.tdef.name, itask.submit_num))
+            cmd += job_log_dirs
+            self.proc_pool.put_command(
+                SuiteProcContext(cmd_key, cmd), callback, [suite, itasks])
+
+    @staticmethod
+    def _set_retry_timers(itask, rtconfig=None):
+        """Set try number and retry delays."""
+        if rtconfig is None:
+            rtconfig = itask.tdef.rtconfig
+        try:
+            no_retry = (
+                rtconfig[itask.tdef.run_mode + ' mode']['disable retries'])
+        except KeyError:
+            no_retry = False
+        if not no_retry:
+            for key, cfg_key in [
+                    (TASK_STATUS_SUBMIT_RETRYING, 'submission retry delays'),
+                    (TASK_STATUS_RETRYING, 'execution retry delays')]:
+                delays = rtconfig['job'][cfg_key]
+                try:
+                    itask.try_timers[key].set_delays(delays)
+                except KeyError:
+                    itask.try_timers[key] = TaskActionTimer(delays=delays)
+
+    def _simulation_submit_task_jobs(self, itasks):
+        """Simulation mode task jobs submission."""
+        for itask in itasks:
+            self._set_retry_timers(itask)
+            itask.task_host = 'SIMULATION'
+            itask.task_owner = 'SIMULATION'
+            itask.summary['batch_sys_name'] = 'SIMULATION'
+            itask.summary[self.KEY_EXECUTE_TIME_LIMIT] = (
+                itask.tdef.rtconfig['job']['simulated run length'])
+            self.task_events_mgr.process_message(
+                itask, INFO, TASK_OUTPUT_SUBMITTED)
+
+    def _submit_task_jobs_callback(self, ctx, suite, itasks):
+        """Callback when submit task jobs command exits."""
+        self._manip_task_jobs_callback(
+            ctx,
+            suite,
+            itasks,
+            self._submit_task_job_callback,
+            {BatchSysManager.OUT_PREFIX_COMMAND: self._job_cmd_out_callback})
+
+    def _submit_task_job_callback(self, suite, itask, cmd_ctx, line):
+        """Helper for _submit_task_jobs_callback, on one task job."""
+        ctx = SuiteProcContext(self.JOBS_SUBMIT, None)
+        ctx.out = line
+        items = line.split("|")
+        try:
+            ctx.timestamp, _, ctx.ret_code = items[0:3]
+        except ValueError:
+            ctx.ret_code = 1
+            ctx.cmd = cmd_ctx.cmd  # print original command on failure
+        else:
+            ctx.ret_code = int(ctx.ret_code)
+            if ctx.ret_code:
+                ctx.cmd = cmd_ctx.cmd  # print original command on failure
+        self.task_events_mgr.log_task_job_activity(
+            ctx, suite, itask.point, itask.tdef.name)
+
+        if ctx.ret_code == SuiteProcPool.JOB_SKIPPED_FLAG:
+            return
+
+        try:
+            itask.summary['submit_method_id'] = items[3]
+        except IndexError:
+            itask.summary['submit_method_id'] = None
+        if itask.summary['submit_method_id'] == "None":
+            itask.summary['submit_method_id'] = None
+        if itask.summary['submit_method_id'] and ctx.ret_code == 0:
+            self.task_events_mgr.process_message(
+                itask, INFO, '%s at %s' % (
+                    TASK_OUTPUT_SUBMITTED, ctx.timestamp))
+        else:
+            self.task_events_mgr.process_message(
+                itask, CRITICAL, '%s at %s' % (
+                    self.task_events_mgr.EVENT_SUBMIT_FAILED, ctx.timestamp))
+
+    def _prep_submit_task_job(self, suite, itask, dry_run):
+        """Prepare a task job submission.
+
+        Return itask on a good preparation.
+
+        """
+        if itask.local_job_file_path and not dry_run:
+            return itask
+
+        try:
+            job_conf = self._prep_submit_task_job_impl(suite, itask)
+            local_job_file_path = self.task_events_mgr.get_task_job_log(
+                suite, itask.point, itask.tdef.name, itask.submit_num,
+                self.JOB_FILE_BASE)
+            self.job_file_writer.write(local_job_file_path, job_conf)
+        except Exception, exc:
+            # Could be a bad command template.
+            ERR.error(traceback.format_exc())
+            LOG.error(traceback.format_exc())
+            self.task_events_mgr.log_task_job_activity(
+                SuiteProcContext(
+                    self.JOBS_SUBMIT,
+                    '(prepare job file)', err=exc, ret_code=1),
+                suite, itask.point, itask.tdef.name)
+            if not dry_run:
+                self.task_events_mgr.process_message(
+                    itask, CRITICAL, self.task_events_mgr.EVENT_SUBMIT_FAILED)
+            return
+        itask.local_job_file_path = local_job_file_path
+
+        if dry_run:
+            # This will be shown next to submit num in gcylc:
+            itask.summary['latest_message'] = 'job file written (edit/dry-run)'
+            LOG.debug("[%s] -%s" % (
+                itask.identity, itask.summary['latest_message']))
+
+        # Return value used by "cylc submit" and "cylc jobscript":
+        return itask
+
+    def _prep_submit_task_job_impl(self, suite, itask):
+        """Helper for self._prep_submit_task_job."""
+        overrides = BroadcastServer.get_inst().get(itask.identity)
+        if overrides:
+            rtconfig = pdeepcopy(itask.tdef.rtconfig)
+            poverride(rtconfig, overrides)
+        else:
+            rtconfig = itask.tdef.rtconfig
+
+        # Retry delays, needed for the try_num
+        self._set_retry_timers(itask, rtconfig)
+
+        # Submit number and try number
+        LOG.debug("[%s] -incrementing submit number" % (itask.identity,))
+        itask.submit_num += 1
+        itask.summary['submit_num'] = itask.submit_num
+        itask.local_job_file_path = None
+        self.suite_db_mgr.put_insert_task_jobs(itask, {
+            "is_manual_submit": itask.is_manual_submit,
+            "try_num": itask.get_try_num(),
+            "time_submit": get_current_time_string(),
+        })
+
+        itask.summary['batch_sys_name'] = rtconfig['job']['batch system']
+        for name in rtconfig['extra log files']:
+            itask.summary['logfiles'].append(expandvars(name))
+
+        # Determine task host settings now, just before job submission,
+        # because dynamic host selection may be used.
+
+        # host may be None (= run task on suite host)
+        itask.task_host = get_task_host(rtconfig['remote']['host'])
+        if not itask.task_host:
+            itask.task_host = 'localhost'
+        elif itask.task_host != "localhost":
+            LOG.info("[%s] -Task host: %s" % (
+                itask.identity, itask.task_host))
+
+        itask.task_owner = rtconfig['remote']['owner']
+
+        if itask.task_owner:
+            user_at_host = itask.task_owner + "@" + itask.task_host
+        else:
+            user_at_host = itask.task_host
+        itask.summary['host'] = user_at_host
+        itask.summary['job_hosts'][itask.submit_num] = user_at_host
+        try:
+            batch_sys_conf = self.task_events_mgr.get_host_conf(
+                itask, 'batch systems')[rtconfig['job']['batch system']]
+        except (TypeError, KeyError):
+            batch_sys_conf = {}
+        try:
+            itask.summary[self.KEY_EXECUTE_TIME_LIMIT] = float(
+                rtconfig['job']['execution time limit'])
+        except TypeError:
+            pass
+        if itask.summary[self.KEY_EXECUTE_TIME_LIMIT]:
+            # Default = 1, 2 and 7 minutes intervals, roughly 1, 3 and 10
+            # minutes after time limit exceeded
+            itask.poll_timers[self.KEY_EXECUTE_TIME_LIMIT] = (
+                TaskActionTimer(delays=batch_sys_conf.get(
+                    'execution time limit polling intervals', [60, 120, 420])))
+        for label, key in [
+                ('submission polling intervals', TASK_STATUS_SUBMITTED),
+                ('execution polling intervals', TASK_STATUS_RUNNING)]:
+            if key in itask.poll_timers:
+                itask.poll_timers[key].reset()
+            else:
+                values = self.task_events_mgr.get_host_conf(
+                    itask, label, skey='job')
+                if values:
+                    itask.poll_timers[key] = TaskActionTimer(delays=values)
+
+        self.init_host(suite, itask.task_host, itask.task_owner)
+        self.suite_db_mgr.put_update_task_jobs(itask, {
+            "user_at_host": user_at_host,
+            "batch_sys_name": itask.summary['batch_sys_name'],
+        })
+        itask.is_manual_submit = False
+
+        scripts = self._get_job_scripts(itask, rtconfig)
+
+        # Location of job file, etc
+        self._create_job_log_path(suite, itask)
+        job_d = self.task_events_mgr.get_task_job_id(
+            itask.point, itask.tdef.name, itask.submit_num)
+        job_file_path = os.path.join(
+            GLOBAL_CFG.get_derived_host_item(
+                suite, "suite job log directory",
+                itask.task_host, itask.task_owner),
+            job_d, self.JOB_FILE_BASE)
+
+        return {
+            'batch_system_name': rtconfig['job']['batch system'],
+            'batch_submit_command_template': (
+                rtconfig['job']['batch submit command template']),
+            'batch_system_conf': batch_sys_conf,
+            'directives': rtconfig['directives'],
+            'environment': rtconfig['environment'],
+            'execution_time_limit': itask.summary[self.KEY_EXECUTE_TIME_LIMIT],
+            'env-script': rtconfig['env-script'],
+            'err-script': rtconfig['err-script'],
+            'host': itask.task_host,
+            'init-script': rtconfig['init-script'],
+            'job_file_path': job_file_path,
+            'job_d': job_d,
+            'namespace_hierarchy': itask.tdef.namespace_hierarchy,
+            'owner': itask.task_owner,
+            'post-script': scripts[2],
+            'pre-script': scripts[0],
+            'remote_suite_d': rtconfig['remote']['suite definition directory'],
+            'script': scripts[1],
+            'shell': rtconfig['job']['shell'],
+            'submit_num': itask.submit_num,
+            'suite_name': suite,
+            'task_id': itask.identity,
+            'try_num': itask.get_try_num(),
+            'work_d': rtconfig['work sub-directory'],
+        }
diff --git a/lib/cylc/task_outputs.py b/lib/cylc/task_outputs.py
index cac44e0..5a8d09e 100644
--- a/lib/cylc/task_outputs.py
+++ b/lib/cylc/task_outputs.py
@@ -15,11 +15,8 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""Task output message manager and constants."""
 
-"""Task output messages and associated logic."""
-
-
-import sys
 
 # Standard task output strings, used for triggering.
 TASK_OUTPUT_EXPIRED = "expired"
@@ -29,91 +26,145 @@ TASK_OUTPUT_STARTED = "started"
 TASK_OUTPUT_SUCCEEDED = "succeeded"
 TASK_OUTPUT_FAILED = "failed"
 
+_SORT_ORDERS = (
+    TASK_OUTPUT_EXPIRED,
+    TASK_OUTPUT_SUBMITTED,
+    TASK_OUTPUT_SUBMIT_FAILED,
+    TASK_OUTPUT_STARTED,
+    TASK_OUTPUT_SUCCEEDED,
+    TASK_OUTPUT_FAILED)
 
-class TaskOutputs(object):
-
-    # Memory optimization - constrain possible attributes to this list.
-    __slots__ = ["owner_id", "completed", "not_completed"]
+_TRIGGER = 0
+_MESSAGE = 1
+_IS_COMPLETED = 2
 
-    def __init__(self, owner_id):
 
-        self.owner_id = owner_id
-        # Store completed and not-completed outputs in separate
-        # dicts to allow quick passing of completed to the broker.
+class TaskOutputs(object):
+    """Task output message manager.
 
-        # Using rhs of dict as a cheap way to get owner ID to receiving
-        # tasks via the dependency broker object:
-        # self.(not)completed[message] = owner_id
+    Manage standard task outputs and custom outputs, e.g.:
+    [scheduling]
+        [[dependencies]]
+            graph = t1:trigger1 => t2
+    [runtime]
+        [[t1]]
+            [[[outputs]]]
+                trigger1 = message 1
 
-        self.completed = {}
-        self.not_completed = {}
+    Can search item by message string or by trigger string.
+    """
 
-    def count(self):
-        return len(self.completed) + len(self.not_completed)
+    # Memory optimization - constrain possible attributes to this list.
+    __slots__ = ["_by_message", "_by_trigger"]
 
-    def count_completed(self):
-        return len(self.completed)
+    def __init__(self, tdef, point):
+        self._by_message = {}
+        self._by_trigger = {}
+        for trigger, message in tdef.get_outputs(point):
+            self.add(message, trigger)
 
-    def dump(self):
-        # return a list of strings representing each message and its state
-        res = []
-        for key in self.not_completed:
-            res.append([key, False])
-        for key in self.completed:
-            res.append([key, True])
-        return res
+    def add(self, message, trigger=None, is_completed=False):
+        """Add a new output message"""
+        if trigger is None:
+            trigger = message
+        self._by_message[message] = [trigger, message, is_completed]
+        self._by_trigger[trigger] = self._by_message[message]
 
     def all_completed(self):
-        return len(self.not_completed) == 0
-
-    def is_completed(self, msg):
-        return self._qualify(msg) in self.completed
+        """Return True if all outputs are completed."""
+        return all([val[_IS_COMPLETED] for val in self._by_message.values()])
 
-    def _qualify(self, msg):
-        # Prefix a message string with task ID.
-        return "%s %s" % (self.owner_id, msg)
+    def exists(self, message=None, trigger=None):
+        """Return True if message/trigger is identified as an output."""
+        try:
+            return self._get_item(message, trigger) is not None
+        except KeyError:
+            return False
+
+    def get_all(self):
+        """Return an iterator for all outputs."""
+        return sorted(self._by_message.values(), cmp=self._sort_by_message)
+
+    def get_completed(self):
+        """Return all completed output messages."""
+        ret = []
+        for value in self.get_all():
+            if value[_IS_COMPLETED]:
+                ret.append(value[_MESSAGE])
+        return ret
+
+    def get_not_completed(self):
+        """Return all not-completed output messages."""
+        ret = []
+        for value in self.get_all():
+            if not value[_IS_COMPLETED]:
+                ret.append(value[_MESSAGE])
+        return ret
+
+    def is_completed(self, message=None, trigger=None):
+        """Return True if output of message is completed."""
+        try:
+            return self._get_item(message, trigger)[_IS_COMPLETED]
+        except KeyError:
+            return False
 
-    def set_completed(self, msg):
-        message = self._qualify(msg)
+    def remove(self, message=None, trigger=None):
+        """Remove an output by message, if it exists."""
         try:
-            del self.not_completed[message]
-        except:
+            trigger, message, _ = self._get_item(message, trigger)
+        except KeyError:
             pass
-        self.completed[message] = self.owner_id
+        else:
+            del self._by_message[message]
+            del self._by_trigger[trigger]
 
-    def exists(self, msg):
-        message = self._qualify(msg)
-        return message in self.completed or message in self.not_completed
+    def set_all_completed(self):
+        """Set all outputs to complete."""
+        for value in self._by_message.values():
+            value[_IS_COMPLETED] = True
 
     def set_all_incomplete(self):
-        for message in self.completed.keys():
-            del self.completed[message]
-            self.not_completed[message] = self.owner_id
+        """Set all outputs to incomplete."""
+        for value in self._by_message.values():
+            value[_IS_COMPLETED] = False
 
-    def set_all_completed(self):
-        for message in self.not_completed.keys():
-            del self.not_completed[message]
-            self.completed[message] = self.owner_id
-
-    def add(self, msg, completed=False):
-        # Add a new output message, prepend my task ID.
-        message = self._qualify(msg)
-        if message in self.completed or message in self.not_completed:
-            # duplicate output messages are an error.
-            print >> sys.stderr, (
-                'WARNING: output already registered: ' + message)
-        if not completed:
-            self.not_completed[message] = self.owner_id
+    def set_completed(self, message=None, trigger=None, is_completed=True):
+        """Set the output identified by message/trigger as completed."""
+        try:
+            item = self._get_item(message, trigger)
+            old_is_completed = item[_IS_COMPLETED]
+            item[_IS_COMPLETED] = is_completed
+        except KeyError:
+            pass
         else:
-            self.completed[message] = self.owner_id
+            return bool(old_is_completed) != bool(is_completed)
 
-    def remove(self, msg):
-        """Remove an output, if it exists."""
-        message = self._qualify(msg)
+    def _get_item(self, message, trigger):
+        """Return self._by_trigger[trigger] or self._by_message[message].
+
+        Look up by message if message is not None, otherwise by trigger.
+        """
+        if message is None:
+            return self._by_trigger[trigger]
+        else:
+            return self._by_message[message]
+
+    @staticmethod
+    def _sort_by_message(item1, item2):
+        """Compare by _MESSAGE."""
         try:
-            del self.completed[message]
-        except:
-            try:
-                del self.not_completed[message]
-            except:
-                pass
+            idx1 = _SORT_ORDERS.index(item1[_MESSAGE])
+        except ValueError:
+            idx1 = None
+        try:
+            idx2 = _SORT_ORDERS.index(item2[_MESSAGE])
+        except ValueError:
+            idx2 = None
+        if idx1 is None and idx2 is None:
+            return cmp(item1[_MESSAGE], item2[_MESSAGE])
+        elif idx1 is None:
+            return 1
+        elif idx2 is None:
+            return -1
+        else:
+            return cmp(idx1, idx2)
diff --git a/lib/cylc/task_pool.py b/lib/cylc/task_pool.py
index 353b2fd..71af7d4 100644
--- a/lib/cylc/task_pool.py
+++ b/lib/cylc/task_pool.py
@@ -32,38 +32,30 @@ tasks against the new stop cycle.
 """
 
 from fnmatch import fnmatchcase
-from logging import DEBUG, INFO, WARNING, getLogger
-import os
 import pickle
-import Queue
+from random import randrange
 from time import time
 import traceback
 
-from cylc.network import COMMS_TASK_MESSAGE_OBJ_NAME
-from cylc.network.task_msg_server import TaskMessageServer
-from cylc.batch_sys_manager import BATCH_SYS_MANAGER
-from cylc.cfgspec.globalcfg import GLOBAL_CFG
-from cylc.config import SuiteConfig
+from cylc.config import SuiteConfigError
 from cylc.cycling.loader import (
     get_interval, get_interval_cls, get_point, ISO8601_CYCLING_TYPE,
     standardise_point_string)
 import cylc.flags
-from cylc.get_task_proxy import get_task_proxy
-from cylc.mp_pool import SuiteProcPool, SuiteProcContext
 from cylc.network.ext_trigger_server import ExtTriggerServer
-from cylc.network.suite_broadcast_server import BroadcastServer
-from cylc.owner import is_remote_user
-from cylc.rundb import CylcSuiteDAO
-from cylc.suite_host import is_remote_host
+from cylc.suite_logging import ERR, LOG, OUT
+from cylc.task_action_timer import TaskActionTimer
+from cylc.task_id import TaskID
+from cylc.task_proxy import TaskProxy
 from cylc.task_state import (
-    TASK_STATUSES_ACTIVE, TASK_STATUSES_NOT_STALLED, TASK_STATUSES_FINAL,
+    TASK_STATUSES_ACTIVE, TASK_STATUSES_NOT_STALLED,
     TASK_STATUS_HELD, TASK_STATUS_WAITING, TASK_STATUS_EXPIRED,
     TASK_STATUS_QUEUED, TASK_STATUS_READY, TASK_STATUS_SUBMITTED,
     TASK_STATUS_SUBMIT_FAILED, TASK_STATUS_SUBMIT_RETRYING,
     TASK_STATUS_RUNNING, TASK_STATUS_SUCCEEDED, TASK_STATUS_FAILED,
     TASK_STATUS_RETRYING)
-from cylc.wallclock import (get_current_time_string,
-                            get_time_string_from_unix_time)
+from cylc.wallclock import (
+    get_current_time_string, get_time_string_from_unix_time)
 
 
 class TaskPool(object):
@@ -71,9 +63,6 @@ class TaskPool(object):
 
     ERR_PREFIX_TASKID_MATCH = "No matching tasks found: "
     ERR_PREFIX_TASK_NOT_ON_SEQUENCE = "Invalid cycle point for task: "
-    JOBS_KILL = "jobs-kill"
-    JOBS_POLL = "jobs-poll"
-    JOBS_SUBMIT = SuiteProcPool.JOBS_SUBMIT
 
     STOP_AUTO = 'AUTOMATIC'
     STOP_AUTO_ON_TASK_FAILURE = 'AUTOMATIC(ON-TASK-FAILURE)'
@@ -81,35 +70,19 @@ class TaskPool(object):
     STOP_REQUEST_NOW = 'REQUEST(NOW)'
     STOP_REQUEST_NOW_NOW = 'REQUEST(NOW-NOW)'
 
-    TABLE_SUITE_PARAMS = CylcSuiteDAO.TABLE_SUITE_PARAMS
-    TABLE_SUITE_TEMPLATE_VARS = CylcSuiteDAO.TABLE_SUITE_TEMPLATE_VARS
-    TABLE_TASK_POOL = CylcSuiteDAO.TABLE_TASK_POOL
-    TABLE_TASK_ACTION_TIMERS = CylcSuiteDAO.TABLE_TASK_ACTION_TIMERS
-    TABLE_CHECKPOINT_ID = CylcSuiteDAO.TABLE_CHECKPOINT_ID
-
-    def __init__(self, suite, pri_dao, pub_dao, stop_point, comms_daemon, log,
-                 run_mode):
-        self.suite_name = suite
-        self.comms_daemon = comms_daemon
-        self.run_mode = run_mode
-        self.log = log
+    def __init__(self, config, stop_point, suite_db_mgr, task_events_mgr):
+        self.config = config
         self.stop_point = stop_point
-        self.do_reload = False
-        self.pri_dao = pri_dao
-        self.pub_dao = pub_dao
+        self.suite_db_mgr = suite_db_mgr
+        self.task_events_mgr = task_events_mgr
 
-        config = SuiteConfig.get_inst()
-        self.custom_runahead_limit = config.get_custom_runahead_limit()
+        self.do_reload = False
+        self.custom_runahead_limit = self.config.get_custom_runahead_limit()
         self.max_future_offset = None
         self._prev_runahead_base_point = None
         self.max_num_active_cycle_points = (
-            config.get_max_num_active_cycle_points())
-        self._prev_runahead_base_point = None
+            self.config.get_max_num_active_cycle_points())
         self._prev_runahead_sequence_points = None
-        self.message_queue = TaskMessageServer(self.suite_name)
-
-        self.comms_daemon.connect(
-            self.message_queue, COMMS_TASK_MESSAGE_OBJ_NAME)
 
         self.pool = {}
         self.runahead_pool = {}
@@ -127,23 +100,11 @@ class TaskPool(object):
         self.held_future_tasks = []
 
         self.orphans = []
-        self.task_name_list = config.get_task_name_list()
-
-        self.db_deletes_map = {
-            self.TABLE_SUITE_PARAMS: [],
-            self.TABLE_TASK_POOL: [],
-            self.TABLE_TASK_ACTION_TIMERS: []}
-        self.db_inserts_map = {
-            self.TABLE_SUITE_PARAMS: [],
-            self.TABLE_SUITE_TEMPLATE_VARS: [],
-            self.TABLE_CHECKPOINT_ID: [],
-            self.TABLE_TASK_POOL: [],
-            self.TABLE_TASK_ACTION_TIMERS: []}
+        self.task_name_list = self.config.get_task_name_list()
 
     def assign_queues(self):
         """self.myq[taskname] = qfoo"""
-        config = SuiteConfig.get_inst()
-        qconfig = config.cfg['scheduling']['queues']
+        qconfig = self.config.cfg['scheduling']['queues']
         self.myq = {}
         for queue in qconfig:
             for taskname in qconfig[queue]['members']:
@@ -152,48 +113,28 @@ class TaskPool(object):
     def insert_tasks(self, items, stop_point_str, no_check=False):
         """Insert tasks."""
         n_warnings = 0
-        config = SuiteConfig.get_inst()
-        names = config.get_task_name_list()
-        fams = config.runtime['first-parent descendants']
-        task_ids = []
+        task_items = []
         for item in items:
             point_str, name_str, _ = self._parse_task_item(item)
             if point_str is None:
-                self.log.warning(
+                LOG.warning(
                     "%s: task ID for insert must contain cycle point" % (item))
                 n_warnings += 1
                 continue
             try:
                 point_str = standardise_point_string(point_str)
             except ValueError as exc:
-                self.log.warning(
+                LOG.warning(
                     self.ERR_PREFIX_TASKID_MATCH + ("%s (%s)" % (item, exc)))
                 n_warnings += 1
                 continue
-            i_names = []
-            if name_str in names:
-                i_names.append(name_str)
-            elif name_str in fams:
-                for name in fams[name_str]:
-                    if name in names:
-                        i_names.append(name)
-            else:
-                for name in names:
-                    if fnmatchcase(name, name_str):
-                        i_names.append(name)
-                for fam, fam_names in fams.items():
-                    if not fnmatchcase(fam, name_str):
-                        continue
-                    for name in fam_names:
-                        if name in names:
-                            i_names.append(name)
-            if i_names:
-                for name in i_names:
-                    task_ids.append((name, point_str))
-            else:
-                self.log.warning(self.ERR_PREFIX_TASKID_MATCH + item)
+            taskdefs = self.config.find_taskdefs(name_str)
+            if not taskdefs:
+                LOG.warning(self.ERR_PREFIX_TASKID_MATCH + item)
                 n_warnings += 1
                 continue
+            for taskdef in taskdefs:
+                task_items.append([(taskdef.name, point_str), taskdef])
         if stop_point_str is None:
             stop_point = None
         else:
@@ -201,40 +142,38 @@ class TaskPool(object):
                 stop_point = get_point(
                     standardise_point_string(stop_point_str))
             except ValueError as exc:
-                self.log.warning("Invalid stop point: %s (%s)" % (
+                LOG.warning("Invalid stop point: %s (%s)" % (
                     stop_point_str, exc))
                 n_warnings += 1
                 return n_warnings
-        task_states_data = self.pri_dao.select_task_states_by_task_ids(
-            ["submit_num"], task_ids)
-        for name_str, point_str in task_ids:
+        task_states_data = (
+            self.suite_db_mgr.pri_dao.select_task_states_by_task_ids(
+                ["submit_num"], [task_item[0] for task_item in task_items]))
+        for key, taskdef in task_items:
             # TODO - insertion of start-up tasks? (startup=False assumed here)
 
             # Check that the cycle point is on one of the tasks sequences.
-            on_sequence = False
+            point_str = key[1]
             point = get_point(point_str)
             if not no_check:  # Check if cycle point is on the tasks sequence.
-                for sequence in config.taskdefs[name_str].sequences:
+                for sequence in taskdef.sequences:
                     if sequence.is_on_sequence(point):
-                        on_sequence = True
                         break
-                if not on_sequence:
-                    self.log.warning(self.ERR_PREFIX_TASK_NOT_ON_SEQUENCE +
-                                     name_str + ', ' + point_str)
+                else:
+                    LOG.warning("%s%s, %s" % (
+                        self.ERR_PREFIX_TASK_NOT_ON_SEQUENCE, taskdef.name,
+                        point_str))
                     continue
 
             submit_num = None
-            if (name_str, point_str) in task_states_data:
-                submit_num = task_states_data[(name_str, point_str)].get(
-                    "submit_num")
-            new_task = get_task_proxy(
-                name_str, get_point(point_str), stop_point=stop_point,
-                submit_num=submit_num, message_queue=self.message_queue)
-            if new_task:
-                self.add_to_runahead_pool(new_task)
+            if key in task_states_data:
+                submit_num = task_states_data[key].get("submit_num")
+            self.add_to_runahead_pool(TaskProxy(
+                taskdef, get_point(point_str),
+                stop_point=stop_point, submit_num=submit_num))
         return n_warnings
 
-    def add_to_runahead_pool(self, itask):
+    def add_to_runahead_pool(self, itask, is_restart=False):
         """Add a new task to the runahead pool if possible.
 
         Tasks whose recurrences allow them to spawn beyond the suite
@@ -246,39 +185,54 @@ class TaskPool(object):
         # do not add if a task with the same ID already exists
         # e.g. an inserted task caught up with an existing one
         if self.get_task_by_id(itask.identity) is not None:
-            self.log.warning(
-                itask.identity +
-                ' cannot be added to pool: task ID already exists')
-            return False
+            LOG.warning(
+                '%s cannot be added to pool: task ID already exists' %
+                itask.identity)
+            return
 
         # do not add if an inserted task is beyond its own stop point
         # (note this is not the same as recurrence bounds)
         if itask.stop_point and itask.point > itask.stop_point:
-            self.log.info(
-                itask.identity + ' not adding to pool: beyond task stop cycle')
-            return False
+            LOG.info(
+                '%s not adding to pool: beyond task stop cycle' %
+                itask.identity)
+            return
 
         # add in held state if beyond the suite hold point
         if self.hold_point and itask.point > self.hold_point:
-            itask.log(
-                INFO,
-                "holding (beyond suite hold point) " + str(self.hold_point))
-            itask.state.reset_state(TASK_STATUS_HELD)
+            LOG.info(
+                "holding (beyond suite hold point) %s" % self.hold_point,
+                itask=itask)
+            itask.state.set_held()
         elif (itask.point <= self.stop_point and
                 self.task_has_future_trigger_overrun(itask)):
-            itask.log(INFO, "holding (future trigger beyond stop point)")
+            LOG.info("holding (future trigger beyond stop point)", itask=itask)
             self.held_future_tasks.append(itask.identity)
-            itask.state.reset_state(TASK_STATUS_HELD)
+            itask.state.set_held()
         elif self.is_held and itask.state.status == TASK_STATUS_WAITING:
             # Hold newly-spawned tasks in a held suite (e.g. due to manual
             # triggering of a held task).
-            itask.state.reset_state(TASK_STATUS_HELD)
+            itask.state.set_held()
 
         # add to the runahead pool
         self.runahead_pool.setdefault(itask.point, {})
         self.runahead_pool[itask.point][itask.identity] = itask
         self.rhpool_changed = True
-        return True
+
+        if is_restart:
+            return itask
+
+        # store in persistent
+        if itask.submit_num > 0:
+            self.suite_db_mgr.put_update_task_states(itask, {
+                "time_updated": get_current_time_string(),
+                "status": itask.state.status})
+        else:
+            self.suite_db_mgr.put_insert_task_states(itask, {
+                "time_created": get_current_time_string(),
+                "time_updated": get_current_time_string(),
+                "status": itask.state.status})
+        return itask
 
     def release_runahead_tasks(self):
         """Release tasks from the runahead pool to the main pool.
@@ -329,8 +283,7 @@ class TaskPool(object):
             sequence_points = self._prev_runahead_sequence_points
         else:
             sequence_points = []
-            config = SuiteConfig.get_inst()
-            for sequence in config.sequences:
+            for sequence in self.config.sequences:
                 point = runahead_base_point
                 for _ in range(limit):
                     point = sequence.get_next_point(point)
@@ -359,7 +312,7 @@ class TaskPool(object):
             if (self._prev_runahead_base_point is None or
                     self._prev_runahead_base_point != runahead_base_point):
                 if self.custom_runahead_limit < self.max_future_offset:
-                    self.log.warning(
+                    LOG.warning(
                         ('custom runahead limit of %s is less than ' +
                          'future triggering offset %s: suite may stall.') % (
                             self.custom_runahead_limit,
@@ -378,6 +331,117 @@ class TaskPool(object):
                     released = True
         return released
 
+    def load_db_task_pool_for_restart(self, row_idx, row):
+        """Load a task from previous task pool.
+
+        The state of task prerequisites (satisfied or not) and outputs
+        (completed or not) is determined by the recorded TASK_STATUS:
+
+        TASK_STATUS_WAITING    - prerequisites and outputs unsatisified
+        TASK_STATUS_HELD       - ditto (only waiting tasks can be held)
+        TASK_STATUS_QUEUED     - prereqs satisfied, outputs not completed
+                                 (only tasks ready to run can get queued)
+        TASK_STATUS_READY      - ditto
+        TASK_STATUS_SUBMITTED  - ditto (but see *)
+        TASK_STATUS_SUBMIT_RETRYING - ditto
+        TASK_STATUS_RUNNING    - ditto (but see *)
+        TASK_STATUS_FAILED     - ditto (tasks must run in order to fail)
+        TASK_STATUS_RETRYING   - ditto (tasks must fail in order to retry)
+        TASK_STATUS_SUCCEEDED  - prerequisites satisfied, outputs completed
+
+        (*) tasks reloaded with TASK_STATUS_SUBMITTED or TASK_STATUS_RUNNING
+        are polled to determine what their true status is.
+        """
+        if row_idx == 0:
+            OUT.info("LOADING task proxies")
+        (cycle, name, spawned, status, hold_swap, submit_num, _,
+         user_at_host) = row
+        try:
+            itask = TaskProxy(
+                self.config.get_taskdef(name),
+                get_point(cycle),
+                status=status,
+                hold_swap=hold_swap,
+                has_spawned=bool(spawned),
+                submit_num=submit_num)
+        except SuiteConfigError as exc:
+            if cylc.flags.debug:
+                ERR.error(traceback.format_exc())
+            else:
+                ERR.error(str(exc))
+            ERR.warning((
+                "ignoring task %s from the suite run database\n"
+                "(its task definition has probably been deleted).") % name)
+        except Exception:
+            ERR.error(traceback.format_exc())
+            ERR.error("could not load task %s" % name)
+        else:
+            if status in (TASK_STATUS_SUBMITTED, TASK_STATUS_RUNNING):
+                itask.state.set_prerequisites_all_satisfied()
+                # update the task proxy with user at host
+                try:
+                    itask.task_owner, itask.task_host = user_at_host.split(
+                        "@", 1)
+                except ValueError:
+                    itask.task_owner = None
+                    itask.task_host = user_at_host
+
+            elif status in (TASK_STATUS_SUBMIT_FAILED, TASK_STATUS_FAILED):
+                itask.state.set_prerequisites_all_satisfied()
+
+            elif status in (TASK_STATUS_QUEUED, TASK_STATUS_READY):
+                # reset to waiting as these had not been submitted yet.
+                itask.state.reset_state(TASK_STATUS_WAITING)
+                itask.state.set_prerequisites_all_satisfied()
+
+            elif status in (TASK_STATUS_SUBMIT_RETRYING, TASK_STATUS_RETRYING):
+                itask.state.set_prerequisites_all_satisfied()
+
+            elif status == TASK_STATUS_SUCCEEDED:
+                itask.state.set_prerequisites_all_satisfied()
+                # TODO - just poll for outputs in the job status file.
+                itask.state.outputs.set_all_completed()
+
+            if user_at_host:
+                itask.summary['job_hosts'][int(submit_num)] = user_at_host
+            if hold_swap:
+                OUT.info("+ %s.%s %s (%s)" % (name, cycle, status, hold_swap))
+            else:
+                OUT.info("+ %s.%s %s" % (name, cycle, status))
+            self.add_to_runahead_pool(itask, is_restart=True)
+
+    def load_db_task_action_timers(self, row_idx, row):
+        """Load a task action timer, e.g. event handlers, retry states."""
+        if row_idx == 0:
+            OUT.info("LOADING task action timers")
+        (cycle, name, ctx_key_pickle, ctx_pickle, delays_pickle, num, delay,
+         timeout) = row
+        id_ = TaskID.get(name, cycle)
+        ctx_key = "?"
+        try:
+            ctx_key = pickle.loads(str(ctx_key_pickle))
+            ctx = pickle.loads(str(ctx_pickle))
+            delays = pickle.loads(str(delays_pickle))
+            if ctx_key and ctx_key[0] in ["poll_timers", "try_timers"]:
+                itask = self.get_task_by_id(id_)
+                if itask is None:
+                    ERR.warning("%(id)s: task not found, skip" % {"id": id_})
+                    return
+                getattr(itask, ctx_key[0])[ctx_key[1]] = TaskActionTimer(
+                    ctx, delays, num, delay, timeout)
+            else:
+                key1, submit_num = ctx_key
+                key = (key1, cycle, name, submit_num)
+                self.task_events_mgr.event_timers[key] = TaskActionTimer(
+                    ctx, delays, num, delay, timeout)
+        except (EOFError, TypeError, LookupError, ValueError):
+            ERR.warning(
+                "%(id)s: skip action timer %(ctx_key)s" %
+                {"id": id_, "ctx_key": ctx_key})
+            ERR.warning(traceback.format_exc())
+            return
+        OUT.info("+ %s.%s %s" % (name, cycle, ctx_key))
+
     def release_runahead_task(self, itask):
         """Release itask to the appropriate queue in the active pool."""
         queue = self.myq[itask.tdef.name]
@@ -388,7 +452,7 @@ class TaskPool(object):
         self.pool[itask.point][itask.identity] = itask
         self.pool_changed = True
         cylc.flags.pflag = True
-        itask.log(DEBUG, "released to the task pool")
+        LOG.debug("released to the task pool", itask=itask)
         del self.runahead_pool[itask.point][itask.identity]
         if not self.runahead_pool[itask.point]:
             del self.runahead_pool[itask.point]
@@ -417,8 +481,8 @@ class TaskPool(object):
         self.pool_changed = True
         msg = "task proxy removed"
         if reason:
-            msg += " (" + reason + ")"
-        itask.log(DEBUG, msg)
+            msg += " (%s)" % reason
+        LOG.debug(msg, itask=itask)
         if itask.tdef.max_future_prereq_offset is not None:
             self.set_max_future_offset()
         del itask
@@ -464,13 +528,13 @@ class TaskPool(object):
 
         Return None if task does not exist.
         """
-        for itask_ids in self.runahead_pool.values() + self.queues.values():
+        for itask_ids in self.queues.values() + self.runahead_pool.values():
             try:
                 return itask_ids[id_]
             except KeyError:
                 pass
 
-    def submit_tasks(self):
+    def get_ready_tasks(self):
         """
         1) queue tasks that are ready to run (prerequisites satisfied,
         clock-trigger time up) or if their manual trigger flag is set.
@@ -488,22 +552,22 @@ class TaskPool(object):
         after use so that two manual trigger ops are required to submit
         an initially unqueued task that is queue-limited.
 
-        Return the number of tasks that are dequeued.
+        Return the tasks that are dequeued.
         """
 
         # 1) queue unqueued tasks that are ready to run or manually forced
+        now = time()
         for itask in self.get_tasks():
-            if not itask.state.status == TASK_STATUS_QUEUED:
+            if itask.state.status != TASK_STATUS_QUEUED:
                 # only need to check that unqueued tasks are ready
-                if itask.manual_trigger or itask.ready_to_run():
+                if itask.manual_trigger or itask.ready_to_run(now):
                     # queue the task
-                    itask.state.set_state(TASK_STATUS_QUEUED)
+                    itask.state.reset_state(TASK_STATUS_QUEUED)
                     itask.reset_manual_trigger()
 
         # 2) submit queued tasks if manually forced or not queue-limited
         ready_tasks = []
-        config = SuiteConfig.get_inst()
-        qconfig = config.cfg['scheduling']['queues']
+        qconfig = self.config.cfg['scheduling']['queues']
         for queue in self.queues:
             # 2.1) count active tasks and compare to queue limit
             n_active = 0
@@ -531,92 +595,9 @@ class TaskPool(object):
                     itask.reset_manual_trigger()
                 # else leaved queued
 
-        self.log.debug('%d task(s) de-queued' % len(ready_tasks))
-
-        self.submit_task_jobs(ready_tasks)
-        return len(ready_tasks)
-
-    def submit_task_jobs(self, ready_tasks):
-        """Prepare and submit task jobs."""
-        if not ready_tasks:
-            return
-
-        # Prepare tasks for job submission
-        config = SuiteConfig.get_inst()
-        bcast = BroadcastServer.get_inst()
-        prepared_tasks = []
-        for itask in ready_tasks:
-            if (config.cfg['cylc']['log resolved dependencies'] and
-                    not itask.local_job_file_path):
-                itask.log(INFO,
-                          'triggered off %s' % (
-                              itask.state.get_resolved_dependencies()))
-            overrides = bcast.get(itask.identity)
-            if self.run_mode == 'simulation':
-                itask.job_submission_succeeded()
-            elif itask.prep_submit(overrides=overrides) is not None:
-                prepared_tasks.append(itask)
-
-        if not prepared_tasks:
-            return
+        LOG.debug('%d task(s) de-queued' % len(ready_tasks))
 
-        # Submit task jobs
-        auth_itasks = {}
-        for itask in prepared_tasks:
-            # The job file is now (about to be) used: reset the file write flag
-            # so that subsequent manual retrigger will generate a new job file.
-            itask.local_job_file_path = None
-            itask.state.set_state(TASK_STATUS_READY)
-            if (itask.task_host, itask.task_owner) not in auth_itasks:
-                auth_itasks[(itask.task_host, itask.task_owner)] = []
-            auth_itasks[(itask.task_host, itask.task_owner)].append(itask)
-        for auth, itasks in sorted(auth_itasks.items()):
-            cmd = ["cylc", self.JOBS_SUBMIT]
-            if cylc.flags.debug:
-                cmd.append("--debug")
-            host, owner = auth
-            remote_mode = False
-            kwargs = {}
-            for key, value, test_func in [
-                    ('host', host, is_remote_host),
-                    ('user', owner, is_remote_user)]:
-                if test_func(value):
-                    cmd.append('--%s=%s' % (key, value))
-                    remote_mode = True
-                    kwargs[key] = value
-            if remote_mode:
-                cmd.append('--remote-mode')
-            cmd.append("--")
-            cmd.append(GLOBAL_CFG.get_derived_host_item(
-                self.suite_name, 'suite job log directory', host, owner))
-            stdin_file_paths = []
-            job_log_dirs = []
-            for itask in sorted(itasks, key=lambda itask: itask.identity):
-                if remote_mode:
-                    stdin_file_paths.append(itask.get_job_log_path(
-                        itask.HEAD_MODE_LOCAL, tail=itask.JOB_FILE_BASE))
-                job_log_dirs.append(itask.get_job_log_path())
-            cmd += job_log_dirs
-            SuiteProcPool.get_inst().put_command(
-                SuiteProcContext(
-                    self.JOBS_SUBMIT,
-                    cmd,
-                    stdin_file_paths=stdin_file_paths,
-                    job_log_dirs=job_log_dirs,
-                    **kwargs
-                ),
-                self.submit_task_jobs_callback)
-
-    def submit_task_jobs_callback(self, ctx):
-        """Callback when submit task jobs command exits."""
-        self._manip_task_jobs_callback(
-            ctx,
-            lambda itask, ctx, line: itask.job_submit_callback(ctx, line),
-            {
-                BATCH_SYS_MANAGER.OUT_PREFIX_COMMAND:
-                lambda itask, ctx, line: itask.job_cmd_out_callback(ctx, line),
-            },
-        )
+        return ready_tasks
 
     def task_has_future_trigger_overrun(self, itask):
         """Check for future triggers extending beyond the final cycle."""
@@ -639,10 +620,10 @@ class TaskPool(object):
                 interval = get_interval(interval)
         if interval is None:
             # No limit
-            self.log.warning("setting NO custom runahead limit")
+            LOG.warning("setting NO custom runahead limit")
             self.custom_runahead_limit = None
         else:
-            self.log.info("setting custom runahead limit to %s" % interval)
+            LOG.info("setting custom runahead limit to %s" % interval)
             self.custom_runahead_limit = interval
         self.release_runahead_tasks()
 
@@ -680,14 +661,14 @@ class TaskPool(object):
                 max_offset = itask.tdef.max_future_prereq_offset
         self.max_future_offset = max_offset
 
-    def reconfigure(self, stop_point):
+    def reconfigure(self, config, stop_point):
         """Set the task pool to reload mode."""
+        self.config = config
         self.do_reload = True
 
-        config = SuiteConfig.get_inst()
-        self.custom_runahead_limit = config.get_custom_runahead_limit()
+        self.custom_runahead_limit = self.config.get_custom_runahead_limit()
         self.max_num_active_cycle_points = (
-            config.get_max_num_active_cycle_points())
+            self.config.get_max_num_active_cycle_points())
         self.stop_point = stop_point
 
         # reassign live tasks from the old queues to the new.
@@ -706,7 +687,7 @@ class TaskPool(object):
 
         # find any old tasks that have been removed from the suite
         old_task_name_list = self.task_name_list
-        self.task_name_list = config.get_task_name_list()
+        self.task_name_list = self.config.get_task_name_list()
         for name in old_task_name_list:
             if name not in self.task_name_list:
                 self.orphans.append(name)
@@ -714,15 +695,15 @@ class TaskPool(object):
             if name in self.orphans:
                 self.orphans.remove(name)
         # adjust the new suite config to handle the orphans
-        config.adopt_orphans(self.orphans)
+        self.config.adopt_orphans(self.orphans)
 
     def reload_taskdefs(self):
         """Reload task definitions."""
-        self.log.info("Reloading task definitions.")
+        LOG.info("Reloading task definitions.")
         # Log tasks orphaned by a reload that were not in the task pool.
         for task in self.orphans:
             if task not in [tsk.tdef.name for tsk in self.get_all_tasks()]:
-                getLogger("log").log(WARNING, "Removed task: '%s'" % (task,))
+                LOG.warning("Removed task: '%s'" % (task,))
         for itask in self.get_all_tasks():
             if itask.tdef.name in self.orphans:
                 if itask.state.status in [
@@ -730,22 +711,28 @@ class TaskPool(object):
                         TASK_STATUS_SUBMIT_RETRYING, TASK_STATUS_RETRYING,
                         TASK_STATUS_HELD]:
                     # Remove orphaned task if it hasn't started running yet.
-                    itask.log(WARNING, "(task orphaned by suite reload)")
+                    LOG.warning("(task orphaned by suite reload)", itask=itask)
                     self.remove(itask)
                 else:
                     # Keep active orphaned task, but stop it from spawning.
                     itask.has_spawned = True
-                    itask.log(WARNING, "last instance (orphaned by reload)")
+                    LOG.warning(
+                        "last instance (orphaned by reload)", itask=itask)
             else:
-                new_task = get_task_proxy(
-                    itask.tdef.name, itask.point, itask.state.status,
-                    stop_point=itask.stop_point, submit_num=itask.submit_num,
-                    is_reload_or_restart=True, pre_reload_inst=itask)
                 self.remove(itask, '(suite definition reload)')
-                self.add_to_runahead_pool(new_task)
-        self.log.info("Reload completed.")
+                new_task = self.add_to_runahead_pool(TaskProxy(
+                    self.config.get_taskdef(itask.tdef.name), itask.point,
+                    itask.state.status, stop_point=itask.stop_point,
+                    submit_num=itask.submit_num))
+                new_task.copy_pre_reload(itask)
+                LOG.info('reloaded task definition', itask=itask)
+                if itask.state.status in TASK_STATUSES_ACTIVE:
+                    LOG.warning(
+                        "job(%0d2) active with pre-reload settings" %
+                        itask.submit_num,
+                        itask=itask)
+        LOG.info("Reload completed.")
         self.do_reload = False
-        self.pri_dao.take_checkpoints("reload-done", other_daos=[self.pub_dao])
 
     def set_stop_point(self, stop_point):
         """Set the global suite stop point."""
@@ -755,10 +742,11 @@ class TaskPool(object):
             if (self.stop_point and itask.point > self.stop_point and
                     itask.state.status in [TASK_STATUS_WAITING,
                                            TASK_STATUS_QUEUED]):
-                itask.log(WARNING,
-                          "not running (beyond suite stop cycle) " +
-                          str(self.stop_point))
-                itask.state.reset_state(TASK_STATUS_HELD)
+                LOG.warning(
+                    "not running (beyond suite stop cycle) %s" %
+                    self.stop_point,
+                    itask=itask)
+                itask.state.set_held()
 
     def can_stop(self, stop_mode):
         """Return True if suite can stop.
@@ -771,9 +759,9 @@ class TaskPool(object):
             return False
         if stop_mode == self.STOP_REQUEST_NOW_NOW:
             return True
+        if self.task_events_mgr.event_timers:
+            return False
         for itask in self.get_tasks():
-            if itask.event_handler_try_timers:
-                return False
             if (stop_mode == self.STOP_REQUEST_CLEAN and
                     itask.state.status in TASK_STATUSES_ACTIVE and
                     not itask.state.kill_failed):
@@ -785,15 +773,14 @@ class TaskPool(object):
         for itask in self.get_tasks():
             if (itask.state.status in TASK_STATUSES_ACTIVE and
                     itask.state.kill_failed):
-                self.log.warning("%s: orphaned task (%s, kill failed)" % (
+                LOG.warning("%s: orphaned task (%s, kill failed)" % (
                     itask.identity, itask.state.status))
             elif itask.state.status in TASK_STATUSES_ACTIVE:
-                self.log.warning("%s: orphaned task (%s)" % (
+                LOG.warning("%s: orphaned task (%s)" % (
                     itask.identity, itask.state.status))
-            elif itask.event_handler_try_timers:
-                for key in itask.event_handler_try_timers:
-                    self.log.warning("%s: incomplete task event handler %s" % (
-                        itask.identity, key))
+        for key1, point, name, submit_num in self.task_events_mgr.event_timers:
+            LOG.warning("%s/%s/%s: incomplete task event handler %s" % (
+                point, name, submit_num, key1))
 
     def is_stalled(self):
         """Return True if the suite is stalled.
@@ -857,121 +844,9 @@ class TaskPool(object):
                     break
 
         for id_, prereqs in prereqs_map.items():
-            self.log.warning("Unmet prerequisites for %s:" % id_)
+            LOG.warning("Unmet prerequisites for %s:" % id_)
             for prereq in prereqs:
-                self.log.warning(" * %s" % prereq)
-
-    def poll_task_jobs(self, items=None):
-        """Poll jobs of active tasks.
-
-        If items is specified, poll active tasks matching given IDs.
-
-        """
-        if self.run_mode == 'simulation':
-            return
-        itasks, bad_items = self._filter_task_proxies(items)
-        active_itasks = []
-        for itask in itasks:
-            if itask.state.status in TASK_STATUSES_ACTIVE:
-                active_itasks.append(itask)
-            elif items:  # and not active
-                self.log.warning(
-                    '%s: skip poll, task not pollable' % itask.identity)
-        self._run_job_cmd(
-            self.JOBS_POLL, active_itasks, self.poll_task_jobs_callback)
-        return len(bad_items)
-
-    def poll_task_jobs_callback(self, ctx):
-        """Callback when poll tasks command exits."""
-        self._manip_task_jobs_callback(
-            ctx,
-            lambda itask, ctx, line: itask.job_poll_callback(ctx, line),
-            {
-                BATCH_SYS_MANAGER.OUT_PREFIX_MESSAGE:
-                lambda itask, ctx, line: itask.job_poll_message_callback(
-                    ctx, line),
-            },
-        )
-
-    def kill_task_jobs(self, items=None):
-        """Kill jobs of active tasks.
-
-        If items is specified, kill active tasks matching given IDs.
-
-        """
-        itasks, bad_items = self._filter_task_proxies(items)
-        active_itasks = []
-        for itask in itasks:
-            is_active = itask.state.status in TASK_STATUSES_ACTIVE
-            if is_active and self.run_mode == 'simulation':
-                itask.state.reset_state(TASK_STATUS_FAILED)
-            elif is_active:
-                itask.state.reset_state(TASK_STATUS_HELD)
-                active_itasks.append(itask)
-            elif items:  # and not active
-                self.log.warning(
-                    '%s: skip kill, task not killable' % itask.identity)
-        self._run_job_cmd(
-            self.JOBS_KILL, active_itasks, self.kill_task_jobs_callback)
-        return len(bad_items)
-
-    def kill_task_jobs_callback(self, ctx):
-        """Callback when kill tasks command exits."""
-        self._manip_task_jobs_callback(
-            ctx,
-            lambda itask, ctx, line: itask.job_kill_callback(ctx, line),
-            {
-                BATCH_SYS_MANAGER.OUT_PREFIX_COMMAND:
-                lambda itask, ctx, line: itask.job_cmd_out_callback(ctx, line),
-            },
-        )
-
-    def _manip_task_jobs_callback(
-            self, ctx, summary_callback, more_callbacks=None):
-        """Callback when submit/poll/kill tasks command exits."""
-        if ctx.ret_code:
-            self.log.error(ctx)
-        else:
-            self.log.debug(ctx)
-        tasks = {}
-        # Note for "kill": It is possible for a job to trigger its trap and
-        # report back to the suite back this logic is called. If so, the task
-        # will no longer be TASK_STATUS_SUBMITTED or TASK_STATUS_RUNNING, and
-        # its output line will be ignored here.
-        for itask in self.get_tasks():
-            if itask.point is not None and itask.submit_num:
-                submit_num = "%02d" % (itask.submit_num)
-                tasks[(str(itask.point), itask.tdef.name, submit_num)] = itask
-        handlers = [(BATCH_SYS_MANAGER.OUT_PREFIX_SUMMARY, summary_callback)]
-        if more_callbacks:
-            for prefix, callback in more_callbacks.items():
-                handlers.append((prefix, callback))
-        out = ctx.out
-        if not out:
-            out = ""
-            # Something is very wrong here
-            # Fallback to use "job_log_dirs" list to report the problem
-            job_log_dirs = ctx.cmd_kwargs.get("job_log_dirs", [])
-            for job_log_dir in job_log_dirs:
-                point, name, submit_num = job_log_dir.split(os.sep, 2)
-                itask = tasks[(point, name, submit_num)]
-                out += (BATCH_SYS_MANAGER.OUT_PREFIX_SUMMARY +
-                        "|".join([ctx.timestamp, job_log_dir, "1"]) + "\n")
-        for line in out.splitlines(True):
-            for prefix, callback in handlers:
-                if line.startswith(prefix):
-                    line = line[len(prefix):].strip()
-                    try:
-                        path = line.split("|", 2)[1]  # timestamp, path, status
-                        point, name, submit_num = path.split(os.sep, 2)
-                        itask = tasks[(point, name, submit_num)]
-                        callback(itask, ctx, line)
-                    except (KeyError, ValueError):
-                        if cylc.flags.debug:
-                            self.log.warning(
-                                'Unhandled %s output: %s' % (
-                                    ctx.cmd_key, line))
-                            traceback.print_exc()
+                LOG.warning(" * %s" % prereq)
 
     def get_hold_point(self):
         """Return the point after which tasks must be held."""
@@ -983,37 +858,36 @@ class TaskPool(object):
         if point is not None:
             for itask in self.get_all_tasks():
                 if itask.point > point:
-                    itask.state.reset_state(TASK_STATUS_HELD)
+                    itask.state.set_held()
 
     def hold_tasks(self, items):
         """Hold tasks with IDs matching any item in "ids"."""
-        itasks, bad_items = self._filter_task_proxies(items)
+        itasks, bad_items = self.filter_task_proxies(items)
         for itask in itasks:
-            itask.state.reset_state(TASK_STATUS_HELD)
+            itask.state.set_held()
         return len(bad_items)
 
     def release_tasks(self, items):
         """Release held tasks with IDs matching any item in "ids"."""
-        itasks, bad_items = self._filter_task_proxies(items)
+        itasks, bad_items = self.filter_task_proxies(items)
         for itask in itasks:
-            itask.state.release()
+            itask.state.unset_held()
+        return len(bad_items)
 
     def hold_all_tasks(self):
         """Hold all tasks."""
-        self.log.info("Holding all waiting or queued tasks now")
+        LOG.info("Holding all waiting or queued tasks now")
         self.is_held = True
         for itask in self.get_all_tasks():
-            itask.state.reset_state(TASK_STATUS_HELD)
-        self.db_inserts_map[self.TABLE_SUITE_PARAMS].append(
-            {"key": "is_held", "value": 1})
+            itask.state.set_held()
 
     def release_all_tasks(self):
         """Release all held tasks."""
         self.is_held = False
         self.release_tasks(None)
-        self.db_deletes_map[self.TABLE_SUITE_PARAMS].append({"key": "is_held"})
 
     def get_failed_tasks(self):
+        """Return failed and submission failed tasks."""
         failed = []
         for itask in self.get_tasks():
             if itask.state.status in [TASK_STATUS_FAILED,
@@ -1022,6 +896,7 @@ class TaskPool(object):
         return failed
 
     def any_task_failed(self):
+        """Return True if any tasks in the pool failed."""
         for itask in self.get_tasks():
             if itask.state.status in [TASK_STATUS_FAILED,
                                       TASK_STATUS_SUBMIT_FAILED]:
@@ -1037,116 +912,55 @@ class TaskPool(object):
         """
         all_outputs = {}   # all_outputs[message] = taskid
         for itask in self.get_tasks():
-            all_outputs.update(itask.state.outputs.completed)
+            for message in itask.state.outputs.get_completed():
+                all_outputs["%s %s" % (itask.identity, message)] = (
+                    itask.identity)
         all_output_msgs = set(all_outputs)
         for itask in self.get_tasks():
             # Try to satisfy itask if not already satisfied.
             if itask.state.prerequisites_are_not_all_satisfied():
                 itask.state.satisfy_me(all_output_msgs, all_outputs)
 
-    def process_queued_task_messages(self):
-        """Handle incoming task messages for each task proxy."""
-        queue = self.message_queue.get_queue()
-        task_id_messages = {}
-        while queue.qsize():
-            try:
-                task_id, priority, message = queue.get(block=False)
-            except Queue.Empty:
-                break
-            queue.task_done()
-            task_id_messages.setdefault(task_id, [])
-            task_id_messages[task_id].append((priority, message))
-        for itask in self.get_tasks():
-            if itask.identity in task_id_messages:
-                for priority, message in task_id_messages[itask.identity]:
-                    itask.process_incoming_message(priority, message)
-
-    def process_queued_db_ops(self):
-        """Handle queued db operations for each task proxy."""
-        for itask in self.get_all_tasks():
-            # (runahead pool tasks too, to get new state recorders).
-            if any(itask.db_inserts_map.values()):
-                for table_name, db_inserts in sorted(
-                        itask.db_inserts_map.items()):
-                    while db_inserts:
-                        db_insert = db_inserts.pop(0)
-                        db_insert.update({
-                            "name": itask.tdef.name,
-                            "cycle": str(itask.point),
-                        })
-                        if "submit_num" not in db_insert:
-                            db_insert["submit_num"] = itask.submit_num
-                        self.pri_dao.add_insert_item(table_name, db_insert)
-                        self.pub_dao.add_insert_item(table_name, db_insert)
-
-            if any(itask.db_updates_map.values()):
-                for table_name, db_updates in sorted(
-                        itask.db_updates_map.items()):
-                    while db_updates:
-                        set_args = db_updates.pop(0)
-                        where_args = {
-                            "cycle": str(itask.point),
-                            "name": itask.tdef.name
-                        }
-                        if "submit_num" not in set_args:
-                            where_args["submit_num"] = itask.submit_num
-                        self.pri_dao.add_update_item(
-                            table_name, set_args, where_args)
-                        self.pub_dao.add_update_item(
-                            table_name, set_args, where_args)
-
-        # Record suite parameters and tasks in pool
-        # Record any broadcast settings to be dumped out
-        for obj in self, BroadcastServer.get_inst():
-            if any(obj.db_deletes_map.values()):
-                for table_name, db_deletes in sorted(
-                        obj.db_deletes_map.items()):
-                    while db_deletes:
-                        where_args = db_deletes.pop(0)
-                        self.pri_dao.add_delete_item(table_name, where_args)
-                        self.pub_dao.add_delete_item(table_name, where_args)
-            if any(obj.db_inserts_map.values()):
-                for table_name, db_inserts in sorted(
-                        obj.db_inserts_map.items()):
-                    while db_inserts:
-                        db_insert = db_inserts.pop(0)
-                        self.pri_dao.add_insert_item(table_name, db_insert)
-                        self.pub_dao.add_insert_item(table_name, db_insert)
-
-        # Previously, we used a separate thread for database writes. This has
-        # now been removed. For the private database, there is no real
-        # advantage in using a separate thread as it needs to be always in sync
-        # with what is current. For the public database, which does not need to
-        # be fully in sync, there is some advantage of using a separate
-        # thread/process, if writing to it becomes a bottleneck. At the moment,
-        # there is no evidence that this is a bottleneck, so it is better to
-        # keep the logic simple.
-        self.pri_dao.execute_queued_items()
-        self.pub_dao.execute_queued_items()
-
     def force_spawn(self, itask):
         """Spawn successor of itask."""
         if itask.has_spawned:
             return None
         itask.has_spawned = True
-        itask.log(DEBUG, 'forced spawning')
-        new_task = itask.spawn(TASK_STATUS_WAITING)
-        if new_task and self.add_to_runahead_pool(new_task):
-            return new_task
-        else:
-            return None
+        LOG.debug('forced spawning', itask=itask)
+        next_point = itask.next_point()
+        if next_point is None:
+            return
+        new_task = TaskProxy(
+            itask.tdef, start_point=next_point, stop_point=itask.stop_point)
+        return self.add_to_runahead_pool(new_task)
 
     def spawn_all_tasks(self):
         """Spawn successors of tasks in pool, if they're ready.
 
         Return the number of spawned tasks.
         """
-        spawned_tasks = 0
+        n_spawned = 0
         for itask in self.get_tasks():
-            if itask.ready_to_spawn():
-                self.force_spawn(itask)
-                spawned_tasks += 1
-        return spawned_tasks
+            # A task proxy is never ready to spawn if:
+            #    * it has spawned already
+            #    * its state is submit-failed (avoid running multiple instances
+            #      of a task with bad job submission config).
+            # Otherwise a task proxy is ready to spawn if either:
+            #    * self.tdef.spawn ahead is True (results in spawning out to
+            #      max active cycle points), OR
+            #    * its state is >= submitted (allows successive instances
+            #      to run concurrently, but not out of order).
+            if (
+                not itask.has_spawned and
+                itask.state.status != TASK_STATUS_SUBMIT_FAILED and
+                (
+                    itask.tdef.spawn_ahead or
+                    itask.state.is_greater_than(TASK_STATUS_READY)
+                )
+            ):
+                if self.force_spawn(itask) is not None:
+                    n_spawned += 1
+        return n_spawned
 
     def remove_suiciding_tasks(self):
         """Remove any tasks that have suicide-triggered.
@@ -1160,9 +974,9 @@ class TaskPool(object):
                     if itask.state.status in [TASK_STATUS_READY,
                                               TASK_STATUS_SUBMITTED,
                                               TASK_STATUS_RUNNING]:
-                        itask.log(WARNING, 'suiciding while active')
+                        LOG.warning('suiciding while active', itask=itask)
                     else:
-                        itask.log(INFO, 'suiciding')
+                        LOG.info('suiciding', itask=itask)
                     self.force_spawn(itask)
                     self.remove(itask, 'suicide')
                     num_removed += 1
@@ -1211,7 +1025,6 @@ class TaskPool(object):
             if (itask.state.status in [TASK_STATUS_SUCCEEDED,
                                        TASK_STATUS_EXPIRED] and
                     itask.has_spawned and
-                    not itask.event_handler_try_timers and
                     itask.cleanup_cutoff is not None and
                     cutoff > itask.cleanup_cutoff):
                 spent.append(itask)
@@ -1223,37 +1036,66 @@ class TaskPool(object):
         """Force tasks to spawn successors if they haven't already.
 
         """
-        itasks, bad_items = self._filter_task_proxies(items)
+        itasks, bad_items = self.filter_task_proxies(items)
         for itask in itasks:
             if not itask.has_spawned:
-                itask.log(INFO, "forced spawning")
+                LOG.info("forced spawning", itask=itask)
                 self.force_spawn(itask)
         return len(bad_items)
 
-    def reset_task_states(self, items, status):
+    def reset_task_states(self, items, status, outputs):
         """Reset task states."""
-        itasks, bad_items = self._filter_task_proxies(items)
+        itasks, bad_items = self.filter_task_proxies(items)
         for itask in itasks:
-            itask.log(INFO, "resetting state to %s" % status)
-            if status == TASK_STATUS_READY:
-                # Pseudo state (in this context) - set waiting and satisified.
-                itask.state.reset_state(TASK_STATUS_WAITING)
-                itask.state.set_prerequisites_all_satisfied()
-                itask.state.unset_special_outputs()
-                itask.state.outputs.set_all_incomplete()
-            elif status in [TASK_STATUS_FAILED, TASK_STATUS_SUBMIT_FAILED]:
-                itask.state.reset_state(status)
-                time_ = time()
-                itask.summary['finished_time'] = time_
-                itask.summary['finished_time_string'] = (
-                    get_time_string_from_unix_time(time_))
-            else:
-                itask.state.reset_state(status)
+            if status and status != itask.state.status:
+                LOG.info("resetting state to %s" % status, itask=itask)
+                if status == TASK_STATUS_READY:
+                    # Pseudo state (in this context) -
+                    # set waiting and satisified.
+                    itask.state.reset_state(TASK_STATUS_WAITING)
+                    itask.state.set_prerequisites_all_satisfied()
+                    itask.state.unset_special_outputs()
+                    itask.state.outputs.set_all_incomplete()
+                else:
+                    itask.state.reset_state(status)
+                    if status in [
+                            TASK_STATUS_FAILED, TASK_STATUS_SUBMIT_FAILED]:
+                        itask.set_event_time('finished', time())
+            if outputs:
+                for output in outputs:
+                    is_completed = True
+                    if output.startswith('!'):
+                        is_completed = False
+                        output = output[1:]
+                    if output == '*' and is_completed:
+                        itask.state.outputs.set_all_completed()
+                        LOG.info("reset all output to completed", itask=itask)
+                    elif output == '*':
+                        itask.state.outputs.set_all_incomplete()
+                        LOG.info("reset all output to incomplete", itask=itask)
+                    else:
+                        ret = itask.state.outputs.set_completed(
+                            message=output, is_completed=is_completed)
+                        if ret is None:
+                            ret = itask.state.outputs.set_completed(
+                                trigger=output, is_completed=is_completed)
+                        if ret is None:
+                            LOG.warning(
+                                "cannot reset output: %s" % output,
+                                itask=itask)
+                        elif ret:
+                            LOG.info(
+                                "reset output to complete: %s" % output,
+                                itask=itask)
+                        else:
+                            LOG.info(
+                                "reset output to incomplete: %s" % output,
+                                itask=itask)
         return len(bad_items)
 
     def remove_tasks(self, items, spawn=False):
         """Remove tasks from pool."""
-        itasks, bad_items = self._filter_task_proxies(items)
+        itasks, bad_items = self.filter_task_proxies(items)
         for itask in itasks:
             if spawn:
                 self.force_spawn(itask)
@@ -1262,11 +1104,11 @@ class TaskPool(object):
 
     def trigger_tasks(self, items):
         """Trigger tasks."""
-        itasks, bad_items = self._filter_task_proxies(items)
+        itasks, bad_items = self.filter_task_proxies(items)
         n_warnings = len(bad_items)
         for itask in itasks:
             if itask.state.status in TASK_STATUSES_ACTIVE:
-                self.log.warning('%s: already triggered' % itask.identity)
+                LOG.warning('%s: already triggered' % itask.identity)
                 n_warnings += 1
                 continue
             itask.manual_trigger = True
@@ -1274,42 +1116,14 @@ class TaskPool(object):
                 itask.state.reset_state(TASK_STATUS_READY)
         return n_warnings
 
-    def dry_run_task(self, items):
-        """Create job file for "cylc trigger --edit"."""
-        itasks, bad_items = self._filter_task_proxies(items)
-        n_warnings = len(bad_items)
-        if len(itasks) > 1:
-            self.log.warning("Unique task match not found: %s" % items)
-            return n_warnings + 1
-        overrides = BroadcastServer.get_inst().get(itasks[0].identity)
-        if itasks[0].prep_submit(overrides=overrides, dry_run=True) is None:
-            return n_warnings + 1
-        else:
-            return n_warnings
-
-    def check_task_timers(self):
-        """Check submission and execution timeout timers for current tasks.
-
-        Not called in simulation mode.
-
-        """
-        now = time()
-        poll_task_ids = set()
-        for itask in self.get_tasks():
-            if itask.check_poll_ready(now):
-                poll_task_ids.add(itask.identity)
-        if poll_task_ids:
-            self.poll_task_jobs(poll_task_ids)
-
     def check_auto_shutdown(self):
         """Check if we should do a normal automatic shutdown."""
         shutdown = True
         for itask in self.get_all_tasks():
             if self.stop_point is None:
                 # Don't if any unsucceeded task exists.
-                if (itask.state.status not in [TASK_STATUS_SUCCEEDED,
-                                               TASK_STATUS_EXPIRED] or
-                        itask.event_handler_try_timers):
+                if itask.state.status not in [
+                        TASK_STATUS_SUCCEEDED, TASK_STATUS_EXPIRED]:
                     shutdown = False
                     break
             elif (itask.point <= self.stop_point and
@@ -1322,15 +1136,50 @@ class TaskPool(object):
                     break
         return shutdown
 
-    def sim_time_check(self):
-        sim_task_succeeded = False
+    def sim_time_check(self, message_queue):
+        """Simulation mode: simulate task run times and set states."""
+        sim_task_state_changed = False
         for itask in self.get_tasks():
-            if itask.state.status == TASK_STATUS_RUNNING:
-                # Automatically set sim-mode tasks to TASK_STATUS_SUCCEEDED
-                # after their alotted run time.
-                if itask.sim_time_check():
-                    sim_task_succeeded = True
-        return sim_task_succeeded
+            if itask.state.status != TASK_STATUS_RUNNING:
+                continue
+            timeout = (itask.summary['started_time'] +
+                       itask.tdef.rtconfig['job']['simulated run length'])
+            if time() > timeout:
+                conf = itask.tdef.rtconfig['simulation']
+                if (itask.point in conf['fail cycle points'] and
+                        (itask.get_try_num() == 1 or
+                         not conf['fail try 1 only'])):
+                    message_queue.put(
+                        itask.identity, 'CRITICAL', TASK_STATUS_FAILED)
+                else:
+                    # Simulate message outputs.
+                    for msg in itask.tdef.rtconfig['outputs'].values():
+                        message_queue.put(itask.identity, 'NORMAL', msg)
+                    message_queue.put(
+                        itask.identity, 'NORMAL', TASK_STATUS_SUCCEEDED)
+                sim_task_state_changed = True
+        return sim_task_state_changed
+
+    def set_expired_tasks(self):
+        """Check if any waiting tasks expired.
+
+        Set their status accordingly.
+        """
+        now = time()
+        for itask in self.get_tasks():
+            if (itask.state.status != TASK_STATUS_WAITING or
+                    itask.tdef.expiration_offset is None):
+                continue
+            if itask.expire_time is None:
+                itask.expire_time = (
+                    itask.get_point_as_seconds() +
+                    itask.get_offset_as_seconds(itask.tdef.expiration_offset))
+            if now > itask.expire_time:
+                msg = 'Task expired (skipping job).'
+                LOG.warning(msg, itask=itask)
+                self.task_events_mgr.setup_event_handlers(
+                    itask, "expired", msg)
+                itask.state.reset_state(TASK_STATUS_EXPIRED)
 
     def waiting_tasks_ready(self):
         """Waiting tasks can become ready for internal reasons.
@@ -1338,23 +1187,24 @@ class TaskPool(object):
         Namely clock-triggers or retry-delay timers
 
         """
+        now = time()
         result = False
         for itask in self.get_tasks():
-            if itask.ready_to_run():
+            if itask.ready_to_run(now):
                 result = True
                 break
         return result
 
     def task_succeeded(self, id_):
-        res = False
+        """Return True if task with id_ is in the succeeded state."""
         for itask in self.get_tasks():
             if (itask.identity == id_ and
                     itask.state.status == TASK_STATUS_SUCCEEDED):
-                res = True
-                break
-        return res
+                return True
+        return False
 
     def ping_task(self, id_, exists_only=False):
+        """Return message to indicate if task exists and/or is running."""
         found = False
         running = False
         for itask in self.get_tasks():
@@ -1363,27 +1213,14 @@ class TaskPool(object):
                 if itask.state.status == TASK_STATUS_RUNNING:
                     running = True
                 break
-        if not found:
-            return False, "task not found"
+        if running:
+            return True, " running"
+        elif found and exists_only:
+            return True, "task found"
+        elif found:
+            return False, "task not running"
         else:
-            if exists_only:
-                return True, "task found"
-            else:
-                if running:
-                    return True, " running"
-                else:
-                    return False, "task not running"
-
-    def get_task_jobfile_path(self, id_):
-        """Return a task job log dir, sans submit number."""
-        for itask in self.get_tasks():
-            if itask.identity == id_:
-                path = itask.get_job_log_path(
-                    head_mode=itask.HEAD_MODE_LOCAL, submit_num=itask.NN,
-                    tail=itask.JOB_FILE_BASE)
-                # Note: 2nd value for back compat
-                return path, os.path.dirname(os.path.dirname(path))
-        return False, "task not found"
+            return False, "task not found"
 
     def get_task_requisites(self, items, list_prereqs=False):
         """Return task prerequisites.
@@ -1399,8 +1236,9 @@ class TaskPool(object):
             ...
         }
         """
-        itasks, bad_items = self._filter_task_proxies(items)
+        itasks, bad_items = self.filter_task_proxies(items)
         results = {}
+        now = time()
         for itask in itasks:
             if list_prereqs:
                 results[itask.identity] = {
@@ -1411,8 +1249,9 @@ class TaskPool(object):
             extras = {}
             if itask.tdef.clocktrigger_offset is not None:
                 extras['Clock trigger time reached'] = (
-                    itask.start_time_reached())
-                extras['Triggers at'] = itask.delayed_start_str
+                    itask.start_time_reached(now))
+                extras['Triggers at'] = get_time_string_from_unix_time(
+                    itask.delayed_start)
             for trig, satisfied in itask.state.external_triggers.items():
                 if satisfied:
                     state = 'satisfied'
@@ -1420,10 +1259,13 @@ class TaskPool(object):
                     state = 'NOT satisfied'
                 extras['External trigger "%s"' % trig] = state
 
+            outputs = []
+            for _, msg, is_completed in itask.state.outputs.get_all():
+                outputs.append(["%s %s" % (itask.identity, msg), is_completed])
             results[itask.identity] = {
                 "descriptions": itask.tdef.describe(),
                 "prerequisites": itask.state.prerequisites_dump(),
-                "outputs": itask.state.outputs.dump(),
+                "outputs": outputs,
                 "extras": extras}
         return results, bad_items
 
@@ -1434,79 +1276,7 @@ class TaskPool(object):
             if itask.state.external_triggers:
                 ets.retrieve(itask)
 
-    def put_rundb_suite_params(self, initial_point, final_point, format=None):
-        """Put run mode, initial/final cycle point in runtime database.
-
-        This method queues the relevant insert statements.
-        """
-        self.db_inserts_map[self.TABLE_SUITE_PARAMS].extend([
-            {"key": "run_mode", "value": self.run_mode},
-            {"key": "initial_point", "value": str(initial_point)},
-            {"key": "final_point", "value": str(final_point)},
-        ])
-        if format:
-            self.db_inserts_map[self.TABLE_SUITE_PARAMS].extend([
-                {"key": "cycle_point_format", "value": str(format)}
-            ])
-        if self.is_held:
-            self.db_inserts_map[self.TABLE_SUITE_PARAMS].append(
-                {"key": "is_held", "value": 1})
-
-    def put_rundb_suite_template_vars(self, template_vars):
-        """Put template_vars in runtime database.
-
-        This method queues the relevant insert statements.
-        """
-        for key, value in template_vars.items():
-            self.db_inserts_map[self.TABLE_SUITE_TEMPLATE_VARS].append(
-                {"key": key, "value": value})
-
-    def put_rundb_task_pool(self):
-        """Put statements to update the task_pool table in runtime database.
-
-        Update the task_pool table and the task_action_timers table.
-        Queue delete (everything) statements to wipe the tables, and queue the
-        relevant insert statements for the current tasks in the pool.
-        """
-        self.db_deletes_map[self.TABLE_TASK_POOL].append({})
-        self.db_deletes_map[self.TABLE_TASK_ACTION_TIMERS].append({})
-        for itask in self.get_all_tasks():
-            self.db_inserts_map[self.TABLE_TASK_POOL].append({
-                "name": itask.tdef.name,
-                "cycle": str(itask.point),
-                "spawned": int(itask.has_spawned),
-                "status": itask.state.status,
-                "hold_swap": itask.state.hold_swap})
-            for ctx_key_0 in ["poll_timers", "try_timers"]:
-                for ctx_key_1, timer in getattr(itask, ctx_key_0).items():
-                    if timer is None:
-                        continue
-                    self.db_inserts_map[self.TABLE_TASK_ACTION_TIMERS].append({
-                        "name": itask.tdef.name,
-                        "cycle": str(itask.point),
-                        "ctx_key_pickle": pickle.dumps((ctx_key_0, ctx_key_1)),
-                        "ctx_pickle": pickle.dumps(timer.ctx),
-                        "delays_pickle": pickle.dumps(timer.delays),
-                        "num": timer.num,
-                        "delay": timer.delay,
-                        "timeout": timer.timeout})
-            for ctx_key, timer in itask.event_handler_try_timers.items():
-                self.db_inserts_map[self.TABLE_TASK_ACTION_TIMERS].append({
-                    "name": itask.tdef.name,
-                    "cycle": str(itask.point),
-                    "ctx_key_pickle": pickle.dumps(ctx_key),
-                    "ctx_pickle": pickle.dumps(timer.ctx),
-                    "delays_pickle": pickle.dumps(timer.delays),
-                    "num": timer.num,
-                    "delay": timer.delay,
-                    "timeout": timer.timeout})
-        self.db_inserts_map[self.TABLE_CHECKPOINT_ID].append({
-            # id = -1 for latest
-            "id": CylcSuiteDAO.CHECKPOINT_LATEST_ID,
-            "time": get_current_time_string(),
-            "event": CylcSuiteDAO.CHECKPOINT_LATEST_EVENT})
-
-    def _filter_task_proxies(self, items):
+    def filter_task_proxies(self, items):
         """Return task proxies that match names, points, states in items.
 
         Return (itasks, bad_items).
@@ -1542,7 +1312,7 @@ class TaskPool(object):
                         itasks.append(itask)
                         tasks_found = True
                 if not tasks_found:
-                    self.log.warning(self.ERR_PREFIX_TASKID_MATCH + item)
+                    LOG.warning(self.ERR_PREFIX_TASKID_MATCH + item)
                     bad_items.append(item)
         return itasks, bad_items
 
@@ -1560,44 +1330,3 @@ class TaskPool(object):
         else:
             name_str, point_str = (head, None)
         return (point_str, name_str, state_str)
-
-    def _run_job_cmd(self, cmd_key, itasks, callback, **kwargs):
-        """Run job commands, e.g. poll, kill, etc.
-
-        Group itasks with their user at host.
-        Put a job command for each user at host to the multiprocess pool.
-
-        """
-        if not itasks:
-            return
-        auth_itasks = {}
-        for itask in itasks:
-            if (itask.task_host, itask.task_owner) not in auth_itasks:
-                auth_itasks[(itask.task_host, itask.task_owner)] = []
-            auth_itasks[(itask.task_host, itask.task_owner)].append(itask)
-        for (host, owner), itasks in sorted(auth_itasks.items()):
-            cmd = ["cylc", cmd_key]
-            if cylc.flags.debug:
-                cmd.append("--debug")
-            try:
-                if is_remote_host(host):
-                    cmd.append("--host=%s" % (host))
-                    kwargs["host"] = host
-            except IOError:
-                # Bad host, run the command any way, command will fail and
-                # callback will deal with it
-                cmd.append("--host=%s" % (host))
-                kwargs["host"] = host
-            if is_remote_user(owner):
-                cmd.append("--user=%s" % (owner))
-                kwargs["user"] = owner
-            cmd.append("--")
-            cmd.append(GLOBAL_CFG.get_derived_host_item(
-                self.suite_name, "suite job log directory", host, owner))
-            job_log_dirs = []
-            for itask in sorted(itasks, key=lambda itask: itask.identity):
-                job_log_dirs.append(itask.get_job_log_path())
-            cmd += job_log_dirs
-            kwargs["job_log_dirs"] = job_log_dirs
-            SuiteProcPool.get_inst().put_command(
-                SuiteProcContext(cmd_key, cmd, **kwargs), callback)
diff --git a/lib/cylc/task_proxy.py b/lib/cylc/task_proxy.py
index 092f9d4..000283d 100644
--- a/lib/cylc/task_proxy.py
+++ b/lib/cylc/task_proxy.py
@@ -18,136 +18,13 @@
 
 """Provide a class to represent a task proxy in a running suite."""
 
-from collections import namedtuple
-from copy import copy
-from logging import (
-    getLevelName, getLogger, CRITICAL, ERROR, WARNING, INFO, DEBUG)
-import os
-from pipes import quote
-from random import randrange
-import re
-from shutil import rmtree
-import time
-import traceback
-
 from isodatetime.timezone import get_local_time_zone
 
-from cylc.mkdir_p import mkdir_p
-from cylc.cfgspec.globalcfg import GLOBAL_CFG
 import cylc.cycling.iso8601
-from cylc.envvar import expandvars
-import cylc.flags as flags
-from cylc.wallclock import (
-    get_current_time_string,
-    get_seconds_as_interval_string,
-    get_time_string_from_unix_time,
-    get_unix_time_from_time_string,
-    RE_DATE_TIME_FORMAT_EXTENDED,
-)
-from cylc.host_select import get_task_host
-from cylc.job_file import JobFile
-from cylc.job_host import RemoteJobHostManager
-from cylc.batch_sys_manager import BATCH_SYS_MANAGER
-from cylc.owner import is_remote_user, USER
-from cylc.suite_host import is_remote_host, get_suite_host
-from parsec.OrderedDict import OrderedDictWithDefaults
-from cylc.mp_pool import SuiteProcPool, SuiteProcContext
-from cylc.rundb import CylcSuiteDAO
 from cylc.task_id import TaskID
-from cylc.task_message import TaskMessage
-from parsec.util import pdeepcopy, poverride
-from parsec.config import ItemNotFoundError
 from cylc.task_state import (
-    TaskState, TASK_STATUSES_ACTIVE, TASK_STATUS_WAITING,
-    TASK_STATUS_READY, TASK_STATUS_SUBMITTED, TASK_STATUS_SUBMIT_FAILED,
-    TASK_STATUS_RUNNING, TASK_STATUS_SUCCEEDED, TASK_STATUS_FAILED)
-from cylc.task_outputs import (
-    TASK_OUTPUT_STARTED, TASK_OUTPUT_SUCCEEDED, TASK_OUTPUT_FAILED)
-from cylc.suite_logging import LOG
-
-
-CustomTaskEventHandlerContext = namedtuple(
-    "CustomTaskEventHandlerContext",
-    ["key", "ctx_type", "cmd"])
-
-
-TaskEventMailContext = namedtuple(
-    "TaskEventMailContext",
-    ["key", "ctx_type", "mail_from", "mail_to", "mail_smtp"])
-
-
-TaskJobLogsRetrieveContext = namedtuple(
-    "TaskJobLogsRetrieveContext",
-    ["key", "ctx_type", "user_at_host", "max_size"])
-
-
-class TaskActionTimer(object):
-    """A timer with delays for task actions."""
-
-    # Memory optimization - constrain possible attributes to this list.
-    __slots__ = ["ctx", "delays", "num", "delay", "timeout", "is_waiting"]
-
-    def __init__(self, ctx=None, delays=None, num=0, delay=None, timeout=None):
-        self.ctx = ctx
-        if delays is None:
-            self.delays = [float(0)]
-        else:
-            self.delays = [float(delay) for delay in delays]
-        self.num = int(num)
-        if delay is not None:
-            delay = float(delay)
-        self.delay = delay
-        if timeout is not None:
-            timeout = float(timeout)
-        self.timeout = timeout
-        self.is_waiting = False
-
-    def delay_as_seconds(self):
-        """Return the delay as PTnS, where n is number of seconds."""
-        return get_seconds_as_interval_string(self.delay)
-
-    def is_delay_done(self, now=None):
-        """Is timeout done?"""
-        if self.timeout is None:
-            return False
-        if now is None:
-            now = time.time()
-        return now > self.timeout
-
-    def is_timeout_set(self):
-        """Return True if timeout is set."""
-        return self.timeout is not None
-
-    def next(self, no_exhaust=False):
-        """Return the next retry delay.
-
-        When delay list has no more item:
-        * Return None if no_exhaust is False
-        * Return the final delay if no_exhaust is True.
-        """
-        try:
-            self.delay = self.delays[self.num]
-        except IndexError:
-            if not no_exhaust:
-                self.delay = None
-        if self.delay is not None:
-            self.timeout = time.time() + self.delay
-            self.num += 1
-        return self.delay
-
-    def set_waiting(self):
-        """Set waiting flag, while waiting for action to complete."""
-        self.delay = None
-        self.is_waiting = True
-        self.timeout = None
-
-    def unset_waiting(self):
-        """Unset waiting flag after an action has completed."""
-        self.is_waiting = False
-
-    def timeout_as_str(self):
-        """Return the timeout as an ISO8601 date-time string."""
-        return get_time_string_from_unix_time(self.timeout)
+    TaskState, TASK_STATUS_WAITING, TASK_STATUS_RETRYING)
+from cylc.wallclock import get_unix_time_from_time_string
 
 
 class TaskProxySequenceBoundsError(ValueError):
@@ -160,91 +37,23 @@ class TaskProxySequenceBoundsError(ValueError):
 class TaskProxy(object):
     """The task proxy."""
 
-    # RETRY LOGIC:
-    #  1) ABSOLUTE SUBMIT NUMBER increments every time a task is
-    #  submitted, manually or automatically by (submission or execution)
-    # retries; whether or not the task actually begins executing, and is
-    # appended to the task log root filename.
-    #  2) SUBMISSION TRY NUMBER increments when task job submission
-    # fails, if submission retries are configured, but resets to 1 if
-    # the task begins executing; and is used for accounting purposes.
-    #  3) EXECUTION TRY NUMBER increments only when task execution fails,
-    # if execution retries are configured; and is passed to task
-    # environments to allow changed behaviour after previous failures.
-
     # Memory optimization - constrain possible attributes to this list.
-    __slots__ = ["JOB_LOG_FMT_1", "JOB_LOG_FMT_M", "CUSTOM_EVENT_HANDLER",
-                 "EVENT_MAIL", "HEAD_MODE_LOCAL", "HEAD_MODE_REMOTE",
-                 "JOB_FILE_BASE", "JOB_KILL", "JOB_LOGS_RETRIEVE", "JOB_POLL",
-                 "JOB_SUBMIT", "KEY_EXECUTE", "KEY_SUBMIT",
-                 "MANAGE_JOB_LOGS_TRY_DELAYS", "NN",
-                 "LOGGING_LVL_OF", "RE_MESSAGE_TIME", "TABLE_TASK_JOBS",
-                 "TABLE_TASK_EVENTS", "TABLE_TASK_STATES", "POLLED_INDICATOR",
-                 "stop_sim_mode_job_submission", "tdef",
-                 "submit_num", "validate_mode", "message_queue", "point",
-                 "cleanup_cutoff", "identity", "has_spawned",
+    __slots__ = ["tdef", "submit_num",
+                 "point", "cleanup_cutoff", "identity", "has_spawned",
                  "point_as_seconds", "stop_point", "manual_trigger",
                  "is_manual_submit", "summary", "local_job_file_path",
-                 "retries_configured", "try_timers",
-                 "event_handler_try_timers", "db_inserts_map",
-                 "db_updates_map", "suite_name", "task_host", "task_owner",
-                 "job_vacated", "poll_timers",
-                 "event_hooks", "sim_mode_run_length",
-                 "delayed_start_str", "delayed_start", "expire_time_str",
-                 "expire_time", "state"]
-
-    # Format string for single line output
-    JOB_LOG_FMT_1 = "%(timestamp)s [%(cmd_key)s %(attr)s] %(mesg)s"
-    # Format string for multi-line output
-    JOB_LOG_FMT_M = "%(timestamp)s [%(cmd_key)s %(attr)s]\n\n%(mesg)s\n"
-
-    CUSTOM_EVENT_HANDLER = "event-handler"
-    EVENT_MAIL = "event-mail"
-    HEAD_MODE_LOCAL = "local"
-    HEAD_MODE_REMOTE = "remote"
-    JOB_FILE_BASE = BATCH_SYS_MANAGER.JOB_FILE_BASE
-    JOB_KILL = "job-kill"
-    JOB_LOGS_RETRIEVE = "job-logs-retrieve"
-    JOB_POLL = "job-poll"
-    JOB_SUBMIT = "job-submit"
-    KEY_EXECUTE = "execution"
-    KEY_EXECUTE_TIME_LIMIT = "execution_time_limit"
-    KEY_SUBMIT = "submission"
-    MANAGE_JOB_LOGS_TRY_DELAYS = (0, 30, 180)  # PT0S, PT30S, PT3M
-    NN = "NN"
-
-    LOGGING_LVL_OF = {
-        "INFO": INFO,
-        "NORMAL": INFO,
-        "WARNING": WARNING,
-        "ERROR": ERROR,
-        "CRITICAL": CRITICAL,
-        "DEBUG": DEBUG,
-    }
-    RE_MESSAGE_TIME = re.compile(
-        '\A(.+) at (' + RE_DATE_TIME_FORMAT_EXTENDED + ')\Z')
-
-    TABLE_TASK_JOBS = CylcSuiteDAO.TABLE_TASK_JOBS
-    TABLE_TASK_EVENTS = CylcSuiteDAO.TABLE_TASK_EVENTS
-    TABLE_TASK_STATES = CylcSuiteDAO.TABLE_TASK_STATES
-
-    POLLED_INDICATOR = "(polled)"
-
-    stop_sim_mode_job_submission = False
+                 "try_timers", "task_host", "task_owner",
+                 "job_vacated", "poll_timers", "timeout_timers",
+                 "delayed_start", "expire_time", "state"]
 
     def __init__(
             self, tdef, start_point, status=TASK_STATUS_WAITING,
             hold_swap=None, has_spawned=False, stop_point=None,
-            is_startup=False, validate_mode=False, submit_num=0,
-            is_reload_or_restart=False, pre_reload_inst=None,
-            message_queue=None):
+            is_startup=False, submit_num=0):
         self.tdef = tdef
         if submit_num is None:
-            self.submit_num = 0
-        else:
-            self.submit_num = submit_num
-        self.validate_mode = validate_mode
-        self.message_queue = message_queue
+            submit_num = 0
+        self.submit_num = submit_num
 
         if is_startup:
             # adjust up to the first on-sequence cycle point
@@ -258,14 +67,11 @@ class TaskProxy(object):
                 # This task is out of sequence bounds
                 raise TaskProxySequenceBoundsError(self.tdef.name)
             self.point = min(adjusted)
-            self.cleanup_cutoff = self.tdef.get_cleanup_cutoff_point(
-                self.point, self.tdef.intercycle_offsets)
-            self.identity = TaskID.get(self.tdef.name, self.point)
         else:
             self.point = start_point
-            self.cleanup_cutoff = self.tdef.get_cleanup_cutoff_point(
-                self.point, self.tdef.intercycle_offsets)
-            self.identity = TaskID.get(self.tdef.name, self.point)
+        self.cleanup_cutoff = self.tdef.get_cleanup_cutoff_point(
+            self.point, self.tdef.intercycle_offsets)
+        self.identity = TaskID.get(self.tdef.name, self.point)
 
         self.has_spawned = has_spawned
 
@@ -294,66 +100,21 @@ class TaskProxy(object):
             'job_hosts': {},
             'execution_time_limit': None,
         }
-        for lfile in self.tdef.rtconfig['extra log files']:
-            self.summary['logfiles'].append(expandvars(lfile))
 
         self.local_job_file_path = None
 
-        self.retries_configured = False
-
-        self.try_timers = {
-            self.KEY_EXECUTE: TaskActionTimer(delays=[]),
-            self.KEY_SUBMIT: TaskActionTimer(delays=[])}
-        self.event_handler_try_timers = {}
-        self.poll_timers = {}
-
-        self.db_inserts_map = {
-            self.TABLE_TASK_JOBS: [],
-            self.TABLE_TASK_STATES: [],
-            self.TABLE_TASK_EVENTS: [],
-        }
-        self.db_updates_map = {
-            self.TABLE_TASK_JOBS: [],
-            self.TABLE_TASK_STATES: [],
-        }
-
-        # TODO - should take suite name from config!
-        self.suite_name = os.environ['CYLC_SUITE_NAME']
-
-        # In case task owner and host are needed by db_events_insert()
-        # for pre-submission events, set their initial values as if
-        # local (we can't know the correct host prior to this because
-        # dynamic host selection could be used).
         self.task_host = 'localhost'
         self.task_owner = None
 
         self.job_vacated = False
+        self.poll_timers = {}
+        self.timeout_timers = {}
+        self.try_timers = {}
 
-        # An initial db state entry is created at task proxy init. On reloading
-        # or restarting the suite, the task proxies already have this db entry.
-        if (not self.validate_mode and not is_reload_or_restart and
-                self.submit_num == 0):
-            self.db_inserts_map[self.TABLE_TASK_STATES].append({
-                "time_created": get_current_time_string(),
-                "time_updated": get_current_time_string(),
-                "status": status})
-
-        if not self.validate_mode and self.submit_num > 0:
-            self.db_updates_map[self.TABLE_TASK_STATES].append({
-                "time_updated": get_current_time_string(),
-                "status": status})
-
-        self.event_hooks = None
-        self.sim_mode_run_length = None
-        self.set_from_rtconfig()
-        self.delayed_start_str = None
         self.delayed_start = None
-        self.expire_time_str = None
         self.expire_time = None
 
-        self.state = TaskState(
-            status, hold_swap, self.point, self.identity, tdef,
-            self.db_events_insert, self.db_update_status, self.log)
+        self.state = TaskState(tdef, self.point, status, hold_swap)
 
         if tdef.sequential:
             # Adjust clean-up cutoff.
@@ -370,118 +131,27 @@ class TaskProxy(object):
                         self.cleanup_cutoff < p_next):
                     self.cleanup_cutoff = p_next
 
-        if is_reload_or_restart and pre_reload_inst is not None:
-            self.log(INFO, 'reloaded task definition')
-            if pre_reload_inst.state.status in TASK_STATUSES_ACTIVE:
-                self.log(WARNING, "job is active with pre-reload settings")
-            # Retain some state from my pre suite-reload predecessor.
-            self.submit_num = pre_reload_inst.submit_num
-            self.has_spawned = pre_reload_inst.has_spawned
-            self.manual_trigger = pre_reload_inst.manual_trigger
-            self.is_manual_submit = pre_reload_inst.is_manual_submit
-            self.summary = pre_reload_inst.summary
-            self.local_job_file_path = pre_reload_inst.local_job_file_path
-            self.try_timers = pre_reload_inst.try_timers
-            self.event_handler_try_timers = (
-                pre_reload_inst.event_handler_try_timers)
-            self.db_inserts_map = pre_reload_inst.db_inserts_map
-            self.db_updates_map = pre_reload_inst.db_updates_map
-            self.task_host = pre_reload_inst.task_host
-            self.task_owner = pre_reload_inst.task_owner
-            self.job_vacated = pre_reload_inst.job_vacated
-            self.poll_timers = pre_reload_inst.poll_timers
-            # Retain status of outputs.
-            for msg, oid in pre_reload_inst.state.outputs.completed.items():
-                self.state.outputs.completed[msg] = oid
-                try:
-                    del self.state.outputs.not_completed[msg]
-                except KeyError:
-                    pass
-
-    def _get_events_conf(self, key, default=None):
-        """Return an events setting from suite then global configuration."""
-        for getter in [self.event_hooks, GLOBAL_CFG.get()["task events"]]:
-            try:
-                value = getter.get(key)
-                if value is not None:
-                    return value
-            except (ItemNotFoundError, KeyError):
-                pass
-        return default
-
-    def _get_host_conf(self, key, default=None, skey="remote"):
-        """Return a host setting from suite then global configuration."""
-        if self.tdef.rtconfig[skey].get(key) is not None:
-            return self.tdef.rtconfig[skey][key]
-        else:
-            try:
-                return GLOBAL_CFG.get_host_item(
-                    key, self.task_host, self.task_owner)
-            except (KeyError, ItemNotFoundError):
-                pass
-        return default
-
-    def log(self, lvl=INFO, msg=""):
-        """Log a message of this task proxy."""
-        msg = "[%s] -%s" % (self.identity, msg)
-        LOG.log(lvl, msg)
-
-    def command_log(self, ctx):
-        """Log an activity for a job of this task proxy."""
-        ctx_str = str(ctx)
-        if not ctx_str:
-            return
-        submit_num = self.NN
-        if isinstance(ctx.cmd_key, tuple):  # An event handler
-            submit_num = ctx.cmd_key[-1]
-        job_activity_log = self.get_job_log_path(
-            self.HEAD_MODE_LOCAL, submit_num, "job-activity.log")
-        try:
-            with open(job_activity_log, "ab") as handle:
-                handle.write(ctx_str + '\n')
-        except IOError as exc:
-            LOG.warning("%s: write failed\n%s" % (job_activity_log, exc))
-        if ctx.cmd and ctx.ret_code:
-            LOG.error(ctx_str)
-        elif ctx.cmd:
-            LOG.debug(ctx_str)
-
-    def db_events_insert(self, event="", message=""):
-        """Record an event to the DB."""
-        self.db_inserts_map[self.TABLE_TASK_EVENTS].append({
-            "time": get_current_time_string(),
-            "event": event,
-            "message": message})
+    def copy_pre_reload(self, pre_reload_inst):
+        """Copy attributes from pre-reload instant."""
+        self.submit_num = pre_reload_inst.submit_num
+        self.has_spawned = pre_reload_inst.has_spawned
+        self.manual_trigger = pre_reload_inst.manual_trigger
+        self.is_manual_submit = pre_reload_inst.is_manual_submit
+        self.summary = pre_reload_inst.summary
+        self.local_job_file_path = pre_reload_inst.local_job_file_path
+        self.try_timers = pre_reload_inst.try_timers
+        self.task_host = pre_reload_inst.task_host
+        self.task_owner = pre_reload_inst.task_owner
+        self.job_vacated = pre_reload_inst.job_vacated
+        self.poll_timers = pre_reload_inst.poll_timers
+        self.timeout_timers = pre_reload_inst.timeout_timers
+        self.state.outputs = pre_reload_inst.state.outputs
 
-    def db_update_status(self):
-        """Update suite runtime DB task states table."""
-        self.db_updates_map[self.TABLE_TASK_STATES].append({
-            "time_updated": get_current_time_string(),
-            "submit_num": self.submit_num,
-            "try_num": self.try_timers[self.KEY_EXECUTE].num + 1,
-            "status": self.state.status})
-
-    def retry_delay_done(self):
-        """Is retry delay done? Can I retry now?"""
-        now = time.time()
-        return (self.try_timers[self.KEY_EXECUTE].is_delay_done(now) or
-                self.try_timers[self.KEY_SUBMIT].is_delay_done(now))
-
-    def ready_to_run(self):
-        """Am I in a pre-run state but ready to run?
-
-        Queued tasks are not counted as they've already been deemed ready.
-
-        """
-        ready = self.state.is_ready_to_run(self.retry_delay_done(),
-                                           self.start_time_reached())
-        if ready and self._has_expired():
-            self.log(WARNING, 'Task expired (skipping job).')
-            self.setup_event_handlers(
-                "expired", 'Task expired (skipping job).')
-            self.state.set_expired()
-            return False
-        return ready
+    @staticmethod
+    def get_offset_as_seconds(offset):
+        """Return an ISO interval as seconds."""
+        iso_offset = cylc.cycling.iso8601.interval_parse(str(offset))
+        return int(iso_offset.get_seconds())
 
     def get_point_as_seconds(self):
         """Compute and store my cycle point as seconds."""
@@ -497,1015 +167,6 @@ class TaskProxy(object):
                 self.point_as_seconds += utc_offset_in_seconds
         return self.point_as_seconds
 
-    @staticmethod
-    def get_offset_as_seconds(offset):
-        """Return an ISO interval as seconds."""
-        iso_offset = cylc.cycling.iso8601.interval_parse(str(offset))
-        return int(iso_offset.get_seconds())
-
-    def start_time_reached(self):
-        """Has this task reached its clock trigger time?"""
-        if self.tdef.clocktrigger_offset is None:
-            return True
-        if self.delayed_start is None:
-            self.delayed_start = (
-                self.get_point_as_seconds() +
-                self.get_offset_as_seconds(self.tdef.clocktrigger_offset))
-            self.delayed_start_str = get_time_string_from_unix_time(
-                self.delayed_start)
-        return time.time() > self.delayed_start
-
-    def _has_expired(self):
-        """Is this task past its use-by date?"""
-        if self.tdef.expiration_offset is None:
-            return False
-        if self.expire_time is None:
-            self.expire_time = (
-                self.get_point_as_seconds() +
-                self.get_offset_as_seconds(self.tdef.expiration_offset))
-            self.expire_time_str = get_time_string_from_unix_time(
-                self.expire_time)
-        return time.time() > self.expire_time
-
-    def job_submission_callback(self, result):
-        """Callback on job submission."""
-        if result.out is not None:
-            out = ""
-            for line in result.out.splitlines(True):
-                if line.startswith(
-                        BATCH_SYS_MANAGER.CYLC_BATCH_SYS_JOB_ID + "="):
-                    self.summary['submit_method_id'] = line.strip().replace(
-                        BATCH_SYS_MANAGER.CYLC_BATCH_SYS_JOB_ID + "=", "")
-                else:
-                    out += line
-            result.out = out
-        self.command_log(result)
-
-        if result.ret_code == SuiteProcPool.JOB_SKIPPED_FLAG:
-            return
-
-        if self.summary['submit_method_id'] and result.ret_code == 0:
-            self.job_submission_succeeded()
-        else:
-            self.job_submission_failed()
-
-    def job_poll_callback(self, cmd_ctx, line):
-        """Callback on job poll."""
-        ctx = SuiteProcContext(self.JOB_POLL, None)
-        ctx.out = line
-        ctx.ret_code = 0
-
-        items = line.split("|")
-        # See cylc.batch_sys_manager.JobPollContext
-        try:
-            (
-                batch_sys_exit_polled, run_status, run_signal,
-                time_submit_exit, time_run, time_run_exit
-            ) = items[4:10]
-        except IndexError:
-            self.summary['latest_message'] = 'poll failed'
-            flags.iflag = True
-            ctx.cmd = cmd_ctx.cmd  # print original command on failure
-            return
-        finally:
-            self.command_log(ctx)
-        if run_status == "1" and run_signal in ["ERR", "EXIT"]:
-            # Failed normally
-            self.process_incoming_message(
-                INFO, TASK_OUTPUT_FAILED, time_run_exit)
-        elif run_status == "1" and batch_sys_exit_polled == "1":
-            # Failed by a signal, and no longer in batch system
-            self.process_incoming_message(
-                INFO, TASK_OUTPUT_FAILED, time_run_exit)
-            self.process_incoming_message(
-                INFO, TaskMessage.FAIL_MESSAGE_PREFIX + run_signal,
-                time_run_exit)
-        elif run_status == "1":
-            # The job has terminated, but is still managed by batch system.
-            # Some batch system may restart a job in this state, so don't
-            # mark as failed yet.
-            self.process_incoming_message(INFO, TASK_OUTPUT_STARTED, time_run)
-        elif run_status == "0":
-            # The job succeeded
-            self.process_incoming_message(
-                INFO, TASK_OUTPUT_SUCCEEDED, time_run_exit)
-        elif time_run and batch_sys_exit_polled == "1":
-            # The job has terminated without executing the error trap
-            self.process_incoming_message(
-                INFO, TASK_OUTPUT_FAILED, "")
-        elif time_run:
-            # The job has started, and is still managed by batch system
-            self.process_incoming_message(INFO, TASK_OUTPUT_STARTED, time_run)
-        elif batch_sys_exit_polled == "1":
-            # The job never ran, and no longer in batch system
-            self.process_incoming_message(
-                INFO, "submission failed", time_submit_exit)
-        else:
-            # The job never ran, and is in batch system
-            self.process_incoming_message(
-                INFO, TASK_STATUS_SUBMITTED, time_submit_exit)
-
-    def job_poll_message_callback(self, cmd_ctx, line):
-        """Callback on job poll message."""
-        ctx = SuiteProcContext(self.JOB_POLL, None)
-        ctx.out = line
-        try:
-            event_time, priority, message = line.split("|")[2:5]
-        except ValueError:
-            ctx.ret_code = 1
-            ctx.cmd = cmd_ctx.cmd  # print original command on failure
-        else:
-            ctx.ret_code = 0
-            self.process_incoming_message(priority, message, event_time)
-        self.command_log(ctx)
-
-    def job_kill_callback(self, cmd_ctx, line):
-        """Callback on job kill."""
-        ctx = SuiteProcContext(self.JOB_KILL, None)
-        ctx.out = line
-        try:
-            ctx.timestamp, _, ctx.ret_code = line.split("|", 2)
-        except ValueError:
-            ctx.ret_code = 1
-            ctx.cmd = cmd_ctx.cmd  # print original command on failure
-        else:
-            ctx.ret_code = int(ctx.ret_code)
-            if ctx.ret_code:
-                ctx.cmd = cmd_ctx.cmd  # print original command on failure
-        self.command_log(ctx)
-        log_lvl = INFO
-        log_msg = 'killed'
-        if ctx.ret_code:  # non-zero exit status
-            log_lvl = WARNING
-            log_msg = 'kill failed'
-            self.state.kill_failed = True
-        elif self.state.status == TASK_STATUS_SUBMITTED:
-            self.job_submission_failed()
-            flags.iflag = True
-        elif self.state.status == TASK_STATUS_RUNNING:
-            self.job_execution_failed()
-            flags.iflag = True
-        else:
-            log_lvl = WARNING
-            log_msg = (
-                'ignoring job kill result, unexpected task state: %s' %
-                self.state.status)
-        self.summary['latest_message'] = log_msg
-        self.log(log_lvl, "job(%02d) %s" % (self.submit_num, log_msg))
-
-    def job_submit_callback(self, cmd_ctx, line):
-        """Callback on job submit."""
-        ctx = SuiteProcContext(self.JOB_SUBMIT, None)
-        ctx.out = line
-        items = line.split("|")
-        try:
-            ctx.timestamp, _, ctx.ret_code = items[0:3]
-        except ValueError:
-            ctx.ret_code = 1
-            ctx.cmd = cmd_ctx.cmd  # print original command on failure
-        else:
-            ctx.ret_code = int(ctx.ret_code)
-            if ctx.ret_code:
-                ctx.cmd = cmd_ctx.cmd  # print original command on failure
-        self.command_log(ctx)
-
-        if ctx.ret_code == SuiteProcPool.JOB_SKIPPED_FLAG:
-            return
-
-        try:
-            self.summary['submit_method_id'] = items[3]
-        except IndexError:
-            self.summary['submit_method_id'] = None
-        if self.summary['submit_method_id'] and ctx.ret_code == 0:
-            self.job_submission_succeeded()
-        else:
-            self.job_submission_failed()
-
-    def job_cmd_out_callback(self, cmd_ctx, line):
-        """Callback on job command STDOUT/STDERR."""
-        job_activity_log = self.get_job_log_path(
-            self.HEAD_MODE_LOCAL, self.NN, "job-activity.log")
-        if cmd_ctx.cmd_kwargs.get("host") and cmd_ctx.cmd_kwargs.get("user"):
-            user_at_host = "(%(user)s@%(host)s) " % cmd_ctx.cmd_kwargs
-        elif cmd_ctx.cmd_kwargs.get("host"):
-            user_at_host = "(%(host)s) " % cmd_ctx.cmd_kwargs
-        elif cmd_ctx.cmd_kwargs.get("user"):
-            user_at_host = "(%(user)s at localhost) " % cmd_ctx.cmd_kwargs
-        else:
-            user_at_host = ""
-        try:
-            timestamp, _, content = line.split("|")
-        except ValueError:
-            pass
-        else:
-            line = "%s %s" % (timestamp, content)
-        try:
-            with open(job_activity_log, "ab") as handle:
-                if not line.endswith("\n"):
-                    line += "\n"
-                handle.write(user_at_host + line)
-        except IOError as exc:
-            self.log(WARNING, "%s: write failed\n%s" % (job_activity_log, exc))
-
-    def setup_event_handlers(
-            self, event, message, db_update=True, db_event=None, db_msg=None):
-        """Set up event handlers."""
-        # extra args for inconsistent use between events, logging, and db
-        # updates
-        db_event = db_event or event
-        if db_update:
-            self.db_events_insert(event=db_event, message=db_msg)
-
-        if self.tdef.run_mode != 'live':
-            return
-
-        self.setup_job_logs_retrieval(event, message)
-        self.setup_event_mail(event, message)
-        self.setup_custom_event_handlers(event, message)
-
-    def setup_job_logs_retrieval(self, event, _=None):
-        """Set up remote job logs retrieval."""
-        # TODO - use string constants for event names.
-        key2 = (self.JOB_LOGS_RETRIEVE, self.submit_num)
-        if self.task_owner:
-            user_at_host = self.task_owner + "@" + self.task_host
-        else:
-            user_at_host = self.task_host
-        if (event not in ['failed', 'retry', 'succeeded'] or
-                user_at_host in [USER + '@localhost', 'localhost'] or
-                not self._get_host_conf("retrieve job logs") or
-                key2 in self.event_handler_try_timers):
-            return
-        retry_delays = self._get_host_conf("retrieve job logs retry delays")
-        if not retry_delays:
-            retry_delays = [0]
-        self.event_handler_try_timers[key2] = TaskActionTimer(
-            TaskJobLogsRetrieveContext(
-                self.JOB_LOGS_RETRIEVE,  # key
-                self.JOB_LOGS_RETRIEVE,  # ctx_type
-                user_at_host,
-                self._get_host_conf("retrieve job logs max size"),  # max_size
-            ),
-            retry_delays)
-
-    def setup_event_mail(self, event, _):
-        """Event notification, by email."""
-        key2 = ((self.EVENT_MAIL, event), self.submit_num)
-        if (key2 in self.event_handler_try_timers or
-                event not in self._get_events_conf("mail events", [])):
-            return
-        retry_delays = self._get_events_conf("mail retry delays")
-        if not retry_delays:
-            retry_delays = [0]
-        self.event_handler_try_timers[key2] = TaskActionTimer(
-            TaskEventMailContext(
-                self.EVENT_MAIL,  # key
-                self.EVENT_MAIL,  # ctx_type
-                self._get_events_conf(  # mail_from
-                    "mail from",
-                    "notifications@" + get_suite_host(),
-                ),
-                self._get_events_conf("mail to", USER),  # mail_to
-                self._get_events_conf("mail smtp"),  # mail_smtp
-            ),
-            retry_delays)
-
-    def setup_custom_event_handlers(self, event, message, only_list=None):
-        """Call custom event handlers."""
-        handlers = []
-        if self.event_hooks[event + ' handler']:
-            handlers = self.event_hooks[event + ' handler']
-        elif (self._get_events_conf('handlers', []) and
-                event in self._get_events_conf('handler events', [])):
-            handlers = self._get_events_conf('handlers', [])
-        retry_delays = self._get_events_conf(
-            'handler retry delays',
-            self._get_host_conf("task event handler retry delays"))
-        if not retry_delays:
-            retry_delays = [0]
-        for i, handler in enumerate(handlers):
-            key1 = ("%s-%02d" % (self.CUSTOM_EVENT_HANDLER, i), event)
-            if (key1, self.submit_num) in self.event_handler_try_timers or (
-                    only_list and i not in only_list):
-                continue
-            cmd = handler % {
-                "event": quote(event),
-                "suite": quote(self.suite_name),
-                "point": quote(str(self.point)),
-                "name": quote(self.tdef.name),
-                "submit_num": self.submit_num,
-                "id": quote(self.identity),
-                "message": quote(message),
-            }
-            if cmd == handler:
-                # Nothing substituted, assume classic interface
-                cmd = "%s '%s' '%s' '%s' '%s'" % (
-                    handler, event, self.suite_name, self.identity, message)
-            self.log(DEBUG, "Queueing %s handler: %s" % (event, cmd))
-            self.event_handler_try_timers[(key1, self.submit_num)] = (
-                TaskActionTimer(
-                    CustomTaskEventHandlerContext(
-                        key1,
-                        self.CUSTOM_EVENT_HANDLER,
-                        cmd,
-                    ),
-                    retry_delays))
-
-    def custom_event_handler_callback(self, result):
-        """Callback when a custom event handler is done."""
-        self.command_log(result)
-        try:
-            if result.ret_code == 0:
-                del self.event_handler_try_timers[result.cmd_key]
-            else:
-                self.event_handler_try_timers[result.cmd_key].unset_waiting()
-        except KeyError:
-            pass
-
-    def job_submission_failed(self, event_time=None):
-        """Handle job submission failure."""
-        self.log(ERROR, 'submission failed')
-        if event_time is None:
-            event_time = get_current_time_string()
-        self.db_updates_map[self.TABLE_TASK_JOBS].append({
-            "time_submit_exit": get_current_time_string(),
-            "submit_status": 1,
-        })
-        try:
-            del self.summary['submit_method_id']
-        except KeyError:
-            pass
-        if self.try_timers[self.KEY_SUBMIT].next() is None:
-            # No submission retry lined up: definitive failure.
-            self.summary['finished_time'] = float(
-                get_unix_time_from_time_string(event_time))
-            self.summary['finished_time_string'] = event_time
-            flags.pflag = True
-            # See github #476.
-            self.setup_event_handlers(
-                'submission failed', 'job submission failed')
-            self.state.set_submit_failed()
-        else:
-            # There is a submission retry lined up.
-            timeout_str = self.try_timers[self.KEY_SUBMIT].timeout_as_str()
-
-            delay_msg = "submit-retrying in %s" % (
-                self.try_timers[self.KEY_SUBMIT].delay_as_seconds())
-            msg = "submission failed, %s (after %s)" % (delay_msg, timeout_str)
-            self.log(INFO, "job(%02d) " % self.submit_num + msg)
-            self.summary['latest_message'] = msg
-            self.db_events_insert(
-                event="submission failed", message=delay_msg)
-            # TODO - is this insert redundant with setup_event_handlers?
-            self.db_events_insert(
-                event="submission failed",
-                message="submit-retrying in " + str(
-                    self.try_timers[self.KEY_SUBMIT].delay))
-            self.setup_event_handlers(
-                "submission retry", "job submission failed, " + delay_msg)
-            self.state.set_submit_retry()
-
-    def job_submission_succeeded(self):
-        """Handle job submission succeeded."""
-        if self.summary.get('submit_method_id') is not None:
-            self.log(
-                INFO, 'submit_method_id=' + self.summary['submit_method_id'])
-        self.log(INFO, 'submission succeeded')
-        now = time.time()
-        now_string = get_time_string_from_unix_time(now)
-        self.db_updates_map[self.TABLE_TASK_JOBS].append({
-            "time_submit_exit": now,
-            "submit_status": 0,
-            "batch_sys_job_id": self.summary.get('submit_method_id')})
-
-        if self.tdef.run_mode == 'simulation':
-            # Simulate job execution at this point.
-            if self.__class__.stop_sim_mode_job_submission:
-                self.state.set_ready_to_submit()
-            else:
-                self.summary['started_time'] = now
-                self.summary['started_time_string'] = now_string
-                self.state.set_executing()
-            return
-
-        self.summary['started_time'] = None
-        self.summary['started_time_string'] = None
-        self.summary['finished_time'] = None
-        self.summary['finished_time_string'] = None
-
-        self.summary['submitted_time'] = now
-        self.summary['submitted_time_string'] = now_string
-        self.summary['latest_message'] = TASK_STATUS_SUBMITTED
-        self.setup_event_handlers("submitted", 'job submitted',
-                                  db_event='submission succeeded')
-
-        if self.state.set_submit_succeeded():
-            try:
-                self.state.submission_timer_timeout = (
-                    self.summary['submitted_time'] +
-                    float(self._get_events_conf('submission timeout')))
-            except (TypeError, ValueError):
-                self.state.submission_timer_timeout = None
-            self._set_next_poll_time(self.KEY_SUBMIT)
-
-    def job_execution_failed(self, event_time=None):
-        """Handle a job failure."""
-        if event_time is None:
-            self.summary['finished_time'] = time.time()
-            self.summary['finished_time_string'] = (
-                get_time_string_from_unix_time(self.summary['finished_time']))
-        else:
-            self.summary['finished_time'] = float(
-                get_unix_time_from_time_string(event_time))
-            self.summary['finished_time_string'] = event_time
-        self.db_updates_map[self.TABLE_TASK_JOBS].append({
-            "run_status": 1,
-            "time_run_exit": self.summary['finished_time_string'],
-        })
-        self.state.execution_timer_timeout = None
-        if self.try_timers[self.KEY_EXECUTE].next() is None:
-            # No retry lined up: definitive failure.
-            # Note the TASK_STATUS_FAILED output is only added if needed.
-            flags.pflag = True
-            self.state.set_execution_failed()
-            self.setup_event_handlers("failed", 'job failed')
-        else:
-            # There is a retry lined up
-            timeout_str = self.try_timers[self.KEY_EXECUTE].timeout_as_str()
-            delay_msg = "retrying in %s" % (
-                self.try_timers[self.KEY_EXECUTE].delay_as_seconds())
-            msg = "failed, %s (after %s)" % (delay_msg, timeout_str)
-            self.log(INFO, "job(%02d) " % self.submit_num + msg)
-            self.summary['latest_message'] = msg
-            self.setup_event_handlers(
-                "retry", "job failed, " + delay_msg, db_msg=delay_msg)
-            self.state.set_execution_retry()
-
-    def reset_manual_trigger(self):
-        """This is called immediately after manual trigger flag used."""
-        if self.manual_trigger:
-            self.manual_trigger = False
-            self.is_manual_submit = True
-            # unset any retry delay timers
-            self.try_timers[self.KEY_EXECUTE].timeout = None
-            self.try_timers[self.KEY_SUBMIT].timeout = None
-
-    def set_from_rtconfig(self, cfg=None):
-        """Populate task proxy with runtime configuration.
-
-        Some [runtime] config requiring consistency checking on reload,
-        and self variables requiring updating for the same.
-
-        """
-
-        if cfg:
-            rtconfig = cfg
-        else:
-            rtconfig = self.tdef.rtconfig
-
-        if not self.retries_configured:
-            # configure retry delays before the first try
-            self.retries_configured = True
-            # TODO - saving the retry delay lists here is not necessary
-            # (it can be handled like the polling interval lists).
-            if (self.tdef.run_mode == 'live' or
-                    (self.tdef.run_mode == 'simulation' and
-                        not rtconfig['simulation mode']['disable retries']) or
-                    (self.tdef.run_mode == 'dummy' and
-                        not rtconfig['dummy mode']['disable retries'])):
-                # note that a *copy* of the retry delays list is needed
-                # so that all instances of the same task don't pop off
-                # the same deque (but copy of rtconfig above solves this).
-                self.try_timers[self.KEY_EXECUTE].delays = list(
-                    rtconfig['job']['execution retry delays'])
-                self.try_timers[self.KEY_SUBMIT].delays = list(
-                    rtconfig['job']['submission retry delays'])
-
-        rrange = rtconfig['simulation mode']['run time range']
-        if len(rrange) != 2:
-            raise Exception("ERROR, " + self.tdef.name + ": simulation mode " +
-                            "run time range should be ISO 8601-compatible")
-        try:
-            self.sim_mode_run_length = randrange(rrange[0], rrange[1])
-        except Exception, exc:
-            traceback.print_exc(exc)
-            raise Exception(
-                "ERROR: simulation mode task run time range must be [MIN,MAX)")
-
-        self.event_hooks = rtconfig['events']
-
-        for key in self.KEY_SUBMIT, self.KEY_EXECUTE:
-            values = self._get_host_conf(
-                key + ' polling intervals', skey='job')
-            if values:
-                self.poll_timers[key] = TaskActionTimer(delays=values)
-
-    def submit(self):
-        """For "cylc submit". See also "TaskPool.submit_task_jobs"."""
-
-        self.state.set_ready_to_submit()
-
-        # Reset flag so any re-triggering will generate a new job file.
-        self.local_job_file_path = None
-
-        cmd_key = self.JOB_SUBMIT
-        args = [self.get_job_log_path(
-            self.HEAD_MODE_REMOTE, tail=self.JOB_FILE_BASE)]
-        stdin_file_paths = [self.get_job_log_path(
-            self.HEAD_MODE_LOCAL, tail=self.JOB_FILE_BASE)]
-
-        cmd = ["cylc", cmd_key]
-        if cylc.flags.debug:
-            cmd.append("--debug")
-        remote_mode = False
-        for key, value, test_func in [
-                ('host', self.task_host, is_remote_host),
-                ('user', self.task_owner, is_remote_user)]:
-            if test_func(value):
-                cmd.append('--%s=%s' % (key, value))
-                remote_mode = True
-        if remote_mode:
-            cmd.append('--remote-mode')
-        cmd.append("--")
-        cmd += list(args)
-
-        self.log(INFO, "job(%02d) initiate %s" % (self.submit_num, cmd_key))
-        ctx = SuiteProcContext(
-            cmd_key, cmd, stdin_file_paths=stdin_file_paths)
-        return SuiteProcPool.get_inst().put_command(
-            ctx, self.job_submission_callback)
-
-    def prep_submit(self, dry_run=False, overrides=None):
-        """Prepare job submission.
-
-        Return self on a good preparation.
-
-        """
-        if self.tdef.run_mode == 'simulation' or (
-                self.local_job_file_path and not dry_run):
-            return self
-
-        try:
-            job_conf = self._prep_submit_impl(overrides)
-            local_job_file_path = self.get_job_log_path(
-                self.HEAD_MODE_LOCAL, tail=self.JOB_FILE_BASE)
-            JobFile.get_inst().write(local_job_file_path, job_conf)
-        except Exception, exc:
-            # Could be a bad command template.
-            if flags.debug:
-                traceback.print_exc()
-            self.command_log(SuiteProcContext(
-                self.JOB_SUBMIT, '(prepare job file)', err=exc,
-                ret_code=1))
-            if not dry_run:
-                self.job_submission_failed()
-            return
-        self.local_job_file_path = local_job_file_path
-
-        if dry_run:
-            # This will be shown next to submit num in gcylc:
-            self.summary['latest_message'] = 'job file written (edit/dry-run)'
-            self.log(DEBUG, self.summary['latest_message'])
-
-        # Return value used by "cylc submit" and "cylc jobscript":
-        return self
-
-    def _prep_submit_impl(self, overrides=None):
-        """Helper for self.prep_submit."""
-        self.log(DEBUG, "incrementing submit number")
-        self.submit_num += 1
-        self.summary['submit_num'] = self.submit_num
-        self.local_job_file_path = None
-        self.db_events_insert(event="incrementing submit number")
-        self.db_inserts_map[self.TABLE_TASK_JOBS].append({
-            "is_manual_submit": self.is_manual_submit,
-            "try_num": self.try_timers[self.KEY_EXECUTE].num + 1,
-            "time_submit": get_current_time_string(),
-        })
-        if overrides:
-            rtconfig = pdeepcopy(self.tdef.rtconfig)
-            poverride(rtconfig, overrides)
-        else:
-            rtconfig = self.tdef.rtconfig
-
-        self.set_from_rtconfig(rtconfig)
-
-        # construct the job_sub_method here so that a new one is used if
-        # the task is re-triggered by the suite operator - so it will
-        # get new stdout/stderr logfiles and not overwrite the old ones.
-
-        # dynamic instantiation - don't know job sub method till run time.
-        self.summary['batch_sys_name'] = rtconfig['job']['batch system']
-        self.summary['execution_time_limit'] = (
-            rtconfig['job']['execution time limit'])
-
-        # Determine task host settings now, just before job submission,
-        # because dynamic host selection may be used.
-
-        # host may be None (= run task on suite host)
-        self.task_host = get_task_host(rtconfig['remote']['host'])
-        if not self.task_host:
-            self.task_host = 'localhost'
-        elif self.task_host != "localhost":
-            self.log(INFO, "Task host: " + self.task_host)
-
-        self.task_owner = rtconfig['remote']['owner']
-
-        if self.task_owner:
-            user_at_host = self.task_owner + "@" + self.task_host
-        else:
-            user_at_host = self.task_host
-        self.summary['host'] = user_at_host
-        self.summary['job_hosts'][self.submit_num] = user_at_host
-        try:
-            batch_sys_conf = self._get_host_conf('batch systems')[
-                rtconfig['job']['batch system']]
-        except (TypeError, KeyError):
-            batch_sys_conf = OrderedDictWithDefaults()
-        if self.summary['execution_time_limit']:
-            # Default = 1, 2 and 7 minutes intervals, roughly 1, 3 and 10
-            # minutes after time limit exceeded
-            self.poll_timers[self.KEY_EXECUTE_TIME_LIMIT] = (
-                TaskActionTimer(delays=batch_sys_conf.get(
-                    'execution time limit polling intervals', [60, 120, 420])))
-
-        RemoteJobHostManager.get_inst().init_suite_run_dir(
-            self.suite_name, self.task_host, self.task_owner)
-        self.db_updates_map[self.TABLE_TASK_JOBS].append({
-            "user_at_host": user_at_host,
-            "batch_sys_name": self.summary['batch_sys_name'],
-        })
-        self.is_manual_submit = False
-
-        script, pre_script, post_script = self._get_job_scripts(rtconfig)
-        execution_time_limit = rtconfig['job']['execution time limit']
-        if execution_time_limit:
-            execution_time_limit = float(execution_time_limit)
-
-        # Location of job file, etc
-        self._create_job_log_path()
-
-        return {
-            'batch_system_name': rtconfig['job']['batch system'],
-            'batch_submit_command_template': (
-                rtconfig['job']['batch submit command template']),
-            'batch_system_conf': batch_sys_conf,
-            'directives': rtconfig['directives'],
-            'environment': rtconfig['environment'],
-            'execution_time_limit': execution_time_limit,
-            'env-script': rtconfig['env-script'],
-            'err-script': rtconfig['err-script'],
-            'host': self.task_host,
-            'init-script': rtconfig['init-script'],
-            'job_file_path': self.get_job_log_path(
-                self.HEAD_MODE_REMOTE, tail=self.JOB_FILE_BASE),
-            'job_d': self.get_job_log_path(),
-            'namespace_hierarchy': self.tdef.namespace_hierarchy,
-            'owner': self.task_owner,
-            'post-script': post_script,
-            'pre-script': pre_script,
-            'remote_suite_d': rtconfig['remote']['suite definition directory'],
-            'script': script,
-            'shell': rtconfig['job']['shell'],
-            'submit_num': self.submit_num,
-            'suite_name': self.suite_name,
-            'task_id': self.identity,
-            'try_num': self.try_timers[self.KEY_EXECUTE].num + 1,
-            'work_d': rtconfig['work sub-directory'],
-        }
-
-    def _get_job_scripts(self, rtconfig):
-        """Return script, pre-script, post-script for a job."""
-        script = rtconfig['script']
-        pre_script = rtconfig['pre-script']
-        post_script = rtconfig['post-script']
-        if self.tdef.run_mode == 'dummy':
-            # Use dummy script items in dummy mode.
-            script = rtconfig['dummy mode']['script']
-            if rtconfig['dummy mode']['disable pre-script']:
-                pre_script = None
-            if rtconfig['dummy mode']['disable post-script']:
-                post_script = None
-        elif self.tdef.suite_polling_cfg:
-            # Automatic suite state polling script
-            comstr = "cylc suite-state " + \
-                     " --task=" + self.tdef.suite_polling_cfg['task'] + \
-                     " --point=" + str(self.point) + \
-                     " --status=" + self.tdef.suite_polling_cfg['status']
-            for key, fmt in [
-                    ('user', ' --%s=%s'),
-                    ('host', ' --%s=%s'),
-                    ('interval', ' --%s=%d'),
-                    ('max-polls', ' --%s=%s'),
-                    ('run-dir', ' --%s=%s'),
-                    ('template', ' --%s=%s')]:
-                if rtconfig['suite state polling'][key]:
-                    comstr += fmt % (key, rtconfig['suite state polling'][key])
-            comstr += " " + self.tdef.suite_polling_cfg['suite']
-            script = "echo " + comstr + "\n" + comstr
-        return script, pre_script, post_script
-
-    def _create_job_log_path(self):
-        """Create job log directory, etc.
-
-        Create local job directory, and NN symbolic link.
-        If NN => 01, remove numbered directories with submit numbers greater
-        than 01.
-        Return a string in the form "POINT/NAME/SUBMIT_NUM".
-
-        """
-        job_file_dir = self.get_job_log_path(self.HEAD_MODE_LOCAL)
-        task_log_dir = os.path.dirname(job_file_dir)
-        if self.submit_num == 1:
-            try:
-                names = os.listdir(task_log_dir)
-            except OSError:
-                pass
-            else:
-                for name in names:
-                    if name not in ["01", self.NN]:
-                        rmtree(
-                            os.path.join(task_log_dir, name),
-                            ignore_errors=True)
-        else:
-            rmtree(job_file_dir, ignore_errors=True)
-
-        mkdir_p(job_file_dir)
-        target = os.path.join(task_log_dir, self.NN)
-        source = os.path.basename(job_file_dir)
-        try:
-            prev_source = os.readlink(target)
-        except OSError:
-            prev_source = None
-        if prev_source == source:
-            return
-        try:
-            if prev_source:
-                os.unlink(target)
-            os.symlink(source, target)
-        except OSError as exc:
-            if not exc.filename:
-                exc.filename = target
-            raise exc
-
-    def check_submission_timeout(self, now):
-        """Check/handle submission timeout, called if TASK_STATUS_SUBMITTED."""
-        timeout = self.state.submission_timer_timeout
-        if timeout is None or now <= timeout:
-            return False
-        # Extend timeout so the job can be polled again at next timeout
-        # just in case the job is still stuck in a queue
-        msg = 'job submitted %s ago, but has not started' % (
-            get_seconds_as_interval_string(
-                timeout - self.summary['submitted_time']))
-        self.state.submission_timer_timeout = None
-        self.log(WARNING, msg)
-        self.setup_event_handlers('submission timeout', msg)
-        return True
-
-    def check_execution_timeout(self, now):
-        """Check/handle execution timeout, called if TASK_STATUS_RUNNING."""
-        timeout = self.state.execution_timer_timeout
-        if timeout is None or now <= timeout:
-            return False
-        if self.summary['execution_time_limit']:
-            timer = self.poll_timers[self.KEY_EXECUTE_TIME_LIMIT]
-            if not timer.is_timeout_set():
-                timer.next()
-            if not timer.is_delay_done():
-                # Don't poll
-                return False
-            if timer.next() is not None:
-                # Poll now, and more retries lined up
-                return True
-        # No more retry lined up, issue execution timeout event
-        msg = 'job started %s ago, but has not finished' % (
-            get_seconds_as_interval_string(
-                timeout - self.summary['started_time']))
-        self.state.execution_timer_timeout = None
-        self.log(WARNING, msg)
-        self.setup_event_handlers('execution timeout', msg)
-        return True
-
-    def sim_time_check(self):
-        """Check simulation time."""
-        timeout = self.summary['started_time'] + self.sim_mode_run_length
-        if time.time() > timeout:
-            if self.tdef.rtconfig['simulation mode']['simulate failure']:
-                self.message_queue.put(
-                    self.identity, 'NORMAL', TASK_STATUS_SUBMITTED)
-                self.message_queue.put(
-                    self.identity, 'CRITICAL', TASK_STATUS_FAILED)
-            else:
-                self.message_queue.put(
-                    self.identity, 'NORMAL', TASK_STATUS_SUBMITTED)
-                self.message_queue.put(
-                    self.identity, 'NORMAL', TASK_STATUS_SUCCEEDED)
-            return True
-        else:
-            return False
-
-    def reject_if_failed(self, message):
-        """Reject a message if in the failed state.
-
-        Handle 'enable resurrection' mode.
-
-        """
-        if self.state.status == TASK_STATUS_FAILED:
-            if self.tdef.rtconfig['enable resurrection']:
-                self.log(
-                    WARNING,
-                    'message receive while failed:' +
-                    ' I am returning from the dead!'
-                )
-                return False
-            else:
-                self.log(
-                    WARNING,
-                    'rejecting a message received while in the failed state:'
-                )
-                self.log(WARNING, '  ' + message)
-            return True
-        else:
-            return False
-
-    def process_incoming_message(
-            self, priority, message, polled_event_time=None):
-        """Parse an incoming task message and update task state.
-
-        Incoming is e.g. "succeeded at <TIME>".
-
-        Correctly handle late (out of order) message which would otherwise set
-        the state backward in the natural order of events.
-
-        """
-        is_polled = polled_event_time is not None
-
-        # Log incoming messages with '>' to distinguish non-message log entries
-        log_message = '(current:%s)> %s' % (self.state.status, message)
-        if polled_event_time is not None:
-            log_message += ' %s' % self.POLLED_INDICATOR
-        self.log(self.LOGGING_LVL_OF.get(priority, INFO), log_message)
-
-        # Strip the "at TIME" suffix.
-        event_time = polled_event_time
-        if not event_time:
-            match = self.RE_MESSAGE_TIME.match(message)
-            if match:
-                message, event_time = match.groups()
-        if not event_time:
-            event_time = get_current_time_string()
-
-        # always update the suite state summary for latest message
-        self.summary['latest_message'] = message
-        if is_polled:
-            self.summary['latest_message'] += " %s" % self.POLLED_INDICATOR
-        flags.iflag = True
-
-        if self.reject_if_failed(message):
-            # Failed tasks do not send messages unless declared resurrectable
-            return
-
-        # Check registered outputs.
-        if not self.state.record_output(message, is_polled):
-            self.log(WARNING, (
-                "Unexpected output (already completed):\n  " + message))
-
-        if is_polled and self.state.status not in TASK_STATUSES_ACTIVE:
-            # A poll result can come in after a task finishes.
-            self.log(WARNING, "Ignoring late poll result: task is not active")
-            return
-
-        if priority == TaskMessage.WARNING:
-            self.setup_event_handlers('warning', message, db_update=False)
-
-        if (message == TASK_OUTPUT_STARTED and
-                self.state.status in [TASK_STATUS_READY, TASK_STATUS_SUBMITTED,
-                                      TASK_STATUS_SUBMIT_FAILED]):
-            if self.job_vacated:
-                self.job_vacated = False
-                self.log(WARNING, "Vacated job restarted: " + message)
-            # Received a 'task started' message
-            flags.pflag = True
-            self.state.set_executing()
-            self.summary['started_time'] = float(
-                get_unix_time_from_time_string(event_time))
-            self.summary['started_time_string'] = event_time
-            self.db_updates_map[self.TABLE_TASK_JOBS].append({
-                "time_run": self.summary['started_time_string']})
-            if self.summary['execution_time_limit']:
-                execution_timeout = self.summary['execution_time_limit']
-            else:
-                execution_timeout = self._get_events_conf('execution timeout')
-            try:
-                self.state.execution_timer_timeout = (
-                    self.summary['started_time'] + float(execution_timeout))
-            except (TypeError, ValueError):
-                self.state.execution_timer_timeout = None
-
-            # submission was successful so reset submission try number
-            self.try_timers[self.KEY_SUBMIT].num = 0
-            self.setup_event_handlers('started', 'job started')
-            self._set_next_poll_time(self.KEY_EXECUTE)
-
-        elif (message == TASK_OUTPUT_SUCCEEDED and
-                self.state.status in [
-                    TASK_STATUS_READY, TASK_STATUS_SUBMITTED,
-                    TASK_STATUS_SUBMIT_FAILED, TASK_STATUS_RUNNING,
-                    TASK_STATUS_FAILED]):
-            # Received a 'task succeeded' message
-            self.state.execution_timer_timeout = None
-            flags.pflag = True
-            self.summary['finished_time'] = float(
-                get_unix_time_from_time_string(event_time))
-            self.summary['finished_time_string'] = event_time
-            self.db_updates_map[self.TABLE_TASK_JOBS].append({
-                "run_status": 0,
-                "time_run_exit": self.summary['finished_time_string'],
-            })
-            # Update mean elapsed time only on task succeeded.
-            if self.summary['started_time'] is not None:
-                self.tdef.elapsed_times.append(
-                    self.summary['finished_time'] -
-                    self.summary['started_time'])
-            self.setup_event_handlers("succeeded", "job succeeded")
-            warnings = self.state.set_execution_succeeded(is_polled)
-            for warning in warnings:
-                self.log(WARNING, warning)
-
-        elif (message == TASK_OUTPUT_FAILED and
-                self.state.status in [
-                    TASK_STATUS_READY, TASK_STATUS_SUBMITTED,
-                    TASK_STATUS_SUBMIT_FAILED, TASK_STATUS_RUNNING]):
-            # (submit- states in case of very fast submission and execution).
-            self.job_execution_failed(event_time)
-
-        elif message.startswith(TaskMessage.FAIL_MESSAGE_PREFIX):
-            # capture and record signals sent to task proxy
-            self.db_events_insert(event="signaled", message=message)
-            signal = message.replace(TaskMessage.FAIL_MESSAGE_PREFIX, "")
-            self.db_updates_map[self.TABLE_TASK_JOBS].append(
-                {"run_signal": signal})
-
-        elif message.startswith(TaskMessage.VACATION_MESSAGE_PREFIX):
-            flags.pflag = True
-            self.state.set_state(TASK_STATUS_SUBMITTED)
-            self.db_events_insert(event="vacated", message=message)
-            self.state.execution_timer_timeout = None
-            self.summary['started_time'] = None
-            self.summary['started_time_string'] = None
-            self.try_timers[self.KEY_SUBMIT].num = 0
-            self.job_vacated = True
-
-        elif message == "submission failed":
-            # This can arrive via a poll.
-            self.state.submission_timer_timeout = None
-            self.job_submission_failed(event_time)
-
-        else:
-            # Unhandled messages. These include:
-            #  * general non-output/progress messages
-            #  * poll messages that repeat previous results
-            # Note that all messages are logged already at the top.
-            self.log(DEBUG, '(current: %s) unhandled: %s' % (
-                self.state.status, message))
-            if priority in [CRITICAL, ERROR, WARNING, INFO, DEBUG]:
-                priority = getLevelName(priority)
-            self.db_events_insert(
-                event=("message %s" % str(priority).lower()), message=message)
-
-    def spawn(self, state):
-        """Spawn the successor of this task proxy."""
-        self.has_spawned = True
-        next_point = self.next_point()
-        if next_point:
-            return TaskProxy(
-                self.tdef, next_point, state, None, False, self.stop_point,
-                message_queue=self.message_queue)
-        else:
-            # next_point instance is out of the sequence bounds
-            return None
-
-    def ready_to_spawn(self):
-        """Return True if ready to spawn my next-cycle successor.
-
-        A task proxy is never ready to spawn if:
-           * it has spawned already
-           * its state is submit-failed (avoid running multiple instances
-             of a task with bad job submission config).
-        Otherwise a task proxy is ready to spawn if either:
-           * self.tdef.spawn ahead is True (results in spawning out to max
-             active cycle points), OR
-           * its state is >= submitted (allows successive instances
-             to run concurrently, but not out of order).
-        """
-        if (self.has_spawned or
-                self.state.status == TASK_STATUS_SUBMIT_FAILED):
-            return False
-        else:
-            return (self.tdef.spawn_ahead or
-                    self.state.is_greater_than(TASK_STATUS_READY))
-
     def get_state_summary(self):
         """Return a dict containing the state summary of this task proxy."""
         self.summary['state'] = self.state.status
@@ -1515,13 +176,20 @@ class TaskProxy(object):
             self.summary['mean_elapsed_time'] = (
                 float(sum(self.tdef.elapsed_times)) / count)
         elif self.summary['execution_time_limit']:
-            self.summary['mean_elapsed_time'] = \
-                self.summary['execution_time_limit']
+            self.summary['mean_elapsed_time'] = float(
+                self.summary['execution_time_limit'])
         else:
             self.summary['mean_elapsed_time'] = None
 
         return self.summary
 
+    def get_try_num(self):
+        """Return the number of automatic tries (try number)."""
+        try:
+            return self.try_timers[TASK_STATUS_RETRYING].num + 1
+        except (AttributeError, KeyError):
+            return 0
+
     def next_point(self):
         """Return the next cycle point."""
         p_next = None
@@ -1535,58 +203,51 @@ class TaskProxy(object):
             p_next = min(adjusted)
         return p_next
 
-    def get_job_log_path(self, head_mode=None, submit_num=None, tail=None):
-        """Return the job log path."""
-        args = [str(self.point), self.tdef.name]
-        if submit_num is None:
-            submit_num = self.submit_num
-        try:
-            submit_num = "%02d" % submit_num
-        except TypeError:
-            pass
-        if submit_num:
-            args.append(submit_num)
-        if head_mode == self.HEAD_MODE_LOCAL:
-            args.insert(0, GLOBAL_CFG.get_derived_host_item(
-                self.suite_name, "suite job log directory"))
-        elif head_mode == self.HEAD_MODE_REMOTE:
-            args.insert(0, GLOBAL_CFG.get_derived_host_item(
-                self.suite_name, 'suite job log directory',
-                self.task_host, self.task_owner))
-        if tail:
-            args.append(tail)
-        return os.path.join(*args)
+    def ready_to_run(self, now):
+        """Am I in a pre-run state but ready to run?
 
-    def check_poll_ready(self, now=None):
-        """Check if it is the next poll time."""
-        return (
-            self.state.status == TASK_STATUS_SUBMITTED and (
-                self.check_submission_timeout(now) or
-                self._check_poll_timer(self.KEY_SUBMIT, now)
-            )
-        ) or (
-            self.state.status == TASK_STATUS_RUNNING and (
-                self.check_execution_timeout(now) or
-                self._check_poll_timer(self.KEY_EXECUTE, now)
+        Queued tasks are not counted as they've already been deemed ready.
+
+        """
+        return self.start_time_reached(now) and (
+            (
+                self.state.status == TASK_STATUS_WAITING and
+                self.state.prerequisites_are_all_satisfied() and
+                all(self.state.external_triggers.values())
+            ) or
+            (
+                self.state.status in self.try_timers and
+                self.try_timers[self.state.status].is_delay_done(now)
             )
         )
 
-    def _check_poll_timer(self, key, now=None):
-        """Set the next execution/submission poll time."""
-        timer = self.poll_timers.get(key)
-        if timer is not None and timer.is_delay_done(now):
-            self._set_next_poll_time(key)
-            return True
+    def reset_manual_trigger(self):
+        """This is called immediately after manual trigger flag used."""
+        if self.manual_trigger:
+            self.manual_trigger = False
+            self.is_manual_submit = True
+            # unset any retry delay timers
+            for timer in self.try_timers.values():
+                timer.timeout = None
+
+    def set_event_time(self, event_key, time_str=None):
+        """Set event time in self.summary
+
+        Set values of both event_key + "_time" and event_key + "_time_string".
+        """
+        if time_str is None:
+            self.summary[event_key + '_time'] = None
         else:
-            return False
+            self.summary[event_key + '_time'] = float(
+                get_unix_time_from_time_string(time_str))
+        self.summary[event_key + '_time_string'] = time_str
 
-    def _set_next_poll_time(self, key):
-        """Set the next execution/submission poll time."""
-        timer = self.poll_timers.get(key)
-        if timer is not None:
-            if timer.num is None:
-                timer.num = 0
-            delay = timer.next(no_exhaust=True)
-            if delay is not None:
-                self.log(INFO, 'next job poll in %s (after %s)' % (
-                    timer.delay_as_seconds(), timer.timeout_as_str()))
+    def start_time_reached(self, now):
+        """Has this task reached its clock trigger time?"""
+        if self.tdef.clocktrigger_offset is None:
+            return True
+        if self.delayed_start is None:
+            self.delayed_start = (
+                self.get_point_as_seconds() +
+                self.get_offset_as_seconds(self.tdef.clocktrigger_offset))
+        return now > self.delayed_start
diff --git a/lib/cylc/task_state.py b/lib/cylc/task_state.py
index ece1f21..2d24329 100644
--- a/lib/cylc/task_state.py
+++ b/lib/cylc/task_state.py
@@ -19,16 +19,16 @@
 """Task state related logic."""
 
 
-from logging import WARNING, INFO, DEBUG
-
+from cylc.cycling.loader import get_point_relative
 import cylc.flags as flags
-from cylc.task_outputs import TaskOutputs
 from cylc.prerequisite import Prerequisite
-from cylc.cycling.loader import get_point_relative
+from cylc.suite_logging import LOG
 from cylc.task_id import TaskID
 from cylc.task_outputs import (
+    TaskOutputs,
     TASK_OUTPUT_EXPIRED, TASK_OUTPUT_SUBMITTED, TASK_OUTPUT_SUBMIT_FAILED,
     TASK_OUTPUT_STARTED, TASK_OUTPUT_SUCCEEDED, TASK_OUTPUT_FAILED)
+from cylc.wallclock import get_current_time_string
 
 
 # Task status names.
@@ -154,100 +154,20 @@ TASK_STATUSES_AUTO_EXPAND = set([
 ])
 
 
-class TaskStateError(ValueError):
-    """Illegal task state."""
-    pass
-
-
 class TaskState(object):
     """Task status and utilities."""
 
     # Memory optimization - constrain possible attributes to this list.
-    __slots__ = ["_STATUS_MAP", "status", "identity", "db_events_insert",
-                 "db_update_status", "log",
+    __slots__ = ["identity", "status", "hold_swap",
                  "_is_satisfied", "_suicide_is_satisfied", "prerequisites",
                  "suicide_prerequisites", "external_triggers", "outputs",
-                 "kill_failed", "hold_swap", "run_mode",
-                 "submission_timer_timeout", "execution_timer_timeout"]
-
-    # Associate status names with other properties.
-    _STATUS_MAP = {
-        TASK_STATUS_RUNAHEAD: {
-            "gtk_label": "r_unahead",  # GTK widget labels.
-            "ascii_ctrl": "\033[1;37;44m"  # Terminal color control codes.
-        },
-        TASK_STATUS_WAITING: {
-            "gtk_label": "_waiting",
-            "ascii_ctrl": "\033[1;36m"
-        },
-        TASK_STATUS_HELD: {
-            "gtk_label": "_held",
-            "ascii_ctrl": "\033[1;37;43m"
-        },
-        TASK_STATUS_QUEUED: {
-            "gtk_label": "_queued",
-            "ascii_ctrl": "\033[1;38;44m"
-        },
-        TASK_STATUS_READY: {
-            "gtk_label": "rea_dy",
-            "ascii_ctrl": "\033[1;32m"
-        },
-        TASK_STATUS_EXPIRED: {
-            "gtk_label": "e_xpired",
-            "ascii_ctrl": "\033[1;37;40m"
-        },
-        TASK_STATUS_SUBMITTED: {
-            "gtk_label": "sub_mitted",
-            "ascii_ctrl": "\033[1;33m"
-        },
-        TASK_STATUS_SUBMIT_FAILED: {
-            "gtk_label": "submit-f_ailed",
-            "ascii_ctrl": "\033[1;34m"
-        },
-        TASK_STATUS_SUBMIT_RETRYING: {
-            "gtk_label": "submit-retryin_g",
-            "ascii_ctrl": "\033[1;31m"
-        },
-        TASK_STATUS_RUNNING: {
-            "gtk_label": "_running",
-            "ascii_ctrl": "\033[1;37;42m"
-        },
-        TASK_STATUS_SUCCEEDED: {
-            "gtk_label": "_succeeded",
-            "ascii_ctrl": "\033[0m"
-        },
-        TASK_STATUS_FAILED: {
-            "gtk_label": "_failed",
-            "ascii_ctrl": "\033[1;37;41m"
-        },
-        TASK_STATUS_RETRYING: {
-            "gtk_label": "retr_ying",
-            "ascii_ctrl": "\033[1;35m"
-        }
-    }
-
-    @classmethod
-    def get_status_prop(cls, status, key, subst=None):
-        """Return property for a task status."""
-        if key == "ascii_ctrl":
-            if subst is not None:
-                return "%s%s\033[0m" % (cls._STATUS_MAP[status][key], subst)
-            else:
-                return "%s%s\033[0m" % (cls._STATUS_MAP[status][key], status)
-        try:
-            return cls._STATUS_MAP[status][key]
-        except KeyError:
-            raise TaskStateError("Bad task status (%s, %s)" % (status, key))
-
-    def __init__(self, status, hold_swap, point, identity, tdef,
-                 db_events_insert, db_update_status, log):
+                 "kill_failed", "time_updated"]
 
+    def __init__(self, tdef, point, status, hold_swap):
+        self.identity = TaskID.get(tdef.name, str(point))
         self.status = status
         self.hold_swap = hold_swap
-        self.identity = identity
-        self.db_events_insert = db_events_insert
-        self.db_update_status = db_update_status
-        self.log = log
+        self.time_updated = None
 
         self._is_satisfied = None
         self._suicide_is_satisfied = None
@@ -255,7 +175,7 @@ class TaskState(object):
         # Prerequisites.
         self.prerequisites = []
         self.suicide_prerequisites = []
-        self._add_prerequisites(point, identity, tdef)
+        self._add_prerequisites(point, tdef)
 
         # External Triggers.
         self.external_triggers = {}
@@ -267,9 +187,7 @@ class TaskState(object):
             self.external_triggers[ext] = False
 
         # Message outputs.
-        self.outputs = TaskOutputs(identity)
-        for outp in tdef.outputs:
-            self.outputs.add(outp.get_string(point))
+        self.outputs = TaskOutputs(tdef, point)
 
         # Standard outputs.
         self.outputs.add(TASK_OUTPUT_SUBMITTED)
@@ -277,12 +195,6 @@ class TaskState(object):
         self.outputs.add(TASK_OUTPUT_SUCCEEDED)
 
         self.kill_failed = False
-        self.run_mode = tdef.run_mode
-
-        # TODO - these are here because current use in reset_state(); should be
-        # disentangled and put in the task_proxy module.
-        self.submission_timer_timeout = None
-        self.execution_timer_timeout = None
 
     def satisfy_me(self, task_output_msgs, task_outputs):
         """Attempt to get my prerequisites satisfied."""
@@ -371,7 +283,22 @@ class TaskState(object):
         self.outputs.remove(TASK_OUTPUT_SUBMIT_FAILED)
         self.outputs.remove(TASK_OUTPUT_FAILED)
 
-    def release(self):
+    def set_held(self):
+        """Set state to TASK_STATUS_HELD, if possible.
+
+        If state can be held, set hold_swap to current state.
+        If state is active, set hold_swap to TASK_STATUS_HELD.
+        If state cannot be held, do nothing.
+        """
+        if self.status in TASK_STATUSES_ACTIVE:
+            self.hold_swap = TASK_STATUS_HELD
+            return
+        elif self.status in [
+                TASK_STATUS_WAITING, TASK_STATUS_QUEUED,
+                TASK_STATUS_SUBMIT_RETRYING, TASK_STATUS_RETRYING]:
+            return self._set_state(TASK_STATUS_HELD)
+
+    def unset_held(self):
         """Reset to my pre-held state, if not beyond the stop point."""
         if self.status != TASK_STATUS_HELD:
             return
@@ -380,53 +307,15 @@ class TaskState(object):
         elif self.hold_swap == TASK_STATUS_HELD:
             self.hold_swap = None
         else:
-            self.submission_timer_timeout = None
-            self.execution_timer_timeout = None
             self.reset_state(self.hold_swap)
 
-    def set_state(self, status, loglvl=DEBUG):
-        """Set, log and record task status (normal change, not forced - don't
-        update task_events table)."""
-        if self.status == self.hold_swap:
-            self.hold_swap = None
-        if status == self.status and self.hold_swap is None:
-            return
-        if status == TASK_STATUS_HELD:
-            self.log(loglvl, '%s => %s (%s)' % (
-                self.status, status, self.status))
-            self.hold_swap = self.status
-        elif (self.hold_swap == TASK_STATUS_HELD and
-                status not in TASK_STATUSES_FINAL):
-            self.log(loglvl, '%s (%s) => %s (%s)' % (
-                self.status, TASK_STATUS_HELD,
-                TASK_STATUS_HELD, status))
-            self.hold_swap = status
-            status = TASK_STATUS_HELD
-        elif self.hold_swap:
-            self.log(loglvl, '%s (%s) => %s' % (
-                self.status, self.hold_swap, status))
-            self.hold_swap = None
-        else:
-            self.log(loglvl, '%s => %s' % (self.status, status))
-        self.status = status
-        flags.iflag = True
-        self.db_update_status()
-
     def reset_state(self, status):
         """Reset status of task."""
-        if status == TASK_STATUS_HELD:
-            if self.status in TASK_STATUSES_ACTIVE:
-                self.hold_swap = TASK_STATUS_HELD
-                return
-            if self.status not in [
-                    TASK_STATUS_WAITING, TASK_STATUS_QUEUED,
-                    TASK_STATUS_SUBMIT_RETRYING, TASK_STATUS_RETRYING]:
-                return
-        elif status == TASK_STATUS_EXPIRED:
+        if status == TASK_STATUS_EXPIRED:
             self.set_prerequisites_all_satisfied()
             self.unset_special_outputs()
             self.outputs.set_all_incomplete()
-            self.outputs.add(TASK_OUTPUT_EXPIRED, True)
+            self.outputs.add(TASK_OUTPUT_EXPIRED, is_completed=True)
         elif status == TASK_STATUS_WAITING:
             self.set_prerequisites_not_satisfied()
             self.unset_special_outputs()
@@ -435,138 +324,74 @@ class TaskState(object):
             self.set_prerequisites_all_satisfied()
             self.unset_special_outputs()
             self.outputs.set_all_incomplete()
+        elif status == TASK_STATUS_SUBMITTED:
+            self.set_prerequisites_all_satisfied()
+            self.outputs.set_completed(TASK_OUTPUT_SUBMITTED)
+        elif status == TASK_STATUS_SUBMIT_RETRYING:
+            self.set_prerequisites_all_satisfied()
+            self.outputs.remove(TASK_OUTPUT_SUBMITTED)
+        elif status == TASK_STATUS_SUBMIT_FAILED:
+            self.set_prerequisites_all_satisfied()
+            self.outputs.remove(TASK_OUTPUT_SUBMITTED)
+            self.outputs.add(TASK_OUTPUT_SUBMIT_FAILED, is_completed=True)
         elif status == TASK_STATUS_SUCCEEDED:
             self.set_prerequisites_all_satisfied()
             self.unset_special_outputs()
-            # TODO - for message outputs this should be optional (see #1551):
-            self.outputs.set_all_completed()
+            self.outputs.set_completed(TASK_OUTPUT_SUBMITTED)
+            self.outputs.set_completed(TASK_OUTPUT_STARTED)
+            self.outputs.set_completed(TASK_OUTPUT_SUCCEEDED)
+        elif status == TASK_STATUS_RETRYING:
+            self.set_prerequisites_all_satisfied()
+            self.outputs.set_all_incomplete()
         elif status == TASK_STATUS_FAILED:
             self.set_prerequisites_all_satisfied()
             self.outputs.set_all_incomplete()
             # Set a new failed output just as if a failure message came in
-            self.outputs.add(TASK_OUTPUT_FAILED, True)
-
-        self.submission_timer_timeout = None
-        self.execution_timer_timeout = None
-        return self.set_state(status)
-
-    def is_ready_to_run(self, retry_delay_done, start_time_reached):
-        """With current status, is the task ready to run?"""
-        return (
-            (
-                (
-                    self.status == TASK_STATUS_WAITING and
-                    self.prerequisites_are_all_satisfied() and
-                    all(self.external_triggers.values())
-                ) or
-                (
-                    self.status in [TASK_STATUS_SUBMIT_RETRYING,
-                                    TASK_STATUS_RETRYING] and
-                    retry_delay_done
-                )
-            ) and start_time_reached
-        )
+            self.outputs.add(TASK_OUTPUT_FAILED, is_completed=True)
+
+        return self._set_state(status)
+
+    def _set_state(self, status):
+        """Set, log and record task status (normal change, not forced - don't
+        update task_events table)."""
+        if self.status == self.hold_swap:
+            self.hold_swap = None
+        if status == self.status and self.hold_swap is None:
+            return
+        o_status, o_hold_swap = self.status, self.hold_swap
+        if status == TASK_STATUS_HELD:
+            self.hold_swap = self.status
+        elif (self.hold_swap == TASK_STATUS_HELD and
+                status not in TASK_STATUSES_FINAL):
+            self.hold_swap = status
+            status = TASK_STATUS_HELD
+        elif self.hold_swap:
+            self.hold_swap = None
+        self.status = status
+        self.time_updated = get_current_time_string()
+        flags.iflag = True
+        # Log
+        message = str(o_status)
+        if o_hold_swap:
+            message += " (%s)" % o_hold_swap
+        message += " => %s" % self.status
+        if self.hold_swap:
+            message += " (%s)" % self.hold_swap
+        LOG.debug(message, itask=self.identity)
 
     def is_greater_than(self, status):
         """"Return True if self.status > status."""
         return (TASK_STATUSES_ORDERED.index(self.status) >
                 TASK_STATUSES_ORDERED.index(status))
 
-    def set_expired(self):
-        """Manipulate state for task expired."""
-        self.reset_state(TASK_STATUS_EXPIRED)
-
-    def set_ready_to_submit(self):
-        """Manipulate state just prior to job submission."""
-        self.set_state(TASK_STATUS_READY)
-
-    def set_submit_failed(self):
-        """Manipulate state after job submission failure."""
-        self.set_state(TASK_STATUS_SUBMIT_FAILED)
-        self.outputs.remove(TASK_OUTPUT_SUBMITTED)
-        self.outputs.add(TASK_OUTPUT_SUBMIT_FAILED, True)
-
-    def set_submit_retry(self):
-        """Manipulate state for job submission retry."""
-        self.outputs.remove(TASK_OUTPUT_SUBMITTED)
-        self.set_state(TASK_STATUS_SUBMIT_RETRYING)
-        self.set_prerequisites_all_satisfied()
-
-    def set_submit_succeeded(self):
-        """Set status to submitted."""
-        if not self.outputs.is_completed(TASK_OUTPUT_SUBMITTED):
-            self.outputs.set_completed(TASK_OUTPUT_SUBMITTED)
-            # Allow submitted tasks to spawn even if nothing else is happening.
-            flags.pflag = True
-        if self.status == TASK_STATUS_READY:
-            # In rare occassions, the submit command of a batch system has sent
-            # the job to its server, and the server has started the job before
-            # the job submit command returns.
-            self.set_state(TASK_STATUS_SUBMITTED)
-            return True
-        else:
-            return False
-
-    def set_executing(self):
-        """Manipulate state for job execution."""
-        self.set_state(TASK_STATUS_RUNNING)
-        if self.run_mode == 'simulation':
-            self.outputs.set_completed(TASK_OUTPUT_STARTED)
-
-    def set_execution_succeeded(self, msg_was_polled):
-        """Manipulate state for job execution success."""
-        self.set_state(TASK_STATUS_SUCCEEDED)
-        warnings = []
-        if not self.outputs.all_completed():
-            err = "Succeeded with unreported outputs:"
-            for key in self.outputs.not_completed:
-                err += "\n  " + key
-            warnings.append(err)
-            if msg_was_polled:
-                # Assume all outputs complete (e.g. poll at restart).
-                # TODO - just poll for outputs in the job status file.
-                warnings.append("Assuming ALL outputs completed.")
-                self.outputs.set_all_completed()
-            else:
-                # A succeeded task MUST have submitted and started.
-                # TODO - just poll for outputs in the job status file?
-                for output in [TASK_OUTPUT_SUBMITTED, TASK_OUTPUT_STARTED]:
-                    if not self.outputs.is_completed(output):
-                        warnings.append(
-                            "Assuming output completed:  \n %s" % output)
-                        self.outputs.set_completed(output)
-        return warnings
-
-    def set_execution_failed(self):
-        """Manipulate state for job execution failure."""
-        self.reset_state(TASK_STATUS_FAILED)
-
-    def set_execution_retry(self):
-        """Manipulate state for job execution retry."""
-        self.set_state(TASK_STATUS_RETRYING)
-        self.set_prerequisites_all_satisfied()
-
-    def record_output(self, msg, msg_was_polled):
-        """Record a completed output."""
-        if self.outputs.exists(msg):
-            if not self.outputs.is_completed(msg):
-                flags.pflag = True
-                self.outputs.set_completed(msg)
-                self.db_events_insert(event="output completed", message=msg)
-            elif not msg_was_polled:
-                # This output has already been reported complete. Not an error
-                # condition - maybe the network was down for a bit. Ok for
-                # polling as multiple polls *should* produce the same result.
-                return False
-        return True
-
-    def _add_prerequisites(self, point, identity, tdef):
+    def _add_prerequisites(self, point, tdef):
         """Add task prerequisites."""
         # self.triggers[sequence] = [triggers for sequence]
         # Triggers for sequence_i only used if my cycle point is a
         # valid member of sequence_i's sequence of cycle points.
         self._is_satisfied = None
         self._suicide_is_satisfied = None
+        identity = TaskID.get(tdef.name, str(point))
 
         for sequence, exps in tdef.triggers.items():
             for ctrig, exp in exps:
diff --git a/lib/cylc/task_state_prop.py b/lib/cylc/task_state_prop.py
new file mode 100644
index 0000000..44c72db
--- /dev/null
+++ b/lib/cylc/task_state_prop.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2017 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""Task state properties for display."""
+
+from cylc.task_state import (
+    TASK_STATUS_RUNAHEAD,
+    TASK_STATUS_WAITING,
+    TASK_STATUS_HELD,
+    TASK_STATUS_QUEUED,
+    TASK_STATUS_READY,
+    TASK_STATUS_EXPIRED,
+    TASK_STATUS_SUBMITTED,
+    TASK_STATUS_SUBMIT_FAILED,
+    TASK_STATUS_SUBMIT_RETRYING,
+    TASK_STATUS_RUNNING,
+    TASK_STATUS_SUCCEEDED,
+    TASK_STATUS_FAILED,
+    TASK_STATUS_RETRYING)
+
+
+_STATUS_MAP = {
+    TASK_STATUS_RUNAHEAD: {
+        "gtk_label": "r_unahead",  # GTK widget labels.
+        "ascii_ctrl": "\033[1;37;44m"  # Terminal color control codes.
+    },
+    TASK_STATUS_WAITING: {
+        "gtk_label": "_waiting",
+        "ascii_ctrl": "\033[1;36m"
+    },
+    TASK_STATUS_HELD: {
+        "gtk_label": "_held",
+        "ascii_ctrl": "\033[1;37;43m"
+    },
+    TASK_STATUS_QUEUED: {
+        "gtk_label": "_queued",
+        "ascii_ctrl": "\033[1;38;44m"
+    },
+    TASK_STATUS_READY: {
+        "gtk_label": "rea_dy",
+        "ascii_ctrl": "\033[1;32m"
+    },
+    TASK_STATUS_EXPIRED: {
+        "gtk_label": "e_xpired",
+        "ascii_ctrl": "\033[1;37;40m"
+    },
+    TASK_STATUS_SUBMITTED: {
+        "gtk_label": "sub_mitted",
+        "ascii_ctrl": "\033[1;33m"
+    },
+    TASK_STATUS_SUBMIT_FAILED: {
+        "gtk_label": "submit-f_ailed",
+        "ascii_ctrl": "\033[1;34m"
+    },
+    TASK_STATUS_SUBMIT_RETRYING: {
+        "gtk_label": "submit-retryin_g",
+        "ascii_ctrl": "\033[1;31m"
+    },
+    TASK_STATUS_RUNNING: {
+        "gtk_label": "_running",
+        "ascii_ctrl": "\033[1;37;42m"
+    },
+    TASK_STATUS_SUCCEEDED: {
+        "gtk_label": "_succeeded",
+        "ascii_ctrl": "\033[0m"
+    },
+    TASK_STATUS_FAILED: {
+        "gtk_label": "_failed",
+        "ascii_ctrl": "\033[1;37;41m"
+    },
+    TASK_STATUS_RETRYING: {
+        "gtk_label": "retr_ying",
+        "ascii_ctrl": "\033[1;35m"
+    }
+}
+
+
+def get_status_prop(status, key, subst=None):
+    """Return property for a task status."""
+    if key == "ascii_ctrl" and subst is not None:
+        return "%s%s\033[0m" % (_STATUS_MAP[status][key], subst)
+    elif key == "ascii_ctrl":
+        return "%s%s\033[0m" % (_STATUS_MAP[status][key], status)
+    else:
+        return _STATUS_MAP[status][key]
diff --git a/lib/cylc/task_trigger.py b/lib/cylc/task_trigger.py
index f955342..402185b 100644
--- a/lib/cylc/task_trigger.py
+++ b/lib/cylc/task_trigger.py
@@ -19,9 +19,12 @@
 import re
 import sys
 
-from cylc.task_id import TaskID
 from cylc.cycling.loader import (
     get_interval, get_interval_cls, get_point_relative)
+from cylc.task_id import TaskID
+from cylc.task_outputs import (
+    TASK_OUTPUT_EXPIRED, TASK_OUTPUT_SUBMITTED, TASK_OUTPUT_SUBMIT_FAILED,
+    TASK_OUTPUT_STARTED, TASK_OUTPUT_SUCCEEDED, TASK_OUTPUT_FAILED)
 
 
 warned = False
@@ -30,21 +33,14 @@ BCOMPAT_MSG_RE_C6 = re.compile('^(.*)\[\s*(([+-])?\s*(.*))?\s*\](.*)$')
 DEPRECN_WARN_TMPL = "WARNING: message trigger offsets are deprecated\n  %s"
 
 # Task trigger names (e.g. foo:fail => bar).
-TASK_TRIGGER_EXPIRED = "expired"
-TASK_TRIGGER_SUBMITTED = "submitted"
-TASK_TRIGGER_SUBMIT_FAILED = "submit-failed"
-TASK_TRIGGER_STARTED = "started"
-TASK_TRIGGER_SUCCEEDED = "succeeded"
-TASK_TRIGGER_FAILED = "failed"
-
 # Can use "foo:fail => bar" or "foo:failed => bar", etc.
 _ALT_TRIGGER_NAMES = {
-    TASK_TRIGGER_EXPIRED: ["expire"],
-    TASK_TRIGGER_SUBMITTED: ["submit"],
-    TASK_TRIGGER_SUBMIT_FAILED: ["submit-fail"],
-    TASK_TRIGGER_STARTED: ["start"],
-    TASK_TRIGGER_SUCCEEDED: ["succeed"],
-    TASK_TRIGGER_FAILED: ["fail"],
+    TASK_OUTPUT_EXPIRED: ["expire"],
+    TASK_OUTPUT_SUBMITTED: ["submit"],
+    TASK_OUTPUT_SUBMIT_FAILED: ["submit-fail"],
+    TASK_OUTPUT_STARTED: ["start"],
+    TASK_OUTPUT_SUCCEEDED: ["succeed"],
+    TASK_OUTPUT_FAILED: ["fail"],
 }
 
 
@@ -126,7 +122,7 @@ It generates a concrete prerequisite string given a task's cycle point value.
         self.message = None
         self.message_offset = None
         self.builtin = None
-        qualifier = qualifier or TASK_TRIGGER_SUCCEEDED
+        qualifier = qualifier or TASK_OUTPUT_SUCCEEDED
 
         try:
             # Message trigger?
diff --git a/lib/cylc/taskdef.py b/lib/cylc/taskdef.py
index 52fa9a1..24cabb1 100644
--- a/lib/cylc/taskdef.py
+++ b/lib/cylc/taskdef.py
@@ -19,10 +19,12 @@
 """Task definition."""
 
 from collections import deque
+import re
 
 from cylc.cycling.loader import (
     get_point_relative, get_interval, is_offset_absolute)
 from cylc.task_id import TaskID
+from cylc.task_trigger import get_message_offset
 
 
 class TaskDefError(Exception):
@@ -165,3 +167,12 @@ class TaskDef(object):
             return max_cutoff_point
         # There aren't any dependent tasks in other cycles for my_point.
         return my_point
+
+    def get_outputs(self, point):
+        """Return task message outputs for initialisation of TaskOutputs."""
+        for (key, msg), base_interval in self.outputs:
+            new_point = point
+            msg_offset = get_message_offset(msg, base_interval)
+            if msg_offset:
+                new_point = point + msg_offset
+            yield (key, re.sub('\[.*\]', str(new_point), msg))
diff --git a/lib/cylc/time_parser.py b/lib/cylc/time_parser.py
index 21e402c..fc47a79 100644
--- a/lib/cylc/time_parser.py
+++ b/lib/cylc/time_parser.py
@@ -184,7 +184,7 @@ class CylcTimeParser(object):
                          context_start_point=None,
                          context_end_point=None):
         """Parse an expression in abbrev. or full ISO recurrence format."""
-        expression, exclusion = parse_exclusion(expression)
+        expression, exclusions = parse_exclusion(expression)
         if context_start_point is None:
             context_start_point = self.context_start_point
         if context_end_point is None:
@@ -193,6 +193,7 @@ class CylcTimeParser(object):
             result = rec_object.search(expression)
             if not result:
                 continue
+
             props = {}
             repetitions = result.groupdict().get("reps")
             if repetitions is not None:
@@ -216,13 +217,20 @@ class CylcTimeParser(object):
                 raise CylcMissingFinalCyclePointError(
                     "This suite requires a final cycle point."
                 )
+
             exclusion_point = None
-            if exclusion:
-                exclusion_point, excl_off = self._get_point_from_expression(
-                    exclusion, None, is_required=False, allow_truncated=False
-                )
-                if excl_off:
-                    exclusion_point += excl_off
+            exclusion_points = []
+            # Convert the exclusion strings to ISO8601 points
+            if exclusions is not None:
+                for exclusion in exclusions:
+                    exclusion_point, excl_off = (
+                        self._get_point_from_expression(
+                            exclusion, None, is_required=False,
+                            allow_truncated=False))
+                    if excl_off:
+                        exclusion_point += excl_off
+                    exclusion_points.append(exclusion_point)
+
             intv = result.groupdict().get("intv")
             intv_context_truncated_point = None
             if start_point is not None and start_point.truncated:
@@ -265,7 +273,7 @@ class CylcTimeParser(object):
                 start_point=start_point,
                 duration=interval,
                 end_point=end_point
-            ), exclusion_point
+            ), exclusion_points
 
         raise CylcTimeSyntaxError("Could not parse %s" % expression)
 
@@ -315,6 +323,7 @@ class CylcTimeParser(object):
 
     def _get_point_from_expression(self, expr, context, is_required=False,
                                    allow_truncated=False):
+        """Gets a TimePoint from an expression"""
         if expr is None:
             if is_required and allow_truncated:
                 if context is None:
diff --git a/lib/isodatetime/data.py b/lib/isodatetime/data.py
index e394626..2c77944 100644
--- a/lib/isodatetime/data.py
+++ b/lib/isodatetime/data.py
@@ -2153,5 +2153,5 @@ def _type_checker(*objects):
 
 PARSE_PROPERTY_TRANSLATORS = {
     "seconds_since_unix_epoch":
-        get_timepoint_properties_from_seconds_since_unix_epoch
+    get_timepoint_properties_from_seconds_since_unix_epoch
 }
diff --git a/lib/jinja2/__init__.py b/lib/jinja2/__init__.py
index 029fb2e..4b0b7a8 100644
--- a/lib/jinja2/__init__.py
+++ b/lib/jinja2/__init__.py
@@ -23,11 +23,11 @@
         {% endblock %}
 
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD, see LICENSE for more details.
 """
 __docformat__ = 'restructuredtext en'
-__version__ = '2.8'
+__version__ = '2.9.6'
 
 # high level interface
 from jinja2.environment import Environment, Template
@@ -55,7 +55,7 @@ from jinja2.filters import environmentfilter, contextfilter, \
      evalcontextfilter
 from jinja2.utils import Markup, escape, clear_caches, \
      environmentfunction, evalcontextfunction, contextfunction, \
-     is_undefined
+     is_undefined, select_autoescape
 
 __all__ = [
     'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
@@ -67,4 +67,16 @@ __all__ = [
     'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
     'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
     'evalcontextfilter', 'evalcontextfunction', 'make_logging_undefined',
+    'select_autoescape',
 ]
+
+
+def _patch_async():
+    from jinja2.utils import have_async_gen
+    if have_async_gen:
+        from jinja2.asyncsupport import patch_all
+        patch_all()
+
+
+_patch_async()
+del _patch_async
diff --git a/lib/jinja2/_compat.py b/lib/jinja2/_compat.py
index 143962f..61d8530 100644
--- a/lib/jinja2/_compat.py
+++ b/lib/jinja2/_compat.py
@@ -45,7 +45,6 @@ if not PY2:
     implements_iterator = _identity
     implements_to_string = _identity
     encode_filename = _identity
-    get_next = lambda x: x.__next__
 
 else:
     unichr = unichr
@@ -77,8 +76,6 @@ else:
         cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
         return cls
 
-    get_next = lambda x: x.next
-
     def encode_filename(filename):
         if isinstance(filename, unicode):
             return filename.encode('utf-8')
@@ -86,23 +83,14 @@ else:
 
 
 def with_metaclass(meta, *bases):
+    """Create a base class with a metaclass."""
     # This requires a bit of explanation: the basic idea is to make a
-    # dummy metaclass for one level of class instanciation that replaces
-    # itself with the actual metaclass.  Because of internal type checks
-    # we also need to make sure that we downgrade the custom metaclass
-    # for one level to something closer to type (that's why __call__ and
-    # __init__ comes back from type etc.).
-    #
-    # This has the advantage over six.with_metaclass in that it does not
-    # introduce dummy classes into the final MRO.
-    class metaclass(meta):
-        __call__ = type.__call__
-        __init__ = type.__init__
+    # dummy metaclass for one level of class instantiation that replaces
+    # itself with the actual metaclass.
+    class metaclass(type):
         def __new__(cls, name, this_bases, d):
-            if this_bases is None:
-                return type.__new__(cls, name, (), d)
             return meta(name, bases, d)
-    return metaclass('temporary_class', None, {})
+    return type.__new__(metaclass, 'temporary_class', (), {})
 
 
 try:
diff --git a/lib/jinja2/_stringdefs.py b/lib/jinja2/_stringdefs.py
index da5830e..a5689f6 100644
--- a/lib/jinja2/_stringdefs.py
+++ b/lib/jinja2/_stringdefs.py
@@ -9,93 +9,23 @@
     Inspired by chartypes_create.py from the MoinMoin project, original
     implementation from Pygments.
 
-    :copyright: Copyright 2006-2009 by the Jinja team, see AUTHORS.
+    :copyright: Copyright 2006-2017 by the Jinja team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-from jinja2._compat import unichr
+# Generated code start
 
-Cc = u'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f'
-
-Cf = u'\xad\u0600\u0601\u0602\u0603\u06dd\u070f\u17b4\u17b5\u200b\u200c\u200d\u200e\u200f\u202a\u202b\u202c\u202d\u202e\u2060\u2061\u2062\u2063\u206a\u206b\u206c\u206d\u206e\u206f\ufeff\ufff9\ufffa\ufffb'
-
-Cn = u'\u0242\u0243\u0244\u0245\u0246\u0247\u0248\u0249\u024a\u024b\u024c\u024d\u024e\u024f\u0370\u0371\u0372\u0373\u0376\u0377\u0378\u0379\u037b\u037c\u037d\u037f\u0380\u0381\u0382\u0383\u038b\u038d\u03a2\u03cf\u0487\u04cf\u04fa\u04fb\u04fc\u04fd\u04fe\u04ff\u0510\u0511\u0512\u0513\u0514\u0515\u0516\u0517\u0518\u0519\u051a\u051b\u051c\u051d\u051e\u051f\u0520\u0521\u0522\u0523\u0524\u0525\u0526\u0527\u0528\u0529\u052a\u052b\u052c\u052d\u052e\u052f\u0530\u0557\u0558\u0560\u0588\u058b\u058 [...]
-
-Co = u'\ue000\ue001\ue002\ue003\ue004\ue005\ue006\ue007\ue008\ue009\ue00a\ue00b\ue00c\ue00d\ue00e\ue00f\ue010\ue011\ue012\ue013\ue014\ue015\ue016\ue017\ue018\ue019\ue01a\ue01b\ue01c\ue01d\ue01e\ue01f\ue020\ue021\ue022\ue023\ue024\ue025\ue026\ue027\ue028\ue029\ue02a\ue02b\ue02c\ue02d\ue02e\ue02f\ue030\ue031\ue032\ue033\ue034\ue035\ue036\ue037\ue038\ue039\ue03a\ue03b\ue03c\ue03d\ue03e\ue03f\ue040\ue041\ue042\ue043\ue044\ue045\ue046\ue047\ue048\ue049\ue04a\ue04b\ue04c\ue04d\ue04e\ue04f\ue05 [...]
-
-try:
-    Cs = eval(r"'\ud800\ud801\ud802\ud803\ud804\ud805\ud806\ud807\ud808\ud809\ud80a\ud80b\ud80c\ud80d\ud80e\ud80f\ud810\ud811\ud812\ud813\ud814\ud815\ud816\ud817\ud818\ud819\ud81a\ud81b\ud81c\ud81d\ud81e\ud81f\ud820\ud821\ud822\ud823\ud824\ud825\ud826\ud827\ud828\ud829\ud82a\ud82b\ud82c\ud82d\ud82e\ud82f\ud830\ud831\ud832\ud833\ud834\ud835\ud836\ud837\ud838\ud839\ud83a\ud83b\ud83c\ud83d\ud83e\ud83f\ud840\ud841\ud842\ud843\ud844\ud845\ud846\ud847\ud848\ud849\ud84a\ud84b\ud84c\ud84d\ud84e\ [...]
-except UnicodeDecodeError:
-    Cs = '' # Jython can't handle isolated surrogates
-
-Ll = u'abcdefghijklmnopqrstuvwxyz\xaa\xb5\xba\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff\u0101\u0103\u0105\u0107\u0109\u010b\u010d\u010f\u0111\u0113\u0115\u0117\u0119\u011b\u011d\u011f\u0121\u0123\u0125\u0127\u0129\u012b\u012d\u012f\u0131\u0133\u0135\u0137\u0138\u013a\u013c\u013e\u0140\u0142\u0144\u0146\u0148\u0149\u014b\u014d\u014f\u0151\u0153\u0155\u0157\u0159\u015b\u015d\u015f\u0161\u0163\u0165\u0167\ [...]
-
-Lm = u'\u02b0\u02b1\u02b2\u02b3\u02b4\u02b5\u02b6\u02b7\u02b8\u02b9\u02ba\u02bb\u02bc\u02bd\u02be\u02bf\u02c0\u02c1\u02c6\u02c7\u02c8\u02c9\u02ca\u02cb\u02cc\u02cd\u02ce\u02cf\u02d0\u02d1\u02e0\u02e1\u02e2\u02e3\u02e4\u02ee\u037a\u0559\u0640\u06e5\u06e6\u0e46\u0ec6\u10fc\u17d7\u1843\u1d2c\u1d2d\u1d2e\u1d2f\u1d30\u1d31\u1d32\u1d33\u1d34\u1d35\u1d36\u1d37\u1d38\u1d39\u1d3a\u1d3b\u1d3c\u1d3d\u1d3e\u1d3f\u1d40\u1d41\u1d42\u1d43\u1d44\u1d45\u1d46\u1d47\u1d48\u1d49\u1d4a\u1d4b\u1d4c\u1d4d\u1d4 [...]
-
-Lo = u'\u01bb\u01c0\u01c1\u01c2\u01c3\u05d0\u05d1\u05d2\u05d3\u05d4\u05d5\u05d6\u05d7\u05d8\u05d9\u05da\u05db\u05dc\u05dd\u05de\u05df\u05e0\u05e1\u05e2\u05e3\u05e4\u05e5\u05e6\u05e7\u05e8\u05e9\u05ea\u05f0\u05f1\u05f2\u0621\u0622\u0623\u0624\u0625\u0626\u0627\u0628\u0629\u062a\u062b\u062c\u062d\u062e\u062f\u0630\u0631\u0632\u0633\u0634\u0635\u0636\u0637\u0638\u0639\u063a\u0641\u0642\u0643\u0644\u0645\u0646\u0647\u0648\u0649\u064a\u066e\u066f\u0671\u0672\u0673\u0674\u0675\u0676\u0677\u067 [...]
-
-Lt = u'\u01c5\u01c8\u01cb\u01f2\u1f88\u1f89\u1f8a\u1f8b\u1f8c\u1f8d\u1f8e\u1f8f\u1f98\u1f99\u1f9a\u1f9b\u1f9c\u1f9d\u1f9e\u1f9f\u1fa8\u1fa9\u1faa\u1fab\u1fac\u1fad\u1fae\u1faf\u1fbc\u1fcc\u1ffc'
-
-Lu = u'ABCDEFGHIJKLMNOPQRSTUVWXYZ\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd8\xd9\xda\xdb\xdc\xdd\xde\u0100\u0102\u0104\u0106\u0108\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e\u0170\u0 [...]
-
-Mc = u'\u0903\u093e\u093f\u0940\u0949\u094a\u094b\u094c\u0982\u0983\u09be\u09bf\u09c0\u09c7\u09c8\u09cb\u09cc\u09d7\u0a03\u0a3e\u0a3f\u0a40\u0a83\u0abe\u0abf\u0ac0\u0ac9\u0acb\u0acc\u0b02\u0b03\u0b3e\u0b40\u0b47\u0b48\u0b4b\u0b4c\u0b57\u0bbe\u0bbf\u0bc1\u0bc2\u0bc6\u0bc7\u0bc8\u0bca\u0bcb\u0bcc\u0bd7\u0c01\u0c02\u0c03\u0c41\u0c42\u0c43\u0c44\u0c82\u0c83\u0cbe\u0cc0\u0cc1\u0cc2\u0cc3\u0cc4\u0cc7\u0cc8\u0cca\u0ccb\u0cd5\u0cd6\u0d02\u0d03\u0d3e\u0d3f\u0d40\u0d46\u0d47\u0d48\u0d4a\u0d4b\u0d4 [...]
-
-Me = u'\u0488\u0489\u06de\u20dd\u20de\u20df\u20e0\u20e2\u20e3\u20e4'
-
-Mn = u'\u0300\u0301\u0302\u0303\u0304\u0305\u0306\u0307\u0308\u0309\u030a\u030b\u030c\u030d\u030e\u030f\u0310\u0311\u0312\u0313\u0314\u0315\u0316\u0317\u0318\u0319\u031a\u031b\u031c\u031d\u031e\u031f\u0320\u0321\u0322\u0323\u0324\u0325\u0326\u0327\u0328\u0329\u032a\u032b\u032c\u032d\u032e\u032f\u0330\u0331\u0332\u0333\u0334\u0335\u0336\u0337\u0338\u0339\u033a\u033b\u033c\u033d\u033e\u033f\u0340\u0341\u0342\u0343\u0344\u0345\u0346\u0347\u0348\u0349\u034a\u034b\u034c\u034d\u034e\u034f\u035 [...]
-
-Nd = u'0123456789\u0660\u0661\u0662\u0663\u0664\u0665\u0666\u0667\u0668\u0669\u06f0\u06f1\u06f2\u06f3\u06f4\u06f5\u06f6\u06f7\u06f8\u06f9\u0966\u0967\u0968\u0969\u096a\u096b\u096c\u096d\u096e\u096f\u09e6\u09e7\u09e8\u09e9\u09ea\u09eb\u09ec\u09ed\u09ee\u09ef\u0a66\u0a67\u0a68\u0a69\u0a6a\u0a6b\u0a6c\u0a6d\u0a6e\u0a6f\u0ae6\u0ae7\u0ae8\u0ae9\u0aea\u0aeb\u0aec\u0aed\u0aee\u0aef\u0b66\u0b67\u0b68\u0b69\u0b6a\u0b6b\u0b6c\u0b6d\u0b6e\u0b6f\u0be6\u0be7\u0be8\u0be9\u0bea\u0beb\u0bec\u0bed\u0bee\ [...]
-
-Nl = u'\u16ee\u16ef\u16f0\u2160\u2161\u2162\u2163\u2164\u2165\u2166\u2167\u2168\u2169\u216a\u216b\u216c\u216d\u216e\u216f\u2170\u2171\u2172\u2173\u2174\u2175\u2176\u2177\u2178\u2179\u217a\u217b\u217c\u217d\u217e\u217f\u2180\u2181\u2182\u2183\u3007\u3021\u3022\u3023\u3024\u3025\u3026\u3027\u3028\u3029\u3038\u3039\u303a'
-
-No = u'\xb2\xb3\xb9\xbc\xbd\xbe\u09f4\u09f5\u09f6\u09f7\u09f8\u09f9\u0bf0\u0bf1\u0bf2\u0f2a\u0f2b\u0f2c\u0f2d\u0f2e\u0f2f\u0f30\u0f31\u0f32\u0f33\u1369\u136a\u136b\u136c\u136d\u136e\u136f\u1370\u1371\u1372\u1373\u1374\u1375\u1376\u1377\u1378\u1379\u137a\u137b\u137c\u17f0\u17f1\u17f2\u17f3\u17f4\u17f5\u17f6\u17f7\u17f8\u17f9\u2070\u2074\u2075\u2076\u2077\u2078\u2079\u2080\u2081\u2082\u2083\u2084\u2085\u2086\u2087\u2088\u2089\u2153\u2154\u2155\u2156\u2157\u2158\u2159\u215a\u215b\u215c\u215 [...]
-
-Pc = u'_\u203f\u2040\u2054\ufe33\ufe34\ufe4d\ufe4e\ufe4f\uff3f'
-
-Pd = u'-\u058a\u1806\u2010\u2011\u2012\u2013\u2014\u2015\u2e17\u301c\u3030\u30a0\ufe31\ufe32\ufe58\ufe63\uff0d'
-
-Pe = u')]}\u0f3b\u0f3d\u169c\u2046\u207e\u208e\u232a\u23b5\u2769\u276b\u276d\u276f\u2771\u2773\u2775\u27c6\u27e7\u27e9\u27eb\u2984\u2986\u2988\u298a\u298c\u298e\u2990\u2992\u2994\u2996\u2998\u29d9\u29db\u29fd\u3009\u300b\u300d\u300f\u3011\u3015\u3017\u3019\u301b\u301e\u301f\ufd3f\ufe18\ufe36\ufe38\ufe3a\ufe3c\ufe3e\ufe40\ufe42\ufe44\ufe48\ufe5a\ufe5c\ufe5e\uff09\uff3d\uff5d\uff60\uff63'
-
-Pf = u'\xbb\u2019\u201d\u203a\u2e03\u2e05\u2e0a\u2e0d\u2e1d'
-
-Pi = u'\xab\u2018\u201b\u201c\u201f\u2039\u2e02\u2e04\u2e09\u2e0c\u2e1c'
-
-Po = u'!"#%&\'*,./:;?@\\\xa1\xb7\xbf\u037e\u0387\u055a\u055b\u055c\u055d\u055e\u055f\u0589\u05be\u05c0\u05c3\u05c6\u05f3\u05f4\u060c\u060d\u061b\u061e\u061f\u066a\u066b\u066c\u066d\u06d4\u0700\u0701\u0702\u0703\u0704\u0705\u0706\u0707\u0708\u0709\u070a\u070b\u070c\u070d\u0964\u0965\u0970\u0df4\u0e4f\u0e5a\u0e5b\u0f04\u0f05\u0f06\u0f07\u0f08\u0f09\u0f0a\u0f0b\u0f0c\u0f0d\u0f0e\u0f0f\u0f10\u0f11\u0f12\u0f85\u0fd0\u0fd1\u104a\u104b\u104c\u104d\u104e\u104f\u10fb\u1361\u1362\u1363\u1364\u1365 [...]
-
-Ps = u'([{\u0f3a\u0f3c\u169b\u201a\u201e\u2045\u207d\u208d\u2329\u23b4\u2768\u276a\u276c\u276e\u2770\u2772\u2774\u27c5\u27e6\u27e8\u27ea\u2983\u2985\u2987\u2989\u298b\u298d\u298f\u2991\u2993\u2995\u2997\u29d8\u29da\u29fc\u3008\u300a\u300c\u300e\u3010\u3014\u3016\u3018\u301a\u301d\ufd3e\ufe17\ufe35\ufe37\ufe39\ufe3b\ufe3d\ufe3f\ufe41\ufe43\ufe47\ufe59\ufe5b\ufe5d\uff08\uff3b\uff5b\uff5f\uff62'
-
-Sc = u'$\xa2\xa3\xa4\xa5\u060b\u09f2\u09f3\u0af1\u0bf9\u0e3f\u17db\u20a0\u20a1\u20a2\u20a3\u20a4\u20a5\u20a6\u20a7\u20a8\u20a9\u20aa\u20ab\u20ac\u20ad\u20ae\u20af\u20b0\u20b1\u20b2\u20b3\u20b4\u20b5\ufdfc\ufe69\uff04\uffe0\uffe1\uffe5\uffe6'
-
-Sk = u'^`\xa8\xaf\xb4\xb8\u02c2\u02c3\u02c4\u02c5\u02d2\u02d3\u02d4\u02d5\u02d6\u02d7\u02d8\u02d9\u02da\u02db\u02dc\u02dd\u02de\u02df\u02e5\u02e6\u02e7\u02e8\u02e9\u02ea\u02eb\u02ec\u02ed\u02ef\u02f0\u02f1\u02f2\u02f3\u02f4\u02f5\u02f6\u02f7\u02f8\u02f9\u02fa\u02fb\u02fc\u02fd\u02fe\u02ff\u0374\u0375\u0384\u0385\u1fbd\u1fbf\u1fc0\u1fc1\u1fcd\u1fce\u1fcf\u1fdd\u1fde\u1fdf\u1fed\u1fee\u1fef\u1ffd\u1ffe\u309b\u309c\ua700\ua701\ua702\ua703\ua704\ua705\ua706\ua707\ua708\ua709\ua70a\ua70b\ua70 [...]
-
-Sm = u'+<=>|~\xac\xb1\xd7\xf7\u03f6\u2044\u2052\u207a\u207b\u207c\u208a\u208b\u208c\u2140\u2141\u2142\u2143\u2144\u214b\u2190\u2191\u2192\u2193\u2194\u219a\u219b\u21a0\u21a3\u21a6\u21ae\u21ce\u21cf\u21d2\u21d4\u21f4\u21f5\u21f6\u21f7\u21f8\u21f9\u21fa\u21fb\u21fc\u21fd\u21fe\u21ff\u2200\u2201\u2202\u2203\u2204\u2205\u2206\u2207\u2208\u2209\u220a\u220b\u220c\u220d\u220e\u220f\u2210\u2211\u2212\u2213\u2214\u2215\u2216\u2217\u2218\u2219\u221a\u221b\u221c\u221d\u221e\u221f\u2220\u2221\u2222\ [...]
-
-So = u'\xa6\xa7\xa9\xae\xb0\xb6\u0482\u060e\u060f\u06e9\u06fd\u06fe\u09fa\u0b70\u0bf3\u0bf4\u0bf5\u0bf6\u0bf7\u0bf8\u0bfa\u0f01\u0f02\u0f03\u0f13\u0f14\u0f15\u0f16\u0f17\u0f1a\u0f1b\u0f1c\u0f1d\u0f1e\u0f1f\u0f34\u0f36\u0f38\u0fbe\u0fbf\u0fc0\u0fc1\u0fc2\u0fc3\u0fc4\u0fc5\u0fc7\u0fc8\u0fc9\u0fca\u0fcb\u0fcc\u0fcf\u1360\u1390\u1391\u1392\u1393\u1394\u1395\u1396\u1397\u1398\u1399\u1940\u19e0\u19e1\u19e2\u19e3\u19e4\u19e5\u19e6\u19e7\u19e8\u19e9\u19ea\u19eb\u19ec\u19ed\u19ee\u19ef\u19f0\u19f [...]
-
-Zl = u'\u2028'
-
-Zp = u'\u2029'
-
-Zs = u' \xa0\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000'
-
-cats = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs']
-
-def combine(*args):
-    return u''.join([globals()[cat] for cat in args])
-
-xid_start = u'\u0041-\u005A\u005F\u0061-\u007A\u00AA\u00B5\u00BA\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u01BA\u01BB\u01BC-\u01BF\u01C0-\u01C3\u01C4-\u0241\u0250-\u02AF\u02B0-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EE\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03CE\u03D0-\u03F5\u03F7-\u0481\u048A-\u04CE\u04D0-\u04F9\u0500-\u050F\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA\u05F0-\u05F2\u0621-\u063A\u0640\u0641-\u064A\u066E-\u066F\u0671-\u06D3\u06D5\u06E5-\u06E6\u06EE-\u06EF\u06FA-\u06FC\u06FF\u0 [...]
-
-xid_continue = u'\u0030-\u0039\u0041-\u005A\u005F\u0061-\u007A\u00AA\u00B5\u00B7\u00BA\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u01BA\u01BB\u01BC-\u01BF\u01C0-\u01C3\u01C4-\u0241\u0250-\u02AF\u02B0-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EE\u0300-\u036F\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03CE\u03D0-\u03F5\u03F7-\u0481\u0483-\u0486\u048A-\u04CE\u04D0-\u04F9\u0500-\u050F\u0531-\u0556\u0559\u0561-\u0587\u0591-\u05B9\u05BB-\u05BD\u05BF\u05C1-\u05C2\u05C4-\u05C5\u05C7\u05D0-\u05EA\u05F0-\u05F2 [...]
-
-def allexcept(*args):
-    newcats = cats[:]
-    for arg in args:
-        newcats.remove(arg)
-    return u''.join([globals()[cat] for cat in newcats])
+xid_start = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz\xaa\xb5\xba\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff\u0100\u0101\u0102\u0103\u0104\u0105\u0106\u0107\u0108\u0109\u010a\u010b\u010c\u010d\u010e\u010f\u0110\u0111\u0112\u0113\u0114\u0115\u0116\u0117\u0118\u0119\u011a\u01 [...]
+xid_continue = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz\xaa\xb5\xb7\xba\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff\u0100\u0101\u0102\u0103\u0104\u0105\u0106\u0107\u0108\u0109\u010a\u010b\u010c\u010d\u010e\u010f\u0110\u0111\u0112\u0113\u0114\u0115\u0116\u0117\u011 [...]
+# Generated code end
 
 if __name__ == '__main__':
+    import sys
     import unicodedata
 
+    if sys.version_info[0] < 3:
+        raise RuntimeError('This needs to run on python 3')
+
     categories = {}
 
     f = open(__file__.rstrip('co'))
@@ -104,29 +34,38 @@ if __name__ == '__main__':
     finally:
         f.close()
 
-    header = content[:content.find('Cc =')]
-    footer = content[content.find("def combine("):]
+    start = '# Generated code start\n'
+    header = content[:content.find(start) + len(start)] + '\n'
+    footer = content[content.find("# Generated code end\n"):]
 
     for code in range(65535):
-        c = unichr(code)
+        c = chr(code)
         cat = unicodedata.category(c)
         categories.setdefault(cat, []).append(c)
 
+    # from 8.0.0 PropList (Other_ID_Start) + underscore
+    id_start = set(u'_\u2118\u212E\u309B\u309C')
+    for cat in 'Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl':
+        id_start.update(categories[cat])
+
+    # from 8.0.0 PropList (Other_ID_Continue)
+    id_continue = set(id_start)
+    id_continue.update(u'\u00B7\u0387\u1369\u1370\u1371\u19DA')
+    for cat in 'Mn', 'Mc', 'Nd', 'Pc':
+        id_continue.update(categories[cat])
+
+    xid_start = u''.join(sorted(c for c in id_continue if
+                                unicodedata.normalize('NFKC', c)
+                                in id_start))
+    xid_continue = u''.join(sorted(c for c in id_continue if
+                                   unicodedata.normalize('NFKC', c) in
+                                   id_continue))
+
     f = open(__file__, 'w')
     f.write(header)
 
-    for cat in sorted(categories):
-        val = u''.join(categories[cat])
-        if cat == 'Cs':
-            # Jython can't handle isolated surrogates
-            f.write("""\
-try:
-    Cs = eval(r"%r")
-except UnicodeDecodeError:
-    Cs = '' # Jython can't handle isolated surrogates\n\n""" % val)
-        else:
-            f.write('%s = %r\n\n' % (cat, val))
-    f.write('cats = %r\n\n' % sorted(categories.keys()))
+    f.write('xid_start = %s\nxid_continue = %s\n' % (ascii(xid_start),
+                                                     ascii(xid_continue)))
 
     f.write(footer)
     f.close()
diff --git a/lib/jinja2/bccache.py b/lib/jinja2/bccache.py
index f5bd314..d687d03 100644
--- a/lib/jinja2/bccache.py
+++ b/lib/jinja2/bccache.py
@@ -11,7 +11,7 @@
     Situations where this is useful are often forking web applications that
     are initialized on the first request.
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD.
 """
 from os import path, listdir
@@ -45,7 +45,7 @@ else:
         return marshal.loads(f.read())
 
 
-bc_version = 2
+bc_version = 3
 
 # magic version used to only change with new jinja versions.  With 2.6
 # we change this to also take Python version changes into account.  The
diff --git a/lib/jinja2/compiler.py b/lib/jinja2/compiler.py
index fad007b..b2ab6fe 100644
--- a/lib/jinja2/compiler.py
+++ b/lib/jinja2/compiler.py
@@ -5,19 +5,23 @@
 
     Compiles nodes into python code.
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD, see LICENSE for more details.
 """
 from itertools import chain
 from copy import deepcopy
 from keyword import iskeyword as is_python_keyword
+from functools import update_wrapper
 from jinja2 import nodes
 from jinja2.nodes import EvalContext
 from jinja2.visitor import NodeVisitor
+from jinja2.optimizer import Optimizer
 from jinja2.exceptions import TemplateAssertionError
 from jinja2.utils import Markup, concat, escape
 from jinja2._compat import range_type, text_type, string_types, \
-     iteritems, NativeStringIO, imap
+     iteritems, NativeStringIO, imap, izip
+from jinja2.idtracking import Symbols, VAR_LOAD_PARAMETER, \
+     VAR_LOAD_RESOLVE, VAR_LOAD_ALIAS, VAR_LOAD_UNDEFINED
 
 
 operators = {
@@ -38,27 +42,43 @@ if hasattr(dict, 'iteritems'):
 else:
     dict_item_iter = 'items'
 
+code_features = ['division']
 
-# does if 0: dummy(x) get us x into the scope?
-def unoptimize_before_dead_code():
-    x = 42
-    def f():
-        if 0: dummy(x)
-    return f
+# does this python version support generator stops? (PEP 0479)
+try:
+    exec('from __future__ import generator_stop')
+    code_features.append('generator_stop')
+except SyntaxError:
+    pass
 
-# The getattr is necessary for pypy which does not set this attribute if
-# no closure is on the function
-unoptimize_before_dead_code = bool(
-    getattr(unoptimize_before_dead_code(), '__closure__', None))
+# does this python version support yield from?
+try:
+    exec('def f(): yield from x()')
+except SyntaxError:
+    supports_yield_from = False
+else:
+    supports_yield_from = True
+
+
+def optimizeconst(f):
+    def new_func(self, node, frame, **kwargs):
+        # Only optimize if the frame is not volatile
+        if self.optimized and not frame.eval_ctx.volatile:
+            new_node = self.optimizer.visit(node, frame.eval_ctx)
+            if new_node != node:
+                return self.visit(new_node, frame)
+        return f(self, node, frame, **kwargs)
+    return update_wrapper(new_func, f)
 
 
 def generate(node, environment, name, filename, stream=None,
-             defer_init=False):
+             defer_init=False, optimized=True):
     """Generate the python source for a node tree."""
     if not isinstance(node, nodes.Template):
         raise TypeError('Can\'t compile non template nodes')
     generator = environment.code_generator_class(environment, name, filename,
-                                                 stream, defer_init)
+                                                 stream, defer_init,
+                                                 optimized)
     generator.visit(node)
     if stream is None:
         return generator.stream.getvalue()
@@ -68,15 +88,14 @@ def has_safe_repr(value):
     """Does the node have a safe representation?"""
     if value is None or value is NotImplemented or value is Ellipsis:
         return True
-    if isinstance(value, (bool, int, float, complex, range_type,
-            Markup) + string_types):
+    if type(value) in (bool, int, float, complex, range_type, Markup) + string_types:
         return True
-    if isinstance(value, (tuple, list, set, frozenset)):
+    if type(value) in (tuple, list, set, frozenset):
         for item in value:
             if not has_safe_repr(item):
                 return False
         return True
-    elif isinstance(value, dict):
+    elif type(value) is dict:
         for key, value in iteritems(value):
             if not has_safe_repr(key):
                 return False
@@ -99,41 +118,13 @@ def find_undeclared(nodes, names):
     return visitor.undeclared
 
 
-class Identifiers(object):
-    """Tracks the status of identifiers in frames."""
-
-    def __init__(self):
-        # variables that are known to be declared (probably from outer
-        # frames or because they are special for the frame)
-        self.declared = set()
-
-        # undeclared variables from outer scopes
-        self.outer_undeclared = set()
-
-        # names that are accessed without being explicitly declared by
-        # this one or any of the outer scopes.  Names can appear both in
-        # declared and undeclared.
-        self.undeclared = set()
-
-        # names that are declared locally
-        self.declared_locally = set()
-
-        # names that are declared by parameters
-        self.declared_parameter = set()
-
-    def add_special(self, name):
-        """Register a special name like `loop`."""
-        self.undeclared.discard(name)
-        self.declared.add(name)
-
-    def is_declared(self, name):
-        """Check if a name is declared in this or an outer scope."""
-        if name in self.declared_locally or name in self.declared_parameter:
-            return True
-        return name in self.declared
+class MacroRef(object):
 
-    def copy(self):
-        return deepcopy(self)
+    def __init__(self, node):
+        self.node = node
+        self.accesses_caller = False
+        self.accesses_kwargs = False
+        self.accesses_varargs = False
 
 
 class Frame(object):
@@ -141,7 +132,7 @@ class Frame(object):
 
     def __init__(self, eval_ctx, parent=None):
         self.eval_ctx = eval_ctx
-        self.identifiers = Identifiers()
+        self.symbols = Symbols(parent and parent.symbols or None)
 
         # a toplevel frame is the root + soft frames such as if conditions.
         self.toplevel = False
@@ -164,50 +155,19 @@ class Frame(object):
         # the name of the block we're in, otherwise None.
         self.block = parent and parent.block or None
 
-        # a set of actually assigned names
-        self.assigned_names = set()
-
         # the parent of this frame
         self.parent = parent
 
         if parent is not None:
-            self.identifiers.declared.update(
-                parent.identifiers.declared |
-                parent.identifiers.declared_parameter |
-                parent.assigned_names
-            )
-            self.identifiers.outer_undeclared.update(
-                parent.identifiers.undeclared -
-                self.identifiers.declared
-            )
             self.buffer = parent.buffer
 
     def copy(self):
         """Create a copy of the current one."""
         rv = object.__new__(self.__class__)
         rv.__dict__.update(self.__dict__)
-        rv.identifiers = object.__new__(self.identifiers.__class__)
-        rv.identifiers.__dict__.update(self.identifiers.__dict__)
+        rv.symbols = self.symbols.copy()
         return rv
 
-    def inspect(self, nodes):
-        """Walk the node and check for identifiers.  If the scope is hard (eg:
-        enforce on a python level) overrides from outer scopes are tracked
-        differently.
-        """
-        visitor = FrameIdentifierVisitor(self.identifiers)
-        for node in nodes:
-            visitor.visit(node)
-
-    def find_shadowed(self, extra=()):
-        """Find all the shadowed names.  extra is an iterable of variables
-        that may be defined with `add_special` which may occour scoped.
-        """
-        i = self.identifiers
-        return (i.declared | i.outer_undeclared) & \
-               (i.declared_locally | i.declared_parameter) | \
-               set(x for x in extra if i.is_declared(x))
-
     def inner(self):
         """Return an inner frame."""
         return Frame(self.eval_ctx, self)
@@ -216,6 +176,8 @@ class Frame(object):
         """Return a soft frame.  A soft frame may not be modified as
         standalone thing as it shares the resources with the frame it
         was created of, but it's not a rootlevel frame any longer.
+
+        This is only used to implement if-statements.
         """
         rv = self.copy()
         rv.rootlevel = False
@@ -269,95 +231,6 @@ class UndeclaredNameVisitor(NodeVisitor):
         """Stop visiting a blocks."""
 
 
-class FrameIdentifierVisitor(NodeVisitor):
-    """A visitor for `Frame.inspect`."""
-
-    def __init__(self, identifiers):
-        self.identifiers = identifiers
-
-    def visit_Name(self, node):
-        """All assignments to names go through this function."""
-        if node.ctx == 'store':
-            self.identifiers.declared_locally.add(node.name)
-        elif node.ctx == 'param':
-            self.identifiers.declared_parameter.add(node.name)
-        elif node.ctx == 'load' and not \
-             self.identifiers.is_declared(node.name):
-            self.identifiers.undeclared.add(node.name)
-
-    def visit_If(self, node):
-        self.visit(node.test)
-        real_identifiers = self.identifiers
-
-        old_names = real_identifiers.declared_locally | \
-                    real_identifiers.declared_parameter
-
-        def inner_visit(nodes):
-            if not nodes:
-                return set()
-            self.identifiers = real_identifiers.copy()
-            for subnode in nodes:
-                self.visit(subnode)
-            rv = self.identifiers.declared_locally - old_names
-            # we have to remember the undeclared variables of this branch
-            # because we will have to pull them.
-            real_identifiers.undeclared.update(self.identifiers.undeclared)
-            self.identifiers = real_identifiers
-            return rv
-
-        body = inner_visit(node.body)
-        else_ = inner_visit(node.else_ or ())
-
-        # the differences between the two branches are also pulled as
-        # undeclared variables
-        real_identifiers.undeclared.update(body.symmetric_difference(else_) -
-                                           real_identifiers.declared)
-
-        # remember those that are declared.
-        real_identifiers.declared_locally.update(body | else_)
-
-    def visit_Macro(self, node):
-        self.identifiers.declared_locally.add(node.name)
-
-    def visit_Import(self, node):
-        self.generic_visit(node)
-        self.identifiers.declared_locally.add(node.target)
-
-    def visit_FromImport(self, node):
-        self.generic_visit(node)
-        for name in node.names:
-            if isinstance(name, tuple):
-                self.identifiers.declared_locally.add(name[1])
-            else:
-                self.identifiers.declared_locally.add(name)
-
-    def visit_Assign(self, node):
-        """Visit assignments in the correct order."""
-        self.visit(node.node)
-        self.visit(node.target)
-
-    def visit_For(self, node):
-        """Visiting stops at for blocks.  However the block sequence
-        is visited as part of the outer scope.
-        """
-        self.visit(node.iter)
-
-    def visit_CallBlock(self, node):
-        self.visit(node.call)
-
-    def visit_FilterBlock(self, node):
-        self.visit(node.filter)
-
-    def visit_AssignBlock(self, node):
-        """Stop visiting at block assigns."""
-
-    def visit_Scope(self, node):
-        """Stop visiting at scopes."""
-
-    def visit_Block(self, node):
-        """Stop visiting at blocks."""
-
-
 class CompilerExit(Exception):
     """Raised if the compiler encountered a situation where it just
     doesn't make sense to further process the code.  Any block that
@@ -368,7 +241,7 @@ class CompilerExit(Exception):
 class CodeGenerator(NodeVisitor):
 
     def __init__(self, environment, name, filename, stream=None,
-                 defer_init=False):
+                 defer_init=False, optimized=True):
         if stream is None:
             stream = NativeStringIO()
         self.environment = environment
@@ -377,6 +250,9 @@ class CodeGenerator(NodeVisitor):
         self.stream = stream
         self.created_block_context = False
         self.defer_init = defer_init
+        self.optimized = optimized
+        if optimized:
+            self.optimizer = Optimizer(environment)
 
         # aliases for imports
         self.import_aliases = {}
@@ -420,6 +296,12 @@ class CodeGenerator(NodeVisitor):
         # the current indentation
         self._indentation = 0
 
+        # Tracks toplevel assignments
+        self._assign_stack = []
+
+        # Tracks parameter definition blocks
+        self._param_def_block = []
+
     # -- Various compilation helpers
 
     def fail(self, msg, lineno):
@@ -436,21 +318,23 @@ class CodeGenerator(NodeVisitor):
         frame.buffer = self.temporary_identifier()
         self.writeline('%s = []' % frame.buffer)
 
-    def return_buffer_contents(self, frame):
+    def return_buffer_contents(self, frame, force_unescaped=False):
         """Return the buffer contents of the frame."""
-        if frame.eval_ctx.volatile:
-            self.writeline('if context.eval_ctx.autoescape:')
-            self.indent()
-            self.writeline('return Markup(concat(%s))' % frame.buffer)
-            self.outdent()
-            self.writeline('else:')
-            self.indent()
-            self.writeline('return concat(%s)' % frame.buffer)
-            self.outdent()
-        elif frame.eval_ctx.autoescape:
-            self.writeline('return Markup(concat(%s))' % frame.buffer)
-        else:
-            self.writeline('return concat(%s)' % frame.buffer)
+        if not force_unescaped:
+            if frame.eval_ctx.volatile:
+                self.writeline('if context.eval_ctx.autoescape:')
+                self.indent()
+                self.writeline('return Markup(concat(%s))' % frame.buffer)
+                self.outdent()
+                self.writeline('else:')
+                self.indent()
+                self.writeline('return concat(%s)' % frame.buffer)
+                self.outdent()
+                return
+            elif frame.eval_ctx.autoescape:
+                self.writeline('return Markup(concat(%s))' % frame.buffer)
+                return
+        self.writeline('return concat(%s)' % frame.buffer)
 
     def indent(self):
         """Indent by one."""
@@ -480,14 +364,10 @@ class CodeGenerator(NodeVisitor):
 
     def blockvisit(self, nodes, frame):
         """Visit a list of nodes as block in a frame.  If the current frame
-        is no buffer a dummy ``if 0: yield None`` is written automatically
-        unless the force_generator parameter is set to False.
+        is no buffer a dummy ``if 0: yield None`` is written automatically.
         """
-        if frame.buffer is None:
-            self.writeline('if 0: yield None')
-        else:
-            self.writeline('pass')
         try:
+            self.writeline('pass')
             for node in nodes:
                 self.visit(node, frame)
         except CompilerExit:
@@ -573,11 +453,6 @@ class CodeGenerator(NodeVisitor):
             self.write(', **')
             self.visit(node.dyn_kwargs, frame)
 
-    def pull_locals(self, frame):
-        """Pull all the references identifiers into the local scope."""
-        for name in frame.identifiers.undeclared:
-            self.writeline('l_%s = context.resolve(%r)' % (name, name))
-
     def pull_dependencies(self, nodes):
         """Pull all the dependencies."""
         visitor = DependencyFinderVisitor()
@@ -591,163 +466,123 @@ class CodeGenerator(NodeVisitor):
                 self.writeline('%s = environment.%s[%r]' %
                                (mapping[name], dependency, name))
 
-    def unoptimize_scope(self, frame):
-        """Disable Python optimizations for the frame."""
-        # XXX: this is not that nice but it has no real overhead.  It
-        # mainly works because python finds the locals before dead code
-        # is removed.  If that breaks we have to add a dummy function
-        # that just accepts the arguments and does nothing.
-        if frame.identifiers.declared:
-            self.writeline('%sdummy(%s)' % (
-                unoptimize_before_dead_code and 'if 0: ' or '',
-                ', '.join('l_' + name for name in frame.identifiers.declared)
-            ))
-
-    def push_scope(self, frame, extra_vars=()):
-        """This function returns all the shadowed variables in a dict
-        in the form name: alias and will write the required assignments
-        into the current scope.  No indentation takes place.
-
-        This also predefines locally declared variables from the loop
-        body because under some circumstances it may be the case that
-
-        `extra_vars` is passed to `Frame.find_shadowed`.
-        """
-        aliases = {}
-        for name in frame.find_shadowed(extra_vars):
-            aliases[name] = ident = self.temporary_identifier()
-            self.writeline('%s = l_%s' % (ident, name))
-        to_declare = set()
-        for name in frame.identifiers.declared_locally:
-            if name not in aliases:
-                to_declare.add('l_' + name)
-        if to_declare:
-            self.writeline(' = '.join(to_declare) + ' = missing')
-        return aliases
-
-    def pop_scope(self, aliases, frame):
-        """Restore all aliases and delete unused variables."""
-        for name, alias in iteritems(aliases):
-            self.writeline('l_%s = %s' % (name, alias))
-        to_delete = set()
-        for name in frame.identifiers.declared_locally:
-            if name not in aliases:
-                to_delete.add('l_' + name)
-        if to_delete:
-            # we cannot use the del statement here because enclosed
-            # scopes can trigger a SyntaxError:
-            #   a = 42; b = lambda: a; del a
-            self.writeline(' = '.join(to_delete) + ' = missing')
-
-    def function_scoping(self, node, frame, children=None,
-                         find_special=True):
-        """In Jinja a few statements require the help of anonymous
-        functions.  Those are currently macros and call blocks and in
-        the future also recursive loops.  As there is currently
-        technical limitation that doesn't allow reading and writing a
-        variable in a scope where the initial value is coming from an
-        outer scope, this function tries to fall back with a common
-        error message.  Additionally the frame passed is modified so
-        that the argumetns are collected and callers are looked up.
-
-        This will return the modified frame.
-        """
-        # we have to iterate twice over it, make sure that works
-        if children is None:
-            children = node.iter_child_nodes()
-        children = list(children)
-        func_frame = frame.inner()
-        func_frame.inspect(children)
-
-        # variables that are undeclared (accessed before declaration) and
-        # declared locally *and* part of an outside scope raise a template
-        # assertion error. Reason: we can't generate reasonable code from
-        # it without aliasing all the variables.
-        # this could be fixed in Python 3 where we have the nonlocal
-        # keyword or if we switch to bytecode generation
-        overridden_closure_vars = (
-            func_frame.identifiers.undeclared &
-            func_frame.identifiers.declared &
-            (func_frame.identifiers.declared_locally |
-             func_frame.identifiers.declared_parameter)
-        )
-        if overridden_closure_vars:
-            self.fail('It\'s not possible to set and access variables '
-                      'derived from an outer scope! (affects: %s)' %
-                      ', '.join(sorted(overridden_closure_vars)), node.lineno)
-
-        # remove variables from a closure from the frame's undeclared
-        # identifiers.
-        func_frame.identifiers.undeclared -= (
-            func_frame.identifiers.undeclared &
-            func_frame.identifiers.declared
-        )
-
-        # no special variables for this scope, abort early
-        if not find_special:
-            return func_frame
-
-        func_frame.accesses_kwargs = False
-        func_frame.accesses_varargs = False
-        func_frame.accesses_caller = False
-        func_frame.arguments = args = ['l_' + x.name for x in node.args]
-
-        undeclared = find_undeclared(children, ('caller', 'kwargs', 'varargs'))
+    def enter_frame(self, frame):
+        undefs = []
+        for target, (action, param) in iteritems(frame.symbols.loads):
+            if action == VAR_LOAD_PARAMETER:
+                pass
+            elif action == VAR_LOAD_RESOLVE:
+                self.writeline('%s = resolve(%r)' %
+                               (target, param))
+            elif action == VAR_LOAD_ALIAS:
+                self.writeline('%s = %s' % (target, param))
+            elif action == VAR_LOAD_UNDEFINED:
+                undefs.append(target)
+            else:
+                raise NotImplementedError('unknown load instruction')
+        if undefs:
+            self.writeline('%s = missing' % ' = '.join(undefs))
+
+    def leave_frame(self, frame, with_python_scope=False):
+        if not with_python_scope:
+            undefs = []
+            for target, _ in iteritems(frame.symbols.loads):
+                undefs.append(target)
+            if undefs:
+                self.writeline('%s = missing' % ' = '.join(undefs))
+
+    def func(self, name):
+        if self.environment.is_async:
+            return 'async def %s' % name
+        return 'def %s' % name
+
+    def macro_body(self, node, frame):
+        """Dump the function def of a macro or call block."""
+        frame = frame.inner()
+        frame.symbols.analyze_node(node)
+        macro_ref = MacroRef(node)
+
+        explicit_caller = None
+        skip_special_params = set()
+        args = []
+        for idx, arg in enumerate(node.args):
+            if arg.name == 'caller':
+                explicit_caller = idx
+            if arg.name in ('kwargs', 'varargs'):
+                skip_special_params.add(arg.name)
+            args.append(frame.symbols.ref(arg.name))
+
+        undeclared = find_undeclared(node.body, ('caller', 'kwargs', 'varargs'))
 
         if 'caller' in undeclared:
-            func_frame.accesses_caller = True
-            func_frame.identifiers.add_special('caller')
-            args.append('l_caller')
-        if 'kwargs' in undeclared:
-            func_frame.accesses_kwargs = True
-            func_frame.identifiers.add_special('kwargs')
-            args.append('l_kwargs')
-        if 'varargs' in undeclared:
-            func_frame.accesses_varargs = True
-            func_frame.identifiers.add_special('varargs')
-            args.append('l_varargs')
-        return func_frame
-
-    def macro_body(self, node, frame, children=None):
-        """Dump the function def of a macro or call block."""
-        frame = self.function_scoping(node, frame, children)
+            # In older Jinja2 versions there was a bug that allowed caller
+            # to retain the special behavior even if it was mentioned in
+            # the argument list.  However thankfully this was only really
+            # working if it was the last argument.  So we are explicitly
+            # checking this now and error out if it is anywhere else in
+            # the argument list.
+            if explicit_caller is not None:
+                try:
+                    node.defaults[explicit_caller - len(node.args)]
+                except IndexError:
+                    self.fail('When defining macros or call blocks the '
+                              'special "caller" argument must be omitted '
+                              'or be given a default.', node.lineno)
+            else:
+                args.append(frame.symbols.declare_parameter('caller'))
+            macro_ref.accesses_caller = True
+        if 'kwargs' in undeclared and not 'kwargs' in skip_special_params:
+            args.append(frame.symbols.declare_parameter('kwargs'))
+            macro_ref.accesses_kwargs = True
+        if 'varargs' in undeclared and not 'varargs' in skip_special_params:
+            args.append(frame.symbols.declare_parameter('varargs'))
+            macro_ref.accesses_varargs = True
+
         # macros are delayed, they never require output checks
         frame.require_output_check = False
-        args = frame.arguments
-        # XXX: this is an ugly fix for the loop nesting bug
-        # (tests.test_old_bugs.test_loop_call_bug).  This works around
-        # a identifier nesting problem we have in general.  It's just more
-        # likely to happen in loops which is why we work around it.  The
-        # real solution would be "nonlocal" all the identifiers that are
-        # leaking into a new python frame and might be used both unassigned
-        # and assigned.
-        if 'loop' in frame.identifiers.declared:
-            args = args + ['l_loop=l_loop']
-        self.writeline('def macro(%s):' % ', '.join(args), node)
+        frame.symbols.analyze_node(node)
+        self.writeline('%s(%s):' % (self.func('macro'), ', '.join(args)), node)
         self.indent()
+
         self.buffer(frame)
-        self.pull_locals(frame)
+        self.enter_frame(frame)
+
+        self.push_parameter_definitions(frame)
+        for idx, arg in enumerate(node.args):
+            ref = frame.symbols.ref(arg.name)
+            self.writeline('if %s is missing:' % ref)
+            self.indent()
+            try:
+                default = node.defaults[idx - len(node.args)]
+            except IndexError:
+                self.writeline('%s = undefined(%r, name=%r)' % (
+                    ref,
+                    'parameter %r was not provided' % arg.name,
+                    arg.name))
+            else:
+                self.writeline('%s = ' % ref)
+                self.visit(default, frame)
+            self.mark_parameter_stored(ref)
+            self.outdent()
+        self.pop_parameter_definitions()
+
         self.blockvisit(node.body, frame)
-        self.return_buffer_contents(frame)
+        self.return_buffer_contents(frame, force_unescaped=True)
+        self.leave_frame(frame, with_python_scope=True)
         self.outdent()
-        return frame
 
-    def macro_def(self, node, frame):
+        return frame, macro_ref
+
+    def macro_def(self, macro_ref, frame):
         """Dump the macro definition for the def created by macro_body."""
-        arg_tuple = ', '.join(repr(x.name) for x in node.args)
-        name = getattr(node, 'name', None)
-        if len(node.args) == 1:
+        arg_tuple = ', '.join(repr(x.name) for x in macro_ref.node.args)
+        name = getattr(macro_ref.node, 'name', None)
+        if len(macro_ref.node.args) == 1:
             arg_tuple += ','
-        self.write('Macro(environment, macro, %r, (%s), (' %
-                   (name, arg_tuple))
-        for arg in node.defaults:
-            self.visit(arg, frame)
-            self.write(', ')
-        self.write('), %r, %r, %r)' % (
-            bool(frame.accesses_kwargs),
-            bool(frame.accesses_varargs),
-            bool(frame.accesses_caller)
-        ))
+        self.write('Macro(environment, macro, %r, (%s), %r, %r, %r, '
+                   'context.eval_ctx.autoescape)' %
+                   (name, arg_tuple, macro_ref.accesses_kwargs,
+                    macro_ref.accesses_varargs, macro_ref.accesses_caller))
 
     def position(self, node):
         """Return a human readable position for the node."""
@@ -756,6 +591,78 @@ class CodeGenerator(NodeVisitor):
             rv += ' in ' + repr(self.name)
         return rv
 
+    def dump_local_context(self, frame):
+        return '{%s}' % ', '.join(
+            '%r: %s' % (name, target) for name, target
+            in iteritems(frame.symbols.dump_stores()))
+
+    def write_commons(self):
+        """Writes a common preamble that is used by root and block functions.
+        Primarily this sets up common local helpers and enforces a generator
+        through a dead branch.
+        """
+        self.writeline('resolve = context.resolve_or_missing')
+        self.writeline('undefined = environment.undefined')
+        self.writeline('if 0: yield None')
+
+    def push_parameter_definitions(self, frame):
+        """Pushes all parameter targets from the given frame into a local
+        stack that permits tracking of yet to be assigned parameters.  In
+        particular this enables the optimization from `visit_Name` to skip
+        undefined expressions for parameters in macros as macros can reference
+        otherwise unbound parameters.
+        """
+        self._param_def_block.append(frame.symbols.dump_param_targets())
+
+    def pop_parameter_definitions(self):
+        """Pops the current parameter definitions set."""
+        self._param_def_block.pop()
+
+    def mark_parameter_stored(self, target):
+        """Marks a parameter in the current parameter definitions as stored.
+        This will skip the enforced undefined checks.
+        """
+        if self._param_def_block:
+            self._param_def_block[-1].discard(target)
+
+    def parameter_is_undeclared(self, target):
+        """Checks if a given target is an undeclared parameter."""
+        if not self._param_def_block:
+            return False
+        return target in self._param_def_block[-1]
+
+    def push_assign_tracking(self):
+        """Pushes a new layer for assignment tracking."""
+        self._assign_stack.append(set())
+
+    def pop_assign_tracking(self, frame):
+        """Pops the topmost level for assignment tracking and updates the
+        context variables if necessary.
+        """
+        vars = self._assign_stack.pop()
+        if not frame.toplevel or not vars:
+            return
+        public_names = [x for x in vars if x[:1] != '_']
+        if len(vars) == 1:
+            name = next(iter(vars))
+            ref = frame.symbols.ref(name)
+            self.writeline('context.vars[%r] = %s' % (name, ref))
+        else:
+            self.writeline('context.vars.update({')
+            for idx, name in enumerate(vars):
+                if idx:
+                    self.write(', ')
+                ref = frame.symbols.ref(name)
+                self.write('%r: %s' % (name, ref))
+            self.write('})')
+        if public_names:
+            if len(public_names) == 1:
+                self.writeline('context.exported_vars.add(%r)' %
+                               public_names[0])
+            else:
+                self.writeline('context.exported_vars.update((%s))' %
+                               ', '.join(imap(repr, public_names)))
+
     # -- Statement Visitors
 
     def visit_Template(self, node, frame=None):
@@ -763,10 +670,12 @@ class CodeGenerator(NodeVisitor):
         eval_ctx = EvalContext(self.environment, self.name)
 
         from jinja2.runtime import __all__ as exported
-        self.writeline('from __future__ import division')
+        self.writeline('from __future__ import %s' % ', '.join(code_features))
         self.writeline('from jinja2.runtime import ' + ', '.join(exported))
-        if not unoptimize_before_dead_code:
-            self.writeline('dummy = lambda *x: None')
+
+        if self.environment.is_async:
+            self.writeline('from jinja2.asyncsupport import auto_await, '
+                           'auto_aiter, make_async_loop_context')
 
         # if we want a deferred initialization we cannot move the
         # environment into a local name
@@ -798,22 +707,25 @@ class CodeGenerator(NodeVisitor):
         self.writeline('name = %r' % self.name)
 
         # generate the root render function.
-        self.writeline('def root(context%s):' % envenv, extra=1)
+        self.writeline('%s(context, missing=missing%s):' %
+                       (self.func('root'), envenv), extra=1)
+        self.indent()
+        self.write_commons()
 
         # process the root
         frame = Frame(eval_ctx)
-        frame.inspect(node.body)
+        if 'self' in find_undeclared(node.body, ('self',)):
+            ref = frame.symbols.declare_parameter('self')
+            self.writeline('%s = TemplateReference(context)' % ref)
+        frame.symbols.analyze_node(node)
         frame.toplevel = frame.rootlevel = True
         frame.require_output_check = have_extends and not self.has_known_extends
-        self.indent()
         if have_extends:
             self.writeline('parent_template = None')
-        if 'self' in find_undeclared(node.body, ('self',)):
-            frame.identifiers.add_special('self')
-            self.writeline('l_self = TemplateReference(context)')
-        self.pull_locals(frame)
+        self.enter_frame(frame)
         self.pull_dependencies(node.body)
         self.blockvisit(node.body, frame)
+        self.leave_frame(frame, with_python_scope=True)
         self.outdent()
 
         # make sure that the parent root is called.
@@ -822,31 +734,43 @@ class CodeGenerator(NodeVisitor):
                 self.indent()
                 self.writeline('if parent_template is not None:')
             self.indent()
-            self.writeline('for event in parent_template.'
-                           'root_render_func(context):')
-            self.indent()
-            self.writeline('yield event')
-            self.outdent(2 + (not self.has_known_extends))
+            if supports_yield_from and not self.environment.is_async:
+                self.writeline('yield from parent_template.'
+                               'root_render_func(context)')
+            else:
+                self.writeline('%sfor event in parent_template.'
+                               'root_render_func(context):' %
+                               (self.environment.is_async and 'async ' or ''))
+                self.indent()
+                self.writeline('yield event')
+                self.outdent()
+            self.outdent(1 + (not self.has_known_extends))
 
         # at this point we now have the blocks collected and can visit them too.
         for name, block in iteritems(self.blocks):
-            block_frame = Frame(eval_ctx)
-            block_frame.inspect(block.body)
-            block_frame.block = name
-            self.writeline('def block_%s(context%s):' % (name, envenv),
+            self.writeline('%s(context, missing=missing%s):' %
+                           (self.func('block_' + name), envenv),
                            block, 1)
             self.indent()
+            self.write_commons()
+            # It's important that we do not make this frame a child of the
+            # toplevel template.  This would cause a variety of
+            # interesting issues with identifier tracking.
+            block_frame = Frame(eval_ctx)
             undeclared = find_undeclared(block.body, ('self', 'super'))
             if 'self' in undeclared:
-                block_frame.identifiers.add_special('self')
-                self.writeline('l_self = TemplateReference(context)')
+                ref = block_frame.symbols.declare_parameter('self')
+                self.writeline('%s = TemplateReference(context)' % ref)
             if 'super' in undeclared:
-                block_frame.identifiers.add_special('super')
-                self.writeline('l_super = context.super(%r, '
-                               'block_%s)' % (name, name))
-            self.pull_locals(block_frame)
+                ref = block_frame.symbols.declare_parameter('super')
+                self.writeline('%s = context.super(%r, '
+                               'block_%s)' % (ref, name, name))
+            block_frame.symbols.analyze_node(block)
+            block_frame.block = name
+            self.enter_frame(block_frame)
             self.pull_dependencies(block.body)
             self.blockvisit(block.body, block_frame)
+            self.leave_frame(block_frame, with_python_scope=True)
             self.outdent()
 
         self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
@@ -859,7 +783,7 @@ class CodeGenerator(NodeVisitor):
 
     def visit_Block(self, node, frame):
         """Call a block and register it for the template."""
-        level = 1
+        level = 0
         if frame.toplevel:
             # if we know that we are a child template, there is no need to
             # check if we are one
@@ -869,11 +793,21 @@ class CodeGenerator(NodeVisitor):
                 self.writeline('if parent_template is None:')
                 self.indent()
                 level += 1
-        context = node.scoped and 'context.derived(locals())' or 'context'
-        self.writeline('for event in context.blocks[%r][0](%s):' % (
-                       node.name, context), node)
-        self.indent()
-        self.simple_write('event', frame)
+        context = node.scoped and (
+            'context.derived(%s)' % self.dump_local_context(frame)) or 'context'
+
+        if supports_yield_from and not self.environment.is_async and \
+           frame.buffer is None:
+            self.writeline('yield from context.blocks[%r][0](%s)' % (
+                           node.name, context), node)
+        else:
+            loop = self.environment.is_async and 'async for' or 'for'
+            self.writeline('%s event in context.blocks[%r][0](%s):' % (
+                           loop, node.name, context), node)
+            self.indent()
+            self.simple_write('event', frame)
+            self.outdent()
+
         self.outdent(level)
 
     def visit_Extends(self, node, frame):
@@ -925,8 +859,6 @@ class CodeGenerator(NodeVisitor):
 
     def visit_Include(self, node, frame):
         """Handles includes."""
-        if node.with_context:
-            self.unoptimize_scope(frame)
         if node.ignore_missing:
             self.writeline('try:')
             self.indent()
@@ -952,48 +884,69 @@ class CodeGenerator(NodeVisitor):
             self.writeline('else:')
             self.indent()
 
+        skip_event_yield = False
         if node.with_context:
-            self.writeline('for event in template.root_render_func('
-                           'template.new_context(context.parent, True, '
-                           'locals())):')
+            loop = self.environment.is_async and 'async for' or 'for'
+            self.writeline('%s event in template.root_render_func('
+                           'template.new_context(context.get_all(), True, '
+                           '%s)):' % (loop, self.dump_local_context(frame)))
+        elif self.environment.is_async:
+            self.writeline('for event in (await '
+                           'template._get_default_module_async())'
+                           '._body_stream:')
         else:
-            self.writeline('for event in template.module._body_stream:')
+            if supports_yield_from:
+                self.writeline('yield from template._get_default_module()'
+                               '._body_stream')
+                skip_event_yield = True
+            else:
+                self.writeline('for event in template._get_default_module()'
+                               '._body_stream:')
 
-        self.indent()
-        self.simple_write('event', frame)
-        self.outdent()
+        if not skip_event_yield:
+            self.indent()
+            self.simple_write('event', frame)
+            self.outdent()
 
         if node.ignore_missing:
             self.outdent()
 
     def visit_Import(self, node, frame):
         """Visit regular imports."""
-        if node.with_context:
-            self.unoptimize_scope(frame)
-        self.writeline('l_%s = ' % node.target, node)
+        self.writeline('%s = ' % frame.symbols.ref(node.target), node)
         if frame.toplevel:
             self.write('context.vars[%r] = ' % node.target)
+        if self.environment.is_async:
+            self.write('await ')
         self.write('environment.get_template(')
         self.visit(node.template, frame)
         self.write(', %r).' % self.name)
         if node.with_context:
-            self.write('make_module(context.parent, True, locals())')
+            self.write('make_module%s(context.get_all(), True, %s)'
+                       % (self.environment.is_async and '_async' or '',
+                          self.dump_local_context(frame)))
+        elif self.environment.is_async:
+            self.write('_get_default_module_async()')
         else:
-            self.write('module')
+            self.write('_get_default_module()')
         if frame.toplevel and not node.target.startswith('_'):
             self.writeline('context.exported_vars.discard(%r)' % node.target)
-        frame.assigned_names.add(node.target)
 
     def visit_FromImport(self, node, frame):
         """Visit named imports."""
         self.newline(node)
-        self.write('included_template = environment.get_template(')
+        self.write('included_template = %senvironment.get_template('
+                   % (self.environment.is_async and 'await ' or ''))
         self.visit(node.template, frame)
         self.write(', %r).' % self.name)
         if node.with_context:
-            self.write('make_module(context.parent, True)')
+            self.write('make_module%s(context.get_all(), True, %s)'
+                       % (self.environment.is_async and '_async' or '',
+                          self.dump_local_context(frame)))
+        elif self.environment.is_async:
+            self.write('_get_default_module_async()')
         else:
-            self.write('module')
+            self.write('_get_default_module()')
 
         var_names = []
         discarded_names = []
@@ -1002,15 +955,16 @@ class CodeGenerator(NodeVisitor):
                 name, alias = name
             else:
                 alias = name
-            self.writeline('l_%s = getattr(included_template, '
-                           '%r, missing)' % (alias, name))
-            self.writeline('if l_%s is missing:' % alias)
+            self.writeline('%s = getattr(included_template, '
+                           '%r, missing)' % (frame.symbols.ref(alias), name))
+            self.writeline('if %s is missing:' % frame.symbols.ref(alias))
             self.indent()
-            self.writeline('l_%s = environment.undefined(%r %% '
+            self.writeline('%s = undefined(%r %% '
                            'included_template.__name__, '
                            'name=%r)' %
-                           (alias, 'the template %%r (imported on %s) does '
-                           'not export the requested name %s' % (
+                           (frame.symbols.ref(alias),
+                            'the template %%r (imported on %s) does '
+                            'not export the requested name %s' % (
                                 self.position(node),
                                 repr(name)
                            ), name))
@@ -1019,15 +973,15 @@ class CodeGenerator(NodeVisitor):
                 var_names.append(alias)
                 if not alias.startswith('_'):
                     discarded_names.append(alias)
-            frame.assigned_names.add(alias)
 
         if var_names:
             if len(var_names) == 1:
                 name = var_names[0]
-                self.writeline('context.vars[%r] = l_%s' % (name, name))
+                self.writeline('context.vars[%r] = %s' %
+                               (name, frame.symbols.ref(name)))
             else:
                 self.writeline('context.vars.update({%s})' % ', '.join(
-                    '%r: l_%s' % (name, name) for name in var_names
+                    '%r: %s' % (name, frame.symbols.ref(name)) for name in var_names
                 ))
         if discarded_names:
             if len(discarded_names) == 1:
@@ -1038,15 +992,9 @@ class CodeGenerator(NodeVisitor):
                                'update((%s))' % ', '.join(imap(repr, discarded_names)))
 
     def visit_For(self, node, frame):
-        # when calculating the nodes for the inner frame we have to exclude
-        # the iterator contents from it
-        children = node.iter_child_nodes(exclude=('iter',))
-        if node.recursive:
-            loop_frame = self.function_scoping(node, frame, children,
-                                               find_special=False)
-        else:
-            loop_frame = frame.inner()
-            loop_frame.inspect(children)
+        loop_frame = frame.inner()
+        test_frame = frame.inner()
+        else_frame = frame.inner()
 
         # try to figure out if we have an extended loop.  An extended loop
         # is necessary if the loop is in recursive mode if the special loop
@@ -1055,111 +1003,121 @@ class CodeGenerator(NodeVisitor):
                         find_undeclared(node.iter_child_nodes(
                             only=('body',)), ('loop',))
 
+        loop_ref = None
+        if extended_loop:
+            loop_ref = loop_frame.symbols.declare_parameter('loop')
+
+        loop_frame.symbols.analyze_node(node, for_branch='body')
+        if node.else_:
+            else_frame.symbols.analyze_node(node, for_branch='else')
+
+        if node.test:
+            loop_filter_func = self.temporary_identifier()
+            test_frame.symbols.analyze_node(node, for_branch='test')
+            self.writeline('%s(fiter):' % self.func(loop_filter_func), node.test)
+            self.indent()
+            self.enter_frame(test_frame)
+            self.writeline(self.environment.is_async and 'async for ' or 'for ')
+            self.visit(node.target, loop_frame)
+            self.write(' in ')
+            self.write(self.environment.is_async and 'auto_aiter(fiter)' or 'fiter')
+            self.write(':')
+            self.indent()
+            self.writeline('if ', node.test)
+            self.visit(node.test, test_frame)
+            self.write(':')
+            self.indent()
+            self.writeline('yield ')
+            self.visit(node.target, loop_frame)
+            self.outdent(3)
+            self.leave_frame(test_frame, with_python_scope=True)
+
         # if we don't have an recursive loop we have to find the shadowed
         # variables at that point.  Because loops can be nested but the loop
         # variable is a special one we have to enforce aliasing for it.
-        if not node.recursive:
-            aliases = self.push_scope(loop_frame, ('loop',))
-
-        # otherwise we set up a buffer and add a function def
-        else:
-            self.writeline('def loop(reciter, loop_render_func, depth=0):', node)
+        if node.recursive:
+            self.writeline('%s(reciter, loop_render_func, depth=0):' %
+                           self.func('loop'), node)
             self.indent()
             self.buffer(loop_frame)
-            aliases = {}
+
+            # Use the same buffer for the else frame
+            else_frame.buffer = loop_frame.buffer
 
         # make sure the loop variable is a special one and raise a template
         # assertion error if a loop tries to write to loop
         if extended_loop:
-            self.writeline('l_loop = missing')
-            loop_frame.identifiers.add_special('loop')
+            self.writeline('%s = missing' % loop_ref)
+
         for name in node.find_all(nodes.Name):
             if name.ctx == 'store' and name.name == 'loop':
                 self.fail('Can\'t assign to special loop variable '
                           'in for-loop target', name.lineno)
 
-        self.pull_locals(loop_frame)
         if node.else_:
             iteration_indicator = self.temporary_identifier()
             self.writeline('%s = 1' % iteration_indicator)
 
-        # Create a fake parent loop if the else or test section of a
-        # loop is accessing the special loop variable and no parent loop
-        # exists.
-        if 'loop' not in aliases and 'loop' in find_undeclared(
-           node.iter_child_nodes(only=('else_', 'test')), ('loop',)):
-            self.writeline("l_loop = environment.undefined(%r, name='loop')" %
-                ("'loop' is undefined. the filter section of a loop as well "
-                 "as the else block don't have access to the special 'loop'"
-                 " variable of the current loop.  Because there is no parent "
-                 "loop it's undefined.  Happened in loop on %s" %
-                 self.position(node)))
-
-        self.writeline('for ', node)
+        self.writeline(self.environment.is_async and 'async for ' or 'for ', node)
         self.visit(node.target, loop_frame)
-        self.write(extended_loop and ', l_loop in LoopContext(' or ' in ')
-
-        # if we have an extened loop and a node test, we filter in the
-        # "outer frame".
-        if extended_loop and node.test is not None:
-            self.write('(')
-            self.visit(node.target, loop_frame)
-            self.write(' for ')
-            self.visit(node.target, loop_frame)
-            self.write(' in ')
-            if node.recursive:
-                self.write('reciter')
+        if extended_loop:
+            if self.environment.is_async:
+                self.write(', %s in await make_async_loop_context(' % loop_ref)
             else:
-                self.visit(node.iter, loop_frame)
-            self.write(' if (')
-            test_frame = loop_frame.copy()
-            self.visit(node.test, test_frame)
-            self.write('))')
+                self.write(', %s in LoopContext(' % loop_ref)
+        else:
+            self.write(' in ')
 
-        elif node.recursive:
+        if node.test:
+            self.write('%s(' % loop_filter_func)
+        if node.recursive:
             self.write('reciter')
         else:
-            self.visit(node.iter, loop_frame)
+            if self.environment.is_async and not extended_loop:
+                self.write('auto_aiter(')
+            self.visit(node.iter, frame)
+            if self.environment.is_async and not extended_loop:
+                self.write(')')
+        if node.test:
+            self.write(')')
 
         if node.recursive:
             self.write(', loop_render_func, depth):')
         else:
             self.write(extended_loop and '):' or ':')
 
-        # tests in not extended loops become a continue
-        if not extended_loop and node.test is not None:
-            self.indent()
-            self.writeline('if not ')
-            self.visit(node.test, loop_frame)
-            self.write(':')
-            self.indent()
-            self.writeline('continue')
-            self.outdent(2)
-
         self.indent()
+        self.enter_frame(loop_frame)
+
         self.blockvisit(node.body, loop_frame)
         if node.else_:
             self.writeline('%s = 0' % iteration_indicator)
         self.outdent()
+        self.leave_frame(loop_frame, with_python_scope=node.recursive
+                         and not node.else_)
 
         if node.else_:
             self.writeline('if %s:' % iteration_indicator)
             self.indent()
-            self.blockvisit(node.else_, loop_frame)
+            self.enter_frame(else_frame)
+            self.blockvisit(node.else_, else_frame)
+            self.leave_frame(else_frame)
             self.outdent()
 
-        # reset the aliases if there are any.
-        if not node.recursive:
-            self.pop_scope(aliases, loop_frame)
-
         # if the node was recursive we have to return the buffer contents
         # and start the iteration code
         if node.recursive:
             self.return_buffer_contents(loop_frame)
             self.outdent()
             self.start_write(frame, node)
+            if self.environment.is_async:
+                self.write('await ')
             self.write('loop(')
+            if self.environment.is_async:
+                self.write('auto_aiter(')
             self.visit(node.iter, frame)
+            if self.environment.is_async:
+                self.write(')')
             self.write(', loop)')
             self.end_write(frame)
 
@@ -1178,36 +1136,46 @@ class CodeGenerator(NodeVisitor):
             self.outdent()
 
     def visit_Macro(self, node, frame):
-        macro_frame = self.macro_body(node, frame)
+        macro_frame, macro_ref = self.macro_body(node, frame)
         self.newline()
         if frame.toplevel:
             if not node.name.startswith('_'):
                 self.write('context.exported_vars.add(%r)' % node.name)
+            ref = frame.symbols.ref(node.name)
             self.writeline('context.vars[%r] = ' % node.name)
-        self.write('l_%s = ' % node.name)
-        self.macro_def(node, macro_frame)
-        frame.assigned_names.add(node.name)
+        self.write('%s = ' % frame.symbols.ref(node.name))
+        self.macro_def(macro_ref, macro_frame)
 
     def visit_CallBlock(self, node, frame):
-        children = node.iter_child_nodes(exclude=('call',))
-        call_frame = self.macro_body(node, frame, children)
+        call_frame, macro_ref = self.macro_body(node, frame)
         self.writeline('caller = ')
-        self.macro_def(node, call_frame)
+        self.macro_def(macro_ref, call_frame)
         self.start_write(frame, node)
-        self.visit_Call(node.call, call_frame, forward_caller=True)
+        self.visit_Call(node.call, frame, forward_caller=True)
         self.end_write(frame)
 
     def visit_FilterBlock(self, node, frame):
         filter_frame = frame.inner()
-        filter_frame.inspect(node.iter_child_nodes())
-        aliases = self.push_scope(filter_frame)
-        self.pull_locals(filter_frame)
+        filter_frame.symbols.analyze_node(node)
+        self.enter_frame(filter_frame)
         self.buffer(filter_frame)
         self.blockvisit(node.body, filter_frame)
         self.start_write(frame, node)
         self.visit_Filter(node.filter, filter_frame)
         self.end_write(frame)
-        self.pop_scope(aliases, filter_frame)
+        self.leave_frame(filter_frame)
+
+    def visit_With(self, node, frame):
+        with_frame = frame.inner()
+        with_frame.symbols.analyze_node(node)
+        self.enter_frame(with_frame)
+        for idx, (target, expr) in enumerate(izip(node.targets, node.values)):
+            self.newline()
+            self.visit(target, with_frame)
+            self.write(' = ')
+            self.visit(expr, frame)
+        self.blockvisit(node.body, with_frame)
+        self.leave_frame(with_frame)
 
     def visit_ExprStmt(self, node, frame):
         self.newline(node)
@@ -1286,7 +1254,7 @@ class CodeGenerator(NodeVisitor):
                     if frame.buffer is None:
                         self.writeline('yield ' + val)
                     else:
-                        self.writeline(val + ', ')
+                        self.writeline(val + ',')
                 else:
                     if frame.buffer is None:
                         self.writeline('yield ', item)
@@ -1294,8 +1262,8 @@ class CodeGenerator(NodeVisitor):
                         self.newline(item)
                     close = 1
                     if frame.eval_ctx.volatile:
-                        self.write('(context.eval_ctx.autoescape and'
-                                   ' escape or to_string)(')
+                        self.write('(escape if context.eval_ctx.autoescape'
+                                   ' else to_string)(')
                     elif frame.eval_ctx.autoescape:
                         self.write('escape(')
                     else:
@@ -1309,7 +1277,7 @@ class CodeGenerator(NodeVisitor):
                     self.visit(item, frame)
                     self.write(')' * close)
                     if frame.buffer is not None:
-                        self.write(', ')
+                        self.write(',')
             if frame.buffer is not None:
                 # close the open parentheses
                 self.outdent()
@@ -1332,8 +1300,8 @@ class CodeGenerator(NodeVisitor):
                 self.newline(argument)
                 close = 0
                 if frame.eval_ctx.volatile:
-                    self.write('(context.eval_ctx.autoescape and'
-                               ' escape or to_string)(')
+                    self.write('(escape if context.eval_ctx.autoescape else'
+                               ' to_string)(')
                     close += 1
                 elif frame.eval_ctx.autoescape:
                     self.write('escape(')
@@ -1358,73 +1326,55 @@ class CodeGenerator(NodeVisitor):
         if outdent_later:
             self.outdent()
 
-    def make_assignment_frame(self, frame):
-        # toplevel assignments however go into the local namespace and
-        # the current template's context.  We create a copy of the frame
-        # here and add a set so that the Name visitor can add the assigned
-        # names here.
-        if not frame.toplevel:
-            return frame
-        assignment_frame = frame.copy()
-        assignment_frame.toplevel_assignments = set()
-        return assignment_frame
-
-    def export_assigned_vars(self, frame, assignment_frame):
-        if not frame.toplevel:
-            return
-        public_names = [x for x in assignment_frame.toplevel_assignments
-                        if not x.startswith('_')]
-        if len(assignment_frame.toplevel_assignments) == 1:
-            name = next(iter(assignment_frame.toplevel_assignments))
-            self.writeline('context.vars[%r] = l_%s' % (name, name))
-        else:
-            self.writeline('context.vars.update({')
-            for idx, name in enumerate(assignment_frame.toplevel_assignments):
-                if idx:
-                    self.write(', ')
-                self.write('%r: l_%s' % (name, name))
-            self.write('})')
-        if public_names:
-            if len(public_names) == 1:
-                self.writeline('context.exported_vars.add(%r)' %
-                               public_names[0])
-            else:
-                self.writeline('context.exported_vars.update((%s))' %
-                               ', '.join(imap(repr, public_names)))
-
     def visit_Assign(self, node, frame):
+        self.push_assign_tracking()
         self.newline(node)
-        assignment_frame = self.make_assignment_frame(frame)
-        self.visit(node.target, assignment_frame)
+        self.visit(node.target, frame)
         self.write(' = ')
         self.visit(node.node, frame)
-        self.export_assigned_vars(frame, assignment_frame)
+        self.pop_assign_tracking(frame)
 
     def visit_AssignBlock(self, node, frame):
+        self.push_assign_tracking()
         block_frame = frame.inner()
-        block_frame.inspect(node.body)
-        aliases = self.push_scope(block_frame)
-        self.pull_locals(block_frame)
+        # This is a special case.  Since a set block always captures we
+        # will disable output checks.  This way one can use set blocks
+        # toplevel even in extended templates.
+        block_frame.require_output_check = False
+        block_frame.symbols.analyze_node(node)
+        self.enter_frame(block_frame)
         self.buffer(block_frame)
         self.blockvisit(node.body, block_frame)
-        self.pop_scope(aliases, block_frame)
-
-        assignment_frame = self.make_assignment_frame(frame)
         self.newline(node)
-        self.visit(node.target, assignment_frame)
-        self.write(' = concat(%s)' % block_frame.buffer)
-        self.export_assigned_vars(frame, assignment_frame)
+        self.visit(node.target, frame)
+        self.write(' = (Markup if context.eval_ctx.autoescape '
+                   'else identity)(concat(%s))' % block_frame.buffer)
+        self.pop_assign_tracking(frame)
+        self.leave_frame(block_frame)
 
     # -- Expression Visitors
 
     def visit_Name(self, node, frame):
         if node.ctx == 'store' and frame.toplevel:
-            frame.toplevel_assignments.add(node.name)
-        self.write('l_' + node.name)
-        frame.assigned_names.add(node.name)
+            if self._assign_stack:
+                self._assign_stack[-1].add(node.name)
+        ref = frame.symbols.ref(node.name)
+
+        # If we are looking up a variable we might have to deal with the
+        # case where it's undefined.  We can skip that case if the load
+        # instruction indicates a parameter which are always defined.
+        if node.ctx == 'load':
+            load = frame.symbols.find_load(ref)
+            if not (load is not None and load[0] == VAR_LOAD_PARAMETER and \
+                    not self.parameter_is_undeclared(ref)):
+                self.write('(undefined(name=%r) if %s is missing else %s)' %
+                           (node.name, ref, ref))
+                return
+
+        self.write(ref)
 
     def visit_Const(self, node, frame):
-        val = node.value
+        val = node.as_const(frame.eval_ctx)
         if isinstance(val, float):
             self.write(str(val))
         else:
@@ -1434,7 +1384,7 @@ class CodeGenerator(NodeVisitor):
         try:
             self.write(repr(node.as_const(frame.eval_ctx)))
         except nodes.Impossible:
-            self.write('(context.eval_ctx.autoescape and Markup or identity)(%r)'
+            self.write('(Markup if context.eval_ctx.autoescape else identity)(%r)'
                        % node.data)
 
     def visit_Tuple(self, node, frame):
@@ -1465,6 +1415,7 @@ class CodeGenerator(NodeVisitor):
         self.write('}')
 
     def binop(operator, interceptable=True):
+        @optimizeconst
         def visitor(self, node, frame):
             if self.environment.sandboxed and \
                operator in self.environment.intercepted_binops:
@@ -1481,6 +1432,7 @@ class CodeGenerator(NodeVisitor):
         return visitor
 
     def uaop(operator, interceptable=True):
+        @optimizeconst
         def visitor(self, node, frame):
             if self.environment.sandboxed and \
                operator in self.environment.intercepted_unops:
@@ -1506,6 +1458,7 @@ class CodeGenerator(NodeVisitor):
     visit_Not = uaop('not ', interceptable=False)
     del binop, uaop
 
+    @optimizeconst
     def visit_Concat(self, node, frame):
         if frame.eval_ctx.volatile:
             func_name = '(context.eval_ctx.volatile and' \
@@ -1520,6 +1473,7 @@ class CodeGenerator(NodeVisitor):
             self.write(', ')
         self.write('))')
 
+    @optimizeconst
     def visit_Compare(self, node, frame):
         self.visit(node.expr, frame)
         for op in node.ops:
@@ -1529,11 +1483,13 @@ class CodeGenerator(NodeVisitor):
         self.write(' %s ' % operators[node.op])
         self.visit(node.expr, frame)
 
+    @optimizeconst
     def visit_Getattr(self, node, frame):
         self.write('environment.getattr(')
         self.visit(node.node, frame)
         self.write(', %r)' % node.attr)
 
+    @optimizeconst
     def visit_Getitem(self, node, frame):
         # slices bypass the environment getitem method.
         if isinstance(node.arg, nodes.Slice):
@@ -1558,7 +1514,10 @@ class CodeGenerator(NodeVisitor):
             self.write(':')
             self.visit(node.step, frame)
 
+    @optimizeconst
     def visit_Filter(self, node, frame):
+        if self.environment.is_async:
+            self.write('await auto_await(')
         self.write(self.filters[node.name] + '(')
         func = self.environment.filters.get(node.name)
         if func is None:
@@ -1584,7 +1543,10 @@ class CodeGenerator(NodeVisitor):
             self.write('concat(%s)' % frame.buffer)
         self.signature(node, frame)
         self.write(')')
+        if self.environment.is_async:
+            self.write(')')
 
+    @optimizeconst
     def visit_Test(self, node, frame):
         self.write(self.tests[node.name] + '(')
         if node.name not in self.environment.tests:
@@ -1593,11 +1555,12 @@ class CodeGenerator(NodeVisitor):
         self.signature(node, frame)
         self.write(')')
 
+    @optimizeconst
     def visit_CondExpr(self, node, frame):
         def write_expr2():
             if node.expr2 is not None:
                 return self.visit(node.expr2, frame)
-            self.write('environment.undefined(%r)' % ('the inline if-'
+            self.write('undefined(%r)' % ('the inline if-'
                        'expression on %s evaluated to false and '
                        'no else section was defined.' % self.position(node)))
 
@@ -1609,7 +1572,10 @@ class CodeGenerator(NodeVisitor):
         write_expr2()
         self.write(')')
 
+    @optimizeconst
     def visit_Call(self, node, frame, forward_caller=False):
+        if self.environment.is_async:
+            self.write('await auto_await(')
         if self.environment.sandboxed:
             self.write('environment.call(context, ')
         else:
@@ -1618,6 +1584,8 @@ class CodeGenerator(NodeVisitor):
         extra_kwargs = forward_caller and {'caller': 'caller'} or None
         self.signature(node, frame, extra_kwargs)
         self.write(')')
+        if self.environment.is_async:
+            self.write(')')
 
     def visit_Keyword(self, node, frame):
         self.write(node.key + '=')
@@ -1658,11 +1626,10 @@ class CodeGenerator(NodeVisitor):
 
     def visit_Scope(self, node, frame):
         scope_frame = frame.inner()
-        scope_frame.inspect(node.iter_child_nodes())
-        aliases = self.push_scope(scope_frame)
-        self.pull_locals(scope_frame)
+        scope_frame.symbols.analyze_node(node)
+        self.enter_frame(scope_frame)
         self.blockvisit(node.body, scope_frame)
-        self.pop_scope(aliases, scope_frame)
+        self.leave_frame(scope_frame)
 
     def visit_EvalContextModifier(self, node, frame):
         for keyword in node.options:
@@ -1677,10 +1644,10 @@ class CodeGenerator(NodeVisitor):
 
     def visit_ScopedEvalContextModifier(self, node, frame):
         old_ctx_name = self.temporary_identifier()
-        safed_ctx = frame.eval_ctx.save()
+        saved_ctx = frame.eval_ctx.save()
         self.writeline('%s = context.eval_ctx.save()' % old_ctx_name)
         self.visit_EvalContextModifier(node, frame)
         for child in node.body:
             self.visit(child, frame)
-        frame.eval_ctx.revert(safed_ctx)
+        frame.eval_ctx.revert(saved_ctx)
         self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
diff --git a/lib/jinja2/constants.py b/lib/jinja2/constants.py
index cab203c..11efd1e 100644
--- a/lib/jinja2/constants.py
+++ b/lib/jinja2/constants.py
@@ -5,7 +5,7 @@
 
     Various constants.
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD, see LICENSE for more details.
 """
 
diff --git a/lib/jinja2/debug.py b/lib/jinja2/debug.py
index 3252748..07c21f1 100644
--- a/lib/jinja2/debug.py
+++ b/lib/jinja2/debug.py
@@ -7,7 +7,7 @@
     ugly stuff with the Python traceback system in order to achieve tracebacks
     with correct line numbers, locals and contents.
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD, see LICENSE for more details.
 """
 import sys
@@ -195,21 +195,43 @@ def translate_exception(exc_info, initial_skip=0):
     return ProcessedTraceback(exc_info[0], exc_info[1], frames)
 
 
+def get_jinja_locals(real_locals):
+    ctx = real_locals.get('context')
+    if ctx:
+        locals = ctx.get_all()
+    else:
+        locals = {}
+
+    local_overrides = {}
+
+    for name, value in iteritems(real_locals):
+        if not name.startswith('l_') or value is missing:
+            continue
+        try:
+            _, depth, name = name.split('_', 2)
+            depth = int(depth)
+        except ValueError:
+            continue
+        cur_depth = local_overrides.get(name, (-1,))[0]
+        if cur_depth < depth:
+            local_overrides[name] = (depth, value)
+
+    for name, (_, value) in iteritems(local_overrides):
+        if value is missing:
+            locals.pop(name, None)
+        else:
+            locals[name] = value
+
+    return locals
+
+
 def fake_exc_info(exc_info, filename, lineno):
     """Helper for `translate_exception`."""
     exc_type, exc_value, tb = exc_info
 
     # figure the real context out
     if tb is not None:
-        real_locals = tb.tb_frame.f_locals.copy()
-        ctx = real_locals.get('context')
-        if ctx:
-            locals = ctx.get_all()
-        else:
-            locals = {}
-        for name, value in iteritems(real_locals):
-            if name.startswith('l_') and value is not missing:
-                locals[name[2:]] = value
+        locals = get_jinja_locals(tb.tb_frame.f_locals)
 
         # if there is a local called __jinja_exception__, we get
         # rid of it to not break the debug functionality.
diff --git a/lib/jinja2/defaults.py b/lib/jinja2/defaults.py
index 3717a72..3590388 100644
--- a/lib/jinja2/defaults.py
+++ b/lib/jinja2/defaults.py
@@ -5,7 +5,7 @@
 
     Jinja default filters and tags.
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD, see LICENSE for more details.
 """
 from jinja2._compat import range_type
@@ -39,5 +39,16 @@ DEFAULT_NAMESPACE = {
 }
 
 
+# default policies
+DEFAULT_POLICIES = {
+    'compiler.ascii_str':   True,
+    'urlize.rel':           'noopener',
+    'urlize.target':        None,
+    'truncate.leeway':      5,
+    'json.dumps_function':  None,
+    'json.dumps_kwargs':    {'sort_keys': True},
+}
+
+
 # export all constants
 __all__ = tuple(x for x in locals().keys() if x.isupper())
diff --git a/lib/jinja2/environment.py b/lib/jinja2/environment.py
index 8b2572b..2a4d3d7 100644
--- a/lib/jinja2/environment.py
+++ b/lib/jinja2/environment.py
@@ -5,32 +5,32 @@
 
     Provides a class that holds runtime and parsing time options.
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD, see LICENSE for more details.
 """
 import os
 import sys
+import weakref
+from functools import reduce, partial
 from jinja2 import nodes
 from jinja2.defaults import BLOCK_START_STRING, \
      BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
      COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
      LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
      DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \
-     KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
+     DEFAULT_POLICIES, KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
 from jinja2.lexer import get_lexer, TokenStream
 from jinja2.parser import Parser
 from jinja2.nodes import EvalContext
-from jinja2.optimizer import optimize
 from jinja2.compiler import generate, CodeGenerator
 from jinja2.runtime import Undefined, new_context, Context
 from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
      TemplatesNotFound, TemplateRuntimeError
 from jinja2.utils import import_string, LRUCache, Markup, missing, \
-     concat, consume, internalcode
+     concat, consume, internalcode, have_async_gen
 from jinja2._compat import imap, ifilter, string_types, iteritems, \
      text_type, reraise, implements_iterator, implements_to_string, \
-     get_next, encode_filename, PY2, PYPY
-from functools import reduce
+     encode_filename, PY2, PYPY
 
 
 # for direct template usage we have up to ten living environments
@@ -87,6 +87,16 @@ def load_extensions(environment, extensions):
     return result
 
 
+def fail_for_missing_callable(string, name):
+    msg = string % name
+    if isinstance(name, Undefined):
+        try:
+            name._fail_with_undefined_error()
+        except Exception as e:
+            msg = '%s (%s; did you forget to quote the callable name?)' % (msg, e)
+    raise TemplateRuntimeError(msg)
+
+
 def _environment_sanity_check(environment):
     """Perform a sanity check on the environment."""
     assert issubclass(environment.undefined, Undefined), 'undefined must ' \
@@ -167,7 +177,7 @@ class Environment(object):
             look at :ref:`the extensions documentation <jinja-extensions>`.
 
         `optimized`
-            should the optimizer be enabled?  Default is `True`.
+            should the optimizer be enabled?  Default is ``True``.
 
         `undefined`
             :class:`Undefined` or a subclass of it that is used to represent
@@ -176,14 +186,14 @@ class Environment(object):
         `finalize`
             A callable that can be used to process the result of a variable
             expression before it is output.  For example one can convert
-            `None` implicitly into an empty string here.
+            ``None`` implicitly into an empty string here.
 
         `autoescape`
-            If set to true the XML/HTML autoescaping feature is enabled by
+            If set to ``True`` the XML/HTML autoescaping feature is enabled by
             default.  For more details about autoescaping see
             :class:`~jinja2.utils.Markup`.  As of Jinja 2.4 this can also
             be a callable that is passed the template name and has to
-            return `True` or `False` depending on autoescape should be
+            return ``True`` or ``False`` depending on autoescape should be
             enabled by default.
 
             .. versionchanged:: 2.4
@@ -205,7 +215,7 @@ class Environment(object):
         `auto_reload`
             Some loaders load templates from locations where the template
             sources may change (ie: file system or database).  If
-            `auto_reload` is set to `True` (default) every time a template is
+            ``auto_reload`` is set to ``True`` (default) every time a template is
             requested the loader checks if the source changed and if yes, it
             will reload the template.  For higher performance it's possible to
             disable that.
@@ -216,6 +226,11 @@ class Environment(object):
             have to be parsed if they were not changed.
 
             See :ref:`bytecode-cache` for more information.
+
+        `enable_async`
+            If set to true this enables async template execution which allows
+            you to take advantage of newer Python features.  This requires
+            Python 3.6 or later.
     """
 
     #: if this environment is sandboxed.  Modifying this variable won't make
@@ -267,7 +282,8 @@ class Environment(object):
                  loader=None,
                  cache_size=400,
                  auto_reload=True,
-                 bytecode_cache=None):
+                 bytecode_cache=None,
+                 enable_async=False):
         # !!Important notice!!
         #   The constructor accepts quite a few arguments that should be
         #   passed by keyword rather than position.  However it's important to
@@ -310,9 +326,15 @@ class Environment(object):
         self.bytecode_cache = bytecode_cache
         self.auto_reload = auto_reload
 
+        # configurable policies
+        self.policies = DEFAULT_POLICIES.copy()
+
         # load extensions
         self.extensions = load_extensions(self, extensions)
 
+        self.enable_async = enable_async
+        self.is_async = self.enable_async and have_async_gen
+
         _environment_sanity_check(self)
 
     def add_extension(self, extension):
@@ -387,7 +409,7 @@ class Environment(object):
         """Get an item or attribute of an object but prefer the item."""
         try:
             return obj[argument]
-        except (TypeError, LookupError):
+        except (AttributeError, TypeError, LookupError):
             if isinstance(argument, string_types):
                 try:
                     attr = str(argument)
@@ -417,11 +439,16 @@ class Environment(object):
                     context=None, eval_ctx=None):
         """Invokes a filter on a value the same way the compiler does it.
 
+        Note that on Python 3 this might return a coroutine in case the
+        filter is running from an environment in async mode and the filter
+        supports async execution.  It's your responsibility to await this
+        if needed.
+
         .. versionadded:: 2.7
         """
         func = self.filters.get(name)
         if func is None:
-            raise TemplateRuntimeError('no filter named %r' % name)
+            fail_for_missing_callable('no filter named %r', name)
         args = [value] + list(args or ())
         if getattr(func, 'contextfilter', False):
             if context is None:
@@ -446,7 +473,7 @@ class Environment(object):
         """
         func = self.tests.get(name)
         if func is None:
-            raise TemplateRuntimeError('no test named %r' % name)
+            fail_for_missing_callable('no test named %r', name)
         return func(value, *(args or ()), **(kwargs or {}))
 
     @internalcode
@@ -512,7 +539,8 @@ class Environment(object):
 
         .. versionadded:: 2.5
         """
-        return generate(source, self, name, filename, defer_init=defer_init)
+        return generate(source, self, name, filename, defer_init=defer_init,
+                        optimized=self.optimized)
 
     def _compile(self, source, filename):
         """Internal hook that can be overridden to hook a different compile
@@ -549,8 +577,6 @@ class Environment(object):
             if isinstance(source, string_types):
                 source_hint = source
                 source = self._parse(source, name, filename)
-            if self.optimized:
-                source = optimize(source, self)
             source = self._generate(source, name, filename,
                                     defer_init=defer_init)
             if raw:
@@ -769,15 +795,7 @@ class Environment(object):
     def _load_template(self, name, globals):
         if self.loader is None:
             raise TypeError('no loader for this environment specified')
-        try:
-            # use abs path for cache key
-            cache_key = self.loader.get_source(self, name)[1]
-        except RuntimeError:
-            # if loader does not implement get_source()
-            cache_key = None
-        # if template is not file, use name for cache key
-        if cache_key is None:
-            cache_key = name
+        cache_key = (weakref.ref(self.loader), name)
         if self.cache is not None:
             template = self.cache.get(cache_key)
             if template is not None and (not self.auto_reload or
@@ -915,14 +933,15 @@ class Template(object):
                 optimized=True,
                 undefined=Undefined,
                 finalize=None,
-                autoescape=False):
+                autoescape=False,
+                enable_async=False):
         env = get_spontaneous_environment(
             block_start_string, block_end_string, variable_start_string,
             variable_end_string, comment_start_string, comment_end_string,
             line_statement_prefix, line_comment_prefix, trim_blocks,
             lstrip_blocks, newline_sequence, keep_trailing_newline,
             frozenset(extensions), optimized, undefined, finalize, autoescape,
-            None, 0, False, None)
+            None, 0, False, None, enable_async)
         return env.from_string(source, template_class=cls)
 
     @classmethod
@@ -988,6 +1007,19 @@ class Template(object):
             exc_info = sys.exc_info()
         return self.environment.handle_exception(exc_info, True)
 
+    def render_async(self, *args, **kwargs):
+        """This works similar to :meth:`render` but returns a coroutine
+        that when awaited returns the entire rendered template string.  This
+        requires the async feature to be enabled.
+
+        Example usage::
+
+            await template.render_async(knights='that say nih; asynchronously')
+        """
+        # see asyncsupport for the actual implementation
+        raise NotImplementedError('This feature is not available for this '
+                                  'version of Python')
+
     def stream(self, *args, **kwargs):
         """Works exactly like :meth:`generate` but returns a
         :class:`TemplateStream`.
@@ -1012,6 +1044,14 @@ class Template(object):
             return
         yield self.environment.handle_exception(exc_info, True)
 
+    def generate_async(self, *args, **kwargs):
+        """An async version of :meth:`generate`.  Works very similarly but
+        returns an async iterator instead.
+        """
+        # see asyncsupport for the actual implementation
+        raise NotImplementedError('This feature is not available for this '
+                                  'version of Python')
+
     def new_context(self, vars=None, shared=False, locals=None):
         """Create a new :class:`Context` for this template.  The vars
         provided will be passed to the template.  Per default the globals
@@ -1032,6 +1072,23 @@ class Template(object):
         """
         return TemplateModule(self, self.new_context(vars, shared, locals))
 
+    def make_module_async(self, vars=None, shared=False, locals=None):
+        """As template module creation can invoke template code for
+        asynchronous exections this method must be used instead of the
+        normal :meth:`make_module` one.  Likewise the module attribute
+        becomes unavailable in async mode.
+        """
+        # see asyncsupport for the actual implementation
+        raise NotImplementedError('This feature is not available for this '
+                                  'version of Python')
+
+    @internalcode
+    def _get_default_module(self):
+        if self._module is not None:
+            return self._module
+        self._module = rv = self.make_module()
+        return rv
+
     @property
     def module(self):
         """The template as module.  This is used for imports in the
@@ -1043,11 +1100,10 @@ class Template(object):
         '23'
         >>> t.module.foo() == u'42'
         True
+
+        This attribute is not available if async mode is enabled.
         """
-        if self._module is not None:
-            return self._module
-        self._module = rv = self.make_module()
-        return rv
+        return self._get_default_module()
 
     def get_corresponding_lineno(self, lineno):
         """Return the source line number of a line number in the
@@ -1086,8 +1142,15 @@ class TemplateModule(object):
     converting it into an unicode- or bytestrings renders the contents.
     """
 
-    def __init__(self, template, context):
-        self._body_stream = list(template.root_render_func(context))
+    def __init__(self, template, context, body_stream=None):
+        if body_stream is None:
+            if context.environment.is_async:
+                raise RuntimeError('Async mode requires a body stream '
+                                   'to be passed to a template module.  Use '
+                                   'the async methods of the API you are '
+                                   'using.')
+            body_stream = list(template.root_render_func(context))
+        self._body_stream = body_stream
         self.__dict__.update(context.get_exported())
         self.__name__ = template.name
 
@@ -1171,35 +1234,35 @@ class TemplateStream(object):
 
     def disable_buffering(self):
         """Disable the output buffering."""
-        self._next = get_next(self._gen)
+        self._next = partial(next, self._gen)
         self.buffered = False
 
+    def _buffered_generator(self, size):
+        buf = []
+        c_size = 0
+        push = buf.append
+
+        while 1:
+            try:
+                while c_size < size:
+                    c = next(self._gen)
+                    push(c)
+                    if c:
+                        c_size += 1
+            except StopIteration:
+                if not c_size:
+                    return
+            yield concat(buf)
+            del buf[:]
+            c_size = 0
+
     def enable_buffering(self, size=5):
         """Enable buffering.  Buffer `size` items before yielding them."""
         if size <= 1:
             raise ValueError('buffer size too small')
 
-        def generator(next):
-            buf = []
-            c_size = 0
-            push = buf.append
-
-            while 1:
-                try:
-                    while c_size < size:
-                        c = next()
-                        push(c)
-                        if c:
-                            c_size += 1
-                except StopIteration:
-                    if not c_size:
-                        return
-                yield concat(buf)
-                del buf[:]
-                c_size = 0
-
         self.buffered = True
-        self._next = get_next(generator(get_next(self._gen)))
+        self._next = partial(next, self._buffered_generator(size))
 
     def __iter__(self):
         return self
diff --git a/lib/jinja2/exceptions.py b/lib/jinja2/exceptions.py
index c9df6dc..c018a33 100644
--- a/lib/jinja2/exceptions.py
+++ b/lib/jinja2/exceptions.py
@@ -5,7 +5,7 @@
 
     Jinja exceptions.
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD, see LICENSE for more details.
 """
 from jinja2._compat import imap, text_type, PY2, implements_to_string
diff --git a/lib/jinja2/ext.py b/lib/jinja2/ext.py
index 562ab50..75e1f3b 100644
--- a/lib/jinja2/ext.py
+++ b/lib/jinja2/ext.py
@@ -7,7 +7,7 @@
     tags work.  By default two example extensions exist: an i18n and a cache
     extension.
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD.
 """
 from jinja2 import nodes
@@ -87,7 +87,7 @@ class Extension(with_metaclass(ExtensionRegistry, object)):
     def filter_stream(self, stream):
         """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
         to filter tokens returned.  This method has to return an iterable of
-        :class:`~jinja2.lexer.Token`\s, but it doesn't have to return a
+        :class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
         :class:`~jinja2.lexer.TokenStream`.
 
         In the `ext` folder of the Jinja2 source distribution there is a file
@@ -411,38 +411,11 @@ class LoopControlExtension(Extension):
 
 
 class WithExtension(Extension):
-    """Adds support for a django-like with block."""
-    tags = set(['with'])
-
-    def parse(self, parser):
-        node = nodes.Scope(lineno=next(parser.stream).lineno)
-        assignments = []
-        while parser.stream.current.type != 'block_end':
-            lineno = parser.stream.current.lineno
-            if assignments:
-                parser.stream.expect('comma')
-            target = parser.parse_assign_target()
-            parser.stream.expect('assign')
-            expr = parser.parse_expression()
-            assignments.append(nodes.Assign(target, expr, lineno=lineno))
-        node.body = assignments + \
-            list(parser.parse_statements(('name:endwith',),
-                                         drop_needle=True))
-        return node
+    pass
 
 
 class AutoEscapeExtension(Extension):
-    """Changes auto escape rules for a scope."""
-    tags = set(['autoescape'])
-
-    def parse(self, parser):
-        node = nodes.ScopedEvalContextModifier(lineno=next(parser.stream).lineno)
-        node.options = [
-            nodes.Keyword('autoescape', parser.parse_expression())
-        ]
-        node.body = parser.parse_statements(('name:endautoescape',),
-                                            drop_needle=True)
-        return nodes.Scope([node])
+    pass
 
 
 def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
diff --git a/lib/jinja2/filters.py b/lib/jinja2/filters.py
index e5c7a1a..76e04db 100644
--- a/lib/jinja2/filters.py
+++ b/lib/jinja2/filters.py
@@ -5,23 +5,24 @@
 
     Bundled jinja filters.
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD, see LICENSE for more details.
 """
 import re
 import math
 
 from random import choice
-from operator import itemgetter
 from itertools import groupby
+from collections import namedtuple
 from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode, \
-     unicode_urlencode
+     unicode_urlencode, htmlsafe_json_dumps
 from jinja2.runtime import Undefined
 from jinja2.exceptions import FilterArgumentError
-from jinja2._compat import imap, string_types, text_type, iteritems
+from jinja2._compat import imap, string_types, text_type, iteritems, PY2
 
 
-_word_re = re.compile(r'\w+(?u)')
+_word_re = re.compile(r'\w+', re.UNICODE)
+_word_beginning_split_re = re.compile(r'([-\s\(\{\[\<]+)', re.UNICODE)
 
 
 def contextfilter(f):
@@ -44,7 +45,7 @@ def evalcontextfilter(f):
 
 
 def environmentfilter(f):
-    """Decorator for marking evironment dependent filters.  The current
+    """Decorator for marking environment dependent filters.  The current
     :class:`Environment` is passed to the filter as first argument.
     """
     f.environmentfilter = True
@@ -183,12 +184,10 @@ def do_title(s):
     """Return a titlecased version of the value. I.e. words will start with
     uppercase letters, all remaining characters are lowercase.
     """
-    rv = []
-    for item in re.compile(r'([-\s]+)(?u)').split(soft_unicode(s)):
-        if not item:
-            continue
-        rv.append(item[0].upper() + item[1:].lower())
-    return ''.join(rv)
+    return ''.join(
+        [item[0].upper() + item[1:].lower()
+         for item in _word_beginning_split_re.split(soft_unicode(s))
+         if item])
 
 
 def do_dictsort(value, case_sensitive=False, by='key'):
@@ -410,7 +409,7 @@ def do_pprint(value, verbose=False):
 
 @evalcontextfilter
 def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False,
-              target=None):
+              target=None, rel=None):
     """Converts URLs in plain text into clickable links.
 
     If you pass the filter an additional integer it will shorten the urls
@@ -432,7 +431,15 @@ def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False,
     .. versionchanged:: 2.8+
        The *target* parameter was added.
     """
-    rv = urlize(value, trim_url_limit, nofollow, target)
+    policies = eval_ctx.environment.policies
+    rel = set((rel or '').split() or [])
+    if nofollow:
+        rel.add('nofollow')
+    rel.update((policies['urlize.rel'] or '').split())
+    if target is None:
+        target = policies['urlize.target']
+    rel = ' '.join(sorted(rel)) or None
+    rv = urlize(value, trim_url_limit, rel=rel, target=target)
     if eval_ctx.autoescape:
         rv = Markup(rv)
     return rv
@@ -456,31 +463,40 @@ def do_indent(s, width=4, indentfirst=False):
     return rv
 
 
-def do_truncate(s, length=255, killwords=False, end='...'):
+@environmentfilter
+def do_truncate(env, s, length=255, killwords=False, end='...', leeway=None):
     """Return a truncated copy of the string. The length is specified
     with the first parameter which defaults to ``255``. If the second
     parameter is ``true`` the filter will cut the text at length. Otherwise
     it will discard the last word. If the text was in fact
     truncated it will append an ellipsis sign (``"..."``). If you want a
     different ellipsis sign than ``"..."`` you can specify it using the
-    third parameter.
+    third parameter. Strings that only exceed the length by the tolerance
+    margin given in the fourth parameter will not be truncated.
 
     .. sourcecode:: jinja
 
-        {{ "foo bar baz"|truncate(9) }}
-            -> "foo ..."
-        {{ "foo bar baz"|truncate(9, True) }}
+        {{ "foo bar baz qux"|truncate(9) }}
+            -> "foo..."
+        {{ "foo bar baz qux"|truncate(9, True) }}
             -> "foo ba..."
+        {{ "foo bar baz qux"|truncate(11) }}
+            -> "foo bar baz qux"
+        {{ "foo bar baz qux"|truncate(11, False, '...', 0) }}
+            -> "foo bar..."
 
+    The default leeway on newer Jinja2 versions is 5 and was 0 before but
+    can be reconfigured globally.
     """
-    if len(s) <= length:
+    if leeway is None:
+        leeway = env.policies['truncate.leeway']
+    assert length >= len(end), 'expected length >= %s, got %s' % (len(end), length)
+    assert leeway >= 0, 'expected leeway >= 0, got %s' % leeway
+    if len(s) <= length + leeway:
         return s
-    elif killwords:
+    if killwords:
         return s[:length - len(end)] + end
-
     result = s[:length - len(end)].rsplit(' ', 1)[0]
-    if len(result) < length:
-        result += ' '
     return result + end
 
 
@@ -518,9 +534,12 @@ def do_int(value, default=0, base=10):
     can also override the default base (10) in the second
     parameter, which handles input with prefixes such as
     0b, 0o and 0x for bases 2, 8 and 16 respectively.
+    The base is ignored for decimal numbers and non-string values.
     """
     try:
-        return int(value, base)
+        if isinstance(value, string_types):
+            return int(value, base)
+        return int(value)
     except (TypeError, ValueError):
         # this quirk is necessary so that "42.23"|int gives 42.
         try:
@@ -669,6 +688,15 @@ def do_round(value, precision=0, method='common'):
     return func(value * (10 ** precision)) / (10 ** precision)
 
 
+# Use a regular tuple repr here.  This is what we did in the past and we
+# really want to hide this custom type as much as possible.  In particular
+# we do not want to accidentally expose an auto generated repr in case
+# people start to print this out in comments or something similar for
+# debugging.
+_GroupTuple = namedtuple('_GroupTuple', ['grouper', 'list'])
+_GroupTuple.__repr__ = tuple.__repr__
+_GroupTuple.__str__ = tuple.__str__
+
 @environmentfilter
 def do_groupby(environment, value, attribute):
     """Group a sequence of objects by a common attribute.
@@ -709,17 +737,8 @@ def do_groupby(environment, value, attribute):
        attribute of another attribute.
     """
     expr = make_attrgetter(environment, attribute)
-    return sorted(map(_GroupTuple, groupby(sorted(value, key=expr), expr)))
-
-
-class _GroupTuple(tuple):
-    __slots__ = ()
-    grouper = property(itemgetter(0))
-    list = property(itemgetter(1))
-
-    def __new__(cls, xxx_todo_changeme):
-        (key, value) = xxx_todo_changeme
-        return tuple.__new__(cls, (key, list(value)))
+    return [_GroupTuple(key, list(values)) for key, values
+            in groupby(sorted(value, key=expr), expr)]
 
 
 @environmentfilter
@@ -827,24 +846,7 @@ def do_map(*args, **kwargs):
 
     .. versionadded:: 2.7
     """
-    context = args[0]
-    seq = args[1]
-
-    if len(args) == 2 and 'attribute' in kwargs:
-        attribute = kwargs.pop('attribute')
-        if kwargs:
-            raise FilterArgumentError('Unexpected keyword argument %r' %
-                next(iter(kwargs)))
-        func = make_attrgetter(context.environment, attribute)
-    else:
-        try:
-            name = args[2]
-            args = args[3:]
-        except LookupError:
-            raise FilterArgumentError('map requires a filter argument')
-        func = lambda item: context.environment.call_filter(
-            name, item, args, kwargs, context=context)
-
+    seq, func = prepare_map(args, kwargs)
     if seq:
         for item in seq:
             yield func(item)
@@ -852,8 +854,10 @@ def do_map(*args, **kwargs):
 
 @contextfilter
 def do_select(*args, **kwargs):
-    """Filters a sequence of objects by applying a test to the object and only
-    selecting the ones with the test succeeding.
+    """Filters a sequence of objects by applying a test to each object,
+    and only selecting the objects with the test succeeding.
+
+    If no test is specified, each object will be evaluated as a boolean.
 
     Example usage:
 
@@ -864,13 +868,15 @@ def do_select(*args, **kwargs):
 
     .. versionadded:: 2.7
     """
-    return _select_or_reject(args, kwargs, lambda x: x, False)
+    return select_or_reject(args, kwargs, lambda x: x, False)
 
 
 @contextfilter
 def do_reject(*args, **kwargs):
-    """Filters a sequence of objects by applying a test to the object and
-    rejecting the ones with the test succeeding.
+    """Filters a sequence of objects by applying a test to each object,
+    and rejecting the objects with the test succeeding.
+
+    If no test is specified, each object will be evaluated as a boolean.
 
     Example usage:
 
@@ -880,13 +886,17 @@ def do_reject(*args, **kwargs):
 
     .. versionadded:: 2.7
     """
-    return _select_or_reject(args, kwargs, lambda x: not x, False)
+    return select_or_reject(args, kwargs, lambda x: not x, False)
 
 
 @contextfilter
 def do_selectattr(*args, **kwargs):
-    """Filters a sequence of objects by applying a test to an attribute of an
-    object and only selecting the ones with the test succeeding.
+    """Filters a sequence of objects by applying a test to the specified
+    attribute of each object, and only selecting the objects with the
+    test succeeding.
+
+    If no test is specified, the attribute's value will be evaluated as
+    a boolean.
 
     Example usage:
 
@@ -897,13 +907,17 @@ def do_selectattr(*args, **kwargs):
 
     .. versionadded:: 2.7
     """
-    return _select_or_reject(args, kwargs, lambda x: x, True)
+    return select_or_reject(args, kwargs, lambda x: x, True)
 
 
 @contextfilter
 def do_rejectattr(*args, **kwargs):
-    """Filters a sequence of objects by applying a test to an attribute of an
-    object or the attribute and rejecting the ones with the test succeeding.
+    """Filters a sequence of objects by applying a test to the specified
+    attribute of each object, and rejecting the objects with the test
+    succeeding.
+
+    If no test is specified, the attribute's value will be evaluated as
+    a boolean.
 
     .. sourcecode:: jinja
 
@@ -912,10 +926,67 @@ def do_rejectattr(*args, **kwargs):
 
     .. versionadded:: 2.7
     """
-    return _select_or_reject(args, kwargs, lambda x: not x, True)
+    return select_or_reject(args, kwargs, lambda x: not x, True)
+
+
+@evalcontextfilter
+def do_tojson(eval_ctx, value, indent=None):
+    """Dumps a structure to JSON so that it's safe to use in ``<script>``
+    tags.  It accepts the same arguments and returns a JSON string.  Note that
+    this is available in templates through the ``|tojson`` filter which will
+    also mark the result as safe.  Due to how this function escapes certain
+    characters this is safe even if used outside of ``<script>`` tags.
+
+    The following characters are escaped in strings:
+
+    -   ``<``
+    -   ``>``
+    -   ``&``
+    -   ``'``
+
+    This makes it safe to embed such strings in any place in HTML with the
+    notable exception of double quoted attributes.  In that case single
+    quote your attributes or HTML escape it in addition.
+
+    The indent parameter can be used to enable pretty printing.  Set it to
+    the number of spaces that the structures should be indented with.
+
+    Note that this filter is for use in HTML contexts only.
+
+    .. versionadded:: 2.9
+    """
+    policies = eval_ctx.environment.policies
+    dumper = policies['json.dumps_function']
+    options = policies['json.dumps_kwargs']
+    if indent is not None:
+        options = dict(options)
+        options['indent'] = indent
+    return htmlsafe_json_dumps(value, dumper=dumper, **options)
+
+
+def prepare_map(args, kwargs):
+    context = args[0]
+    seq = args[1]
+
+    if len(args) == 2 and 'attribute' in kwargs:
+        attribute = kwargs.pop('attribute')
+        if kwargs:
+            raise FilterArgumentError('Unexpected keyword argument %r' %
+                next(iter(kwargs)))
+        func = make_attrgetter(context.environment, attribute)
+    else:
+        try:
+            name = args[2]
+            args = args[3:]
+        except LookupError:
+            raise FilterArgumentError('map requires a filter argument')
+        func = lambda item: context.environment.call_filter(
+            name, item, args, kwargs, context=context)
 
+    return seq, func
 
-def _select_or_reject(args, kwargs, modfunc, lookup_attr):
+
+def prepare_select_or_reject(args, kwargs, modfunc, lookup_attr):
     context = args[0]
     seq = args[1]
     if lookup_attr:
@@ -937,9 +1008,14 @@ def _select_or_reject(args, kwargs, modfunc, lookup_attr):
     except LookupError:
         func = bool
 
+    return seq, lambda item: modfunc(func(transfunc(item)))
+
+
+def select_or_reject(args, kwargs, modfunc, lookup_attr):
+    seq, func = prepare_select_or_reject(args, kwargs, modfunc, lookup_attr)
     if seq:
         for item in seq:
-            if modfunc(func(transfunc(item))):
+            if func(item):
                 yield item
 
 
@@ -993,4 +1069,5 @@ FILTERS = {
     'wordcount':            do_wordcount,
     'wordwrap':             do_wordwrap,
     'xmlattr':              do_xmlattr,
+    'tojson':               do_tojson,
 }
diff --git a/lib/jinja2/idtracking.py b/lib/jinja2/idtracking.py
new file mode 100644
index 0000000..8479b72
--- /dev/null
+++ b/lib/jinja2/idtracking.py
@@ -0,0 +1,273 @@
+from jinja2.visitor import NodeVisitor
+from jinja2._compat import iteritems
+
+
+VAR_LOAD_PARAMETER = 'param'
+VAR_LOAD_RESOLVE = 'resolve'
+VAR_LOAD_ALIAS = 'alias'
+VAR_LOAD_UNDEFINED = 'undefined'
+
+
+def find_symbols(nodes, parent_symbols=None):
+    sym = Symbols(parent=parent_symbols)
+    visitor = FrameSymbolVisitor(sym)
+    for node in nodes:
+        visitor.visit(node)
+    return sym
+
+
+def symbols_for_node(node, parent_symbols=None):
+    sym = Symbols(parent=parent_symbols)
+    sym.analyze_node(node)
+    return sym
+
+
+class Symbols(object):
+
+    def __init__(self, parent=None):
+        if parent is None:
+            self.level = 0
+        else:
+            self.level = parent.level + 1
+        self.parent = parent
+        self.refs = {}
+        self.loads = {}
+        self.stores = set()
+
+    def analyze_node(self, node, **kwargs):
+        visitor = RootVisitor(self)
+        visitor.visit(node, **kwargs)
+
+    def _define_ref(self, name, load=None):
+        ident = 'l_%d_%s' % (self.level, name)
+        self.refs[name] = ident
+        if load is not None:
+            self.loads[ident] = load
+        return ident
+
+    def find_load(self, target):
+        if target in self.loads:
+            return self.loads[target]
+        if self.parent is not None:
+            return self.parent.find_load(target)
+
+    def find_ref(self, name):
+        if name in self.refs:
+            return self.refs[name]
+        if self.parent is not None:
+            return self.parent.find_ref(name)
+
+    def ref(self, name):
+        rv = self.find_ref(name)
+        if rv is None:
+            raise AssertionError('Tried to resolve a name to a reference that '
+                                 'was unknown to the frame (%r)' % name)
+        return rv
+
+    def copy(self):
+        rv = object.__new__(self.__class__)
+        rv.__dict__.update(self.__dict__)
+        rv.refs = self.refs.copy()
+        rv.loads = self.loads.copy()
+        rv.stores = self.stores.copy()
+        return rv
+
+    def store(self, name):
+        self.stores.add(name)
+
+        # If we have not see the name referenced yet, we need to figure
+        # out what to set it to.
+        if name not in self.refs:
+            # If there is a parent scope we check if the name has a
+            # reference there.  If it does it means we might have to alias
+            # to a variable there.
+            if self.parent is not None:
+                outer_ref = self.parent.find_ref(name)
+                if outer_ref is not None:
+                    self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
+                    return
+
+            # Otherwise we can just set it to undefined.
+            self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))
+
+    def declare_parameter(self, name):
+        self.stores.add(name)
+        return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))
+
+    def load(self, name):
+        target = self.find_ref(name)
+        if target is None:
+            self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))
+
+    def branch_update(self, branch_symbols):
+        stores = {}
+        for branch in branch_symbols:
+            for target in branch.stores:
+                if target in self.stores:
+                    continue
+                stores[target] = stores.get(target, 0) + 1
+
+        for sym in branch_symbols:
+            self.refs.update(sym.refs)
+            self.loads.update(sym.loads)
+            self.stores.update(sym.stores)
+
+        for name, branch_count in iteritems(stores):
+            if branch_count == len(branch_symbols):
+                continue
+            target = self.find_ref(name)
+            assert target is not None, 'should not happen'
+
+            if self.parent is not None:
+                outer_target = self.parent.find_ref(name)
+                if outer_target is not None:
+                    self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
+                    continue
+            self.loads[target] = (VAR_LOAD_RESOLVE, name)
+
+    def dump_stores(self):
+        rv = {}
+        node = self
+        while node is not None:
+            for name in node.stores:
+                if name not in rv:
+                    rv[name] = self.find_ref(name)
+            node = node.parent
+        return rv
+
+    def dump_param_targets(self):
+        rv = set()
+        node = self
+        while node is not None:
+            for target, (instr, _) in iteritems(self.loads):
+                if instr == VAR_LOAD_PARAMETER:
+                    rv.add(target)
+            node = node.parent
+        return rv
+
+
+class RootVisitor(NodeVisitor):
+
+    def __init__(self, symbols):
+        self.sym_visitor = FrameSymbolVisitor(symbols)
+
+    def _simple_visit(self, node, **kwargs):
+        for child in node.iter_child_nodes():
+            self.sym_visitor.visit(child)
+
+    visit_Template = visit_Block = visit_Macro = visit_FilterBlock = \
+        visit_Scope = visit_If = visit_ScopedEvalContextModifier = \
+        _simple_visit
+
+    def visit_AssignBlock(self, node, **kwargs):
+        for child in node.body:
+            self.sym_visitor.visit(child)
+
+    def visit_CallBlock(self, node, **kwargs):
+        for child in node.iter_child_nodes(exclude=('call',)):
+            self.sym_visitor.visit(child)
+
+    def visit_For(self, node, for_branch='body', **kwargs):
+        if for_branch == 'body':
+            self.sym_visitor.visit(node.target, store_as_param=True)
+            branch = node.body
+        elif for_branch == 'else':
+            branch = node.else_
+        elif for_branch == 'test':
+            self.sym_visitor.visit(node.target, store_as_param=True)
+            if node.test is not None:
+                self.sym_visitor.visit(node.test)
+            return
+        else:
+            raise RuntimeError('Unknown for branch')
+        for item in branch or ():
+            self.sym_visitor.visit(item)
+
+    def visit_With(self, node, **kwargs):
+        for target in node.targets:
+            self.sym_visitor.visit(target)
+        for child in node.body:
+            self.sym_visitor.visit(child)
+
+    def generic_visit(self, node, *args, **kwargs):
+        raise NotImplementedError('Cannot find symbols for %r' %
+                                  node.__class__.__name__)
+
+
+class FrameSymbolVisitor(NodeVisitor):
+    """A visitor for `Frame.inspect`."""
+
+    def __init__(self, symbols):
+        self.symbols = symbols
+
+    def visit_Name(self, node, store_as_param=False, **kwargs):
+        """All assignments to names go through this function."""
+        if store_as_param or node.ctx == 'param':
+            self.symbols.declare_parameter(node.name)
+        elif node.ctx == 'store':
+            self.symbols.store(node.name)
+        elif node.ctx == 'load':
+            self.symbols.load(node.name)
+
+    def visit_If(self, node, **kwargs):
+        self.visit(node.test, **kwargs)
+
+        original_symbols = self.symbols
+
+        def inner_visit(nodes):
+            self.symbols = rv = original_symbols.copy()
+            for subnode in nodes:
+                self.visit(subnode, **kwargs)
+            self.symbols = original_symbols
+            return rv
+
+        body_symbols = inner_visit(node.body)
+        else_symbols = inner_visit(node.else_ or ())
+
+        self.symbols.branch_update([body_symbols, else_symbols])
+
+    def visit_Macro(self, node, **kwargs):
+        self.symbols.store(node.name)
+
+    def visit_Import(self, node, **kwargs):
+        self.generic_visit(node, **kwargs)
+        self.symbols.store(node.target)
+
+    def visit_FromImport(self, node, **kwargs):
+        self.generic_visit(node, **kwargs)
+        for name in node.names:
+            if isinstance(name, tuple):
+                self.symbols.store(name[1])
+            else:
+                self.symbols.store(name)
+
+    def visit_Assign(self, node, **kwargs):
+        """Visit assignments in the correct order."""
+        self.visit(node.node, **kwargs)
+        self.visit(node.target, **kwargs)
+
+    def visit_For(self, node, **kwargs):
+        """Visiting stops at for blocks.  However the block sequence
+        is visited as part of the outer scope.
+        """
+        self.visit(node.iter, **kwargs)
+
+    def visit_CallBlock(self, node, **kwargs):
+        self.visit(node.call, **kwargs)
+
+    def visit_FilterBlock(self, node, **kwargs):
+        self.visit(node.filter, **kwargs)
+
+    def visit_With(self, node, **kwargs):
+        for target in node.values:
+            self.visit(target)
+
+    def visit_AssignBlock(self, node, **kwargs):
+        """Stop visiting at block assigns."""
+        self.visit(node.target, **kwargs)
+
+    def visit_Scope(self, node, **kwargs):
+        """Stop visiting at scopes."""
+
+    def visit_Block(self, node, **kwargs):
+        """Stop visiting at blocks."""
diff --git a/lib/jinja2/lexer.py b/lib/jinja2/lexer.py
index c8dac21..30e82fb 100644
--- a/lib/jinja2/lexer.py
+++ b/lib/jinja2/lexer.py
@@ -11,17 +11,17 @@
     operators we don't allow in templates. On the other hand it separates
     template code and python code in expressions.
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD, see LICENSE for more details.
 """
 import re
+import sys
 
 from operator import itemgetter
 from collections import deque
 from jinja2.exceptions import TemplateSyntaxError
 from jinja2.utils import LRUCache
-from jinja2._compat import iteritems, implements_iterator, text_type, \
-     intern, PY2
+from jinja2._compat import iteritems, implements_iterator, text_type, intern
 
 
 # cache for the lexers. Exists in order to be able to have multiple
@@ -34,17 +34,29 @@ string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
                        r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
 integer_re = re.compile(r'\d+')
 
-# we use the unicode identifier rule if this python version is able
-# to handle unicode identifiers, otherwise the standard ASCII one.
-try:
-    compile('föö', '<unknown>', 'eval')
-except SyntaxError:
-    name_re = re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b')
-else:
+def _make_name_re():
+    try:
+        compile('föö', '<unknown>', 'eval')
+    except SyntaxError:
+        return re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b')
+
+    import jinja2
     from jinja2 import _stringdefs
     name_re = re.compile(r'[%s][%s]*' % (_stringdefs.xid_start,
                                          _stringdefs.xid_continue))
 
+    # Save some memory here
+    sys.modules.pop('jinja2._stringdefs')
+    del _stringdefs
+    del jinja2._stringdefs
+
+    return name_re
+
+# we use the unicode identifier rule if this python version is able
+# to handle unicode identifiers, otherwise the standard ASCII one.
+name_re = _make_name_re()
+del _make_name_re
+
 float_re = re.compile(r'(?<!\.)\d+\.\d+')
 newline_re = re.compile(r'(\r\n|\r|\n)')
 
@@ -288,7 +300,7 @@ class TokenStreamIterator(object):
 
 @implements_iterator
 class TokenStream(object):
-    """A token stream is an iterable that yields :class:`Token`\s.  The
+    """A token stream is an iterable that yields :class:`Token`\\s.  The
     parser however does not iterate over it but calls :meth:`next` to go
     one token ahead.  The current active token is stored as :attr:`current`.
     """
@@ -500,7 +512,7 @@ class Lexer(object):
             ],
             # blocks
             TOKEN_BLOCK_BEGIN: [
-                (c('(?:\-%s\s*|%s)%s' % (
+                (c(r'(?:\-%s\s*|%s)%s' % (
                     e(environment.block_end_string),
                     e(environment.block_end_string),
                     block_suffix_re
@@ -508,14 +520,14 @@ class Lexer(object):
             ] + tag_rules,
             # variables
             TOKEN_VARIABLE_BEGIN: [
-                (c('\-%s\s*|%s' % (
+                (c(r'\-%s\s*|%s' % (
                     e(environment.variable_end_string),
                     e(environment.variable_end_string)
                 )), TOKEN_VARIABLE_END, '#pop')
             ] + tag_rules,
             # raw block
             TOKEN_RAW_BEGIN: [
-                (c('(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
+                (c(r'(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
                     e(environment.block_start_string),
                     block_prefix_re,
                     e(environment.block_end_string),
@@ -574,15 +586,6 @@ class Lexer(object):
                 except Exception as e:
                     msg = str(e).split(':')[-1].strip()
                     raise TemplateSyntaxError(msg, lineno, name, filename)
-                # if we can express it as bytestring (ascii only)
-                # we do that for support of semi broken APIs
-                # as datetime.datetime.strftime.  On python 3 this
-                # call becomes a noop thanks to 2to3
-                if PY2:
-                    try:
-                        value = value.encode('ascii')
-                    except UnicodeError:
-                        pass
             elif token == 'integer':
                 value = int(value)
             elif token == 'float':
diff --git a/lib/jinja2/loaders.py b/lib/jinja2/loaders.py
index 44aa392..4c79793 100644
--- a/lib/jinja2/loaders.py
+++ b/lib/jinja2/loaders.py
@@ -5,7 +5,7 @@
 
     Jinja loader classes.
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD, see LICENSE for more details.
 """
 import os
@@ -351,7 +351,7 @@ class PrefixLoader(BaseLoader):
         try:
             return loader.get_source(environment, name)
         except TemplateNotFound:
-            # re-raise the exception with the correct fileame here.
+            # re-raise the exception with the correct filename here.
             # (the one that includes the prefix)
             raise TemplateNotFound(template)
 
@@ -361,7 +361,7 @@ class PrefixLoader(BaseLoader):
         try:
             return loader.load(environment, local_name, globals)
         except TemplateNotFound:
-            # re-raise the exception with the correct fileame here.
+            # re-raise the exception with the correct filename here.
             # (the one that includes the prefix)
             raise TemplateNotFound(name)
 
diff --git a/lib/jinja2/meta.py b/lib/jinja2/meta.py
index 3dbab7c..7421914 100644
--- a/lib/jinja2/meta.py
+++ b/lib/jinja2/meta.py
@@ -6,12 +6,12 @@
     This module implements various functions that exposes information about
     templates that might be interesting for various kinds of applications.
 
-    :copyright: (c) 2010 by the Jinja Team, see AUTHORS for more details.
+    :copyright: (c) 2017 by the Jinja Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
 from jinja2 import nodes
 from jinja2.compiler import CodeGenerator
-from jinja2._compat import string_types
+from jinja2._compat import string_types, iteritems
 
 
 class TrackingCodeGenerator(CodeGenerator):
@@ -25,9 +25,12 @@ class TrackingCodeGenerator(CodeGenerator):
     def write(self, x):
         """Don't write."""
 
-    def pull_locals(self, frame):
+    def enter_frame(self, frame):
         """Remember all undeclared identifiers."""
-        self.undeclared_identifiers.update(frame.identifiers.undeclared)
+        CodeGenerator.enter_frame(self, frame)
+        for _, (action, param) in iteritems(frame.symbols.loads):
+            if action == 'resolve':
+                self.undeclared_identifiers.add(param)
 
 
 def find_undeclared_variables(ast):
diff --git a/lib/jinja2/nodes.py b/lib/jinja2/nodes.py
index d32046c..aa4df72 100644
--- a/lib/jinja2/nodes.py
+++ b/lib/jinja2/nodes.py
@@ -9,7 +9,7 @@
     `get_nodes` used by the parser and translator in order to normalize
     python and jinja nodes.
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD, see LICENSE for more details.
 """
 import types
@@ -17,7 +17,7 @@ import operator
 
 from collections import deque
 from jinja2.utils import Markup
-from jinja2._compat import izip, with_metaclass, text_type
+from jinja2._compat import izip, with_metaclass, text_type, PY2
 
 
 #: the types we support for context functions
@@ -242,6 +242,35 @@ class Node(with_metaclass(NodeType, object)):
                       arg in self.fields)
         )
 
+    def dump(self):
+        def _dump(node):
+            if not isinstance(node, Node):
+                buf.append(repr(node))
+                return
+
+            buf.append('nodes.%s(' % node.__class__.__name__)
+            if not node.fields:
+                buf.append(')')
+                return
+            for idx, field in enumerate(node.fields):
+                if idx:
+                    buf.append(', ')
+                value = getattr(node, field)
+                if isinstance(value, list):
+                    buf.append('[')
+                    for idx, item in enumerate(value):
+                        if idx:
+                            buf.append(', ')
+                        _dump(item)
+                    buf.append(']')
+                else:
+                    _dump(value)
+            buf.append(')')
+        buf = []
+        _dump(self)
+        return ''.join(buf)
+
+
 
 class Stmt(Node):
     """Base node for all statements."""
@@ -308,6 +337,15 @@ class FilterBlock(Stmt):
     fields = ('body', 'filter')
 
 
+class With(Stmt):
+    """Specific node for with statements.  In older versions of Jinja the
+    with statement was implemented on the base of the `Scope` node instead.
+
+    .. versionadded:: 2.9.3
+    """
+    fields = ('targets', 'values', 'body')
+
+
 class Block(Stmt):
     """A node that represents a block."""
     fields = ('name', 'body', 'scoped')
@@ -441,7 +479,14 @@ class Const(Literal):
     fields = ('value',)
 
     def as_const(self, eval_ctx=None):
-        return self.value
+        rv = self.value
+        if PY2 and type(rv) is text_type and \
+           self.environment.policies['compiler.ascii_str']:
+            try:
+                rv = rv.encode('ascii')
+            except UnicodeError:
+                pass
+        return rv
 
     @classmethod
     def from_untrusted(cls, value, lineno=None, environment=None):
@@ -563,8 +608,15 @@ class Filter(Expr):
         filter_ = self.environment.filters.get(self.name)
         if filter_ is None or getattr(filter_, 'contextfilter', False):
             raise Impossible()
+
+        # We cannot constant handle async filters, so we need to make sure
+        # to not go down this path.
+        if eval_ctx.environment.is_async and \
+           getattr(filter_, 'asyncfiltervariant', False):
+            raise Impossible()
+
         obj = self.node.as_const(eval_ctx)
-        args = [x.as_const(eval_ctx) for x in self.args]
+        args = [obj] + [x.as_const(eval_ctx) for x in self.args]
         if getattr(filter_, 'evalcontextfilter', False):
             args.insert(0, eval_ctx)
         elif getattr(filter_, 'environmentfilter', False):
@@ -581,7 +633,7 @@ class Filter(Expr):
             except Exception:
                 raise Impossible()
         try:
-            return filter_(obj, *args, **kwargs)
+            return filter_(*args, **kwargs)
         except Exception:
             raise Impossible()
 
@@ -602,38 +654,6 @@ class Call(Expr):
     """
     fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
 
-    def as_const(self, eval_ctx=None):
-        eval_ctx = get_eval_context(self, eval_ctx)
-        if eval_ctx.volatile:
-            raise Impossible()
-        obj = self.node.as_const(eval_ctx)
-
-        # don't evaluate context functions
-        args = [x.as_const(eval_ctx) for x in self.args]
-        if isinstance(obj, _context_function_types):
-            if getattr(obj, 'contextfunction', False):
-                raise Impossible()
-            elif getattr(obj, 'evalcontextfunction', False):
-                args.insert(0, eval_ctx)
-            elif getattr(obj, 'environmentfunction', False):
-                args.insert(0, self.environment)
-
-        kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
-        if self.dyn_args is not None:
-            try:
-                args.extend(self.dyn_args.as_const(eval_ctx))
-            except Exception:
-                raise Impossible()
-        if self.dyn_kwargs is not None:
-            try:
-                kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
-            except Exception:
-                raise Impossible()
-        try:
-            return obj(*args, **kwargs)
-        except Exception:
-            raise Impossible()
-
 
 class Getitem(Expr):
     """Get an attribute or item from an expression and prefer the item."""
@@ -701,7 +721,7 @@ class Concat(Expr):
 
 class Compare(Expr):
     """Compares an expression with some other expressions.  `ops` must be a
-    list of :class:`Operand`\s.
+    list of :class:`Operand`\\s.
     """
     fields = ('expr', 'ops')
 
diff --git a/lib/jinja2/optimizer.py b/lib/jinja2/optimizer.py
index 00eab11..65ab3ce 100644
--- a/lib/jinja2/optimizer.py
+++ b/lib/jinja2/optimizer.py
@@ -13,7 +13,7 @@
 
     The solution would be a second syntax tree that has the scoping rules stored.
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD.
 """
 from jinja2 import nodes
@@ -32,30 +32,11 @@ class Optimizer(NodeTransformer):
     def __init__(self, environment):
         self.environment = environment
 
-    def visit_If(self, node):
-        """Eliminate dead code."""
-        # do not optimize ifs that have a block inside so that it doesn't
-        # break super().
-        if node.find(nodes.Block) is not None:
-            return self.generic_visit(node)
-        try:
-            val = self.visit(node.test).as_const()
-        except nodes.Impossible:
-            return self.generic_visit(node)
-        if val:
-            body = node.body
-        else:
-            body = node.else_
-        result = []
-        for node in body:
-            result.extend(self.visit_list(node))
-        return result
-
-    def fold(self, node):
+    def fold(self, node, eval_ctx=None):
         """Do constant folding."""
         node = self.generic_visit(node)
         try:
-            return nodes.Const.from_untrusted(node.as_const(),
+            return nodes.Const.from_untrusted(node.as_const(eval_ctx),
                                               lineno=node.lineno,
                                               environment=self.environment)
         except nodes.Impossible:
diff --git a/lib/jinja2/parser.py b/lib/jinja2/parser.py
index d24da18..0bf74c9 100644
--- a/lib/jinja2/parser.py
+++ b/lib/jinja2/parser.py
@@ -5,7 +5,7 @@
 
     Implements the template parser.
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD, see LICENSE for more details.
 """
 from jinja2 import nodes
@@ -16,9 +16,18 @@ from jinja2._compat import imap
 
 _statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
                                  'macro', 'include', 'from', 'import',
-                                 'set'])
+                                 'set', 'with', 'autoescape'])
 _compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
 
+_math_nodes = {
+    'add': nodes.Add,
+    'sub': nodes.Sub,
+    'mul': nodes.Mul,
+    'div': nodes.Div,
+    'floordiv': nodes.FloorDiv,
+    'mod': nodes.Mod,
+}
+
 
 class Parser(object):
     """This is the central parsing class Jinja2 uses.  It's passed to
@@ -215,6 +224,34 @@ class Parser(object):
             break
         return result
 
+    def parse_with(self):
+        node = nodes.With(lineno=next(self.stream).lineno)
+        targets = []
+        values = []
+        while self.stream.current.type != 'block_end':
+            lineno = self.stream.current.lineno
+            if targets:
+                self.stream.expect('comma')
+            target = self.parse_assign_target()
+            target.set_ctx('param')
+            targets.append(target)
+            self.stream.expect('assign')
+            values.append(self.parse_expression())
+        node.targets = targets
+        node.values = values
+        node.body = self.parse_statements(('name:endwith',),
+                                          drop_needle=True)
+        return node
+
+    def parse_autoescape(self):
+        node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno)
+        node.options = [
+            nodes.Keyword('autoescape', self.parse_expression())
+        ]
+        node.body = self.parse_statements(('name:endautoescape',),
+                                            drop_needle=True)
+        return nodes.Scope([node])
+
     def parse_block(self):
         node = nodes.Block(lineno=next(self.stream).lineno)
         node.name = self.stream.expect('name').value
@@ -429,19 +466,19 @@ class Parser(object):
 
     def parse_compare(self):
         lineno = self.stream.current.lineno
-        expr = self.parse_add()
+        expr = self.parse_math1()
         ops = []
         while 1:
             token_type = self.stream.current.type
             if token_type in _compare_operators:
                 next(self.stream)
-                ops.append(nodes.Operand(token_type, self.parse_add()))
+                ops.append(nodes.Operand(token_type, self.parse_math1()))
             elif self.stream.skip_if('name:in'):
-                ops.append(nodes.Operand('in', self.parse_add()))
+                ops.append(nodes.Operand('in', self.parse_math1()))
             elif (self.stream.current.test('name:not') and
                   self.stream.look().test('name:in')):
                 self.stream.skip(2)
-                ops.append(nodes.Operand('notin', self.parse_add()))
+                ops.append(nodes.Operand('notin', self.parse_math1()))
             else:
                 break
             lineno = self.stream.current.lineno
@@ -449,73 +486,35 @@ class Parser(object):
             return expr
         return nodes.Compare(expr, ops, lineno=lineno)
 
-    def parse_add(self):
-        lineno = self.stream.current.lineno
-        left = self.parse_sub()
-        while self.stream.current.type == 'add':
-            next(self.stream)
-            right = self.parse_sub()
-            left = nodes.Add(left, right, lineno=lineno)
-            lineno = self.stream.current.lineno
-        return left
-
-    def parse_sub(self):
+    def parse_math1(self):
         lineno = self.stream.current.lineno
         left = self.parse_concat()
-        while self.stream.current.type == 'sub':
+        while self.stream.current.type in ('add', 'sub'):
+            cls = _math_nodes[self.stream.current.type]
             next(self.stream)
             right = self.parse_concat()
-            left = nodes.Sub(left, right, lineno=lineno)
+            left = cls(left, right, lineno=lineno)
             lineno = self.stream.current.lineno
         return left
 
     def parse_concat(self):
         lineno = self.stream.current.lineno
-        args = [self.parse_mul()]
+        args = [self.parse_math2()]
         while self.stream.current.type == 'tilde':
             next(self.stream)
-            args.append(self.parse_mul())
+            args.append(self.parse_math2())
         if len(args) == 1:
             return args[0]
         return nodes.Concat(args, lineno=lineno)
 
-    def parse_mul(self):
-        lineno = self.stream.current.lineno
-        left = self.parse_div()
-        while self.stream.current.type == 'mul':
-            next(self.stream)
-            right = self.parse_div()
-            left = nodes.Mul(left, right, lineno=lineno)
-            lineno = self.stream.current.lineno
-        return left
-
-    def parse_div(self):
-        lineno = self.stream.current.lineno
-        left = self.parse_floordiv()
-        while self.stream.current.type == 'div':
-            next(self.stream)
-            right = self.parse_floordiv()
-            left = nodes.Div(left, right, lineno=lineno)
-            lineno = self.stream.current.lineno
-        return left
-
-    def parse_floordiv(self):
-        lineno = self.stream.current.lineno
-        left = self.parse_mod()
-        while self.stream.current.type == 'floordiv':
-            next(self.stream)
-            right = self.parse_mod()
-            left = nodes.FloorDiv(left, right, lineno=lineno)
-            lineno = self.stream.current.lineno
-        return left
-
-    def parse_mod(self):
+    def parse_math2(self):
         lineno = self.stream.current.lineno
         left = self.parse_pow()
-        while self.stream.current.type == 'mod':
+        while self.stream.current.type in ('mul', 'div', 'floordiv', 'mod'):
+            cls = _math_nodes[self.stream.current.type]
             next(self.stream)
             right = self.parse_pow()
-            left = nodes.Mod(left, right, lineno=lineno)
+            left = cls(left, right, lineno=lineno)
             lineno = self.stream.current.lineno
         return left
 
@@ -835,7 +834,7 @@ class Parser(object):
                                            'name:and')):
             if self.stream.current.test('name:is'):
                 self.fail('You cannot chain multiple tests with is')
-            args = [self.parse_expression()]
+            args = [self.parse_primary()]
         else:
             args = []
         node = nodes.Test(node, name, args, kwargs, dyn_args,
diff --git a/lib/jinja2/runtime.py b/lib/jinja2/runtime.py
index 685a12d..00d5f03 100644
--- a/lib/jinja2/runtime.py
+++ b/lib/jinja2/runtime.py
@@ -5,26 +5,29 @@
 
     Runtime helpers.
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD.
 """
 import sys
 
 from itertools import chain
+from types import MethodType
+
 from jinja2.nodes import EvalContext, _context_function_types
 from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \
-     internalcode, object_type_repr
+     internalcode, object_type_repr, evalcontextfunction
 from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \
      TemplateNotFound
 from jinja2._compat import imap, text_type, iteritems, \
-     implements_iterator, implements_to_string, string_types, PY2
+     implements_iterator, implements_to_string, string_types, PY2, \
+     with_metaclass
 
 
 # these variables are exported to the template runtime
 __all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup',
            'TemplateRuntimeError', 'missing', 'concat', 'escape',
            'markup_join', 'unicode_join', 'to_string', 'identity',
-           'TemplateNotFound', 'make_logging_undefined']
+           'TemplateNotFound']
 
 #: the name of the function that is used to convert something into
 #: a string.  We can just use the text type here.
@@ -67,8 +70,8 @@ def new_context(environment, template_name, blocks, vars=None,
         if shared:
             parent = dict(parent)
         for key, value in iteritems(locals):
-            if key[:2] == 'l_' and value is not missing:
-                parent[key[2:]] = value
+            if value is not missing:
+                parent[key] = value
     return environment.context_class(environment, parent, template_name,
                                      blocks)
 
@@ -90,7 +93,43 @@ class TemplateReference(object):
         )
 
 
-class Context(object):
+def _get_func(x):
+    return getattr(x, '__func__', x)
+
+
+class ContextMeta(type):
+
+    def __new__(cls, name, bases, d):
+        rv = type.__new__(cls, name, bases, d)
+        if bases == ():
+            return rv
+
+        resolve = _get_func(rv.resolve)
+        default_resolve = _get_func(Context.resolve)
+        resolve_or_missing = _get_func(rv.resolve_or_missing)
+        default_resolve_or_missing = _get_func(Context.resolve_or_missing)
+
+        # If we have a changed resolve but no changed default or missing
+        # resolve we invert the call logic.
+        if resolve is not default_resolve and \
+           resolve_or_missing is default_resolve_or_missing:
+            rv._legacy_resolve_mode = True
+        elif resolve is default_resolve and \
+             resolve_or_missing is default_resolve_or_missing:
+            rv._fast_resolve_mode = True
+
+        return rv
+
+
+def resolve_or_missing(context, key, missing=missing):
+    if key in context.vars:
+        return context.vars[key]
+    if key in context.parent:
+        return context.parent[key]
+    return missing
+
+
+class Context(with_metaclass(ContextMeta)):
     """The template context holds the variables of a template.  It stores the
     values passed to the template and also the names the template exports.
     Creating instances is neither supported nor useful as it's created
@@ -100,7 +139,7 @@ class Context(object):
     The context is immutable.  Modifications on :attr:`parent` **must not**
     happen and modifications on :attr:`vars` are allowed from generated
     template code only.  Template filters and global functions marked as
-    :func:`contextfunction`\s get the active context passed as first argument
+    :func:`contextfunction`\\s get the active context passed as first argument
     and are allowed to access the context read-only.
 
     The template context supports read only dict operations (`get`,
@@ -109,8 +148,10 @@ class Context(object):
     method that doesn't fail with a `KeyError` but returns an
     :class:`Undefined` object for missing variables.
     """
-    __slots__ = ('parent', 'vars', 'environment', 'eval_ctx', 'exported_vars',
-                 'name', 'blocks', '__weakref__')
+    # XXX: we want to eventually make this be a deprecation warning and
+    # remove it.
+    _legacy_resolve_mode = False
+    _fast_resolve_mode = False
 
     def __init__(self, environment, parent, name, blocks):
         self.parent = parent
@@ -125,6 +166,11 @@ class Context(object):
         # from the template.
         self.blocks = dict((k, [v]) for k, v in iteritems(blocks))
 
+        # In case we detect the fast resolve mode we can set up an alias
+        # here that bypasses the legacy code logic.
+        if self._fast_resolve_mode:
+            self.resolve_or_missing = MethodType(resolve_or_missing, self)
+
     def super(self, name, current):
         """Render a parent block."""
         try:
@@ -150,20 +196,38 @@ class Context(object):
         """Looks up a variable like `__getitem__` or `get` but returns an
         :class:`Undefined` object with the name of the name looked up.
         """
-        if key in self.vars:
-            return self.vars[key]
-        if key in self.parent:
-            return self.parent[key]
-        return self.environment.undefined(name=key)
+        if self._legacy_resolve_mode:
+            rv = resolve_or_missing(self, key)
+        else:
+            rv = self.resolve_or_missing(key)
+        if rv is missing:
+            return self.environment.undefined(name=key)
+        return rv
+
+    def resolve_or_missing(self, key):
+        """Resolves a variable like :meth:`resolve` but returns the
+        special `missing` value if it cannot be found.
+        """
+        if self._legacy_resolve_mode:
+            rv = self.resolve(key)
+            if isinstance(rv, Undefined):
+                rv = missing
+            return rv
+        return resolve_or_missing(self, key)
 
     def get_exported(self):
         """Get a new dict with the exported variables."""
         return dict((k, self.vars[k]) for k in self.exported_vars)
 
     def get_all(self):
-        """Return a copy of the complete context as dict including the
-        exported variables.
+        """Return the complete context as dict including the exported
+        variables.  For optimizations reasons this might not return an
+        actual copy so be careful with using it.
         """
+        if not self.vars:
+            return self.parent
+        if not self.parent:
+            return self.vars
         return dict(self.parent, **self.vars)
 
     @internalcode
@@ -200,10 +264,12 @@ class Context(object):
                                                 'StopIteration exception')
 
     def derived(self, locals=None):
-        """Internal helper function to create a derived context."""
+        """Internal helper function to create a derived context.  This is
+        used in situations where the system needs a new context in the same
+        template that is independent.
+        """
         context = new_context(self.environment, self.name, {},
-                              self.parent, True, None, locals)
-        context.vars.update(self.vars)
+                              self.get_all(), True, None, locals)
         context.eval_ctx = self.eval_ctx
         context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
         return context
@@ -232,8 +298,8 @@ class Context(object):
         """Lookup a variable or raise `KeyError` if the variable is
         undefined.
         """
-        item = self.resolve(key)
-        if isinstance(item, Undefined):
+        item = self.resolve_or_missing(key)
+        if item is missing:
             raise KeyError(key)
         return item
 
@@ -280,25 +346,17 @@ class BlockReference(object):
         return rv
 
 
-class LoopContext(object):
+class LoopContextBase(object):
     """A loop context for dynamic iteration."""
 
-    def __init__(self, iterable, recurse=None, depth0=0):
-        self._iterator = iter(iterable)
+    _after = _last_iteration
+    _length = None
+
+    def __init__(self, recurse=None, depth0=0):
         self._recurse = recurse
-        self._after = self._safe_next()
         self.index0 = -1
         self.depth0 = depth0
 
-        # try to get the length of the iterable early.  This must be done
-        # here because there are some broken iterators around where there
-        # __len__ is the number of iterations left (i'm looking at your
-        # listreverseiterator!).
-        try:
-            self._length = len(iterable)
-        except (TypeError, AttributeError):
-            self._length = None
-
     def cycle(self, *args):
         """Cycles among the arguments with the current loop index."""
         if not args:
@@ -315,15 +373,6 @@ class LoopContext(object):
     def __len__(self):
         return self.length
 
-    def __iter__(self):
-        return LoopContextIterator(self)
-
-    def _safe_next(self):
-        try:
-            return next(self._iterator)
-        except StopIteration:
-            return _last_iteration
-
     @internalcode
     def loop(self, iterable):
         if self._recurse is None:
@@ -336,6 +385,30 @@ class LoopContext(object):
     __call__ = loop
     del loop
 
+    def __repr__(self):
+        return '<%s %r/%r>' % (
+            self.__class__.__name__,
+            self.index,
+            self.length
+        )
+
+
+class LoopContext(LoopContextBase):
+
+    def __init__(self, iterable, recurse=None, depth0=0):
+        LoopContextBase.__init__(self, recurse, depth0)
+        self._iterator = iter(iterable)
+
+        # try to get the length of the iterable early.  This must be done
+        # here because there are some broken iterators around where there
+        # __len__ is the number of iterations left (i'm looking at your
+        # listreverseiterator!).
+        try:
+            self._length = len(iterable)
+        except (TypeError, AttributeError):
+            self._length = None
+        self._after = self._safe_next()
+
     @property
     def length(self):
         if self._length is None:
@@ -349,12 +422,14 @@ class LoopContext(object):
             self._length = len(iterable) + iterations_done
         return self._length
 
-    def __repr__(self):
-        return '<%s %r/%r>' % (
-            self.__class__.__name__,
-            self.index,
-            self.length
-        )
+    def __iter__(self):
+        return LoopContextIterator(self)
+
+    def _safe_next(self):
+        try:
+            return next(self._iterator)
+        except StopIteration:
+            return _last_iteration
 
 
 @implements_iterator
@@ -381,24 +456,56 @@ class LoopContextIterator(object):
 class Macro(object):
     """Wraps a macro function."""
 
-    def __init__(self, environment, func, name, arguments, defaults,
-                 catch_kwargs, catch_varargs, caller):
+    def __init__(self, environment, func, name, arguments,
+                 catch_kwargs, catch_varargs, caller,
+                 default_autoescape=None):
         self._environment = environment
         self._func = func
         self._argument_count = len(arguments)
         self.name = name
         self.arguments = arguments
-        self.defaults = defaults
         self.catch_kwargs = catch_kwargs
         self.catch_varargs = catch_varargs
         self.caller = caller
+        self.explicit_caller = 'caller' in arguments
+        if default_autoescape is None:
+            default_autoescape = environment.autoescape
+        self._default_autoescape = default_autoescape
 
     @internalcode
+    @evalcontextfunction
     def __call__(self, *args, **kwargs):
+        # This requires a bit of explanation,  In the past we used to
+        # decide largely based on compile-time information if a macro is
+        # safe or unsafe.  While there was a volatile mode it was largely
+        # unused for deciding on escaping.  This turns out to be
+        # problemtic for macros because if a macro is safe or not not so
+        # much depends on the escape mode when it was defined but when it
+        # was used.
+        #
+        # Because however we export macros from the module system and
+        # there are historic callers that do not pass an eval context (and
+        # will continue to not pass one), we need to perform an instance
+        # check here.
+        #
+        # This is considered safe because an eval context is not a valid
+        # argument to callables otherwise anwyays.  Worst case here is
+        # that if no eval context is passed we fall back to the compile
+        # time autoescape flag.
+        if args and isinstance(args[0], EvalContext):
+            autoescape = args[0].autoescape
+            args = args[1:]
+        else:
+            autoescape = self._default_autoescape
+
         # try to consume the positional arguments
         arguments = list(args[:self._argument_count])
         off = len(arguments)
 
+        # For information why this is necessary refer to the handling
+        # of caller in the `macro_body` handler in the compiler.
+        found_caller = False
+
         # if the number of arguments consumed is not the number of
         # arguments expected we start filling in keyword arguments
         # and defaults.
@@ -407,25 +514,30 @@ class Macro(object):
                 try:
                     value = kwargs.pop(name)
                 except KeyError:
-                    try:
-                        value = self.defaults[idx - self._argument_count + off]
-                    except IndexError:
-                        value = self._environment.undefined(
-                            'parameter %r was not provided' % name, name=name)
+                    value = missing
+                if name == 'caller':
+                    found_caller = True
                 arguments.append(value)
+        else:
+            found_caller = self.explicit_caller
 
         # it's important that the order of these arguments does not change
         # if not also changed in the compiler's `function_scoping` method.
         # the order is caller, keyword arguments, positional arguments!
-        if self.caller:
+        if self.caller and not found_caller:
             caller = kwargs.pop('caller', None)
             if caller is None:
                 caller = self._environment.undefined('No caller defined',
                                                      name='caller')
             arguments.append(caller)
+
         if self.catch_kwargs:
             arguments.append(kwargs)
         elif kwargs:
+            if 'caller' in kwargs:
+                raise TypeError('macro %r was invoked with two values for '
+                                'the special caller argument.  This is '
+                                'most likely a bug.' % self.name)
             raise TypeError('macro %r takes no keyword argument %r' %
                             (self.name, next(iter(kwargs))))
         if self.catch_varargs:
@@ -433,7 +545,15 @@ class Macro(object):
         elif len(args) > self._argument_count:
             raise TypeError('macro %r takes not more than %d argument(s)' %
                             (self.name, len(self.arguments)))
-        return self._func(*arguments)
+
+        return self._invoke(arguments, autoescape)
+
+    def _invoke(self, arguments, autoescape):
+        """This method is being swapped out by the async implementation."""
+        rv = self._func(*arguments)
+        if autoescape:
+            rv = Markup(rv)
+        return rv
 
     def __repr__(self):
         return '<%s %s>' % (
@@ -498,8 +618,8 @@ class Undefined(object):
         __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
         __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
         __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
-        __float__ = __complex__ = __pow__ = __rpow__ = \
-        _fail_with_undefined_error
+        __float__ = __complex__ = __pow__ = __rpow__ = __sub__ = \
+        __rsub__ = _fail_with_undefined_error
 
     def __eq__(self, other):
         return type(self) is type(other)
diff --git a/lib/jinja2/sandbox.py b/lib/jinja2/sandbox.py
index 7e40ab3..32e2435 100644
--- a/lib/jinja2/sandbox.py
+++ b/lib/jinja2/sandbox.py
@@ -9,14 +9,19 @@
 
     The behavior can be changed by subclassing the environment.
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD.
 """
 import types
 import operator
+from collections import Mapping
 from jinja2.environment import Environment
 from jinja2.exceptions import SecurityError
 from jinja2._compat import string_types, PY2
+from jinja2.utils import Markup
+
+from markupsafe import EscapeFormatter
+from string import Formatter
 
 
 #: maximum number of items a range may produce
@@ -38,6 +43,12 @@ UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
 #: unsafe generator attirbutes.
 UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])
 
+#: unsafe attributes on coroutines
+UNSAFE_COROUTINE_ATTRIBUTES = set(['cr_frame', 'cr_code'])
+
+#: unsafe attributes on async generators
+UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = set(['ag_code', 'ag_frame'])
+
 import warnings
 
 # make sure we don't warn in python 2.6 about stuff we don't care about
@@ -68,13 +79,11 @@ except ImportError:
     pass
 
 #: register Python 2.6 abstract base classes
-try:
-    from collections import MutableSet, MutableMapping, MutableSequence
-    _mutable_set_types += (MutableSet,)
-    _mutable_mapping_types += (MutableMapping,)
-    _mutable_sequence_types += (MutableSequence,)
-except ImportError:
-    pass
+from collections import MutableSet, MutableMapping, MutableSequence
+_mutable_set_types += (MutableSet,)
+_mutable_mapping_types += (MutableMapping,)
+_mutable_sequence_types += (MutableSequence,)
+
 
 _mutable_spec = (
     (_mutable_set_types, frozenset([
@@ -94,6 +103,47 @@ _mutable_spec = (
 )
 
 
+class _MagicFormatMapping(Mapping):
+    """This class implements a dummy wrapper to fix a bug in the Python
+    standard library for string formatting.
+
+    See http://bugs.python.org/issue13598 for information about why
+    this is necessary.
+    """
+
+    def __init__(self, args, kwargs):
+        self._args = args
+        self._kwargs = kwargs
+        self._last_index = 0
+
+    def __getitem__(self, key):
+        if key == '':
+            idx = self._last_index
+            self._last_index += 1
+            try:
+                return self._args[idx]
+            except LookupError:
+                pass
+            key = str(idx)
+        return self._kwargs[key]
+
+    def __iter__(self):
+        return iter(self._kwargs)
+
+    def __len__(self):
+        return len(self._kwargs)
+
+
+def inspect_format_method(callable):
+    if not isinstance(callable, (types.MethodType,
+                                 types.BuiltinMethodType)) or \
+       callable.__name__ != 'format':
+        return None
+    obj = callable.__self__
+    if isinstance(obj, string_types):
+        return obj
+
+
 def safe_range(*args):
     """A range that can't generate ranges with a length of more than
     MAX_RANGE items.
@@ -145,6 +195,12 @@ def is_internal_attribute(obj, attr):
     elif isinstance(obj, types.GeneratorType):
         if attr in UNSAFE_GENERATOR_ATTRIBUTES:
             return True
+    elif hasattr(types, 'CoroutineType') and isinstance(obj, types.CoroutineType):
+        if attr in UNSAFE_COROUTINE_ATTRIBUTES:
+            return True
+    elif hasattr(types, 'AsyncGeneratorType') and isinstance(obj, types.AsyncGeneratorType):
+        if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
+            return True
     return attr.startswith('__')
 
 
@@ -183,8 +239,8 @@ class SandboxedEnvironment(Environment):
     attributes or functions are safe to access.
 
     If the template tries to access insecure code a :exc:`SecurityError` is
-    raised.  However also other exceptions may occour during the rendering so
-    the caller has to ensure that all exceptions are catched.
+    raised.  However also other exceptions may occur during the rendering so
+    the caller has to ensure that all exceptions are caught.
     """
     sandboxed = True
 
@@ -346,8 +402,24 @@ class SandboxedEnvironment(Environment):
             obj.__class__.__name__
         ), name=attribute, obj=obj, exc=SecurityError)
 
+    def format_string(self, s, args, kwargs):
+        """If a format call is detected, then this is routed through this
+        method so that our safety sandbox can be used for it.
+        """
+        if isinstance(s, Markup):
+            formatter = SandboxedEscapeFormatter(self, s.escape)
+        else:
+            formatter = SandboxedFormatter(self)
+        kwargs = _MagicFormatMapping(args, kwargs)
+        rv = formatter.vformat(s, args, kwargs)
+        return type(s)(rv)
+
     def call(__self, __context, __obj, *args, **kwargs):
         """Call an object from sandboxed code."""
+        fmt = inspect_format_method(__obj)
+        if fmt is not None:
+            return __self.format_string(fmt, args, kwargs)
+
         # the double prefixes are to avoid double keyword argument
         # errors when proxying the call.
         if not __self.is_safe_callable(__obj):
@@ -365,3 +437,39 @@ class ImmutableSandboxedEnvironment(SandboxedEnvironment):
         if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
             return False
         return not modifies_known_mutable(obj, attr)
+
+
+# This really is not a public API apparenlty.
+try:
+    from _string import formatter_field_name_split
+except ImportError:
+    def formatter_field_name_split(field_name):
+        return field_name._formatter_field_name_split()
+
+
+class SandboxedFormatterMixin(object):
+
+    def __init__(self, env):
+        self._env = env
+
+    def get_field(self, field_name, args, kwargs):
+        first, rest = formatter_field_name_split(field_name)
+        obj = self.get_value(first, args, kwargs)
+        for is_attr, i in rest:
+            if is_attr:
+                obj = self._env.getattr(obj, i)
+            else:
+                obj = self._env.getitem(obj, i)
+        return obj, first
+
+class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
+
+    def __init__(self, env):
+        SandboxedFormatterMixin.__init__(self, env)
+        Formatter.__init__(self)
+
+class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
+
+    def __init__(self, env, escape):
+        SandboxedFormatterMixin.__init__(self, env)
+        EscapeFormatter.__init__(self, escape)
diff --git a/lib/jinja2/tests.py b/lib/jinja2/tests.py
index bb32349..bd843b7 100644
--- a/lib/jinja2/tests.py
+++ b/lib/jinja2/tests.py
@@ -5,7 +5,7 @@
 
     Jinja test functions. Used with the "is" operator.
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD, see LICENSE for more details.
 """
 import re
@@ -152,6 +152,16 @@ def test_escaped(value):
     return hasattr(value, '__html__')
 
 
+def test_greaterthan(value, other):
+    """Check if value is greater than other."""
+    return value > other
+
+
+def test_lessthan(value, other):
+    """Check if value is less than other."""
+    return value < other
+
+
 TESTS = {
     'odd':              test_odd,
     'even':             test_even,
@@ -169,5 +179,7 @@ TESTS = {
     'callable':         test_callable,
     'sameas':           test_sameas,
     'equalto':          test_equalto,
-    'escaped':          test_escaped
+    'escaped':          test_escaped,
+    'greaterthan':      test_greaterthan,
+    'lessthan':         test_lessthan
 }
diff --git a/lib/jinja2/utils.py b/lib/jinja2/utils.py
index cdd4cd3..b96d309 100644
--- a/lib/jinja2/utils.py
+++ b/lib/jinja2/utils.py
@@ -5,10 +5,11 @@
 
     Utility functions.
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD, see LICENSE for more details.
 """
 import re
+import json
 import errno
 from collections import deque
 from threading import Lock
@@ -37,6 +38,8 @@ internal_code = set()
 
 concat = u''.join
 
+_slash_escape = '\\/' not in json.dumps('/')
+
 
 def contextfunction(f):
     """This decorator can be used to mark a function or method context callable.
@@ -109,7 +112,7 @@ def clear_caches():
     """Jinja2 keeps internal caches for environments and lexers.  These are
     used so that Jinja2 doesn't have to recreate environments and lexers all
     the time.  Normally you don't have to care about that but if you are
-    messuring memory consumption you may want to clean the caches.
+    measuring memory consumption you may want to clean the caches.
     """
     from jinja2.environment import _spontaneous_environments
     from jinja2.lexer import _lexer_cache
@@ -183,7 +186,7 @@ def pformat(obj, verbose=False):
         return pformat(obj)
 
 
-def urlize(text, trim_url_limit=None, nofollow=False, target=None):
+def urlize(text, trim_url_limit=None, rel=None, target=None):
     """Converts any URLs in text into clickable links. Works on http://,
     https:// and www. links. Links can have trailing punctuation (periods,
     commas, close-parens) and leading punctuation (opening parens) and
@@ -201,11 +204,9 @@ def urlize(text, trim_url_limit=None, nofollow=False, target=None):
                          and (x[:limit] + (len(x) >=limit and '...'
                          or '')) or x
     words = _word_split_re.split(text_type(escape(text)))
-    nofollow_attr = nofollow and ' rel="nofollow"' or ''
-    if target is not None and isinstance(target, string_types):
-        target_attr = ' target="%s"' % target
-    else:
-        target_attr = ''
+    rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or ''
+    target_attr = target and ' target="%s"' % escape(target) or ''
+
     for i, word in enumerate(words):
         match = _punctuation_re.match(word)
         if match:
@@ -221,11 +222,11 @@ def urlize(text, trim_url_limit=None, nofollow=False, target=None):
                     middle.endswith('.com')
                 )):
                 middle = '<a href="http://%s"%s%s>%s</a>' % (middle,
-                    nofollow_attr, target_attr, trim_url(middle))
+                    rel_attr, target_attr, trim_url(middle))
             if middle.startswith('http://') or \
                middle.startswith('https://'):
                 middle = '<a href="%s"%s%s>%s</a>' % (middle,
-                    nofollow_attr, target_attr, trim_url(middle))
+                    rel_attr, target_attr, trim_url(middle))
             if '@' in middle and not middle.startswith('www.') and \
                not ':' in middle and _simple_email_re.match(middle):
                 middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
@@ -295,7 +296,7 @@ def unicode_urlencode(obj, charset='utf-8', for_qs=False):
         obj = text_type(obj)
     if isinstance(obj, text_type):
         obj = obj.encode(charset)
-    safe = for_qs and b'' or b'/'
+    safe = not for_qs and b'/' or b''
     rv = text_type(url_quote(obj, safe))
     if for_qs:
         rv = rv.replace('%20', '+')
@@ -487,6 +488,88 @@ except ImportError:
     pass
 
 
+def select_autoescape(enabled_extensions=('html', 'htm', 'xml'),
+                      disabled_extensions=(),
+                      default_for_string=True,
+                      default=False):
+    """Intelligently sets the initial value of autoescaping based on the
+    filename of the template.  This is the recommended way to configure
+    autoescaping if you do not want to write a custom function yourself.
+
+    If you want to enable it for all templates created from strings or
+    for all templates with `.html` and `.xml` extensions::
+
+        from jinja2 import Environment, select_autoescape
+        env = Environment(autoescape=select_autoescape(
+            enabled_extensions=('html', 'xml'),
+            default_for_string=True,
+        ))
+
+    Example configuration to turn it on at all times except if the template
+    ends with `.txt`::
+
+        from jinja2 import Environment, select_autoescape
+        env = Environment(autoescape=select_autoescape(
+            disabled_extensions=('txt',),
+            default_for_string=True,
+            default=True,
+        ))
+
+    The `enabled_extensions` is an iterable of all the extensions that
+    autoescaping should be enabled for.  Likewise `disabled_extensions` is
+    a list of all templates it should be disabled for.  If a template is
+    loaded from a string then the default from `default_for_string` is used.
+    If nothing matches then the initial value of autoescaping is set to the
+    value of `default`.
+
+    For security reasons this function operates case insensitive.
+
+    .. versionadded:: 2.9
+    """
+    enabled_patterns = tuple('.' + x.lstrip('.').lower()
+                             for x in enabled_extensions)
+    disabled_patterns = tuple('.' + x.lstrip('.').lower()
+                              for x in disabled_extensions)
+    def autoescape(template_name):
+        if template_name is None:
+            return default_for_string
+        template_name = template_name.lower()
+        if template_name.endswith(enabled_patterns):
+            return True
+        if template_name.endswith(disabled_patterns):
+            return False
+        return default
+    return autoescape
+
+
+def htmlsafe_json_dumps(obj, dumper=None, **kwargs):
+    """Works exactly like :func:`dumps` but is safe for use in ``<script>``
+    tags.  It accepts the same arguments and returns a JSON string.  Note that
+    this is available in templates through the ``|tojson`` filter which will
+    also mark the result as safe.  Due to how this function escapes certain
+    characters this is safe even if used outside of ``<script>`` tags.
+
+    The following characters are escaped in strings:
+
+    -   ``<``
+    -   ``>``
+    -   ``&``
+    -   ``'``
+
+    This makes it safe to embed such strings in any place in HTML with the
+    notable exception of double quoted attributes.  In that case single
+    quote your attributes or HTML escape it in addition.
+    """
+    if dumper is None:
+        dumper = json.dumps
+    rv = dumper(obj, **kwargs) \
+        .replace(u'<', u'\\u003c') \
+        .replace(u'>', u'\\u003e') \
+        .replace(u'&', u'\\u0026') \
+        .replace(u"'", u'\\u0027')
+    return rv
+
+
 @implements_iterator
 class Cycler(object):
     """A cycle helper for templates."""
@@ -506,12 +589,14 @@ class Cycler(object):
         """Returns the current item."""
         return self.items[self.pos]
 
-    def __next__(self):
+    def next(self):
         """Goes one item ahead and returns it."""
         rv = self.current
         self.pos = (self.pos + 1) % len(self.items)
         return rv
 
+    __next__ = next
+
 
 class Joiner(object):
     """A joining helper for templates."""
@@ -527,5 +612,13 @@ class Joiner(object):
         return self.sep
 
 
+# does this python version support async for in and async generators?
+try:
+    exec('async def _():\n async for _ in ():\n  yield _')
+    have_async_gen = True
+except SyntaxError:
+    have_async_gen = False
+
+
 # Imported here because that's where it was in the past
 from markupsafe import Markup, escape, soft_unicode
diff --git a/lib/jinja2/visitor.py b/lib/jinja2/visitor.py
index 413e7c3..ba526df 100644
--- a/lib/jinja2/visitor.py
+++ b/lib/jinja2/visitor.py
@@ -5,7 +5,7 @@
 
     This module implements a visitor for the nodes.
 
-    :copyright: (c) 2010 by the Jinja Team.
+    :copyright: (c) 2017 by the Jinja Team.
     :license: BSD.
 """
 from jinja2.nodes import Node
diff --git a/tests/api-suite-info/03-get-graph-raw-4/suite.rc b/tests/api-suite-info/03-get-graph-raw-4/suite.rc
index 6299cff..b4a24a1 100644
--- a/tests/api-suite-info/03-get-graph-raw-4/suite.rc
+++ b/tests/api-suite-info/03-get-graph-raw-4/suite.rc
@@ -1,4 +1,5 @@
 [cylc]
+   UTC mode = True
    [[reference test]]
        required run mode = live
        live mode suite timeout = PT30S
diff --git a/lib/cylc/get_task_proxy.py b/tests/api-suite-info/04-api-suite-info-unit-tests.t
old mode 100644
new mode 100755
similarity index 64%
rename from lib/cylc/get_task_proxy.py
rename to tests/api-suite-info/04-api-suite-info-unit-tests.t
index 4edf642..b93c8eb
--- a/lib/cylc/get_task_proxy.py
+++ b/tests/api-suite-info/04-api-suite-info-unit-tests.t
@@ -1,5 +1,4 @@
-#!/usr/bin/env python
-
+#!/bin/bash
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2017 NIWA
 #
@@ -16,15 +15,10 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from cylc.task_proxy import TaskProxy
-from cylc.config import SuiteConfig, TaskNotDefinedError
+# Run suite info API unit tests.
+. $(dirname $0)/test_header
 
+set_test_number 1
 
-def get_task_proxy(name, *args, **kwargs):
-    config = SuiteConfig.get_inst()
-    """Return a task proxy for a named task."""
-    try:
-        tdef = config.taskdefs[name]
-    except KeyError:
-        raise TaskNotDefinedError(name)
-    return TaskProxy(tdef, *args, **kwargs)
+TEST_NAME=$TEST_NAME_BASE-unit-tests
+run_ok $TEST_NAME python $CYLC_DIR/lib/cylc/network/https/base_client.py
diff --git a/tests/authentication/00-identity.t b/tests/authentication/00-identity.t
index 81ea6e1..3339975 100644
--- a/tests/authentication/00-identity.t
+++ b/tests/authentication/00-identity.t
@@ -18,7 +18,7 @@
 # Test authentication - privilege 'identity'.
 
 . $(dirname $0)/test_header
-set_test_number 6
+set_test_number 10
 
 install_suite "${TEST_NAME_BASE}" basic
 
@@ -32,16 +32,30 @@ create_test_globalrc '' '
 cylc run "${SUITE_NAME}"
 unset CYLC_CONF_PATH
 
+SRV_D="$(cylc get-global-config --print-run-dir)/${SUITE_NAME}/.service"
+HOST="$(sed -n 's/^CYLC_SUITE_HOST=//p' "${SRV_D}/contact")"
+PORT="$(sed -n 's/^CYLC_SUITE_PORT=//p' "${SRV_D}/contact")"
+run_ok "${TEST_NAME_BASE}-curl-anon" \
+    env no_proxy=* curl -v --cacert "${SRV_D}/ssl.cert" \
+    --digest -u 'anon:the quick brown fox' \
+    "https://${HOST}:${PORT}/id/identify"
+run_ok "${TEST_NAME_BASE}-curl-anon.stdout" \
+    grep -qF "\"name\": \"${SUITE_NAME}\"" "${TEST_NAME_BASE}-curl-anon.stdout"
+run_ok "${TEST_NAME_BASE}-curl-cylc" \
+    env no_proxy=* curl -v --cacert "${SRV_D}/ssl.cert" \
+    --digest -u "cylc:$(<"${SRV_D}/passphrase")" \
+    "https://${HOST}:${PORT}/id/identify"
+run_ok "${TEST_NAME_BASE}-curl-cylc.stdout" \
+    grep -qF "\"name\": \"${SUITE_NAME}\"" "${TEST_NAME_BASE}-curl-cylc.stdout"
+
 # Wait for first task 'foo' to fail.
 cylc suite-state "${SUITE_NAME}" --task=foo --status=failed --point=1 \
     --interval=1 --max-polls=10 || exit 1
 
 # Disable the suite passphrase (to leave us with public access privilege).
-SRV_D="$(cylc get-global-config --print-run-dir)/${SUITE_NAME}/.service"
 mv "${SRV_D}/passphrase" "${SRV_D}/passphrase.DIS"
 
 # Check scan output.
-PORT=$(cylc ping -v "${SUITE_NAME}" | cut -d':' -f 2)
 cylc scan --comms-timeout=5 -fb -n "${SUITE_NAME}" 'localhost' \
     >'scan.out' 2>'/dev/null'
 cmp_ok scan.out << __END__
@@ -65,6 +79,6 @@ grep_ok "\[client-connect\] DENIED (privilege 'identity' < 'shutdown') ${USER}@.
 mv "${SRV_D}/passphrase.DIS" "${SRV_D}/passphrase"
 
 # Stop and purge the suite.
-cylc stop --max-polls=10 --interval=1 "${SUITE_NAME}"
+cylc stop --max-polls=20 --interval=1 "${SUITE_NAME}"
 purge_suite "${SUITE_NAME}"
 exit
diff --git a/tests/authentication/01-description.t b/tests/authentication/01-description.t
index 338b0d9..4194e27 100644
--- a/tests/authentication/01-description.t
+++ b/tests/authentication/01-description.t
@@ -41,7 +41,7 @@ SRV_D="$(cylc get-global-config --print-run-dir)/${SUITE_NAME}/.service"
 mv "${SRV_D}/passphrase" "${SRV_D}/passphrase.DIS"
 
 # Check scan output.
-PORT=$(cylc ping -v "${SUITE_NAME}" | cut -d':' -f 2)
+PORT="$(sed -n 's/^CYLC_SUITE_PORT=//p' "${SRV_D}/contact")"
 cylc scan --comms-timeout=5 -fb -n "${SUITE_NAME}" 'localhost' \
     >'scan.out' 2>'/dev/null'
 cmp_ok scan.out << __END__
diff --git a/tests/authentication/02-state-totals.t b/tests/authentication/02-state-totals.t
index 1418f63..ff353d1 100644
--- a/tests/authentication/02-state-totals.t
+++ b/tests/authentication/02-state-totals.t
@@ -41,7 +41,7 @@ SRV_D="$(cylc get-global-config --print-run-dir)/${SUITE_NAME}/.service"
 mv "${SRV_D}/passphrase" "${SRV_D}/passphrase.DIS"
 
 # Check scan output.
-PORT=$(cylc ping -v "${SUITE_NAME}" | cut -d':' -f 2)
+PORT="$(sed -n 's/^CYLC_SUITE_PORT=//p' "${SRV_D}/contact")"
 cylc scan --comms-timeout=5 -fb -n "${SUITE_NAME}" 'localhost' \
     >'scan.out'
 cmp_ok scan.out << __END__
diff --git a/tests/authentication/03-full-read.t b/tests/authentication/03-full-read.t
index de60695..98dfd1c 100644
--- a/tests/authentication/03-full-read.t
+++ b/tests/authentication/03-full-read.t
@@ -41,7 +41,7 @@ SRV_D="$(cylc get-global-config --print-run-dir)/${SUITE_NAME}/.service"
 mv "${SRV_D}/passphrase" "${SRV_D}/passphrase.DIS"
 
 # Check scan output.
-PORT=$(cylc ping -v "${SUITE_NAME}" | cut -d':' -f 2)
+PORT="$(sed -n 's/^CYLC_SUITE_PORT=//p' "${SRV_D}/contact")"
 cylc scan --comms-timeout=5 -fb -n "${SUITE_NAME}" 'localhost' \
     >'scan.out' 2>'/dev/null'
 cmp_ok scan.out << __END__
diff --git a/tests/authentication/04-shutdown.t b/tests/authentication/04-shutdown.t
index 770262d..1b1b0c5 100644
--- a/tests/authentication/04-shutdown.t
+++ b/tests/authentication/04-shutdown.t
@@ -40,7 +40,7 @@ cylc suite-state "${SUITE_NAME}" --task=foo --status=failed --point=1 \
 SRV_D="$(cylc get-global-config --print-run-dir)/${SUITE_NAME}/.service"
 mv "${SRV_D}/passphrase" "${SRV_D}/passphrase.DIS"
 
-PORT=$(cylc ping -v "${SUITE_NAME}" | cut -d':' -f 2)
+PORT="$(sed -n 's/^CYLC_SUITE_PORT=//p' "${SRV_D}/contact")"
 cylc scan --comms-timeout=5 -fb -n "${SUITE_NAME}" 'localhost' \
     >'scan.out' 2>'/dev/null'
 cmp_ok scan.out << __END__
diff --git a/tests/authentication/05-full-control.t b/tests/authentication/05-full-control.t
index 501bd82..555e3e7 100644
--- a/tests/authentication/05-full-control.t
+++ b/tests/authentication/05-full-control.t
@@ -38,7 +38,8 @@ cylc suite-state "${SUITE_NAME}" --task=foo --status=failed --point=1 \
     --interval=1 --max-polls=10 || exit 1
 
 # Check scan output.
-PORT=$(cylc ping -v "${SUITE_NAME}" | cut -d':' -f 2)
+SRV_D="$(cylc get-global-config --print-run-dir)/${SUITE_NAME}/.service"
+PORT="$(sed -n 's/^CYLC_SUITE_PORT=//p' "${SRV_D}/contact")"
 cylc scan --comms-timeout=5 -fb -n "${SUITE_NAME}" 'localhost' \
     >'scan.out' 2>'/dev/null'
 cmp_ok scan.out << __END__
diff --git a/tests/authentication/06-suite-override.t b/tests/authentication/06-suite-override.t
index f560562..6a498ea 100644
--- a/tests/authentication/06-suite-override.t
+++ b/tests/authentication/06-suite-override.t
@@ -41,7 +41,7 @@ cylc suite-state "${SUITE_NAME}" --task=foo --status=failed --point=1 \
 SRV_D="$(cylc get-global-config --print-run-dir)/${SUITE_NAME}/.service"
 mv "${SRV_D}/passphrase" "${SRV_D}/passphrase.DIS"
 
-PORT=$(cylc ping -v "${SUITE_NAME}" | cut -d':' -f 2)
+PORT="$(sed -n 's/^CYLC_SUITE_PORT=//p' "${SRV_D}/contact")"
 cylc scan --comms-timeout=5 -fb -n "${SUITE_NAME}" 'localhost' \
     >'scan.out' 2>'/dev/null'
 cmp_ok scan.out << __END__
diff --git a/tests/authentication/07-sha-hash.t b/tests/authentication/07-sha-hash.t
index aad9e22..47408b1 100644
--- a/tests/authentication/07-sha-hash.t
+++ b/tests/authentication/07-sha-hash.t
@@ -41,7 +41,8 @@ cylc suite-state "${SUITE_NAME}" --task=foo --status=failed --point=1 \
     --interval=1 --max-polls=10 || exit 1
 
 # Check scan output.
-PORT=$(cylc ping -v "${SUITE_NAME}" | cut -d':' -f 2)
+SRV_D="$(cylc get-global-config --print-run-dir)/${SUITE_NAME}/.service"
+PORT="$(sed -n 's/^CYLC_SUITE_PORT=//p' "${SRV_D}/contact")"
 cylc scan --comms-timeout=5 -fb -n "${SUITE_NAME}" 'localhost' \
     >'scan.out' 2>'/dev/null'
 cmp_ok scan.out << __END__
diff --git a/tests/authentication/11-suite2-stop-suite1.t b/tests/authentication/11-suite2-stop-suite1.t
index 5296283..047619d 100755
--- a/tests/authentication/11-suite2-stop-suite1.t
+++ b/tests/authentication/11-suite2-stop-suite1.t
@@ -42,7 +42,7 @@ __SUITERC__
 cylc run --no-detach "${NAME1}" 1>'1.out' 2>&1 &
 poll '!' test -e "${SUITE1_RUND}/.service/contact"
 run_ok "${TEST_NAME_BASE}" cylc run --no-detach "${NAME2}"
-cylc shutdown "${NAME1}" --max-polls=10 --interval=1 1>'/dev/null' 2>&1 || true
+cylc shutdown "${NAME1}" --max-polls=20 --interval=1 1>'/dev/null' 2>&1 || true
 purge_suite "${NAME1}"
 purge_suite "${NAME2}"
 exit
diff --git a/tests/cyclers/23-no_final_cycle_point.t b/tests/cyclers/23-no_final_cycle_point.t
index b552191..835e42e 100755
--- a/tests/cyclers/23-no_final_cycle_point.t
+++ b/tests/cyclers/23-no_final_cycle_point.t
@@ -34,8 +34,8 @@ TEST_NAME=$TEST_NAME_BASE-run
 run_fail $TEST_NAME cylc run --debug $SUITE_NAME
 grep_ok "This suite requires a final cycle point\." \
     $TEST_NAME.stderr
-contains_ok "$TEST_NAME.stderr" <<'__ERR__'
-cylc.config.SuiteConfigError: 'ERROR: Invalid/unsupported recurrence representation: R1/P0D'
-__ERR__
+grep_ok \
+    "SuiteConfigError: 'ERROR: Invalid/unsupported recurrence representation: R1/P0D'" \
+    "$TEST_NAME.stderr"
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
diff --git a/tests/cyclers/exclusions/graph.plain.ref b/tests/cyclers/exclusions/graph.plain.ref
index 45987cc..902847a 100644
--- a/tests/cyclers/exclusions/graph.plain.ref
+++ b/tests/cyclers/exclusions/graph.plain.ref
@@ -11,6 +11,13 @@ node "nip.20000101T1800Z" "nip\n20000101T1800Z" unfilled box black
 node "nip.20000102T0000Z" "nip\n20000102T0000Z" unfilled box black
 node "nip.20000102T0600Z" "nip\n20000102T0600Z" unfilled box black
 node "nip.20000102T1200Z" "nip\n20000102T1200Z" unfilled box black
+node "quux.20000101T0000Z" "quux\n20000101T0000Z" unfilled box black
+node "quux.20000101T1000Z" "quux\n20000101T1000Z" unfilled box black
+node "quux.20000101T1500Z" "quux\n20000101T1500Z" unfilled box black
+node "quux.20000101T2000Z" "quux\n20000101T2000Z" unfilled box black
+node "quux.20000102T0100Z" "quux\n20000102T0100Z" unfilled box black
+node "quux.20000102T0600Z" "quux\n20000102T0600Z" unfilled box black
+node "quux.20000102T1100Z" "quux\n20000102T1100Z" unfilled box black
 node "qux.20000101T0000Z" "qux\n20000101T0000Z" unfilled box black
 node "qux.20000102T0000Z" "qux\n20000102T0000Z" unfilled box black
 node "qux.20000102T1200Z" "qux\n20000102T1200Z" unfilled box black
diff --git a/tests/cyclers/exclusions/reference.log b/tests/cyclers/exclusions/reference.log
index f2fe90c..31ccbfa 100644
--- a/tests/cyclers/exclusions/reference.log
+++ b/tests/cyclers/exclusions/reference.log
@@ -1,17 +1,24 @@
-2016-07-04T09:37:17Z INFO - Initial point: 20000101T0000Z
-2016-07-04T09:37:17Z INFO - Final point: 20000102T1200Z
-2016-07-04T09:37:17Z INFO - [qux.20000101T0000Z] -triggered off []
-2016-07-04T09:37:17Z INFO - [nip.20000101T0000Z] -triggered off []
-2016-07-04T09:37:17Z INFO - [baz.20000101T0000Z] -triggered off []
-2016-07-04T09:37:17Z INFO - [start.20000101T0000Z] -triggered off []
-2016-07-04T09:37:19Z INFO - [baz.20000101T1200Z] -triggered off []
-2016-07-04T09:37:19Z INFO - [nip.20000101T1200Z] -triggered off []
-2016-07-04T09:37:21Z INFO - [foo.20000101T0000Z] -triggered off ['start.20000101T0000Z']
-2016-07-04T09:37:22Z INFO - [nip.20000101T1800Z] -triggered off []
-2016-07-04T09:37:24Z INFO - [nip.20000102T0000Z] -triggered off []
-2016-07-04T09:37:24Z INFO - [qux.20000102T0000Z] -triggered off []
-2016-07-04T09:37:24Z INFO - [baz.20000102T0000Z] -triggered off []
-2016-07-04T09:37:24Z INFO - [bar.20000102T0000Z] -triggered off []
-2016-07-04T09:37:26Z INFO - [nip.20000102T0600Z] -triggered off []
-2016-07-04T09:37:26Z INFO - [qux.20000102T1200Z] -triggered off []
-2016-07-04T09:37:28Z INFO - [nip.20000102T1200Z] -triggered off []
+2017-04-13T11:34:26Z INFO - Initial point: 20000101T0000Z
+2017-04-13T11:34:26Z INFO - Final point: 20000102T1200Z
+2017-04-13T11:34:26Z INFO - [quux.20000101T0000Z] -triggered off []
+2017-04-13T11:34:26Z INFO - [start.20000101T0000Z] -triggered off []
+2017-04-13T11:34:26Z INFO - [baz.20000101T0000Z] -triggered off []
+2017-04-13T11:34:26Z INFO - [qux.20000101T0000Z] -triggered off []
+2017-04-13T11:34:26Z INFO - [nip.20000101T0000Z] -triggered off []
+2017-04-13T11:34:28Z INFO - [baz.20000101T1200Z] -triggered off []
+2017-04-13T11:34:28Z INFO - [nip.20000101T1200Z] -triggered off []
+2017-04-13T11:34:28Z INFO - [quux.20000101T1000Z] -triggered off []
+2017-04-13T11:34:29Z INFO - [foo.20000101T0000Z] -triggered off ['start.20000101T0000Z']
+2017-04-13T11:34:32Z INFO - [quux.20000101T1500Z] -triggered off []
+2017-04-13T11:34:32Z INFO - [nip.20000101T1800Z] -triggered off []
+2017-04-13T11:34:34Z INFO - [quux.20000101T2000Z] -triggered off []
+2017-04-13T11:34:36Z INFO - [nip.20000102T0000Z] -triggered off []
+2017-04-13T11:34:36Z INFO - [qux.20000102T0000Z] -triggered off []
+2017-04-13T11:34:36Z INFO - [bar.20000102T0000Z] -triggered off []
+2017-04-13T11:34:36Z INFO - [baz.20000102T0000Z] -triggered off []
+2017-04-13T11:34:36Z INFO - [quux.20000102T0100Z] -triggered off []
+2017-04-13T11:34:39Z INFO - [nip.20000102T0600Z] -triggered off []
+2017-04-13T11:34:39Z INFO - [quux.20000102T0600Z] -triggered off []
+2017-04-13T11:34:40Z INFO - [qux.20000102T1200Z] -triggered off []
+2017-04-13T11:34:41Z INFO - [nip.20000102T1200Z] -triggered off []
+2017-04-13T11:34:41Z INFO - [quux.20000102T1100Z] -triggered off []
diff --git a/tests/cyclers/exclusions/suite.rc b/tests/cyclers/exclusions/suite.rc
index 8500a21..fee9c2d 100644
--- a/tests/cyclers/exclusions/suite.rc
+++ b/tests/cyclers/exclusions/suite.rc
@@ -23,6 +23,11 @@
         # Don't run at a datetime with an offset
         [[[ R/PT6H!20000101T05Z+PT1H ]]]
             graph = nip
+
+        # Don't run a multiple specific datetimes
+        [[[ PT5H!(20000101T05Z,20000102T05Z) ]]]
+            graph = quux
+
 [runtime]
     [[root]]
         script = echo success
diff --git a/tests/cyclers/integer1/graph.plain.ref b/tests/cyclers/integer1/graph.plain.ref
index 6e153bd..4cbc3f6 100644
--- a/tests/cyclers/integer1/graph.plain.ref
+++ b/tests/cyclers/integer1/graph.plain.ref
@@ -67,6 +67,8 @@ node "on_toast.7" "on_toast\n7" filled ellipse black
 node "on_toast.10" "on_toast\n10" filled ellipse black
 node "on_toast.13" "on_toast\n13" filled ellipse black
 node "on_toast.16" "on_toast\n16" filled ellipse black
+node "quux.8" "quux\n8" filled ellipse black
+node "quux.16" "quux\n16" filled ellipse black
 node "qux.7" "qux\n7" filled ellipse black
 node "qux.13" "qux\n13" filled ellipse black
 node "seq.1" "seq\n1" filled ellipse black
diff --git a/tests/cyclers/integer1/reference.log b/tests/cyclers/integer1/reference.log
index 7794a3a..53cd22a 100644
--- a/tests/cyclers/integer1/reference.log
+++ b/tests/cyclers/integer1/reference.log
@@ -1,46 +1,48 @@
-2016-07-07T11:23:11+01 INFO - Initial point: 1
-2016-07-07T11:23:11+01 INFO - Final point: 16
-2016-07-07T11:23:11+01 INFO - [woo.2] -triggered off []
-2016-07-07T11:23:11+01 INFO - [nang.1] -triggered off []
-2016-07-07T11:23:11+01 INFO - [wibble.1] -triggered off []
-2016-07-07T11:23:11+01 INFO - [nong.2] -triggered off []
-2016-07-07T11:23:11+01 INFO - [seq.1] -triggered off []
-2016-07-07T11:23:11+01 INFO - [on_toast.1] -triggered off []
-2016-07-07T11:23:11+01 INFO - [ning.4] -triggered off []
-2016-07-07T11:23:13+01 INFO - [woo.3] -triggered off []
-2016-07-07T11:23:13+01 INFO - [nong.8] -triggered off []
-2016-07-07T11:23:13+01 INFO - [ning.12] -triggered off []
-2016-07-07T11:23:16+01 INFO - [woo.5] -triggered off []
-2016-07-07T11:23:16+01 INFO - [seq.4] -triggered off ['seq.1']
-2016-07-07T11:23:18+01 INFO - [woo.6] -triggered off []
-2016-07-07T11:23:20+01 INFO - [woo.8] -triggered off []
-2016-07-07T11:23:21+01 INFO - [wobble.7] -triggered off ['wibble.1']
-2016-07-07T11:23:22+01 INFO - [woo.9] -triggered off []
-2016-07-07T11:23:25+01 INFO - [woo.11] -triggered off []
-2016-07-07T11:23:27+01 INFO - [foo.1] -triggered off ['seq.1', 'woo.2']
-2016-07-07T11:23:27+01 INFO - [woo.12] -triggered off []
-2016-07-07T11:23:32+01 INFO - [seq.7] -triggered off ['seq.4']
-2016-07-07T11:23:35+01 INFO - [on_toast.4] -triggered off ['foo.1']
-2016-07-07T11:23:40+01 INFO - [foo.4] -triggered off ['foo.1', 'seq.4', 'woo.3', 'woo.5']
-2016-07-07T11:23:40+01 INFO - [bar.1] -triggered off ['foo.1', 'woo.2']
-2016-07-07T11:23:42+01 INFO - [seq.10] -triggered off ['seq.7']
-2016-07-07T11:23:48+01 INFO - [on_toast.7] -triggered off ['foo.4']
-2016-07-07T11:23:53+01 INFO - [foo.7] -triggered off ['foo.4', 'seq.7', 'woo.6', 'woo.8']
-2016-07-07T11:23:53+01 INFO - [bar.4] -triggered off ['foo.4', 'woo.5']
-2016-07-07T11:23:53+01 INFO - [qux.7] -triggered off ['foo.4']
-2016-07-07T11:23:54+01 INFO - [woo.14] -triggered off []
-2016-07-07T11:23:54+01 INFO - [baz.16] -triggered off []
-2016-07-07T11:23:54+01 INFO - [wubble.16] -triggered off ['wobble.7']
-2016-07-07T11:23:54+01 INFO - [ning.16] -triggered off []
-2016-07-07T11:23:56+01 INFO - [woo.15] -triggered off []
-2016-07-07T11:23:57+01 INFO - [seq.13] -triggered off ['seq.10']
-2016-07-07T11:24:01+01 INFO - [on_toast.10] -triggered off ['foo.7']
-2016-07-07T11:24:03+01 INFO - [seq.16] -triggered off ['seq.13']
-2016-07-07T11:24:06+01 INFO - [foo.10] -triggered off ['foo.7', 'seq.10', 'woo.11', 'woo.9']
-2016-07-07T11:24:06+01 INFO - [bar.7] -triggered off ['foo.7', 'woo.8']
-2016-07-07T11:24:14+01 INFO - [on_toast.13] -triggered off ['foo.10']
-2016-07-07T11:24:20+01 INFO - [qux.13] -triggered off ['foo.10']
-2016-07-07T11:24:20+01 INFO - [bar.10] -triggered off ['foo.10', 'woo.11']
-2016-07-07T11:24:20+01 INFO - [foo.13] -triggered off ['foo.10', 'seq.13', 'woo.12', 'woo.14']
-2016-07-07T11:24:27+01 INFO - [on_toast.16] -triggered off ['foo.13']
-2016-07-07T11:24:32+01 INFO - [bar.13] -triggered off ['foo.13', 'woo.14']
+2017-04-13T14:14:20+01 INFO - Initial point: 1
+2017-04-13T14:14:20+01 INFO - Final point: 16
+2017-04-13T14:14:20+01 INFO - [woo.2] -triggered off []
+2017-04-13T14:14:20+01 INFO - [nang.1] -triggered off []
+2017-04-13T14:14:20+01 INFO - [wibble.1] -triggered off []
+2017-04-13T14:14:20+01 INFO - [nong.2] -triggered off []
+2017-04-13T14:14:20+01 INFO - [seq.1] -triggered off []
+2017-04-13T14:14:20+01 INFO - [on_toast.1] -triggered off []
+2017-04-13T14:14:20+01 INFO - [ning.4] -triggered off []
+2017-04-13T14:14:20+01 INFO - [quux.8] -triggered off []
+2017-04-13T14:14:22+01 INFO - [woo.3] -triggered off []
+2017-04-13T14:14:22+01 INFO - [nong.8] -triggered off []
+2017-04-13T14:14:22+01 INFO - [ning.12] -triggered off []
+2017-04-13T14:14:23+01 INFO - [foo.1] -triggered off ['seq.1', 'woo.2']
+2017-04-13T14:14:23+01 INFO - [seq.4] -triggered off ['seq.1']
+2017-04-13T14:14:23+01 INFO - [wobble.7] -triggered off ['wibble.1']
+2017-04-13T14:14:24+01 INFO - [woo.5] -triggered off []
+2017-04-13T14:14:26+01 INFO - [woo.6] -triggered off []
+2017-04-13T14:14:26+01 INFO - [seq.7] -triggered off ['seq.4']
+2017-04-13T14:14:28+01 INFO - [woo.8] -triggered off []
+2017-04-13T14:14:29+01 INFO - [seq.10] -triggered off ['seq.7']
+2017-04-13T14:14:30+01 INFO - [woo.9] -triggered off []
+2017-04-13T14:14:30+01 INFO - [on_toast.4] -triggered off ['foo.1']
+2017-04-13T14:14:32+01 INFO - [woo.11] -triggered off []
+2017-04-13T14:14:33+01 INFO - [seq.13] -triggered off ['seq.10']
+2017-04-13T14:14:35+01 INFO - [woo.12] -triggered off []
+2017-04-13T14:14:36+01 INFO - [foo.4] -triggered off ['foo.1', 'seq.4', 'woo.3', 'woo.5']
+2017-04-13T14:14:36+01 INFO - [bar.1] -triggered off ['foo.1', 'woo.2']
+2017-04-13T14:14:39+01 INFO - [quux.16] -triggered off []
+2017-04-13T14:14:39+01 INFO - [woo.14] -triggered off []
+2017-04-13T14:14:39+01 INFO - [seq.16] -triggered off ['seq.13']
+2017-04-13T14:14:39+01 INFO - [baz.16] -triggered off []
+2017-04-13T14:14:39+01 INFO - [wubble.16] -triggered off ['wobble.7']
+2017-04-13T14:14:39+01 INFO - [ning.16] -triggered off []
+2017-04-13T14:14:41+01 INFO - [woo.15] -triggered off []
+2017-04-13T14:14:43+01 INFO - [on_toast.7] -triggered off ['foo.4']
+2017-04-13T14:14:48+01 INFO - [foo.7] -triggered off ['foo.4', 'seq.7', 'woo.6', 'woo.8']
+2017-04-13T14:14:48+01 INFO - [bar.4] -triggered off ['foo.4', 'woo.5']
+2017-04-13T14:14:48+01 INFO - [qux.7] -triggered off ['foo.4']
+2017-04-13T14:14:56+01 INFO - [on_toast.10] -triggered off ['foo.7']
+2017-04-13T14:15:02+01 INFO - [foo.10] -triggered off ['foo.7', 'seq.10', 'woo.11', 'woo.9']
+2017-04-13T14:15:02+01 INFO - [bar.7] -triggered off ['foo.7', 'woo.8']
+2017-04-13T14:15:09+01 INFO - [on_toast.13] -triggered off ['foo.10']
+2017-04-13T14:15:15+01 INFO - [qux.13] -triggered off ['foo.10']
+2017-04-13T14:15:15+01 INFO - [bar.10] -triggered off ['foo.10', 'woo.11']
+2017-04-13T14:15:15+01 INFO - [foo.13] -triggered off ['foo.10', 'seq.13', 'woo.12', 'woo.14']
+2017-04-13T14:15:23+01 INFO - [on_toast.16] -triggered off ['foo.13']
+2017-04-13T14:15:29+01 INFO - [bar.13] -triggered off ['foo.13', 'woo.14']
diff --git a/tests/cyclers/integer1/suite.rc b/tests/cyclers/integer1/suite.rc
index fcb0bbe..29a2a5e 100644
--- a/tests/cyclers/integer1/suite.rc
+++ b/tests/cyclers/integer1/suite.rc
@@ -38,6 +38,8 @@
             graph = baz
         [[[ R/P4!8 ]]]
             graph = ning
+        [[[ R/P4!(4,12) ]]]    # Multiple exclusion points
+            graph = quux
         [[[ R1/^ ]]]
             graph = nang
         [[[ R/+P1/P6!14 ]]]
@@ -67,3 +69,4 @@ touch typing
         wibble = "fillcolor=violet"
         wobble = "fillcolor=darkviolet"
         wubble = "fillcolor=mediumvioletred"
+        quux = "fillcolor=yellow"
diff --git a/tests/cylc-cat-log/00-local.t b/tests/cylc-cat-log/00-local.t
index 75eaa18..da6eac9 100755
--- a/tests/cylc-cat-log/00-local.t
+++ b/tests/cylc-cat-log/00-local.t
@@ -70,7 +70,7 @@ grep_ok "CYLC_BATCH_SYS_NAME=background" $TEST_NAME.out
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-task-activity
 cylc cat-log -a $SUITE_NAME a-task.1 >$TEST_NAME.out
-grep_ok '\[job-submit ret_code\] 0' $TEST_NAME.out
+grep_ok '\[jobs-submit ret_code\] 0' $TEST_NAME.out
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-task-custom
 cylc cat-log -c 'job.custom-log' $SUITE_NAME a-task.1 >$TEST_NAME.out
diff --git a/tests/cylc-cat-log/01-remote.t b/tests/cylc-cat-log/01-remote.t
index 27bc4ed..c6d3579 100755
--- a/tests/cylc-cat-log/01-remote.t
+++ b/tests/cylc-cat-log/01-remote.t
@@ -66,7 +66,7 @@ grep_ok "CYLC_BATCH_SYS_NAME=background" $TEST_NAME.out
 # local
 TEST_NAME=$TEST_NAME_BASE-task-activity
 cylc cat-log -a $SUITE_NAME a-task.1 >$TEST_NAME.out
-grep_ok '\[job-submit ret_code\] 0' $TEST_NAME.out
+grep_ok '\[jobs-submit ret_code\] 0' $TEST_NAME.out
 #-------------------------------------------------------------------------------
 # remote
 TEST_NAME=$TEST_NAME_BASE-task-custom
diff --git a/tests/cylc-get-config/00-simple/section2.stdout b/tests/cylc-get-config/00-simple/section2.stdout
index 0a37db4..73e15f4 100644
--- a/tests/cylc-get-config/00-simple/section2.stdout
+++ b/tests/cylc-get-config/00-simple/section2.stdout
@@ -41,18 +41,14 @@
     [[[environment filter]]]
         exclude = 
         include = 
-    [[[dummy mode]]]
-        disable pre-script = True
-        disable post-script = True
-        disable retries = True
-        script = echo Dummy task; sleep $(cylc rnd 1 16)
-        disable task event hooks = True
     [[[outputs]]]
-    [[[simulation mode]]]
-        run time range = PT1S, PT16S
-        simulate failure = False
-        disable retries = True
-        disable task event hooks = True
+    [[[simulation]]]
+        fail try 1 only = True
+        fail cycle points = 
+        speedup factor = 
+        disable task event handlers = True
+        default run length = PT10S
+        time limit buffer = PT10S
     [[[suite state polling]]]
         interval = 
         host = 
@@ -119,18 +115,14 @@
     [[[environment filter]]]
         exclude = 
         include = 
-    [[[dummy mode]]]
-        disable pre-script = True
-        disable post-script = True
-        disable retries = True
-        script = echo Dummy task; sleep $(cylc rnd 1 16)
-        disable task event hooks = True
     [[[outputs]]]
-    [[[simulation mode]]]
-        run time range = PT1S, PT16S
-        simulate failure = False
-        disable retries = True
-        disable task event hooks = True
+    [[[simulation]]]
+        fail try 1 only = True
+        fail cycle points = 
+        speedup factor = 
+        disable task event handlers = True
+        default run length = PT10S
+        time limit buffer = PT10S
     [[[suite state polling]]]
         interval = 
         host = 
@@ -198,18 +190,14 @@
     [[[environment filter]]]
         exclude = 
         include = 
-    [[[dummy mode]]]
-        disable pre-script = True
-        disable post-script = True
-        disable retries = True
-        script = echo Dummy task; sleep $(cylc rnd 1 16)
-        disable task event hooks = True
     [[[outputs]]]
-    [[[simulation mode]]]
-        run time range = PT1S, PT16S
-        simulate failure = False
-        disable retries = True
-        disable task event hooks = True
+    [[[simulation]]]
+        fail try 1 only = True
+        fail cycle points = 
+        speedup factor = 
+        disable task event handlers = True
+        default run length = PT10S
+        time limit buffer = PT10S
     [[[suite state polling]]]
         interval = 
         host = 
@@ -277,18 +265,14 @@
     [[[environment filter]]]
         exclude = 
         include = 
-    [[[dummy mode]]]
-        disable pre-script = True
-        disable post-script = True
-        disable retries = True
-        script = echo Dummy task; sleep $(cylc rnd 1 16)
-        disable task event hooks = True
     [[[outputs]]]
-    [[[simulation mode]]]
-        run time range = PT1S, PT16S
-        simulate failure = False
-        disable retries = True
-        disable task event hooks = True
+    [[[simulation]]]
+        fail try 1 only = True
+        fail cycle points = 
+        speedup factor = 
+        disable task event handlers = True
+        default run length = PT10S
+        time limit buffer = PT10S
     [[[suite state polling]]]
         interval = 
         host = 
@@ -356,18 +340,14 @@
     [[[environment filter]]]
         exclude = 
         include = 
-    [[[dummy mode]]]
-        disable pre-script = True
-        disable post-script = True
-        disable retries = True
-        script = echo Dummy task; sleep $(cylc rnd 1 16)
-        disable task event hooks = True
     [[[outputs]]]
-    [[[simulation mode]]]
-        run time range = PT1S, PT16S
-        simulate failure = False
-        disable retries = True
-        disable task event hooks = True
+    [[[simulation]]]
+        fail try 1 only = True
+        fail cycle points = 
+        speedup factor = 
+        disable task event handlers = True
+        default run length = PT10S
+        time limit buffer = PT10S
     [[[suite state polling]]]
         interval = 
         host = 
@@ -435,18 +415,14 @@
     [[[environment filter]]]
         exclude = 
         include = 
-    [[[dummy mode]]]
-        disable pre-script = True
-        disable post-script = True
-        disable retries = True
-        script = echo Dummy task; sleep $(cylc rnd 1 16)
-        disable task event hooks = True
     [[[outputs]]]
-    [[[simulation mode]]]
-        run time range = PT1S, PT16S
-        simulate failure = False
-        disable retries = True
-        disable task event hooks = True
+    [[[simulation]]]
+        fail try 1 only = True
+        fail cycle points = 
+        speedup factor = 
+        disable task event handlers = True
+        default run length = PT10S
+        time limit buffer = PT10S
     [[[suite state polling]]]
         interval = 
         host = 
@@ -514,18 +490,14 @@
     [[[environment filter]]]
         exclude = 
         include = 
-    [[[dummy mode]]]
-        disable pre-script = True
-        disable post-script = True
-        disable retries = True
-        script = echo Dummy task; sleep $(cylc rnd 1 16)
-        disable task event hooks = True
     [[[outputs]]]
-    [[[simulation mode]]]
-        run time range = PT1S, PT16S
-        simulate failure = False
-        disable retries = True
-        disable task event hooks = True
+    [[[simulation]]]
+        fail try 1 only = True
+        fail cycle points = 
+        speedup factor = 
+        disable task event handlers = True
+        default run length = PT10S
+        time limit buffer = PT10S
     [[[suite state polling]]]
         interval = 
         host = 
@@ -592,18 +564,14 @@
     [[[environment filter]]]
         exclude = 
         include = 
-    [[[dummy mode]]]
-        disable pre-script = True
-        disable post-script = True
-        disable retries = True
-        script = echo Dummy task; sleep $(cylc rnd 1 16)
-        disable task event hooks = True
     [[[outputs]]]
-    [[[simulation mode]]]
-        run time range = PT1S, PT16S
-        simulate failure = False
-        disable retries = True
-        disable task event hooks = True
+    [[[simulation]]]
+        fail try 1 only = True
+        fail cycle points = 
+        speedup factor = 
+        disable task event handlers = True
+        default run length = PT10S
+        time limit buffer = PT10S
     [[[suite state polling]]]
         interval = 
         host = 
@@ -671,18 +639,14 @@
     [[[environment filter]]]
         exclude = 
         include = 
-    [[[dummy mode]]]
-        disable pre-script = True
-        disable post-script = True
-        disable retries = True
-        script = echo Dummy task; sleep $(cylc rnd 1 16)
-        disable task event hooks = True
     [[[outputs]]]
-    [[[simulation mode]]]
-        run time range = PT1S, PT16S
-        simulate failure = False
-        disable retries = True
-        disable task event hooks = True
+    [[[simulation]]]
+        fail try 1 only = True
+        fail cycle points = 
+        speedup factor = 
+        disable task event handlers = True
+        default run length = PT10S
+        time limit buffer = PT10S
     [[[suite state polling]]]
         interval = 
         host = 
@@ -708,10 +672,10 @@
         execution polling intervals = 
         execution time limit = 
 [[SERIAL]]
-    script = echo Dummy task; sleep $(cylc rnd 1 16)
     enable resurrection = False
     env-script = 
     err-script = 
+    script = 
     title = 
     URL = 
     extra log files = 
@@ -750,18 +714,14 @@
     [[[environment filter]]]
         exclude = 
         include = 
-    [[[dummy mode]]]
-        disable pre-script = True
-        disable post-script = True
-        disable retries = True
-        script = echo Dummy task; sleep $(cylc rnd 1 16)
-        disable task event hooks = True
     [[[outputs]]]
-    [[[simulation mode]]]
-        run time range = PT1S, PT16S
-        simulate failure = False
-        disable retries = True
-        disable task event hooks = True
+    [[[simulation]]]
+        fail try 1 only = True
+        fail cycle points = 
+        speedup factor = 
+        disable task event handlers = True
+        default run length = PT10S
+        time limit buffer = PT10S
     [[[suite state polling]]]
         interval = 
         host = 
@@ -787,10 +747,10 @@
         execution polling intervals = 
         execution time limit = 
 [[root]]
-    script = echo Dummy task; sleep $(cylc rnd 1 16)
     enable resurrection = False
     env-script = 
     err-script = 
+    script = 
     title = 
     URL = 
     extra log files = 
@@ -828,18 +788,14 @@
     [[[environment filter]]]
         exclude = 
         include = 
-    [[[dummy mode]]]
-        disable pre-script = True
-        disable post-script = True
-        disable retries = True
-        script = echo Dummy task; sleep $(cylc rnd 1 16)
-        disable task event hooks = True
     [[[outputs]]]
-    [[[simulation mode]]]
-        run time range = PT1S, PT16S
-        simulate failure = False
-        disable retries = True
-        disable task event hooks = True
+    [[[simulation]]]
+        fail try 1 only = True
+        fail cycle points = 
+        speedup factor = 
+        disable task event handlers = True
+        default run length = PT10S
+        time limit buffer = PT10S
     [[[suite state polling]]]
         interval = 
         host = 
@@ -865,10 +821,10 @@
         execution polling intervals = 
         execution time limit = 
 [[PARALLEL]]
-    script = echo Dummy task; sleep $(cylc rnd 1 16)
     enable resurrection = False
     env-script = 
     err-script = 
+    script = 
     title = 
     URL = 
     extra log files = 
@@ -907,18 +863,14 @@
     [[[environment filter]]]
         exclude = 
         include = 
-    [[[dummy mode]]]
-        disable pre-script = True
-        disable post-script = True
-        disable retries = True
-        script = echo Dummy task; sleep $(cylc rnd 1 16)
-        disable task event hooks = True
     [[[outputs]]]
-    [[[simulation mode]]]
-        run time range = PT1S, PT16S
-        simulate failure = False
-        disable retries = True
-        disable task event hooks = True
+    [[[simulation]]]
+        fail try 1 only = True
+        fail cycle points = 
+        speedup factor = 
+        disable task event handlers = True
+        default run length = PT10S
+        time limit buffer = PT10S
     [[[suite state polling]]]
         interval = 
         host = 
@@ -986,18 +938,14 @@
     [[[environment filter]]]
         exclude = 
         include = 
-    [[[dummy mode]]]
-        disable pre-script = True
-        disable post-script = True
-        disable retries = True
-        script = echo Dummy task; sleep $(cylc rnd 1 16)
-        disable task event hooks = True
     [[[outputs]]]
-    [[[simulation mode]]]
-        run time range = PT1S, PT16S
-        simulate failure = False
-        disable retries = True
-        disable task event hooks = True
+    [[[simulation]]]
+        fail try 1 only = True
+        fail cycle points = 
+        speedup factor = 
+        disable task event handlers = True
+        default run length = PT10S
+        time limit buffer = PT10S
     [[[suite state polling]]]
         interval = 
         host = 
diff --git a/tests/cylc-get-config/04-dummy-mode-output.t b/tests/cylc-get-config/04-dummy-mode-output.t
index 23e3e44..00b1cd3 100755
--- a/tests/cylc-get-config/04-dummy-mode-output.t
+++ b/tests/cylc-get-config/04-dummy-mode-output.t
@@ -15,23 +15,28 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test for https://github.com/cylc/cylc/issues/2064
+# Test for completion of custom outputs in dummy and sim modes.
+# And no duplication dummy outputs (GitHub #2064)
 . "$(dirname "$0")/test_header"
 
-set_test_number 4
-
-init_suite "${TEST_NAME_BASE}" "${TEST_SOURCE_DIR}/${TEST_NAME_BASE}/suite.rc"
-
-run_ok "${TEST_NAME_BASE}-bar" \
-    cylc get-config "${SUITE_NAME}" -i '[runtime][bar][dummy mode]script'
-cmp_ok "${TEST_NAME_BASE}-bar.stdout" <<'__OUT__'
-echo Dummy task; sleep $(cylc rnd 1 16)
-sleep 2; cylc message 'greet'
-__OUT__
-run_ok "${TEST_NAME_BASE}-foo" \
-    cylc get-config "${SUITE_NAME}" -i '[runtime][foo][dummy mode]script'
-cmp_ok "${TEST_NAME_BASE}-foo.stdout" <<'__OUT__'
-echo Dummy task; sleep $(cylc rnd 1 16)
-__OUT__
+set_test_number 6
+
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
+
+run_ok "${TEST_NAME_BASE}-validate" \
+    cylc validate --debug ${SUITE_NAME}
+
+suite_run_fail "${TEST_NAME_BASE}-run-live" \
+    cylc run --reference-test --debug ${SUITE_NAME}
+
+suite_run_ok "${TEST_NAME_BASE}-run-simulation" \
+    cylc run -m 'dummy' --reference-test --debug ${SUITE_NAME}
+
+suite_run_ok "${TEST_NAME_BASE}-run-dummy" \
+    cylc run -m 'simulation' --reference-test --debug ${SUITE_NAME}
+
+LOG=$(cylc log --location $SUITE_NAME)
+count_ok '> meet' ${LOG} 1
+count_ok '> greet' ${LOG} 1
+
 purge_suite "${SUITE_NAME}"
-exit
diff --git a/tests/cylc-get-config/04-dummy-mode-output/reference.log b/tests/cylc-get-config/04-dummy-mode-output/reference.log
new file mode 100644
index 0000000..0bcb54e
--- /dev/null
+++ b/tests/cylc-get-config/04-dummy-mode-output/reference.log
@@ -0,0 +1,4 @@
+2017-03-30T00:34:04Z INFO - Initial point: 20000101T0000Z
+2017-03-30T00:34:04Z INFO - Final point: 20000101T0000Z
+2017-03-30T00:34:04Z INFO - [foo.20000101T0000Z] -triggered off []
+2017-03-30T00:34:04Z INFO - [bar.20000101T0000Z] -triggered off []
diff --git a/tests/cylc-get-config/04-dummy-mode-output/suite.rc b/tests/cylc-get-config/04-dummy-mode-output/suite.rc
index 8ad7b14..6df87f2 100644
--- a/tests/cylc-get-config/04-dummy-mode-output/suite.rc
+++ b/tests/cylc-get-config/04-dummy-mode-output/suite.rc
@@ -1,16 +1,27 @@
-#!Jinja2
+# A suite that will fail in live mode but pass in dummy and sim modes due to
+# automatic completion of custom outputs.
+
 [cylc]
-   UTC mode = True
+    UTC mode = True
+    [[events]]
+        abort on stalled = True
 [scheduling]
     initial cycle point = 2000
-    initial cycle point = 2000
+    final cycle point = 2000
     [[dependencies]]
         [[[P1Y]]]
-            graph = foo => bar
+            graph = "foo:meet & bar:greet => baz"
 [runtime]
+    [[root]]
+        script = true
+        [[[simulation]]]
+            default run length = PT0S
+    [[foo]]
+        script = true
+        [[[outputs]]]
+            meet = meet
     [[bar]]
         script = true
         [[[outputs]]]
             greet = greet
-    [[foo]]
-        script = true
+    [[baz]]
diff --git a/tests/validate/62-null-task-name.t b/tests/cylc-get-site-config/03-host-bool-override.t
old mode 100755
new mode 100644
similarity index 65%
copy from tests/validate/62-null-task-name.t
copy to tests/cylc-get-site-config/03-host-bool-override.t
index 7bad8c9..107f133
--- a/tests/validate/62-null-task-name.t
+++ b/tests/cylc-get-site-config/03-host-bool-override.t
@@ -15,20 +15,20 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-# Fail on null task name!
+# Test site-config host item boolean override (GitHub #2282).
 
 . "$(dirname "$0")/test_header"
+set_test_number 3
 
-set_test_number 8
-
-for GRAPH in 't1 => & t2' 't1 => t2 &' '& t1 => t2' 't1 & => t2'; do
-    cat >'suite.rc' <<__SUITE_RC__
-[scheduling]
-    [[dependencies]]
-        graph = ${GRAPH}
-__SUITE_RC__
-    run_fail "${TEST_NAME_BASE}" cylc validate 'suite.rc'
-    grep_ok 'ERROR, null task name in graph: ' "${TEST_NAME_BASE}.stderr"
-done
+create_test_globalrc '' '
+[hosts]
+    [[localhost]]
+        use login shell = True
+    [[mytesthost]]
+        use login shell = False'
 
+run_ok "${TEST_NAME_BASE}" \
+    cylc get-global-config --item='[hosts][mytesthost]use login shell'
+cmp_ok "${TEST_NAME_BASE}.stdout" <<<'False'
+cmp_ok "${TEST_NAME_BASE}.stderr" <'/dev/null'
 exit
diff --git a/tests/cylc-poll/03-poll-all/suite.rc b/tests/cylc-poll/03-poll-all/suite.rc
index 063bf32..9df3e7e 100644
--- a/tests/cylc-poll/03-poll-all/suite.rc
+++ b/tests/cylc-poll/03-poll-all/suite.rc
@@ -9,6 +9,9 @@ trigger, and the suite to shut down successfully."""
 
 [cylc]
    UTC mode = True
+   [[events]]
+       abort on inactivity = True
+       inactivity = PT2M
    [[reference test]]
        required run mode = live
        live mode suite timeout = PT2M # minutes
@@ -16,7 +19,7 @@ trigger, and the suite to shut down successfully."""
                                 run_kill.20141208T0000Z,\
                                 submit_hold.20141207T0000Z,\
                                 submit_hold.20141208T0000Z
-       
+
 [scheduling]
     initial cycle point = 20141207T0000Z
     final cycle point   = 20141208T0000Z
@@ -33,31 +36,26 @@ trigger, and the suite to shut down successfully."""
 [runtime]
     [[run_kill]]
         script = """
-trap '' EXIT # die silently 
-exit 0"""
+trap '' EXIT
+exit 0
+"""
     [[poll_check_kill]]
         script = """
-            cylc poll $CYLC_SUITE_NAME
-
-            while test $(grep "\[submit_hold.${CYLC_TASK_CYCLE_POINT}\] -submission succeeded
-\[run_kill.${CYLC_TASK_CYCLE_POINT}\] -suiciding" ${CYLC_SUITE_LOG_DIR%suite}suite/log -c) -ne 2 ; do
-                sleep 2
-            done
+cylc poll "${CYLC_SUITE_NAME}"
 
-            eval $(grep "^CYLC_BATCH_SYS_JOB_ID="\
- ${CYLC_SUITE_LOG_DIR%suite}job/$CYLC_TASK_CYCLE_POINT/submit_hold/NN/job.status)
-            echo $CYLC_BATCH_SYS_JOB_ID
-            atrm  $CYLC_BATCH_SYS_JOB_ID
+pat1="[submit_hold.${CYLC_TASK_CYCLE_POINT}] -ready => submitted"
+pat2="[run_kill.${CYLC_TASK_CYCLE_POINT}] -suiciding"
+log="${CYLC_SUITE_LOG_DIR}/log"
+while (($(grep -c -F -e "${pat1}" -e "${pat2}" "${log}") != 2))
+do
+    sleep 2
+done
 
-        """
+st_file="${CYLC_SUITE_RUN_DIR}/log/job/${CYLC_TASK_CYCLE_POINT}/submit_hold/NN/job.status"
+pkill -g "$(awk -F= '$1 == "CYLC_BATCH_SYS_JOB_ID" {print $2}' "${st_file}")"
+"""
     [[poll_now]]
-        script = """ 
-            cylc poll $CYLC_SUITE_NAME
-       """
-        [[[job]]]
-            batch system = at
-            
+        script = cylc poll "${CYLC_SUITE_NAME}"
+
     [[submit_hold]]
-        [[[job]]]
-            batch system = at
-            batch submit command template = at now + 2 minutes
+        init-script = sleep 120
diff --git a/tests/jobscript/12-err-script.t b/tests/cylc-reset/02-output-1.t
similarity index 73%
copy from tests/jobscript/12-err-script.t
copy to tests/cylc-reset/02-output-1.t
index 5823da2..6d320e5 100755
--- a/tests/jobscript/12-err-script.t
+++ b/tests/cylc-reset/02-output-1.t
@@ -1,7 +1,7 @@
 #!/bin/bash
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2017 NIWA
-# 
+#
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
@@ -15,16 +15,14 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test err-script.
-. "$(dirname "${0}")/test_header"
-set_test_number 4
+# Test "cylc reset --output='TRIGGER|MESSAGE' 'SUITE' 'TASK.ID'".
+. "$(dirname "$0")/test_header"
 
+set_test_number 2
 install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
+
 run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
-run_ok "${TEST_NAME_BASE}-run" cylc run "${SUITE_NAME}" --reference-test --debug
-grep_ok 'ERR foo bar baz qux' "${SUITE_RUN_DIR}/log/job/1/foo/01/job.err"
-run_fail "${TEST_NAME_BASE}-grep-02" \
-    grep -q -F 'ERR foo bar baz qux' "${SUITE_RUN_DIR}/log/job/1/foo/02/job.err"
+run_ok "${TEST_NAME_BASE}" cylc run --reference-test --debug "${SUITE_NAME}"
 
 purge_suite "${SUITE_NAME}"
 exit
diff --git a/tests/cylc-reset/02-output-1/reference.log b/tests/cylc-reset/02-output-1/reference.log
new file mode 100644
index 0000000..9abc5a9
--- /dev/null
+++ b/tests/cylc-reset/02-output-1/reference.log
@@ -0,0 +1,6 @@
+2016-01-14T11:55:10Z INFO - Initial point: 1
+2016-01-14T11:55:10Z INFO - Final point: 1
+2016-01-14T11:55:10Z INFO - [t1.1] -triggered off []
+2016-01-14T11:55:10Z INFO - [t2.1] -triggered off []
+2016-01-14T11:55:10Z INFO - [t3.1] -triggered off ['t1.1', 't2.1']
+2016-01-14T11:55:13Z INFO - [t4.1] -triggered off ['t1.1', 't2.1']
diff --git a/tests/cylc-reset/02-output-1/suite.rc b/tests/cylc-reset/02-output-1/suite.rc
new file mode 100644
index 0000000..4b9f5f9
--- /dev/null
+++ b/tests/cylc-reset/02-output-1/suite.rc
@@ -0,0 +1,39 @@
+[cylc]
+   UTC mode = True
+   [[events]]
+       abort on stalled = True
+   [[reference test]]
+       live mode suite timeout = PT1M
+       required run mode = live
+[scheduling]
+    [[dependencies]]
+        graph = """
+t1 => t3
+t2 => t3
+t1:hello & t2:greet => t4
+"""
+[runtime]
+    [[t1]]
+        script=true
+        [[[outputs]]]
+            hello = Hello World
+    [[t2]]
+        script=true
+        [[[outputs]]]
+            greet = Greet World
+    [[t3]]
+        script = """
+LOG="${CYLC_SUITE_LOG_DIR}/log"
+cylc reset --debug --output=hello "${CYLC_SUITE_NAME}" 't1.1'
+while ! grep -qF '[t1.1] -reset output to complete: hello' "${LOG}"; do
+    sleep 1  # make sure reset completes
+done
+cylc reset --debug --output='Greet World' "${CYLC_SUITE_NAME}" 't2.1'
+while ! grep -qF '[t2.1] -reset output to complete: Greet World' "${LOG}"; do
+    sleep 1  # make sure reset completes
+done
+"""
+        [[[job]]]
+            execution time limit = PT30S
+    [[t4]]
+        script = true
diff --git a/tests/jobscript/12-err-script.t b/tests/cylc-reset/03-output-2.t
similarity index 67%
copy from tests/jobscript/12-err-script.t
copy to tests/cylc-reset/03-output-2.t
index 5823da2..01c0222 100755
--- a/tests/jobscript/12-err-script.t
+++ b/tests/cylc-reset/03-output-2.t
@@ -1,7 +1,7 @@
 #!/bin/bash
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2017 NIWA
-# 
+#
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
@@ -15,16 +15,27 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test err-script.
-. "$(dirname "${0}")/test_header"
-set_test_number 4
+# Test "cylc reset --output='!OUTPUT' 'SUITE' 'TASK.ID'".
+. "$(dirname "$0")/test_header"
 
+set_test_number 3
 install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
+
 run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
-run_ok "${TEST_NAME_BASE}-run" cylc run "${SUITE_NAME}" --reference-test --debug
-grep_ok 'ERR foo bar baz qux' "${SUITE_RUN_DIR}/log/job/1/foo/01/job.err"
-run_fail "${TEST_NAME_BASE}-grep-02" \
-    grep -q -F 'ERR foo bar baz qux' "${SUITE_RUN_DIR}/log/job/1/foo/02/job.err"
+run_ok "${TEST_NAME_BASE}" cylc run --reference-test --debug "${SUITE_NAME}"
+cmp_ok "${SUITE_RUN_DIR}/cylc-show.out" <<'__OUT__'
+title: (not given)
+description: (not given)
+
+prerequisites (- => not satisfied):
+  (None)
 
+outputs (- => not completed):
+  + t1.1 submitted
+  + t1.1 started
+  + t1.1 succeeded
+  - t1.1 Greet World
+  - t1.1 Hello World
+__OUT__
 purge_suite "${SUITE_NAME}"
 exit
diff --git a/tests/cylc-reset/03-output-2/reference.log b/tests/cylc-reset/03-output-2/reference.log
new file mode 100644
index 0000000..8e08cf8
--- /dev/null
+++ b/tests/cylc-reset/03-output-2/reference.log
@@ -0,0 +1,4 @@
+2016-01-14T11:55:10Z INFO - Initial point: 1
+2016-01-14T11:55:10Z INFO - Final point: 1
+2016-01-14T11:55:10Z INFO - [t1.1] -triggered off []
+2016-01-14T11:55:10Z INFO - [t2.1] -triggered off ['t1.1']
diff --git a/tests/cylc-reset/03-output-2/suite.rc b/tests/cylc-reset/03-output-2/suite.rc
new file mode 100644
index 0000000..ed67298
--- /dev/null
+++ b/tests/cylc-reset/03-output-2/suite.rc
@@ -0,0 +1,30 @@
+[cylc]
+   UTC mode = True
+   [[events]]
+       abort on stalled = True
+   [[reference test]]
+       live mode suite timeout = PT1M
+       required run mode = live
+[scheduling]
+    [[dependencies]]
+        graph = t1 => t2
+[runtime]
+    [[t1]]
+        script=true
+        [[[outputs]]]
+            hello = Hello World
+            greet = Greet World
+    [[t2]]
+        script = """
+LOG="${CYLC_SUITE_LOG_DIR}/log"
+cylc reset --output='!hello' --output='!Greet World' "${CYLC_SUITE_NAME}" 't1.1'
+while ! grep -qF -e '[t1.1] -reset output to incomplete: hello' "${LOG}"; do
+    sleep 1  # make sure reset completes
+done
+while ! grep -qF '[t1.1] -reset output to incomplete: Greet World' "${LOG}"; do
+    sleep 1  # make sure reset completes
+done
+cylc show "${CYLC_SUITE_NAME}" 't1.1' >"${CYLC_SUITE_RUN_DIR}/cylc-show.out"
+"""
+        [[[job]]]
+            execution time limit = PT30S
diff --git a/tests/jobscript/10-bad-syntax.t b/tests/cylc-scan/03-monitor.t
old mode 100755
new mode 100644
similarity index 61%
copy from tests/jobscript/10-bad-syntax.t
copy to tests/cylc-scan/03-monitor.t
index b397dfb..24949a6
--- a/tests/jobscript/10-bad-syntax.t
+++ b/tests/cylc-scan/03-monitor.t
@@ -15,10 +15,10 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test "cylc jobscript" when we have bad syntax in "script" value.
-. "$(dirname "${0}")/test_header"
+# Test cylc monitor USER_AT_HOST interface.
+. $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
-set_test_number 5
+set_test_number 4
 #-------------------------------------------------------------------------------
 init_suite "${TEST_NAME_BASE}" <<'__SUITE_RC__'
 [scheduling]
@@ -26,22 +26,15 @@ init_suite "${TEST_NAME_BASE}" <<'__SUITE_RC__'
         graph = foo
 [runtime]
     [[foo]]
-        script = fi
+        script = sleep 60
 __SUITE_RC__
 
-TEST_NAME="${TEST_NAME_BASE}"-simple
-run_fail "${TEST_NAME}" cylc jobscript "${SUITE_NAME}" 'foo.1'
-cmp_ok "${TEST_NAME}.stdout" <'/dev/null'
-contains_ok "${TEST_NAME}.stderr" <<__ERR__
-ERROR: no jobscript generated
-__ERR__
-purge_suite "${SUITE_NAME}"
-#-------------------------------------------------------------------------------
-install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
-TEST_NAME="${TEST_NAME_BASE}-advanced-validate"
-run_ok "${TEST_NAME}" cylc validate "${SUITE_NAME}"
-TEST_NAME="${TEST_NAME_BASE}-advanced-run"
-run_ok "${TEST_NAME}" cylc run "${SUITE_NAME}" --reference-test --debug
+run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
+run_ok "${TEST_NAME_BASE}-run" cylc run "${SUITE_NAME}"
+
+TEST_NAME="${TEST_NAME_BASE}-monitor"
+run_ok "${TEST_NAME}" cylc monitor $(cylc scan | grep "${SUITE_NAME}") --once
+grep_ok "${SUITE_NAME} - 1 task" "${TEST_NAME}.stdout"
 #-------------------------------------------------------------------------------
+cylc stop --kill "${SUITE_NAME}"
 purge_suite "${SUITE_NAME}"
-exit
diff --git a/tests/cylc-show/clock-triggered-non-utc-mode/reference-untz.log b/tests/cylc-show/clock-triggered-non-utc-mode/reference-untz.log
index dc2a4b4..e3dce78 100644
--- a/tests/cylc-show/clock-triggered-non-utc-mode/reference-untz.log
+++ b/tests/cylc-show/clock-triggered-non-utc-mode/reference-untz.log
@@ -1,43 +1,8 @@
-2014-11-26T18:56:18+07 INFO - Suite starting at 2014-11-26T18:56:18+07
-2014-11-26T18:56:18+07 INFO - Log event clock: real time
 2014-11-26T18:56:18+07 INFO - Run mode: live
 2014-11-26T18:56:18+07 INFO - Initial point: 20140808T0900$TZ_OFFSET_BASIC
 2014-11-26T18:56:18+07 INFO - Final point: 20140808T0900$TZ_OFFSET_BASIC
 2014-11-26T18:56:18+07 INFO - Cold Start 20140808T0900$TZ_OFFSET_BASIC
-2014-11-26T18:56:18+07 INFO - [show.20140808T0900$TZ_OFFSET_BASIC] -initiate job-submit
 2014-11-26T18:56:18+07 INFO - [show.20140808T0900$TZ_OFFSET_BASIC] -triggered off []
-2014-11-26T18:56:19+07 INFO - 3384
-
-2014-11-26T18:56:19+07 INFO - [show.20140808T0900$TZ_OFFSET_BASIC] -submit_method_id=3384
-2014-11-26T18:56:19+07 INFO - [show.20140808T0900$TZ_OFFSET_BASIC] -submission succeeded
-2014-11-26T18:56:19+07 INFO - [show.20140808T1000+07] -holding (beyond suite stop point) 20140808T0900$TZ_OFFSET_BASIC
-2014-11-26T18:56:19+07 INFO - [show.20140808T0900$TZ_OFFSET_BASIC] -(current:submitted)> show.20140808T0900$TZ_OFFSET_BASIC started at 2014-11-26T18:56:19+07
-2014-11-26T18:56:20+07 INFO - [show.20140808T0900$TZ_OFFSET_BASIC] -(current:running)> show.20140808T0900$TZ_OFFSET_BASIC succeeded at 2014-11-26T18:56:20+07
-2014-11-26T18:56:21+07 INFO - [foo.20140808T0900$TZ_OFFSET_BASIC] -initiate job-submit
 2014-11-26T18:56:21+07 INFO - [foo.20140808T0900$TZ_OFFSET_BASIC] -triggered off ['show.20140808T0900$TZ_OFFSET_BASIC']
-2014-11-26T18:56:22+07 INFO - 3503
-
-2014-11-26T18:56:22+07 INFO - [foo.20140808T0900$TZ_OFFSET_BASIC] -submit_method_id=3503
-2014-11-26T18:56:22+07 INFO - [foo.20140808T0900$TZ_OFFSET_BASIC] -submission succeeded
-2014-11-26T18:56:22+07 INFO - [foo.20140808T1000+07] -holding (beyond suite stop point) 20140808T0900$TZ_OFFSET_BASIC
-2014-11-26T18:56:22+07 INFO - [foo.20140808T0900$TZ_OFFSET_BASIC] -(current:submitted)> foo.20140808T0900$TZ_OFFSET_BASIC started at 2014-11-26T18:56:22+07
-2014-11-26T18:56:23+07 INFO - [baz.20140808T0900$TZ_OFFSET_BASIC] -initiate job-submit
-2014-11-26T18:56:23+07 INFO - [bar.20140808T0900$TZ_OFFSET_BASIC] -initiate job-submit
 2014-11-26T18:56:23+07 INFO - [baz.20140808T0900$TZ_OFFSET_BASIC] -triggered off ['foo.20140808T0900$TZ_OFFSET_BASIC']
 2014-11-26T18:56:23+07 INFO - [bar.20140808T0900$TZ_OFFSET_BASIC] -triggered off ['foo.20140808T0900$TZ_OFFSET_BASIC']
-2014-11-26T18:56:24+07 INFO - 3615
-
-2014-11-26T18:56:24+07 INFO - [baz.20140808T0900$TZ_OFFSET_BASIC] -submit_method_id=3615
-2014-11-26T18:56:24+07 INFO - [baz.20140808T0900$TZ_OFFSET_BASIC] -submission succeeded
-2014-11-26T18:56:24+07 INFO - 3614
-
-2014-11-26T18:56:24+07 INFO - [bar.20140808T0900$TZ_OFFSET_BASIC] -submit_method_id=3614
-2014-11-26T18:56:24+07 INFO - [bar.20140808T0900$TZ_OFFSET_BASIC] -submission succeeded
-2014-11-26T18:56:24+07 INFO - [baz.20140808T1000+07] -holding (beyond suite stop point) 20140808T0900$TZ_OFFSET_BASIC
-2014-11-26T18:56:24+07 INFO - [bar.20140808T1000+07] -holding (beyond suite stop point) 20140808T0900$TZ_OFFSET_BASIC
-2014-11-26T18:56:24+07 INFO - [baz.20140808T0900$TZ_OFFSET_BASIC] -(current:submitted)> baz.20140808T0900$TZ_OFFSET_BASIC started at 2014-11-26T18:56:24+07
-2014-11-26T18:56:24+07 INFO - [bar.20140808T0900$TZ_OFFSET_BASIC] -(current:submitted)> bar.20140808T0900$TZ_OFFSET_BASIC started at 2014-11-26T18:56:24+07
-2014-11-26T18:56:25+07 INFO - [baz.20140808T0900$TZ_OFFSET_BASIC] -(current:running)> baz.20140808T0900$TZ_OFFSET_BASIC succeeded at 2014-11-26T18:56:24+07
-2014-11-26T18:56:25+07 INFO - [bar.20140808T0900$TZ_OFFSET_BASIC] -(current:running)> bar.20140808T0900$TZ_OFFSET_BASIC succeeded at 2014-11-26T18:56:24+07
-2014-11-26T18:56:33+07 INFO - [foo.20140808T0900$TZ_OFFSET_BASIC] -(current:running)> foo.20140808T0900$TZ_OFFSET_BASIC succeeded at 2014-11-26T18:56:32+07
-2014-11-26T18:56:34+07 INFO - Suite shutting down at 2014-11-26T18:56:34+07
diff --git a/tests/cylc-submit/00-bg.t b/tests/cylc-submit/00-bg.t
index 39e0cdf..c98d83b 100755
--- a/tests/cylc-submit/00-bg.t
+++ b/tests/cylc-submit/00-bg.t
@@ -92,7 +92,7 @@ else
     poll ! grep -q 'CYLC_BATCH_SYS_JOB_ID=' "${ST_FILE}" 2>/dev/null
     JOB_ID=$(awk -F= '$1 == "CYLC_BATCH_SYS_JOB_ID" {print $2}' "${ST_FILE}")
 fi
-grep_ok "Job ID: ${JOB_ID}" "${TEST_NAME_BASE}.stdout"
+contains_ok "${TEST_NAME_BASE}.stdout" <<<"[foo.1] Job ID: ${JOB_ID}"
 cmp_ok "${TEST_NAME_BASE}.stderr" <'/dev/null'
 #-------------------------------------------------------------------------------
 if [[ -n "${SSH}" ]]; then
diff --git a/tests/cylc-submit/11-multi.t b/tests/cylc-submit/11-multi.t
new file mode 100755
index 0000000..e071dbc
--- /dev/null
+++ b/tests/cylc-submit/11-multi.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2017 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+# Test "cylc submit" multiple tasks + families.
+CYLC_TEST_IS_GENERIC=false
+. "$(dirname "$0")/test_header"
+
+set_test_number 4
+
+init_suite "${TEST_NAME_BASE}" <<'__SUITE_RC__'
+[cylc]
+    UTC mode = True
+    cycle point format = %Y
+[scheduling]
+    initial cycle point = 2020
+    final cycle point = 2021
+    [[dependencies]]
+        [[[P1Y]]]
+            graph = FOO & bar
+[runtime]
+    [[FOO]]
+        script = echo "${CYLC_TASK_ID}"
+    [[FOO1, FOO2, FOO3]]
+        inherit = FOO
+    [[food]]
+        inherit = FOO1
+    [[fool]]
+        inherit = FOO2
+    [[foot]]
+        inherit = FOO3
+    [[bar]]
+        script = echo "${CYLC_TASK_ID}"
+__SUITE_RC__
+
+run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
+run_ok "${TEST_NAME_BASE}" cylc submit "${SUITE_NAME}" 'FOO.2020' 'bar.2021'
+for TASK_ID in 'food.2020' 'fool.2020' 'foot.2020' 'bar.2021'; do
+    POINT="${TASK_ID#*.}"
+    NAME="${TASK_ID%.*}"
+    ST_FILE="${SUITE_RUN_DIR}/log/job/${POINT}/${NAME}/01/job.status"
+    JOB_ID="$(awk -F= '$1 == "CYLC_BATCH_SYS_JOB_ID" {print $2}' "${ST_FILE}")"
+    echo "[${TASK_ID}] Job ID: ${JOB_ID}"
+done >'expected.out'
+contains_ok "${TEST_NAME_BASE}.stdout" 'expected.out'
+cmp_ok "${TEST_NAME_BASE}.stderr" <'/dev/null'
+
+purge_suite "${SUITE_NAME}"
+exit
+
diff --git a/tests/database/00-simple.t b/tests/database/00-simple.t
index f3b09b4..014943b 100644
--- a/tests/database/00-simple.t
+++ b/tests/database/00-simple.t
@@ -17,14 +17,14 @@
 #-------------------------------------------------------------------------------
 # Suite database content, a basic non-cycling suite of 3 tasks
 . "$(dirname "$0")/test_header"
-set_test_number 8
+set_test_number 9
 install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
 
 run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
 suite_run_ok "${TEST_NAME_BASE}-run" cylc run --debug "${SUITE_NAME}"
 
 if ! which sqlite3 > /dev/null; then
-    skip 6 "sqlite3 not installed?"
+    skip 7 "sqlite3 not installed?"
     purge_suite "${SUITE_NAME}"
     exit 0
 fi
@@ -66,5 +66,10 @@ sqlite3 "${DB_FILE}" 'SELECT name, cycle, status FROM task_states ORDER BY name'
     >"${NAME}"
 cmp_ok "${TEST_SOURCE_DIR}/${TEST_NAME_BASE}/${NAME}" "${NAME}"
 
+NAME='select-inheritance.out'
+sqlite3 "${DB_FILE}" 'SELECT namespace, inheritance FROM inheritance ORDER BY namespace' \
+    >"${NAME}"
+cmp_ok "${TEST_SOURCE_DIR}/${TEST_NAME_BASE}/${NAME}" "${NAME}"
+
 purge_suite "${SUITE_NAME}"
 exit
diff --git a/tests/database/00-simple/schema.out b/tests/database/00-simple/schema.out
index d4f18c2..41d672f 100644
--- a/tests/database/00-simple/schema.out
+++ b/tests/database/00-simple/schema.out
@@ -2,6 +2,7 @@ CREATE TABLE broadcast_events(time TEXT, change TEXT, point TEXT, namespace TEXT
 CREATE TABLE broadcast_states(point TEXT, namespace TEXT, key TEXT, value TEXT, PRIMARY KEY(point, namespace, key));
 CREATE TABLE broadcast_states_checkpoints(id INTEGER, point TEXT, namespace TEXT, key TEXT, value TEXT, PRIMARY KEY(id, point, namespace, key));
 CREATE TABLE checkpoint_id(id INTEGER, time TEXT, event TEXT, PRIMARY KEY(id));
+CREATE TABLE inheritance(namespace TEXT, inheritance TEXT, PRIMARY KEY(namespace));
 CREATE TABLE suite_params(key TEXT, value TEXT, PRIMARY KEY(key));
 CREATE TABLE suite_params_checkpoints(id INTEGER, key TEXT, value TEXT, PRIMARY KEY(id, key));
 CREATE TABLE suite_template_vars(key TEXT, value TEXT, PRIMARY KEY(key));
diff --git a/tests/database/00-simple/select-inheritance.out b/tests/database/00-simple/select-inheritance.out
new file mode 100644
index 0000000..c470c1d
--- /dev/null
+++ b/tests/database/00-simple/select-inheritance.out
@@ -0,0 +1,4 @@
+bar|bar root
+baz|baz root
+foo|foo root
+root|root
diff --git a/tests/database/00-simple/select-task-events.out b/tests/database/00-simple/select-task-events.out
index 8fd2119..29777b4 100644
--- a/tests/database/00-simple/select-task-events.out
+++ b/tests/database/00-simple/select-task-events.out
@@ -1,17 +1,17 @@
-foo|1|incrementing submit number|
-foo|1|submission succeeded|
+foo|1|output completed|submitted
+foo|1|submitted|
 foo|1|output completed|started
 foo|1|started|
 foo|1|output completed|succeeded
 foo|1|succeeded|
-bar|1|incrementing submit number|
-bar|1|submission succeeded|
+bar|1|output completed|submitted
+bar|1|submitted|
 bar|1|output completed|started
 bar|1|started|
 bar|1|output completed|succeeded
 bar|1|succeeded|
-baz|1|incrementing submit number|
-baz|1|submission succeeded|
+baz|1|output completed|submitted
+baz|1|submitted|
 baz|1|output completed|started
 baz|1|started|
 baz|1|output completed|succeeded
diff --git a/tests/deprecations/00-all.t b/tests/deprecations/00-all.t
index b71ae22..f8b3ebc 100755
--- a/tests/deprecations/00-all.t
+++ b/tests/deprecations/00-all.t
@@ -49,6 +49,10 @@ cmp_ok val.out <<__END__
  * (6.11.0) [runtime][foo, cat, dog][retry delays] -> [runtime][foo, cat, dog][job][execution retry delays] - value unchanged
  * (6.11.0) [runtime][foo, cat, dog][submission polling intervals] -> [runtime][foo, cat, dog][job][submission polling intervals] - value unchanged
  * (6.11.0) [runtime][foo, cat, dog][execution polling intervals] -> [runtime][foo, cat, dog][job][execution polling intervals] - value unchanged
+ * (7.2.2) [cylc][dummy mode] - DELETED (OBSOLETE)
+ * (7.2.2) [cylc][simulation mode] - DELETED (OBSOLETE)
+ * (7.2.2) [runtime][foo, cat, dog][dummy mode] - DELETED (OBSOLETE)
+ * (7.2.2) [runtime][foo, cat, dog][simulation mode] - DELETED (OBSOLETE)
 __END__
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
diff --git a/tests/deprecations/00-all/suite.rc b/tests/deprecations/00-all/suite.rc
index 9eb6721..c827147 100644
--- a/tests/deprecations/00-all/suite.rc
+++ b/tests/deprecations/00-all/suite.rc
@@ -2,6 +2,9 @@
 # Test automatic deprecation and deletion of config items as specified
 # in lib/cylc/cfgspec/suite.py.
 
+[cylc]
+    [[dummy mode]]
+    [[simulation mode]]
 [scheduling]
     initial cycle point = 20150808T00
     final cycle point = 20150808T00
@@ -21,8 +24,10 @@
         retry delays = 6*PT1H # deprecate
         submission polling intervals = PT1H, PT2H
         execution polling intervals = 10*PT30M
+        [[[simulation mode]]]
+            command scripting = "echo script" # deprecate
         [[[dummy mode]]]
-        command scripting = "echo script" # deprecate
+            command scripting = "echo script" # deprecate
         [[[job submission]]] # deprecate all these
             command template = qsub something something
             method = pbs
diff --git a/tests/events/08-task-event-handler-retry.t b/tests/events/08-task-event-handler-retry.t
index cebfb23..b7670da 100755
--- a/tests/events/08-task-event-handler-retry.t
+++ b/tests/events/08-task-event-handler-retry.t
@@ -23,7 +23,7 @@ OPT_SET=
 if [[ "${TEST_NAME_BASE}" == *-globalcfg ]]; then
     create_test_globalrc "" "
 [task events]
-    handlers=hello-event-handler '%(name)s' '%(event)s'
+    handlers = hello-event-handler %(name)s %(event)s %(suite_url)s %(task_url)s %(message)s %(point)s %(submit_num)s %(id)s
     handler events=succeeded, failed
     handler retry delays=PT0S, 2*PT1S"
     OPT_SET='-s GLOBALCFG=True'
@@ -33,15 +33,17 @@ run_ok "${TEST_NAME_BASE}-validate" cylc validate ${OPT_SET} "${SUITE_NAME}"
 suite_run_ok "${TEST_NAME_BASE}-run" \
     cylc run --reference-test --debug ${OPT_SET} "${SUITE_NAME}"
 
+SUITE_URL=http://my-suites.com/${SUITE_NAME}.html
+TASK_URL=http://my-suites.com/${SUITE_NAME}/t1.html
 LOG="${SUITE_RUN_DIR}/log/job/1/t1/NN/job-activity.log"
 sed "/(('event-handler-00', 'succeeded'), 1)/!d; s/^.* \[/[/" "${LOG}" \
     >'edited-job-activity.log'
-cmp_ok 'edited-job-activity.log' <<'__LOG__'
-[(('event-handler-00', 'succeeded'), 1) cmd] hello-event-handler 't1' 'succeeded'
+cmp_ok 'edited-job-activity.log' <<__LOG__
+[(('event-handler-00', 'succeeded'), 1) cmd] hello-event-handler t1 succeeded ${SUITE_URL} ${TASK_URL} 'job succeeded' 1 1 t1.1
 [(('event-handler-00', 'succeeded'), 1) ret_code] 1
-[(('event-handler-00', 'succeeded'), 1) cmd] hello-event-handler 't1' 'succeeded'
+[(('event-handler-00', 'succeeded'), 1) cmd] hello-event-handler t1 succeeded ${SUITE_URL} ${TASK_URL} 'job succeeded' 1 1 t1.1
 [(('event-handler-00', 'succeeded'), 1) ret_code] 1
-[(('event-handler-00', 'succeeded'), 1) cmd] hello-event-handler 't1' 'succeeded'
+[(('event-handler-00', 'succeeded'), 1) cmd] hello-event-handler t1 succeeded ${SUITE_URL} ${TASK_URL} 'job succeeded' 1 1 t1.1
 [(('event-handler-00', 'succeeded'), 1) ret_code] 0
 [(('event-handler-00', 'succeeded'), 1) out] hello
 __LOG__
diff --git a/tests/events/08-task-event-handler-retry/suite.rc b/tests/events/08-task-event-handler-retry/suite.rc
index 5afcc95..9ca4b2a 100644
--- a/tests/events/08-task-event-handler-retry/suite.rc
+++ b/tests/events/08-task-event-handler-retry/suite.rc
@@ -1,6 +1,7 @@
 #!jinja2
 
 title=Task Event Handler Retry
+URL = http://my-suites.com/${CYLC_SUITE_NAME}.html
 
 [cylc]
     [[reference test]]
@@ -12,6 +13,7 @@ title=Task Event Handler Retry
 
 [runtime]
     [[t1]]
+        URL = http://my-suites.com/${CYLC_SUITE_NAME}/${CYLC_TASK_NAME}.html
         script=true
 {% if HOST is defined %}
         [[[remote]]]
@@ -19,7 +21,7 @@ title=Task Event Handler Retry
 {% endif %}
 {% if GLOBALCFG is not defined %}
         [[[events]]]
-            handlers=hello-event-handler '%(name)s' '%(event)s'
+            handlers = hello-event-handler %(name)s %(event)s %(suite_url)s %(task_url)s %(message)s %(point)s %(submit_num)s %(id)s
             handler events=succeeded, failed
             handler retry delays=PT0S, 2*PT1S
 {% endif %}{# not GLOBALCFG is not defined #}
diff --git a/tests/events/10-task-event-job-logs-retrieve.t b/tests/events/10-task-event-job-logs-retrieve.t
index c6578a5..434ce7d 100755
--- a/tests/events/10-task-event-job-logs-retrieve.t
+++ b/tests/events/10-task-event-job-logs-retrieve.t
@@ -47,18 +47,18 @@ sed "/'job-logs-retrieve'/!d" \
     "${SUITE_RUN_DIR}/log/job/1/t1/"{01,02,03}"/job-activity.log" \
     >'edited-activities.log'
 cmp_ok 'edited-activities.log' <<'__LOG__'
-[('job-logs-retrieve', 1) ret_code] 0
-[('job-logs-retrieve', 2) ret_code] 0
-[('job-logs-retrieve', 3) ret_code] 0
+[(('job-logs-retrieve', 'retry'), 1) ret_code] 0
+[(('job-logs-retrieve', 'retry'), 2) ret_code] 0
+[(('job-logs-retrieve', 'succeeded'), 3) ret_code] 0
 __LOG__
 
 grep -F 'will run after' "${SUITE_RUN_DIR}/log/suite/log" \
     | cut -d' ' -f 4-10 | sort >"edited-log"
 if [[ "${TEST_NAME_BASE}" == *-globalcfg ]]; then
     cmp_ok 'edited-log' <<'__LOG__'
-[t1.1] -('job-logs-retrieve', 1) will run after PT5S
-[t1.1] -('job-logs-retrieve', 2) will run after PT5S
-[t1.1] -('job-logs-retrieve', 3) will run after PT5S
+1/t1/01 ('job-logs-retrieve', 'retry') will run after PT5S
+1/t1/02 ('job-logs-retrieve', 'retry') will run after PT5S
+1/t1/03 ('job-logs-retrieve', 'succeeded') will run after PT5S
 __LOG__
 else
     cmp_ok 'edited-log' <'/dev/null'  # P0Y not displayed
diff --git a/tests/events/11-cycle-task-event-job-logs-retrieve.t b/tests/events/11-cycle-task-event-job-logs-retrieve.t
index 05c13ab..0a6e607 100755
--- a/tests/events/11-cycle-task-event-job-logs-retrieve.t
+++ b/tests/events/11-cycle-task-event-job-logs-retrieve.t
@@ -39,9 +39,9 @@ sed "/'job-logs-retrieve'/!d" \
     "${SUITE_RUN_DIR}/log/job/2020-02-02T02:02Z/t"{1,2}'/'{01,02,03}'/job-activity.log' \
     >'edited-activities.log'
 cmp_ok 'edited-activities.log' <<__LOG__
-[('job-logs-retrieve', 1) ret_code] 0
-[('job-logs-retrieve', 2) ret_code] 0
-[('job-logs-retrieve', 3) ret_code] 0
+[(('job-logs-retrieve', 'retry'), 1) ret_code] 0
+[(('job-logs-retrieve', 'retry'), 2) ret_code] 0
+[(('job-logs-retrieve', 'succeeded'), 3) ret_code] 0
 __LOG__
 
 purge_suite_remote "${HOST}" "${SUITE_NAME}"
diff --git a/tests/events/15-host-task-event-handler-retry-globalcfg.t b/tests/events/15-host-task-event-handler-retry-globalcfg.t
index 3a93a1d..6aa2bb5 100755
--- a/tests/events/15-host-task-event-handler-retry-globalcfg.t
+++ b/tests/events/15-host-task-event-handler-retry-globalcfg.t
@@ -54,10 +54,10 @@ cmp_ok 'edited-job-activity.log' <<'__LOG__'
 __LOG__
 
 grep 'event-handler-00.*will run after' "${SUITE_RUN_DIR}/log/suite/log" \
-    | cut -d' ' -f 4-11 >'edited-log'
+    | cut -d' ' -f 4-10 >'edited-log'
 # Note: P0Y delays are not displayed
 cmp_ok 'edited-log' <<'__LOG__'
-[t1.1] -(('event-handler-00', 'succeeded'), 1) will run after PT1S
+1/t1/01 ('event-handler-00', 'succeeded') will run after PT1S
 __LOG__
 
 purge_suite_remote "${HOST}" "${SUITE_NAME}"
diff --git a/tests/events/20-suite-event-handlers.t b/tests/events/20-suite-event-handlers.t
index d209c72..bc9661b 100755
--- a/tests/events/20-suite-event-handlers.t
+++ b/tests/events/20-suite-event-handlers.t
@@ -23,7 +23,7 @@ if [[ "${TEST_NAME_BASE}" == *-globalcfg ]]; then
     create_test_globalrc "" "
 [cylc]
     [[events]]
-        handlers = echo 'Your %(suite)s suite has a %(event)s event.'
+        handlers = echo 'Your %(suite)s suite has a %(event)s event and URL %(suite_url)s.'
         handler events = startup"
     OPT_SET='-s GLOBALCFG=True'
 fi
@@ -36,7 +36,7 @@ suite_run_ok "${TEST_NAME_BASE}-run" \
 
 LOG_FILE="$(cylc get-global-config --print-run-dir)/${SUITE_NAME}/log/suite/log"
 grep_ok "\\[('suite-event-handler-00', 'startup') ret_code\\] 0" "${LOG_FILE}"
-grep_ok "\\[('suite-event-handler-00', 'startup') out\\] Your ${SUITE_NAME} suite has a startup event." "${LOG_FILE}"
+grep_ok "\\[('suite-event-handler-00', 'startup') out\\] Your ${SUITE_NAME} suite has a startup event and URL http://mysuites.com/${SUITE_NAME}.html." "${LOG_FILE}"
 
 purge_suite "${SUITE_NAME}"
 exit
diff --git a/tests/events/20-suite-event-handlers/suite.rc b/tests/events/20-suite-event-handlers/suite.rc
index b8b1939..ff11a3b 100644
--- a/tests/events/20-suite-event-handlers/suite.rc
+++ b/tests/events/20-suite-event-handlers/suite.rc
@@ -1,11 +1,12 @@
 #!jinja2
 
 title=Suite Event Mail
+URL = http://mysuites.com/${CYLC_SUITE_NAME}.html
 
 [cylc]
     [[events]]
 {% if GLOBALCFG is not defined %}
-        handlers = echo 'Your %(suite)s suite has a %(event)s event.'
+        handlers = echo 'Your %(suite)s suite has a %(event)s event and URL %(suite_url)s.'
         handler events = startup
 {% endif %}{# not GLOBALCFG is not defined #}
     [[reference test]]
diff --git a/tests/events/32-task-event-job-logs-retrieve-2.t b/tests/events/32-task-event-job-logs-retrieve-2.t
index db6a138..7b09856 100755
--- a/tests/events/32-task-event-job-logs-retrieve-2.t
+++ b/tests/events/32-task-event-job-logs-retrieve-2.t
@@ -36,7 +36,7 @@ sed "/'job-logs-retrieve'/!d" \
     "${SUITE_RUN_DIR}/log/job/1/t1/01/job-activity.log" \
     >'edited-activities.log'
 cmp_ok 'edited-activities.log' <<'__LOG__'
-[('job-logs-retrieve', 1) ret_code] 0
+[(('job-logs-retrieve', 'succeeded'), 1) ret_code] 0
 __LOG__
 exists_ok "${SUITE_RUN_DIR}/log/job/1/t1/01/job.out"
 exists_fail "${SUITE_RUN_DIR}/log/job/1/t1/01/job.err"
diff --git a/tests/events/33-task-event-job-logs-retrieve-3.t b/tests/events/33-task-event-job-logs-retrieve-3.t
index 4e03de3..f561ce8 100755
--- a/tests/events/33-task-event-job-logs-retrieve-3.t
+++ b/tests/events/33-task-event-job-logs-retrieve-3.t
@@ -36,10 +36,10 @@ sed "/'job-logs-retrieve'/!d" \
     "${SUITE_RUN_DIR}/log/job/1/t1/01/job-activity.log" \
     >'edited-activities.log'
 cmp_ok 'edited-activities.log' <<'__LOG__'
-[('job-logs-retrieve', 1) ret_code] 1
-[('job-logs-retrieve', 1) err] File(s) not retrieved: job.err
-[('job-logs-retrieve', 1) ret_code] 1
-[('job-logs-retrieve', 1) err] File(s) not retrieved: job.err
+[(('job-logs-retrieve', 'failed'), 1) ret_code] 1
+[(('job-logs-retrieve', 'failed'), 1) err] File(s) not retrieved: job.err
+[(('job-logs-retrieve', 'failed'), 1) ret_code] 1
+[(('job-logs-retrieve', 'failed'), 1) err] File(s) not retrieved: job.err
 __LOG__
 exists_ok "${SUITE_RUN_DIR}/log/job/1/t1/01/job.out"
 exists_fail "${SUITE_RUN_DIR}/log/job/1/t1/01/job.err"
diff --git a/tests/graph_parser/00-unittests.t b/tests/graph_parser/00-unittests.t
index 6bec684..dc92c8f 100755
--- a/tests/graph_parser/00-unittests.t
+++ b/tests/graph_parser/00-unittests.t
@@ -18,7 +18,8 @@
 # Run graph parser unit tests.
 . $(dirname $0)/test_header
 
-set_test_number 1
+set_test_number 2
 
 TEST_NAME=$TEST_NAME_BASE-unit-tests
 run_ok $TEST_NAME python $CYLC_DIR/lib/cylc/graph_parser.py
+run_ok $TEST_NAME python $CYLC_DIR/lib/cylc/cycling/__init__.py
diff --git a/tests/cyclers/23-no_final_cycle_point.t b/tests/graphing/09-ref-graph.t
old mode 100755
new mode 100644
similarity index 67%
copy from tests/cyclers/23-no_final_cycle_point.t
copy to tests/graphing/09-ref-graph.t
index b552191..70ad758
--- a/tests/cyclers/23-no_final_cycle_point.t
+++ b/tests/graphing/09-ref-graph.t
@@ -15,27 +15,18 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test intercycle dependencies.
+# Test that "cylc graph --reference -O foo.ref SUITE" works. GitHub #2249.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
-set_test_number 6
+set_test_number 3
 #-------------------------------------------------------------------------------
-install_suite $TEST_NAME_BASE no_final_cycle_point
+install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
-run_fail $TEST_NAME cylc validate $SUITE_NAME
-grep_ok "This suite requires a final cycle point\." \
-    $TEST_NAME.stderr
-contains_ok "$TEST_NAME.stderr" <<'__ERR__'
-'ERROR: Invalid/unsupported recurrence representation: R1/P0D'
-__ERR__
+run_ok $TEST_NAME cylc validate "$SUITE_NAME"
 #-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-run
-run_fail $TEST_NAME cylc run --debug $SUITE_NAME
-grep_ok "This suite requires a final cycle point\." \
-    $TEST_NAME.stderr
-contains_ok "$TEST_NAME.stderr" <<'__ERR__'
-cylc.config.SuiteConfigError: 'ERROR: Invalid/unsupported recurrence representation: R1/P0D'
-__ERR__
+TEST_NAME=$TEST_NAME_BASE-graph
+run_ok $TEST_NAME cylc graph --reference -O new.ref "$SUITE_NAME"
+cmp_ok new.ref $TEST_SOURCE_DIR/$TEST_NAME_BASE/graph.ref
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
diff --git a/tests/graphing/09-ref-graph/graph.ref b/tests/graphing/09-ref-graph/graph.ref
new file mode 100644
index 0000000..466898d
--- /dev/null
+++ b/tests/graphing/09-ref-graph/graph.ref
@@ -0,0 +1,7 @@
+edge "foo.20140808T0000+12" "bar.20140808T0000+12" solid
+graph
+node "bar.20140808T0000+12" "bar\n20140808T0000+12" unfilled box black
+node "foo.20140808T0000+12" "foo\n20140808T0000+12" unfilled box black
+node "foo.20140808T0600+12" "foo\n20140808T0600+12" unfilled box black
+node "foo.20140808T1200+12" "foo\n20140808T1200+12" unfilled box black
+stop
diff --git a/tests/graphing/09-ref-graph/suite.rc b/tests/graphing/09-ref-graph/suite.rc
new file mode 100644
index 0000000..e7aab38
--- /dev/null
+++ b/tests/graphing/09-ref-graph/suite.rc
@@ -0,0 +1,14 @@
+
+[cylc]
+    cycle point time zone = +12
+
+[scheduling]
+    initial cycle point = 20140808T00
+    [[dependencies]]
+       [[[R//PT6H]]] # (= [[[PT6H]]])
+            graph = foo
+       [[[T00]]]
+            graph = foo => bar
+
+[visualization]
+    number of cycle points = 3
diff --git a/tests/hold-release/20-reset-waiting-output/suite.rc b/tests/hold-release/20-reset-waiting-output/suite.rc
index bb032f4..42118f8 100644
--- a/tests/hold-release/20-reset-waiting-output/suite.rc
+++ b/tests/hold-release/20-reset-waiting-output/suite.rc
@@ -16,15 +16,15 @@ while ! grep -qF 'INFO - Command succeeded: hold_suite()' "${LOG}"; do
     sleep 1  # make sure hold completes
 done
 cylc reset --state='succeeded' "${CYLC_SUITE_NAME}" 't2.1'
-while ! grep -qF \
-    "INFO - Command succeeded: reset_task_states([u't2.1'], state=succeeded)" \
+while ! grep -q \
+    "INFO - Command succeeded: reset_task_states(\\[u't2.1'\\],.*state=succeeded" \
     "${LOG}"
 do
     sleep 1  # make sure reset succeeded completes
 done
 cylc reset --state='waiting' "${CYLC_SUITE_NAME}" 't2.1'
-while ! grep -qF \
-    "INFO - Command succeeded: reset_task_states([u't2.1'], state=waiting)" \
+while ! grep -q \
+    "INFO - Command succeeded: reset_task_states(\\[u't2.1'\\],.*state=waiting" \
     "${LOG}"
 do
     sleep 1  # make sure reset waiting completes
diff --git a/tests/job-file-trap/01-loadleveler.t b/tests/job-file-trap/01-loadleveler.t
index 925f7ec..a8602da 100755
--- a/tests/job-file-trap/01-loadleveler.t
+++ b/tests/job-file-trap/01-loadleveler.t
@@ -22,15 +22,15 @@
 CYLC_TEST_IS_GENERIC=false
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
-RC_PREV="[test battery][batch systems][loadleveler]"
-export CYLC_TEST_HOST=$( \
-    cylc get-global-config -i "${RC_PREV}host" 2>'/dev/null')
-if [[ -z $CYLC_TEST_HOST ]]; then
+RC_PREF="[test battery][batch systems][loadleveler]"
+export CYLC_TEST_BATCH_TASK_HOST=$( \
+    cylc get-global-config -i "${RC_PREF}host" 2>'/dev/null')
+if [[ -z $CYLC_TEST_BATCH_TASK_HOST ]]; then
     skip_all '"[test battery][batch systems][loadleveler]host": not defined'
 fi
 set_test_number 6
 export CYLC_TEST_DIRECTIVES=$( \
-    cylc get-global-config -i "${RC_PREV}[directives]" 2>'/dev/null')
+    cylc get-global-config -i "${RC_PREF}[directives]" 2>'/dev/null')
 install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
@@ -38,18 +38,19 @@ run_ok $TEST_NAME cylc validate $SUITE_NAME
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-run
 run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
+sleep 5
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-t1.1
 T1_JOB_FILE=$SUITE_RUN_DIR/log/job/1/t1/01/job
 exists_ok $T1_JOB_FILE
-run_fail $TEST_NAME grep -q -e '^# TRAP VACATION SIGNALS:' $T1_JOB_FILE
+run_fail $TEST_NAME grep -q -e '^CYLC_VACATION_SIGNALS' $T1_JOB_FILE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-t2.1
 T2_JOB_FILE=$SUITE_RUN_DIR/log/job/1/t2/01/job
 exists_ok $T2_JOB_FILE
-grep_ok '^# TRAP VACATION SIGNALS:' $T2_JOB_FILE
+grep_ok '^CYLC_VACATION_SIGNALS' $T2_JOB_FILE
 #-------------------------------------------------------------------------------
-if [[ $CYLC_TEST_HOST != 'localhost' ]]; then
+if [[ $CYLC_TEST_BATCH_TASK_HOST != 'localhost' ]]; then
     purge_suite_remote "${CYLC_TEST_BATCH_TASK_HOST}" "${SUITE_NAME}"
 fi
 purge_suite $SUITE_NAME
diff --git a/tests/job-file-trap/01-loadleveler/suite.rc b/tests/job-file-trap/01-loadleveler/suite.rc
index 4d5958d..7c5f6d1 100644
--- a/tests/job-file-trap/01-loadleveler/suite.rc
+++ b/tests/job-file-trap/01-loadleveler/suite.rc
@@ -2,7 +2,7 @@
 [cylc]
    [[reference test]]
        required run mode=live
-       live mode suite timeout=5 # minutes
+       live mode suite timeout=PT5M # minutes
 
 [scheduling]
     [[dependencies]]
diff --git a/tests/job-file-trap/02-pipefail.t b/tests/job-file-trap/02-pipefail.t
index 421f02f..209e9a1 100755
--- a/tests/job-file-trap/02-pipefail.t
+++ b/tests/job-file-trap/02-pipefail.t
@@ -30,7 +30,6 @@ suite_run_fail "${TEST_NAME_BASE}-run" \
 T1_STATUS_FILE="${SUITE_RUN_DIR}/log/job/1/t1/01/job.status"
 contains_ok "${T1_STATUS_FILE}" <<'__STATUS__'
 CYLC_JOB_EXIT=EXIT
-CYLC_JOB_EXIT=ERR
 __STATUS__
 
 purge_suite "${SUITE_NAME}"
diff --git a/tests/job-submission/05-activity-log.t b/tests/job-submission/05-activity-log.t
index 0e870e7..728cd94 100755
--- a/tests/job-submission/05-activity-log.t
+++ b/tests/job-submission/05-activity-log.t
@@ -28,10 +28,10 @@ suite_run_ok "${TEST_NAME_BASE}-run" \
 
 T1_ACTIVITY_LOG="${SUITE_RUN_DIR}/log/job/1/t1/NN/job-activity.log"
 
-grep_ok '\[job-submit ret_code\] 0' "${T1_ACTIVITY_LOG}"
-grep_ok '\[job-kill ret_code\] 1' "${T1_ACTIVITY_LOG}"
-grep_ok '\[job-kill out\] [^|]*\|1/t1/01\|1' "${T1_ACTIVITY_LOG}"
-grep_ok '\[job-poll out\] [^|]*\|1/t1/01\|background\|[^|]*\|1\|\|\|\|[^|]*\|' \
+grep_ok '\[jobs-submit ret_code\] 0' "${T1_ACTIVITY_LOG}"
+grep_ok '\[jobs-kill ret_code\] 1' "${T1_ACTIVITY_LOG}"
+grep_ok '\[jobs-kill out\] [^|]*\|1/t1/01\|1' "${T1_ACTIVITY_LOG}"
+grep_ok '\[jobs-poll out\] [^|]*\|1/t1/01\|background\|[^|]*\|1\|\|\|\|[^|]*\|' \
     "${T1_ACTIVITY_LOG}"
 grep_ok \
     "\\[(('event-handler-00', 'failed'), 1) out\\] failed ${SUITE_NAME} t1\\.1 job failed" \
diff --git a/tests/job-submission/09-activity-log-host-bad-submit/suite.rc b/tests/job-submission/09-activity-log-host-bad-submit/suite.rc
index 29050d4..9a6d694 100644
--- a/tests/job-submission/09-activity-log-host-bad-submit/suite.rc
+++ b/tests/job-submission/09-activity-log-host-bad-submit/suite.rc
@@ -26,7 +26,7 @@
 set -x
 # Test that the original command is printed
 A_LOG="$(dirname "$0")/../../bad-submitter/01/job-activity.log"
-grep '\[job-submit cmd\] cylc jobs-submit.* --host={{CYLC_TEST_HOST}}' \
+grep '\[jobs-submit cmd\] cylc jobs-submit.* --host={{CYLC_TEST_HOST}}' \
     "${A_LOG}"
 # Test that some STDERR is printed, with the host name as prefix
 grep '({{CYLC_TEST_HOST}}) .* \[STDERR\] ' "${A_LOG}"
diff --git a/tests/jobscript/00-torture/foo.ref-jobfile b/tests/jobscript/00-torture/foo.ref-jobfile
index 45d762d..625c19e 100644
--- a/tests/jobscript/00-torture/foo.ref-jobfile
+++ b/tests/jobscript/00-torture/foo.ref-jobfile
@@ -30,7 +30,7 @@ cylc__job__inst__cylc_env() {
 
 cylc__job__inst__user_env() {
     # TASK RUNTIME ENVIRONMENT:
-    E_ONE="$( cylc util rnd 1 10 )"
+    E_ONE="$(( RANDOM % 10 ))"
     E_TWO="$VAR_IS"
     E_THR="$CYLC_SUITE_SHARE_PATH"
     E_FOU="$CYLC_TASK_NAME"
diff --git a/tests/jobscript/00-torture/suite.rc b/tests/jobscript/00-torture/suite.rc
index 7cf6a62..d1811b6 100644
--- a/tests/jobscript/00-torture/suite.rc
+++ b/tests/jobscript/00-torture/suite.rc
@@ -72,7 +72,7 @@ echo VAR_CS is $VAR_CS
 export VAR_PostCS=postcs"""
         [[[environment]]]
             # path to cylc must be available:
-            E_ONE = $( cylc util rnd 1 10 )
+            E_ONE = $(( RANDOM % 10 ))
             # init-script must be done:
             E_TWO = $VAR_IS
             # cylc-defined variables must be done:
diff --git a/tests/jobscript/10-bad-syntax.t b/tests/jobscript/10-bad-syntax.t
index b397dfb..0cdeae7 100755
--- a/tests/jobscript/10-bad-syntax.t
+++ b/tests/jobscript/10-bad-syntax.t
@@ -18,7 +18,7 @@
 # Test "cylc jobscript" when we have bad syntax in "script" value.
 . "$(dirname "${0}")/test_header"
 #-------------------------------------------------------------------------------
-set_test_number 5
+set_test_number 8
 #-------------------------------------------------------------------------------
 init_suite "${TEST_NAME_BASE}" <<'__SUITE_RC__'
 [scheduling]
@@ -37,6 +37,27 @@ ERROR: no jobscript generated
 __ERR__
 purge_suite "${SUITE_NAME}"
 #-------------------------------------------------------------------------------
+init_suite "${TEST_NAME_BASE}" <<'__SUITE_RC__'
+[scheduling]
+    [[dependencies]]
+        graph = foo
+[runtime]
+    [[foo]]
+        script = true
+        pre-script = """
+# stuff 1
+# stuff 2
+# stuff 3
+"""
+__SUITE_RC__
+
+TEST_NAME="${TEST_NAME_BASE}"-comment-only
+run_ok "${TEST_NAME}" cylc jobscript "${SUITE_NAME}" 'foo.1'
+grep_ok 'cylc__job__inst__script' "${TEST_NAME}.stdout"
+run_fail "${TEST_NAME}.stdout.pre_script" \
+    grep -F -q 'cylc__job__inst__pre_script' "${TEST_NAME}.stdout"
+purge_suite "${SUITE_NAME}"
+#-------------------------------------------------------------------------------
 install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
 TEST_NAME="${TEST_NAME_BASE}-advanced-validate"
 run_ok "${TEST_NAME}" cylc validate "${SUITE_NAME}"
diff --git a/tests/jobscript/11-shell-ksh.t b/tests/jobscript/11-shell-ksh.t
index da263b1..13c0281 100755
--- a/tests/jobscript/11-shell-ksh.t
+++ b/tests/jobscript/11-shell-ksh.t
@@ -17,7 +17,7 @@
 #-------------------------------------------------------------------------------
 # Test job script OK with ksh. If ksh installed, assume ksh93.
 . "$(dirname "${0}")/test_header"
-if ! KSH="$(which ksh)" 2>'/dev/null'; then
+if ! KSH="$(which ksh 2>/dev/null)"; then
     skip_all 'ksh not installed'
 fi
 set_test_number 5
diff --git a/tests/jobscript/12-err-script.t b/tests/jobscript/12-err-script.t
index 5823da2..b7391b2 100755
--- a/tests/jobscript/12-err-script.t
+++ b/tests/jobscript/12-err-script.t
@@ -22,7 +22,7 @@ set_test_number 4
 install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
 run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
 run_ok "${TEST_NAME_BASE}-run" cylc run "${SUITE_NAME}" --reference-test --debug
-grep_ok 'ERR foo bar baz qux' "${SUITE_RUN_DIR}/log/job/1/foo/01/job.err"
+grep_ok 'EXIT foo bar baz qux' "${SUITE_RUN_DIR}/log/job/1/foo/01/job.err"
 run_fail "${TEST_NAME_BASE}-grep-02" \
     grep -q -F 'ERR foo bar baz qux' "${SUITE_RUN_DIR}/log/job/1/foo/02/job.err"
 
diff --git a/tests/jobscript/10-bad-syntax.t b/tests/jobscript/14-sge-format.t
similarity index 66%
copy from tests/jobscript/10-bad-syntax.t
copy to tests/jobscript/14-sge-format.t
index b397dfb..e418e99 100755
--- a/tests/jobscript/10-bad-syntax.t
+++ b/tests/jobscript/14-sge-format.t
@@ -15,33 +15,21 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test "cylc jobscript" when we have bad syntax in "script" value.
+# Test that SGE directives are formatted correctly. GitHub #2215
 . "$(dirname "${0}")/test_header"
 #-------------------------------------------------------------------------------
-set_test_number 5
-#-------------------------------------------------------------------------------
-init_suite "${TEST_NAME_BASE}" <<'__SUITE_RC__'
-[scheduling]
-    [[dependencies]]
-        graph = foo
-[runtime]
-    [[foo]]
-        script = fi
-__SUITE_RC__
-
-TEST_NAME="${TEST_NAME_BASE}"-simple
-run_fail "${TEST_NAME}" cylc jobscript "${SUITE_NAME}" 'foo.1'
-cmp_ok "${TEST_NAME}.stdout" <'/dev/null'
-contains_ok "${TEST_NAME}.stderr" <<__ERR__
-ERROR: no jobscript generated
-__ERR__
-purge_suite "${SUITE_NAME}"
-#-------------------------------------------------------------------------------
+set_test_number 6
 install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
-TEST_NAME="${TEST_NAME_BASE}-advanced-validate"
+#-------------------------------------------------------------------------------
+TEST_NAME="${TEST_NAME_BASE}-validate"
 run_ok "${TEST_NAME}" cylc validate "${SUITE_NAME}"
-TEST_NAME="${TEST_NAME_BASE}-advanced-run"
-run_ok "${TEST_NAME}" cylc run "${SUITE_NAME}" --reference-test --debug
+#-------------------------------------------------------------------------------
+TEST_NAME="${TEST_NAME_BASE}-script"
+run_ok "${TEST_NAME}" cylc jobscript "${SUITE_NAME}" foo.1
+grep_ok "^#\$ -l h_rt=0:10:00$" "${TEST_NAME}.stdout"
+grep_ok "^#\$ -l s_vmem=1G,s_cpu=60$" "${TEST_NAME}.stdout"
+grep_ok "^#\$ -V$" "${TEST_NAME}.stdout"
+grep_ok "^#\$ -q queuename$" "${TEST_NAME}.stdout"
 #-------------------------------------------------------------------------------
 purge_suite "${SUITE_NAME}"
 exit
diff --git a/tests/jobscript/14-sge-format/suite.rc b/tests/jobscript/14-sge-format/suite.rc
new file mode 100644
index 0000000..7c908bc
--- /dev/null
+++ b/tests/jobscript/14-sge-format/suite.rc
@@ -0,0 +1,13 @@
+title = "Job script: directives test for SGE"
+[scheduling]
+    [[dependencies]]
+        graph = foo
+[runtime]
+    [[foo]]
+        [[[job]]]
+            batch system = sge
+            execution time limit = PT10M
+        [[[directives]]]
+            -V =
+            -q = queuename
+            -l = s_vmem=1G,s_cpu=60
diff --git a/tests/jobscript/12-err-script.t b/tests/jobscript/15-semicolon.t
similarity index 82%
copy from tests/jobscript/12-err-script.t
copy to tests/jobscript/15-semicolon.t
index 5823da2..ab3aa2b 100755
--- a/tests/jobscript/12-err-script.t
+++ b/tests/jobscript/15-semicolon.t
@@ -15,16 +15,14 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test err-script.
+# Test error trapping in cmd1; cmd2 syntax. If cmd1 fails, the error trap
+# should trigger.
 . "$(dirname "${0}")/test_header"
-set_test_number 4
+set_test_number 2
 
 install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
 run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
 run_ok "${TEST_NAME_BASE}-run" cylc run "${SUITE_NAME}" --reference-test --debug
-grep_ok 'ERR foo bar baz qux' "${SUITE_RUN_DIR}/log/job/1/foo/01/job.err"
-run_fail "${TEST_NAME_BASE}-grep-02" \
-    grep -q -F 'ERR foo bar baz qux' "${SUITE_RUN_DIR}/log/job/1/foo/02/job.err"
 
 purge_suite "${SUITE_NAME}"
 exit
diff --git a/tests/jobscript/15-semicolon/reference.log b/tests/jobscript/15-semicolon/reference.log
new file mode 100644
index 0000000..90cba27
--- /dev/null
+++ b/tests/jobscript/15-semicolon/reference.log
@@ -0,0 +1,4 @@
+2017-02-09T16:45:37Z INFO - Initial point: 1
+2017-02-09T16:45:37Z INFO - Final point: 1
+2017-02-09T16:45:37Z INFO - [foo.1] -triggered off []
+2017-02-09T16:45:37Z INFO - [foo.1] -triggered off []
diff --git a/tests/jobscript/15-semicolon/suite.rc b/tests/jobscript/15-semicolon/suite.rc
new file mode 100644
index 0000000..20c5fdb
--- /dev/null
+++ b/tests/jobscript/15-semicolon/suite.rc
@@ -0,0 +1,17 @@
+[cylc]
+    [[reference test]]
+        expected task failures = foo.1
+    [[events]]
+        abort on stalled = True
+[scheduling]
+    [[dependencies]]
+        graph = foo
+[runtime]
+    [[foo]]
+        script = """
+if ((${CYLC_TASK_SUBMIT_NUMBER} == 1)); then
+    false; true
+fi
+"""
+        [[[job]]]
+            execution retry delays = PT0S
diff --git a/tests/jobscript/12-err-script.t b/tests/jobscript/16-midfail.t
similarity index 82%
copy from tests/jobscript/12-err-script.t
copy to tests/jobscript/16-midfail.t
index 5823da2..ecda7fb 100755
--- a/tests/jobscript/12-err-script.t
+++ b/tests/jobscript/16-midfail.t
@@ -15,16 +15,13 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test err-script.
+# Test error trapping of internal commands in job script. GitHub #2218
 . "$(dirname "${0}")/test_header"
-set_test_number 4
+set_test_number 2
 
 install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
 run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
 run_ok "${TEST_NAME_BASE}-run" cylc run "${SUITE_NAME}" --reference-test --debug
-grep_ok 'ERR foo bar baz qux' "${SUITE_RUN_DIR}/log/job/1/foo/01/job.err"
-run_fail "${TEST_NAME_BASE}-grep-02" \
-    grep -q -F 'ERR foo bar baz qux' "${SUITE_RUN_DIR}/log/job/1/foo/02/job.err"
 
 purge_suite "${SUITE_NAME}"
 exit
diff --git a/tests/jobscript/16-midfail/reference.log b/tests/jobscript/16-midfail/reference.log
new file mode 100644
index 0000000..90cba27
--- /dev/null
+++ b/tests/jobscript/16-midfail/reference.log
@@ -0,0 +1,4 @@
+2017-02-09T16:45:37Z INFO - Initial point: 1
+2017-02-09T16:45:37Z INFO - Final point: 1
+2017-02-09T16:45:37Z INFO - [foo.1] -triggered off []
+2017-02-09T16:45:37Z INFO - [foo.1] -triggered off []
diff --git a/tests/jobscript/16-midfail/suite.rc b/tests/jobscript/16-midfail/suite.rc
new file mode 100644
index 0000000..472679e
--- /dev/null
+++ b/tests/jobscript/16-midfail/suite.rc
@@ -0,0 +1,18 @@
+[cylc]
+    [[reference test]]
+        expected task failures = foo.1
+    [[events]]
+        abort on stalled = True
+[scheduling]
+    [[dependencies]]
+        graph = foo
+[runtime]
+    [[foo]]
+        script = """
+if ((${CYLC_TASK_SUBMIT_NUMBER} == 1)); then
+    false
+    true
+fi
+"""
+        [[[job]]]
+            execution retry delays = PT0S
diff --git a/tests/jobscript/12-err-script.t b/tests/jobscript/17-envfail.t
similarity index 76%
copy from tests/jobscript/12-err-script.t
copy to tests/jobscript/17-envfail.t
index 5823da2..1ce5154 100755
--- a/tests/jobscript/12-err-script.t
+++ b/tests/jobscript/17-envfail.t
@@ -15,16 +15,13 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test err-script.
+# Test trapping of errors in env var definitions in job script. GitHub #2218
 . "$(dirname "${0}")/test_header"
-set_test_number 4
+set_test_number 2
 
 install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
 run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
-run_ok "${TEST_NAME_BASE}-run" cylc run "${SUITE_NAME}" --reference-test --debug
-grep_ok 'ERR foo bar baz qux' "${SUITE_RUN_DIR}/log/job/1/foo/01/job.err"
-run_fail "${TEST_NAME_BASE}-grep-02" \
-    grep -q -F 'ERR foo bar baz qux' "${SUITE_RUN_DIR}/log/job/1/foo/02/job.err"
+run_fail "${TEST_NAME_BASE}-run" cylc run "${SUITE_NAME}" --reference-test --debug
 
 purge_suite "${SUITE_NAME}"
 exit
diff --git a/tests/jobscript/17-envfail/reference.log b/tests/jobscript/17-envfail/reference.log
new file mode 100644
index 0000000..b1f9a89
--- /dev/null
+++ b/tests/jobscript/17-envfail/reference.log
@@ -0,0 +1,3 @@
+2017-02-09T16:45:37Z INFO - Initial point: 1
+2017-02-09T16:45:37Z INFO - Final point: 1
+2017-02-09T16:45:37Z INFO - [foo.1] -triggered off []
diff --git a/tests/jobscript/17-envfail/suite.rc b/tests/jobscript/17-envfail/suite.rc
new file mode 100644
index 0000000..bc419b3
--- /dev/null
+++ b/tests/jobscript/17-envfail/suite.rc
@@ -0,0 +1,10 @@
+[cylc]
+    abort if any task fails = True
+[scheduling]
+    [[dependencies]]
+        graph = foo
+[runtime]
+    [[foo]]
+        script = true
+        [[[environment]]]
+            FOO=$(this is an error)
diff --git a/tests/cyclers/23-no_final_cycle_point.t b/tests/modes/03-dummy-env.t
old mode 100755
new mode 100644
similarity index 68%
copy from tests/cyclers/23-no_final_cycle_point.t
copy to tests/modes/03-dummy-env.t
index b552191..8011a0d
--- a/tests/cyclers/23-no_final_cycle_point.t
+++ b/tests/modes/03-dummy-env.t
@@ -15,27 +15,20 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test intercycle dependencies.
+
+# Test that user environment is disabled along with env-script in dummy mode.
+# And that remote host is disabled in dummy local mode.
+
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
-set_test_number 6
+set_test_number 2
 #-------------------------------------------------------------------------------
-install_suite $TEST_NAME_BASE no_final_cycle_point
+install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
-run_fail $TEST_NAME cylc validate $SUITE_NAME
-grep_ok "This suite requires a final cycle point\." \
-    $TEST_NAME.stderr
-contains_ok "$TEST_NAME.stderr" <<'__ERR__'
-'ERROR: Invalid/unsupported recurrence representation: R1/P0D'
-__ERR__
+run_ok $TEST_NAME cylc validate $SUITE_NAME
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-run
-run_fail $TEST_NAME cylc run --debug $SUITE_NAME
-grep_ok "This suite requires a final cycle point\." \
-    $TEST_NAME.stderr
-contains_ok "$TEST_NAME.stderr" <<'__ERR__'
-cylc.config.SuiteConfigError: 'ERROR: Invalid/unsupported recurrence representation: R1/P0D'
-__ERR__
+suite_run_ok $TEST_NAME cylc run -m dummy-local --reference-test --debug $SUITE_NAME
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
diff --git a/tests/modes/03-dummy-env/reference.log b/tests/modes/03-dummy-env/reference.log
new file mode 100644
index 0000000..ecd6c05
--- /dev/null
+++ b/tests/modes/03-dummy-env/reference.log
@@ -0,0 +1,3 @@
+2017-04-04T20:49:39Z INFO - Initial point: 1
+2017-04-04T20:49:39Z INFO - Final point: 1
+2017-04-04T20:49:39Z INFO - [oxygas.1] -triggered off []
diff --git a/tests/modes/03-dummy-env/suite.rc b/tests/modes/03-dummy-env/suite.rc
new file mode 100644
index 0000000..64c5b92
--- /dev/null
+++ b/tests/modes/03-dummy-env/suite.rc
@@ -0,0 +1,18 @@
+[cylc]
+    force run mode = dummy-local
+    [[reference test]]
+        dummy-local mode suite timeout = PT30S
+[scheduling]
+    [[dependencies]]
+        graph = oxygas
+[runtime]
+    [[root]]
+        script = sleep 1
+        [[[simulation]]]
+            default run length = PT1S
+    [[oxygas]]
+        env-script = ELSE=foo
+        [[[remote]]]
+            host = els055
+        [[[environment]]]
+            SOMETHING = "some-modification-$ELSE"
diff --git a/tests/modes/dummy/suite.rc b/tests/modes/dummy/suite.rc
index 8627cd4..2c65b62 100644
--- a/tests/modes/dummy/suite.rc
+++ b/tests/modes/dummy/suite.rc
@@ -10,5 +10,5 @@
             c:fail => !a & !b & !c"""
 [runtime]
     [[a, b, c]]
-        [[[dummy mode]]]
-            script = false
+        [[[simulation]]]
+            fail cycle points = all
diff --git a/tests/modes/simulation/suite.rc b/tests/modes/simulation/suite.rc
index b1a0bca..0b51648 100644
--- a/tests/modes/simulation/suite.rc
+++ b/tests/modes/simulation/suite.rc
@@ -7,5 +7,3 @@
         graph = "a => b => c"
 [runtime]
     [[a, b, c]]
-        [[[simulation mode]]]
-            run time range = PT1S,PT2S
diff --git a/tests/validate/04-check-examples.t b/tests/param_expand/01-basic.t
old mode 100755
new mode 100644
similarity index 61%
rename from tests/validate/04-check-examples.t
rename to tests/param_expand/01-basic.t
index f23375e..54bcf9f
--- a/tests/validate/04-check-examples.t
+++ b/tests/param_expand/01-basic.t
@@ -15,28 +15,17 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test validation of example suites
+# check tasks and graph generated by parameter expansion.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
+set_test_number 2
 #-------------------------------------------------------------------------------
-
-TEST_NAME=$TEST_NAME_BASE
-
-SDEFS=$(find $CYLC_DIR/examples -name suite.rc)
-set_test_number $(echo "$SDEFS" | wc -l)
-
-for SDEF in $SDEFS; do
-    # capture validation stderr:
-    SDEF_NAME=$(basename $(dirname $SDEF))
-    RES=$( cylc val --debug $SDEF 2>&1 >/dev/null )
-    TEST_NAME=$TEST_NAME_BASE-$TEST_NUMBER-"$SDEF_NAME"
-    if [[ -n $RES ]]; then
-        fail $TEST_NAME
-        echo "$SDEF failed validation" >$TEST_NAME.stderr
-        echo "$RES" >>$TEST_NAME.stderr
-        mkdir -p $TEST_LOG_DIR
-        cp $TEST_NAME.stderr $TEST_LOG_DIR/
-    else
-        ok $TEST_NAME
-    fi
-done
+install_suite $TEST_NAME_BASE $TEST_NAME_BASE
+#-------------------------------------------------------------------------------
+TEST_NAME=$TEST_NAME_BASE-validate
+run_ok $TEST_NAME cylc validate "${SUITE_NAME}"
+#-------------------------------------------------------------------------------
+cylc graph --reference $SUITE_NAME > new.graph.ref
+cmp_ok new.graph.ref $TEST_SOURCE_DIR/$TEST_NAME_BASE/graph.ref
+#-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
diff --git a/tests/param_expand/01-basic/graph.ref b/tests/param_expand/01-basic/graph.ref
new file mode 100644
index 0000000..fb05fd3
--- /dev/null
+++ b/tests/param_expand/01-basic/graph.ref
@@ -0,0 +1,48 @@
+edge "foo_cat.1" "bar_j1.1" solid
+edge "foo_cat.1" "bar_j2.1" solid
+edge "foo_cat.1" "bar_j3.1" solid
+edge "foo_cat.1" "bar_j4.1" solid
+edge "foo_cat.1" "bar_j5.1" solid
+edge "foo_dog.1" "bar_j1.1" solid
+edge "foo_dog.1" "bar_j2.1" solid
+edge "foo_dog.1" "bar_j3.1" solid
+edge "foo_dog.1" "bar_j4.1" solid
+edge "foo_dog.1" "bar_j5.1" solid
+edge "foo_fish.1" "bar_j1.1" solid
+edge "foo_fish.1" "bar_j2.1" solid
+edge "foo_fish.1" "bar_j3.1" solid
+edge "foo_fish.1" "bar_j4.1" solid
+edge "foo_fish.1" "bar_j5.1" solid
+edge "qux_j1.1" "waz_k01.1" solid
+edge "qux_j1.1" "waz_k05.1" solid
+edge "qux_j1.1" "waz_k09.1" solid
+edge "qux_j2.1" "waz_k01.1" solid
+edge "qux_j2.1" "waz_k05.1" solid
+edge "qux_j2.1" "waz_k09.1" solid
+edge "qux_j3.1" "waz_k01.1" solid
+edge "qux_j3.1" "waz_k05.1" solid
+edge "qux_j3.1" "waz_k09.1" solid
+edge "qux_j4.1" "waz_k01.1" solid
+edge "qux_j4.1" "waz_k05.1" solid
+edge "qux_j4.1" "waz_k09.1" solid
+edge "qux_j5.1" "waz_k01.1" solid
+edge "qux_j5.1" "waz_k05.1" solid
+edge "qux_j5.1" "waz_k09.1" solid
+graph
+node "bar_j1.1" "bar_j1\n1" unfilled box black
+node "bar_j2.1" "bar_j2\n1" unfilled box black
+node "bar_j3.1" "bar_j3\n1" unfilled box black
+node "bar_j4.1" "bar_j4\n1" unfilled box black
+node "bar_j5.1" "bar_j5\n1" unfilled box black
+node "foo_cat.1" "foo_cat\n1" unfilled box black
+node "foo_dog.1" "foo_dog\n1" unfilled box black
+node "foo_fish.1" "foo_fish\n1" unfilled box black
+node "qux_j1.1" "qux_j1\n1" unfilled box black
+node "qux_j2.1" "qux_j2\n1" unfilled box black
+node "qux_j3.1" "qux_j3\n1" unfilled box black
+node "qux_j4.1" "qux_j4\n1" unfilled box black
+node "qux_j5.1" "qux_j5\n1" unfilled box black
+node "waz_k01.1" "waz_k01\n1" unfilled box black
+node "waz_k05.1" "waz_k05\n1" unfilled box black
+node "waz_k09.1" "waz_k09\n1" unfilled box black
+stop
diff --git a/tests/param_expand/01-basic/suite.rc b/tests/param_expand/01-basic/suite.rc
new file mode 100644
index 0000000..a649366
--- /dev/null
+++ b/tests/param_expand/01-basic/suite.rc
@@ -0,0 +1,18 @@
+[cylc]
+    [[parameters]]
+        i = cat, dog, fish
+	j = 1..5
+	k = 1..10..4
+[scheduling]
+    [[dependencies]]
+        graph = """
+            foo<i> => bar<j>
+	    qux<j> => waz<k>
+	"""
+[runtime]
+    [[root]]
+	script = true
+    [[foo<i>]]
+    [[bar<j>]]
+    [[qux<j>]]
+    [[waz<k>]]
diff --git a/tests/profile-battery/00-compatability.t b/tests/profile-battery/00-compatability.t
index 7c20ab4..d8d328b 100755
--- a/tests/profile-battery/00-compatability.t
+++ b/tests/profile-battery/00-compatability.t
@@ -18,7 +18,7 @@
 # Ensure that any changes to cylc haven't broken the profile-battery command
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
-set_test_number 5
+set_test_number 4
 #-------------------------------------------------------------------------------
 # Check the format of `cylc version --long`.
 run_ok "${TEST_NAME_BASE}-cylc-version" python -c "
@@ -35,27 +35,6 @@ TEST_NAME="${TEST_NAME_BASE}-cylc-list-hello-world-suite"
 run_ok "${TEST_NAME}" cylc list "${CYLC_DIR}/dev/suites/hello-world"
 cmp_ok "${TEST_NAME}.stdout" "${TEST_NAME}.stdout" "hello-world"
 #-------------------------------------------------------------------------------
-# Check that the suites located in $CYLC_DIR/dev/suites are still valid.
-TEST_NAME="${TEST_NAME_BASE}-dev-suites-validate"
-mkdir "${TEST_LOG_DIR}/${TEST_NAME}" -p
-broken=
-for suite in $(find "${CYLC_DIR}/dev/suites" -name suite.rc)
-do
-    if ! cylc validate "${suite}" 2>&1 >/dev/null
-    then
-        broken="${suite}\n${broken}"
-    fi
-done
-if [[ -z "${broken}" ]]
-then
-    ok "${TEST_NAME}"
-else
-    echo -en "The following suites failed validation:\n${broken}" \
-        > "${TEST_NAME}.stderr"
-    cp "${TEST_NAME}.stderr" "${TEST_LOG_DIR}/${TEST_NAME}.stderr"
-    fail "${TEST_NAME}"
-fi
-#-------------------------------------------------------------------------------
 # Run the test experiment.
 TEST_NAME="${TEST_NAME_BASE}-run-test-experiment"
 LOG_DIR="${TEST_LOG_DIR}/${TEST_NAME}"
diff --git a/tests/reload/17-graphing-change.t b/tests/reload/17-graphing-change.t
index 61fca01..845d25d 100755
--- a/tests/reload/17-graphing-change.t
+++ b/tests/reload/17-graphing-change.t
@@ -37,7 +37,9 @@ cp "$TEST_SOURCE_DIR/graphing-change/suite-1.rc" "$TEST_DIR/$SUITE_NAME/suite.rc
 # reload suite
 TEST_NAME=$TEST_NAME_BASE-add-reload
 run_ok $TEST_NAME cylc reload $SUITE_NAME
-sleep 5
+while (($(grep -c 'Reload completed' "${LOG_FILE}" || true) < 1)); do
+    sleep 1  # make sure reload 1 completes
+done
 
 # check suite log
 grep_ok "Added task: 'one'" $LOG_FILE
@@ -50,7 +52,9 @@ cp "$TEST_SOURCE_DIR/graphing-change/suite.rc" "$TEST_DIR/$SUITE_NAME/suite.rc"
 # reload suite
 TEST_NAME=$TEST_NAME_BASE-remove-reload
 run_ok $TEST_NAME cylc reload $SUITE_NAME
-sleep 5
+while (($(grep -c 'Reload completed' "${LOG_FILE}" || true) < 2)); do
+    sleep 1  # make sure reload 2 completes
+done
 
 # check suite log
 grep_ok "Removed task: 'one'" $LOG_FILE
@@ -67,7 +71,9 @@ cp "$TEST_SOURCE_DIR/graphing-change/suite-2.rc" "$TEST_DIR/$SUITE_NAME/suite.rc
 # reload suite
 TEST_NAME=$TEST_NAME_BASE-swap-reload
 run_ok $TEST_NAME cylc reload $SUITE_NAME
-sleep 5
+while (($(grep -c 'Reload completed' "${LOG_FILE}" || true) < 3)); do
+    sleep 1  # make sure reload 3 completes
+done
 
 # check suite log
 TEST_NAME=$TEST_NAME_BASE-swap-log
diff --git a/tests/restart/15-state-to-db/cylc-suite-db.dump b/tests/restart/15-state-to-db/cylc-suite-db.dump
index 70ea33b..f8ece7f 100644
--- a/tests/restart/15-state-to-db/cylc-suite-db.dump
+++ b/tests/restart/15-state-to-db/cylc-suite-db.dump
@@ -21,50 +21,42 @@ INSERT INTO "task_states" VALUES('foo','20040101T0000Z','2016-06-30T11:24:53Z','
 INSERT INTO "task_states" VALUES('bar','20030101T0000Z','2016-06-30T11:24:54Z','2016-06-30T11:24:57Z',1,NULL,1,'localhost','background','27757','succeeded');
 INSERT INTO "task_states" VALUES('bar','20040101T0000Z','2016-06-30T11:24:56Z','2016-06-30T11:24:56Z',0,NULL,1,NULL,NULL,NULL,'held');
 CREATE TABLE task_events(name TEXT, cycle TEXT, time TEXT, submit_num INTEGER, event TEXT, message TEXT, misc TEXT);
-INSERT INTO "task_events" VALUES('foo','20000101T0000Z','2016-06-30T11:24:45Z',1,'incrementing submit number','','localhost');
-INSERT INTO "task_events" VALUES('foo','20000101T0000Z','2016-06-30T11:24:46Z',1,'submission succeeded',NULL,'localhost');
+INSERT INTO "task_events" VALUES('foo','20000101T0000Z','2016-06-30T11:24:46Z',1,'submitted',NULL,'localhost');
 INSERT INTO "task_events" VALUES('foo','20000101T0000Z','2016-06-30T11:24:46Z',1,'output completed','started','localhost');
 INSERT INTO "task_events" VALUES('foo','20000101T0000Z','2016-06-30T11:24:46Z',1,'started',NULL,'localhost');
 INSERT INTO "task_events" VALUES('foo','20000101T0000Z','2016-06-30T11:24:48Z',1,'output completed','succeeded','localhost');
 INSERT INTO "task_events" VALUES('foo','20000101T0000Z','2016-06-30T11:24:48Z',1,'succeeded',NULL,'localhost');
-INSERT INTO "task_events" VALUES('foo','20010101T0000Z','2016-06-30T11:24:47Z',1,'incrementing submit number','','localhost');
-INSERT INTO "task_events" VALUES('bar','20000101T0000Z','2016-06-30T11:24:49Z',1,'incrementing submit number','','localhost');
-INSERT INTO "task_events" VALUES('foo','20010101T0000Z','2016-06-30T11:24:49Z',1,'submission succeeded',NULL,'localhost');
+INSERT INTO "task_events" VALUES('foo','20010101T0000Z','2016-06-30T11:24:49Z',1,'submitted',NULL,'localhost');
 INSERT INTO "task_events" VALUES('foo','20010101T0000Z','2016-06-30T11:24:49Z',1,'output completed','started','localhost');
 INSERT INTO "task_events" VALUES('foo','20010101T0000Z','2016-06-30T11:24:49Z',1,'started',NULL,'localhost');
-INSERT INTO "task_events" VALUES('bar','20000101T0000Z','2016-06-30T11:24:50Z',1,'submission succeeded',NULL,'localhost');
+INSERT INTO "task_events" VALUES('bar','20000101T0000Z','2016-06-30T11:24:50Z',1,'submitted',NULL,'localhost');
 INSERT INTO "task_events" VALUES('bar','20000101T0000Z','2016-06-30T11:24:50Z',1,'output completed','started','localhost');
 INSERT INTO "task_events" VALUES('bar','20000101T0000Z','2016-06-30T11:24:50Z',1,'started',NULL,'localhost');
-INSERT INTO "task_events" VALUES('foo','20020101T0000Z','2016-06-30T11:24:50Z',1,'incrementing submit number','','localhost');
 INSERT INTO "task_events" VALUES('foo','20010101T0000Z','2016-06-30T11:24:50Z',1,'output completed','succeeded','localhost');
 INSERT INTO "task_events" VALUES('foo','20010101T0000Z','2016-06-30T11:24:50Z',1,'succeeded',NULL,'localhost');
-INSERT INTO "task_events" VALUES('bar','20010101T0000Z','2016-06-30T11:24:51Z',1,'incrementing submit number','','localhost');
 INSERT INTO "task_events" VALUES('bar','20000101T0000Z','2016-06-30T11:24:51Z',1,'output completed','succeeded','localhost');
 INSERT INTO "task_events" VALUES('bar','20000101T0000Z','2016-06-30T11:24:51Z',1,'succeeded',NULL,'localhost');
-INSERT INTO "task_events" VALUES('foo','20020101T0000Z','2016-06-30T11:24:51Z',1,'submission succeeded',NULL,'localhost');
+INSERT INTO "task_events" VALUES('foo','20020101T0000Z','2016-06-30T11:24:51Z',1,'submitted',NULL,'localhost');
 INSERT INTO "task_events" VALUES('foo','20020101T0000Z','2016-06-30T11:24:51Z',1,'output completed','started','localhost');
 INSERT INTO "task_events" VALUES('foo','20020101T0000Z','2016-06-30T11:24:51Z',1,'started',NULL,'localhost');
-INSERT INTO "task_events" VALUES('bar','20010101T0000Z','2016-06-30T11:24:52Z',1,'submission succeeded',NULL,'localhost');
+INSERT INTO "task_events" VALUES('bar','20010101T0000Z','2016-06-30T11:24:52Z',1,'submitted',NULL,'localhost');
 INSERT INTO "task_events" VALUES('bar','20010101T0000Z','2016-06-30T11:24:52Z',1,'output completed','started','localhost');
 INSERT INTO "task_events" VALUES('bar','20010101T0000Z','2016-06-30T11:24:52Z',1,'started',NULL,'localhost');
 INSERT INTO "task_events" VALUES('foo','20020101T0000Z','2016-06-30T11:24:52Z',1,'output completed','succeeded','localhost');
 INSERT INTO "task_events" VALUES('foo','20020101T0000Z','2016-06-30T11:24:52Z',1,'succeeded',NULL,'localhost');
-INSERT INTO "task_events" VALUES('foo','20030101T0000Z','2016-06-30T11:24:52Z',1,'incrementing submit number','','localhost');
-INSERT INTO "task_events" VALUES('bar','20020101T0000Z','2016-06-30T11:24:53Z',1,'incrementing submit number','','localhost');
 INSERT INTO "task_events" VALUES('bar','20010101T0000Z','2016-06-30T11:24:53Z',1,'output completed','succeeded','localhost');
 INSERT INTO "task_events" VALUES('bar','20010101T0000Z','2016-06-30T11:24:53Z',1,'succeeded',NULL,'localhost');
-INSERT INTO "task_events" VALUES('foo','20030101T0000Z','2016-06-30T11:24:53Z',1,'submission succeeded',NULL,'localhost');
+INSERT INTO "task_events" VALUES('foo','20030101T0000Z','2016-06-30T11:24:53Z',1,'submitted',NULL,'localhost');
 INSERT INTO "task_events" VALUES('foo','20030101T0000Z','2016-06-30T11:24:53Z',1,'output completed','started','localhost');
 INSERT INTO "task_events" VALUES('foo','20030101T0000Z','2016-06-30T11:24:53Z',1,'started',NULL,'localhost');
-INSERT INTO "task_events" VALUES('bar','20020101T0000Z','2016-06-30T11:24:54Z',1,'submission succeeded',NULL,'localhost');
+INSERT INTO "task_events" VALUES('bar','20020101T0000Z','2016-06-30T11:24:54Z',1,'submitted',NULL,'localhost');
 INSERT INTO "task_events" VALUES('bar','20020101T0000Z','2016-06-30T11:24:54Z',1,'output completed','started','localhost');
 INSERT INTO "task_events" VALUES('bar','20020101T0000Z','2016-06-30T11:24:54Z',1,'started',NULL,'localhost');
 INSERT INTO "task_events" VALUES('foo','20030101T0000Z','2016-06-30T11:24:54Z',1,'output completed','succeeded','localhost');
 INSERT INTO "task_events" VALUES('foo','20030101T0000Z','2016-06-30T11:24:54Z',1,'succeeded',NULL,'localhost');
 INSERT INTO "task_events" VALUES('bar','20020101T0000Z','2016-06-30T11:24:55Z',1,'output completed','succeeded','localhost');
 INSERT INTO "task_events" VALUES('bar','20020101T0000Z','2016-06-30T11:24:55Z',1,'succeeded',NULL,'localhost');
-INSERT INTO "task_events" VALUES('bar','20030101T0000Z','2016-06-30T11:24:55Z',1,'incrementing submit number','','localhost');
-INSERT INTO "task_events" VALUES('bar','20030101T0000Z','2016-06-30T11:24:56Z',1,'submission succeeded',NULL,'localhost');
+INSERT INTO "task_events" VALUES('bar','20030101T0000Z','2016-06-30T11:24:56Z',1,'submitted',NULL,'localhost');
 INSERT INTO "task_events" VALUES('bar','20030101T0000Z','2016-06-30T11:24:56Z',1,'output completed','started','localhost');
 INSERT INTO "task_events" VALUES('bar','20030101T0000Z','2016-06-30T11:24:56Z',1,'started',NULL,'localhost');
 INSERT INTO "task_events" VALUES('bar','20030101T0000Z','2016-06-30T11:24:57Z',1,'output completed','succeeded','localhost');
diff --git a/tests/restart/24-upgrade-db-611/cylc-suite-db.dump b/tests/restart/24-upgrade-db-611/cylc-suite-db.dump
index 94a417d..cbaecc9 100644
--- a/tests/restart/24-upgrade-db-611/cylc-suite-db.dump
+++ b/tests/restart/24-upgrade-db-611/cylc-suite-db.dump
@@ -25,52 +25,43 @@ CREATE TABLE broadcast_states_checkpoints(id INTEGER, point TEXT, namespace TEXT
 CREATE TABLE checkpoint_id(id INTEGER, time TEXT, event TEXT, PRIMARY KEY(id));
 INSERT INTO "checkpoint_id" VALUES(0,'2016-12-05T12:30:05Z','latest');
 CREATE TABLE task_events(name TEXT, cycle TEXT, time TEXT, submit_num INTEGER, event TEXT, message TEXT);
-INSERT INTO "task_events" VALUES('t1','2016','2016-12-05T12:29:58Z',1,'incrementing submit number','');
-INSERT INTO "task_events" VALUES('t2','2016','2016-12-05T12:29:58Z',1,'incrementing submit number','');
-INSERT INTO "task_events" VALUES('t3','2016','2016-12-05T12:29:58Z',1,'incrementing submit number','');
-INSERT INTO "task_events" VALUES('t1','2016','2016-12-05T12:29:59Z',1,'submission succeeded',NULL);
+INSERT INTO "task_events" VALUES('t1','2016','2016-12-05T12:29:59Z',1,'submitted',NULL);
 INSERT INTO "task_events" VALUES('t1','2016','2016-12-05T12:29:59Z',1,'output completed','started');
 INSERT INTO "task_events" VALUES('t1','2016','2016-12-05T12:29:59Z',1,'started',NULL);
-INSERT INTO "task_events" VALUES('t2','2016','2016-12-05T12:29:59Z',1,'submission succeeded',NULL);
+INSERT INTO "task_events" VALUES('t2','2016','2016-12-05T12:29:59Z',1,'submitted',NULL);
 INSERT INTO "task_events" VALUES('t2','2016','2016-12-05T12:29:59Z',1,'output completed','started');
 INSERT INTO "task_events" VALUES('t2','2016','2016-12-05T12:29:59Z',1,'started',NULL);
-INSERT INTO "task_events" VALUES('t3','2016','2016-12-05T12:29:59Z',1,'submission succeeded',NULL);
+INSERT INTO "task_events" VALUES('t3','2016','2016-12-05T12:29:59Z',1,'submitted',NULL);
 INSERT INTO "task_events" VALUES('t3','2016','2016-12-05T12:29:59Z',1,'output completed','started');
 INSERT INTO "task_events" VALUES('t3','2016','2016-12-05T12:29:59Z',1,'started',NULL);
-INSERT INTO "task_events" VALUES('t1','2017','2016-12-05T12:30:00Z',1,'incrementing submit number','');
 INSERT INTO "task_events" VALUES('t1','2016','2016-12-05T12:30:00Z',1,'output completed','succeeded');
 INSERT INTO "task_events" VALUES('t1','2016','2016-12-05T12:30:00Z',1,'succeeded',NULL);
-INSERT INTO "task_events" VALUES('t3','2017','2016-12-05T12:30:00Z',1,'incrementing submit number','');
 INSERT INTO "task_events" VALUES('t3','2016','2016-12-05T12:30:00Z',1,'output completed','succeeded');
 INSERT INTO "task_events" VALUES('t3','2016','2016-12-05T12:30:00Z',1,'succeeded',NULL);
 INSERT INTO "task_events" VALUES('t2','2016','2016-12-05T12:30:00Z',1,'output completed','succeeded');
 INSERT INTO "task_events" VALUES('t2','2016','2016-12-05T12:30:00Z',1,'succeeded',NULL);
-INSERT INTO "task_events" VALUES('t2','2017','2016-12-05T12:30:00Z',1,'incrementing submit number','');
-INSERT INTO "task_events" VALUES('t1','2017','2016-12-05T12:30:01Z',1,'submission succeeded',NULL);
+INSERT INTO "task_events" VALUES('t1','2017','2016-12-05T12:30:01Z',1,'submitted',NULL);
 INSERT INTO "task_events" VALUES('t1','2017','2016-12-05T12:30:02Z',1,'output completed','started');
 INSERT INTO "task_events" VALUES('t1','2017','2016-12-05T12:30:02Z',1,'started',NULL);
-INSERT INTO "task_events" VALUES('t3','2017','2016-12-05T12:30:02Z',1,'submission succeeded',NULL);
+INSERT INTO "task_events" VALUES('t3','2017','2016-12-05T12:30:02Z',1,'submitted',NULL);
 INSERT INTO "task_events" VALUES('t3','2017','2016-12-05T12:30:02Z',1,'output completed','started');
 INSERT INTO "task_events" VALUES('t3','2017','2016-12-05T12:30:02Z',1,'started',NULL);
-INSERT INTO "task_events" VALUES('t2','2017','2016-12-05T12:30:01Z',1,'submission succeeded',NULL);
+INSERT INTO "task_events" VALUES('t2','2017','2016-12-05T12:30:01Z',1,'submitted',NULL);
 INSERT INTO "task_events" VALUES('t2','2017','2016-12-05T12:30:02Z',1,'output completed','started');
 INSERT INTO "task_events" VALUES('t2','2017','2016-12-05T12:30:02Z',1,'started',NULL);
 INSERT INTO "task_events" VALUES('t1','2017','2016-12-05T12:30:03Z',1,'output completed','succeeded');
 INSERT INTO "task_events" VALUES('t1','2017','2016-12-05T12:30:03Z',1,'succeeded',NULL);
 INSERT INTO "task_events" VALUES('t3','2017','2016-12-05T12:30:03Z',1,'output completed','succeeded');
 INSERT INTO "task_events" VALUES('t3','2017','2016-12-05T12:30:03Z',1,'succeeded',NULL);
-INSERT INTO "task_events" VALUES('t3','2018','2016-12-05T12:30:03Z',1,'incrementing submit number','');
-INSERT INTO "task_events" VALUES('t1','2018','2016-12-05T12:30:03Z',1,'incrementing submit number','');
-INSERT INTO "task_events" VALUES('t2','2018','2016-12-05T12:30:03Z',1,'incrementing submit number','');
 INSERT INTO "task_events" VALUES('t2','2017','2016-12-05T12:30:03Z',1,'output completed','succeeded');
 INSERT INTO "task_events" VALUES('t2','2017','2016-12-05T12:30:03Z',1,'succeeded',NULL);
-INSERT INTO "task_events" VALUES('t3','2018','2016-12-05T12:30:04Z',1,'submission succeeded',NULL);
+INSERT INTO "task_events" VALUES('t3','2018','2016-12-05T12:30:04Z',1,'submitted',NULL);
 INSERT INTO "task_events" VALUES('t3','2018','2016-12-05T12:30:04Z',1,'output completed','started');
 INSERT INTO "task_events" VALUES('t3','2018','2016-12-05T12:30:04Z',1,'started',NULL);
-INSERT INTO "task_events" VALUES('t1','2018','2016-12-05T12:30:04Z',1,'submission succeeded',NULL);
+INSERT INTO "task_events" VALUES('t1','2018','2016-12-05T12:30:04Z',1,'submitted',NULL);
 INSERT INTO "task_events" VALUES('t1','2018','2016-12-05T12:30:04Z',1,'output completed','started');
 INSERT INTO "task_events" VALUES('t1','2018','2016-12-05T12:30:04Z',1,'started',NULL);
-INSERT INTO "task_events" VALUES('t2','2018','2016-12-05T12:30:04Z',1,'submission succeeded',NULL);
+INSERT INTO "task_events" VALUES('t2','2018','2016-12-05T12:30:04Z',1,'submitted',NULL);
 INSERT INTO "task_events" VALUES('t2','2018','2016-12-05T12:30:04Z',1,'output completed','started');
 INSERT INTO "task_events" VALUES('t2','2018','2016-12-05T12:30:04Z',1,'started',NULL);
 INSERT INTO "task_events" VALUES('t3','2018','2016-12-05T12:30:05Z',1,'output completed','succeeded');
diff --git a/tests/suite-state/00-polling.t b/tests/suite-state/00-polling.t
index 5130de5..3d25965 100644
--- a/tests/suite-state/00-polling.t
+++ b/tests/suite-state/00-polling.t
@@ -20,7 +20,7 @@
 
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
-set_test_number 3
+set_test_number 5
 #-------------------------------------------------------------------------------
 install_suite $TEST_NAME_BASE polling
 #-------------------------------------------------------------------------------
@@ -42,6 +42,21 @@ run_ok $TEST_NAME cylc val --debug --set UPSTREAM=$UPSTREAM $SUITE_NAME
 cylc run $UPSTREAM
 
 #-------------------------------------------------------------------------------
+# check auto-generated task script for lbad
+cylc get-config --set UPSTREAM=$UPSTREAM -i '[runtime][lbad]script' $SUITE_NAME > lbad.script
+cmp_ok lbad.script << __END__
+echo cylc suite-state --task=bad --point=\$CYLC_TASK_CYCLE_POINT --status=fail --interval=2 --max-polls=20 $UPSTREAM
+cylc suite-state --task=bad --point=\$CYLC_TASK_CYCLE_POINT --status=fail --interval=2 --max-polls=20 $UPSTREAM
+__END__
+
+# check auto-generated task script for lgood
+cylc get-config --set UPSTREAM=$UPSTREAM -i '[runtime][lgood]script' $SUITE_NAME > lgood.script
+cmp_ok lgood.script << __END__
+echo cylc suite-state --task=good --point=\$CYLC_TASK_CYCLE_POINT --status=succeed --interval=2 --max-polls=20 $UPSTREAM
+cylc suite-state --task=good --point=\$CYLC_TASK_CYCLE_POINT --status=succeed --interval=2 --max-polls=20 $UPSTREAM
+__END__
+
+#-------------------------------------------------------------------------------
 # run the suite-state polling test suite
 TEST_NAME=$TEST_NAME_BASE-run
 suite_run_ok $TEST_NAME cylc run --reference-test --debug --set UPSTREAM=$UPSTREAM $SUITE_NAME
diff --git a/tests/suite-state/01-polling.t b/tests/suite-state/01-polling.t
index 8ce5197..1d7363c 100644
--- a/tests/suite-state/01-polling.t
+++ b/tests/suite-state/01-polling.t
@@ -20,7 +20,7 @@
 
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
-set_test_number 3
+set_test_number 5
 #-------------------------------------------------------------------------------
 install_suite $TEST_NAME_BASE polling
 #-------------------------------------------------------------------------------
@@ -38,6 +38,21 @@ TEST_NAME=$TEST_NAME_BASE-validate-polling
 run_ok $TEST_NAME cylc val --debug --set UPSTREAM=$UPSTREAM $SUITE_NAME
 
 #-------------------------------------------------------------------------------
+# check auto-generated task script for lbad
+cylc get-config --set UPSTREAM=$UPSTREAM -i '[runtime][lbad]script' $SUITE_NAME > lbad.script
+cmp_ok lbad.script << __END__
+echo cylc suite-state --task=bad --point=\$CYLC_TASK_CYCLE_POINT --status=fail --interval=2 --max-polls=20 $UPSTREAM
+cylc suite-state --task=bad --point=\$CYLC_TASK_CYCLE_POINT --status=fail --interval=2 --max-polls=20 $UPSTREAM
+__END__
+
+# check auto-generated task script for lgood
+cylc get-config --set UPSTREAM=$UPSTREAM -i '[runtime][lgood]script' $SUITE_NAME > lgood.script
+cmp_ok lgood.script << __END__
+echo cylc suite-state --task=good --point=\$CYLC_TASK_CYCLE_POINT --status=succeed --interval=2 --max-polls=20 $UPSTREAM
+cylc suite-state --task=good --point=\$CYLC_TASK_CYCLE_POINT --status=succeed --interval=2 --max-polls=20 $UPSTREAM
+__END__
+
+#-------------------------------------------------------------------------------
 # run the upstream suite and detach (not a test)
 cylc run $UPSTREAM
 
diff --git a/tests/tutorial/cycling/reflogs/tut.five b/tests/tutorial/cycling/reflogs/tut.five
index 632db88..8283a65 100644
--- a/tests/tutorial/cycling/reflogs/tut.five
+++ b/tests/tutorial/cycling/reflogs/tut.five
@@ -1,68 +1,8 @@
-2013/10/06 23:34:02 INFO - Thread-2 start (Event Handler Submission)
-2013/10/06 23:34:02 INFO - Thread-3 start (Poll and Kill Command Submission)
-2013/10/06 23:34:02 INFO - port:7766
-2013/10/06 23:34:02 INFO - Suite starting at 2013-10-06 23:34:02.443099
-2013/10/06 23:34:02 INFO - Log event clock: real time
 2013/10/06 23:34:02 INFO - Run mode: live
 2013/10/06 23:34:02 INFO - Initial point: 20130808T0000Z
 2013/10/06 23:34:02 INFO - Final point: 20130808T1200Z
-2013/10/06 23:34:02 INFO - Thread-4 start (Job Submission)
-2013/10/06 23:34:02 INFO - Thread-5 start (Request Handling)
-2013/10/06 23:34:02 INFO - Cold Start 20130808T0000Z
-2013/10/06 23:34:02 INFO - [prep.20130808T0000Z] -(setting:submitting)
 2013/10/06 23:34:02 INFO - [prep.20130808T0000Z] -triggered off []
-2013/10/06 23:34:03 INFO - [prep.20130808T0000Z] -(current:submitting)> prep.20130808T0000Z submitting now
-2013/10/06 23:34:04 INFO - [prep.20130808T0000Z] -(current:submitting)> prep.20130808T0000Z submission succeeded
-2013/10/06 23:34:04 INFO - [prep.20130808T0000Z] -(setting:submitted)
-2013/10/06 23:34:04 INFO - [prep.20130808T0000Z] -(current:submitted)> prep.20130808T0000Z submit_method_id=3521
-2013/10/06 23:34:04 INFO - [prep.20130808T0000Z] -(current:submitted)> prep.20130808T0000Z started at 2013-10-06T23:34:03
-2013/10/06 23:34:04 INFO - [prep.20130808T0000Z] -(setting:running)
-2013/10/06 23:34:04 INFO - [prep.20130808T0000Z] -(current:running)> prep.20130808T0000Z succeeded at 2013-10-06T23:34:03
-2013/10/06 23:34:04 INFO - [prep.20130808T0000Z] -(setting:succeeded)
-2013/10/06 23:34:04 INFO - [foo.20130808T0000Z] -(setting:submitting)
 2013/10/06 23:34:04 INFO - [foo.20130808T0000Z] -triggered off ['prep.20130808T0000Z']
-2013/10/06 23:34:06 INFO - [foo.20130808T0000Z] -(current:submitting)> foo.20130808T0000Z submitting now
-2013/10/06 23:34:06 INFO - [foo.20130808T0000Z] -(current:submitting)> foo.20130808T0000Z submission succeeded
-2013/10/06 23:34:06 INFO - [foo.20130808T0000Z] -(setting:submitted)
-2013/10/06 23:34:06 INFO - [foo.20130808T0000Z] -(current:submitted)> foo.20130808T0000Z submit_method_id=3565
-2013/10/06 23:34:06 INFO - [foo.20130808T0000Z] -(current:submitted)> foo.20130808T0000Z started at 2013-10-06T23:34:05
-2013/10/06 23:34:06 INFO - [foo.20130808T0000Z] -(setting:running)
-2013/10/06 23:34:06 INFO - [foo.20130808T0000Z] -(current:running)> foo.20130808T0000Z succeeded at 2013-10-06T23:34:05
-2013/10/06 23:34:06 INFO - [foo.20130808T0000Z] -(setting:succeeded)
-2013/10/06 23:34:06 INFO - [bar.20130808T0000Z] -(setting:submitting)
 2013/10/06 23:34:06 INFO - [bar.20130808T0000Z] -triggered off ['foo.20130808T0000Z']
-2013/10/06 23:34:07 INFO - [foo.20130808T1200Z] -(setting:submitting)
 2013/10/06 23:34:07 INFO - [foo.20130808T1200Z] -triggered off ['foo.20130808T0000Z']
-2013/10/06 23:34:08 INFO - [bar.20130808T0000Z] -(current:submitting)> bar.20130808T0000Z submitting now
-2013/10/06 23:34:08 INFO - [bar.20130808T0000Z] -(current:submitting)> bar.20130808T0000Z submission succeeded
-2013/10/06 23:34:08 INFO - [bar.20130808T0000Z] -(setting:submitted)
-2013/10/06 23:34:08 INFO - [bar.20130808T0000Z] -(current:submitted)> bar.20130808T0000Z submit_method_id=3609
-2013/10/06 23:34:08 INFO - [bar.20130808T0000Z] -(current:submitted)> bar.20130808T0000Z started at 2013-10-06T23:34:07
-2013/10/06 23:34:08 INFO - [bar.20130808T0000Z] -(setting:running)
-2013/10/06 23:34:08 INFO - [bar.20130808T0000Z] -(current:running)> bar.20130808T0000Z succeeded at 2013-10-06T23:34:08
-2013/10/06 23:34:08 INFO - [bar.20130808T0000Z] -(setting:succeeded)
-2013/10/06 23:34:08 INFO - [foo.20130808T1200Z] -(current:submitting)> foo.20130808T1200Z submitting now
-2013/10/06 23:34:08 INFO - [foo.20130808T1200Z] -(current:submitting)> foo.20130808T1200Z started at 2013-10-06T23:34:07
-2013/10/06 23:34:08 INFO - [foo.20130808T1200Z] -(setting:running)
-2013/10/06 23:34:08 INFO - [foo.20130808T1200Z] -(current:running)> foo.20130808T1200Z succeeded at 2013-10-06T23:34:08
-2013/10/06 23:34:08 INFO - [foo.20130808T1200Z] -(setting:succeeded)
-2013/10/06 23:34:08 WARNING - [foo.20130808T1200Z] -Assuming non-reported outputs were completed:
-foo.20130808T1200Z submitted
-2013/10/06 23:34:08 INFO - [foo.20130809T0000Z] -HOLDING (beyond suite stop cycle) 20130808T1200Z
-2013/10/06 23:34:08 INFO - [foo.20130809T0000Z] -(setting:held)
-2013/10/06 23:34:09 INFO - [foo.20130808T1200Z] -(current:succeeded)> foo.20130808T1200Z submission succeeded
-2013/10/06 23:34:09 INFO - [foo.20130808T1200Z] -(current:succeeded)> foo.20130808T1200Z submit_method_id=3634
-2013/10/06 23:34:09 INFO - [bar.20130808T1200Z] -(setting:submitting)
 2013/10/06 23:34:09 INFO - [bar.20130808T1200Z] -triggered off ['foo.20130808T1200Z']
-2013/10/06 23:34:11 INFO - [bar.20130808T1200Z] -(current:submitting)> bar.20130808T1200Z submitting now
-2013/10/06 23:34:11 INFO - [bar.20130808T1200Z] -(current:submitting)> bar.20130808T1200Z submission succeeded
-2013/10/06 23:34:11 INFO - [bar.20130808T1200Z] -(setting:submitted)
-2013/10/06 23:34:11 INFO - [bar.20130808T1200Z] -(current:submitted)> bar.20130808T1200Z submit_method_id=3697
-2013/10/06 23:34:11 INFO - [bar.20130808T1200Z] -(current:submitted)> bar.20130808T1200Z started at 2013-10-06T23:34:11
-2013/10/06 23:34:11 INFO - [bar.20130808T1200Z] -(setting:running)
-2013/10/06 23:34:11 INFO - [bar.20130808T1200Z] -(current:running)> bar.20130808T1200Z succeeded at 2013-10-06T23:34:11
-2013/10/06 23:34:11 INFO - [bar.20130808T1200Z] -(setting:succeeded)
-2013/10/06 23:34:11 INFO - [bar.20130809T0000Z] -HOLDING (beyond suite stop cycle) 20130808T1200Z
-2013/10/06 23:34:11 INFO - [bar.20130809T0000Z] -(setting:held)
-2013/10/06 23:34:12 INFO - All normal cycling tasks have spawned past the final cycle 20130808T1200Z
-2013/10/06 23:34:12 INFO - Suite shutting down at 2013-10-06 23:34:12.508844
diff --git a/tests/tutorial/cycling/reflogs/tut.four b/tests/tutorial/cycling/reflogs/tut.four
index e5a661b..12fd462 100644
--- a/tests/tutorial/cycling/reflogs/tut.four
+++ b/tests/tutorial/cycling/reflogs/tut.four
@@ -1,68 +1,9 @@
-2013/10/06 23:34:15 INFO - Thread-2 start (Event Handler Submission)
-2013/10/06 23:34:15 INFO - Thread-3 start (Poll and Kill Command Submission)
-2013/10/06 23:34:15 INFO - port:7766
-2013/10/06 23:34:15 INFO - Suite starting at 2013-10-06 23:34:15.780208
-2013/10/06 23:34:15 INFO - Log event clock: real time
 2013/10/06 23:34:15 INFO - Run mode: live
 2013/10/06 23:34:15 INFO - Initial point: 20130808T0000Z
 2013/10/06 23:34:15 INFO - Final point: 20130808T1200Z
-2013/10/06 23:34:15 INFO - Thread-4 start (Job Submission)
-2013/10/06 23:34:15 INFO - Thread-5 start (Request Handling)
 2013/10/06 23:34:15 INFO - Cold Start 20130808T0000Z
-2013/10/06 23:34:15 INFO - [prep.20130808T0000Z] -(setting:submitting)
 2013/10/06 23:34:15 INFO - [prep.20130808T0000Z] -triggered off []
-2013/10/06 23:34:16 INFO - [prep.20130808T0000Z] -(current:submitting)> prep.20130808T0000Z submitting now
-2013/10/06 23:34:17 INFO - [prep.20130808T0000Z] -(current:submitting)> prep.20130808T0000Z submission succeeded
-2013/10/06 23:34:17 INFO - [prep.20130808T0000Z] -(setting:submitted)
-2013/10/06 23:34:17 INFO - [prep.20130808T0000Z] -(current:submitted)> prep.20130808T0000Z submit_method_id=3835
-2013/10/06 23:34:17 INFO - [prep.20130808T0000Z] -(current:submitted)> prep.20130808T0000Z started at 2013-10-06T23:34:17
-2013/10/06 23:34:17 INFO - [prep.20130808T0000Z] -(setting:running)
-2013/10/06 23:34:17 INFO - [prep.20130808T0000Z] -(current:running)> prep.20130808T0000Z succeeded at 2013-10-06T23:34:17
-2013/10/06 23:34:17 INFO - [prep.20130808T0000Z] -(setting:succeeded)
-2013/10/06 23:34:17 INFO - [foo.20130808T0000Z] -(setting:submitting)
 2013/10/06 23:34:17 INFO - [foo.20130808T0000Z] -triggered off ['prep.20130808T0000Z']
-2013/10/06 23:34:19 INFO - [foo.20130808T0000Z] -(current:submitting)> foo.20130808T0000Z submitting now
-2013/10/06 23:34:19 INFO - [foo.20130808T0000Z] -(current:submitting)> foo.20130808T0000Z submission succeeded
-2013/10/06 23:34:19 INFO - [foo.20130808T0000Z] -(setting:submitted)
-2013/10/06 23:34:19 INFO - [foo.20130808T0000Z] -(current:submitted)> foo.20130808T0000Z submit_method_id=3879
-2013/10/06 23:34:19 INFO - [foo.20130808T0000Z] -(current:submitted)> foo.20130808T0000Z started at 2013-10-06T23:34:19
-2013/10/06 23:34:19 INFO - [foo.20130808T0000Z] -(setting:running)
-2013/10/06 23:34:19 INFO - [foo.20130808T0000Z] -(current:running)> foo.20130808T0000Z succeeded at 2013-10-06T23:34:19
-2013/10/06 23:34:19 INFO - [foo.20130808T0000Z] -(setting:succeeded)
-2013/10/06 23:34:19 INFO - [bar.20130808T0000Z] -(setting:submitting)
 2013/10/06 23:34:19 INFO - [bar.20130808T0000Z] -triggered off ['foo.20130808T0000Z']
-2013/10/06 23:34:20 INFO - [foo.20130808T1200Z] -(setting:submitting)
 2013/10/06 23:34:20 INFO - [foo.20130808T1200Z] -triggered off ['foo.20130808T0000Z', 'prep.20130808T0000Z']
-2013/10/06 23:34:21 INFO - [bar.20130808T0000Z] -(current:submitting)> bar.20130808T0000Z submitting now
-2013/10/06 23:34:21 INFO - [bar.20130808T0000Z] -(current:submitting)> bar.20130808T0000Z submission succeeded
-2013/10/06 23:34:21 INFO - [bar.20130808T0000Z] -(setting:submitted)
-2013/10/06 23:34:21 INFO - [bar.20130808T0000Z] -(current:submitted)> bar.20130808T0000Z submit_method_id=3923
-2013/10/06 23:34:21 INFO - [bar.20130808T0000Z] -(current:submitted)> bar.20130808T0000Z started at 2013-10-06T23:34:21
-2013/10/06 23:34:21 INFO - [bar.20130808T0000Z] -(setting:running)
-2013/10/06 23:34:21 INFO - [bar.20130808T0000Z] -(current:running)> bar.20130808T0000Z succeeded at 2013-10-06T23:34:21
-2013/10/06 23:34:21 INFO - [bar.20130808T0000Z] -(setting:succeeded)
-2013/10/06 23:34:21 INFO - [foo.20130808T1200Z] -(current:submitting)> foo.20130808T1200Z submitting now
-2013/10/06 23:34:21 INFO - [foo.20130808T1200Z] -(current:submitting)> foo.20130808T1200Z started at 2013-10-06T23:34:21
-2013/10/06 23:34:21 INFO - [foo.20130808T1200Z] -(setting:running)
-2013/10/06 23:34:21 INFO - [foo.20130808T1200Z] -(current:running)> foo.20130808T1200Z succeeded at 2013-10-06T23:34:21
-2013/10/06 23:34:21 INFO - [foo.20130808T1200Z] -(setting:succeeded)
-2013/10/06 23:34:21 WARNING - [foo.20130808T1200Z] -Assuming non-reported outputs were completed:
-foo.20130808T1200Z submitted
-2013/10/06 23:34:21 INFO - [foo.20130809T0000Z] -HOLDING (beyond suite stop cycle) 20130808T1200Z
-2013/10/06 23:34:21 INFO - [foo.20130809T0000Z] -(setting:held)
-2013/10/06 23:34:22 INFO - [foo.20130808T1200Z] -(current:succeeded)> foo.20130808T1200Z submission succeeded
-2013/10/06 23:34:22 INFO - [foo.20130808T1200Z] -(current:succeeded)> foo.20130808T1200Z submit_method_id=3948
-2013/10/06 23:34:22 INFO - [bar.20130808T1200Z] -(setting:submitting)
 2013/10/06 23:34:22 INFO - [bar.20130808T1200Z] -triggered off ['foo.20130808T1200Z']
-2013/10/06 23:34:24 INFO - [bar.20130808T1200Z] -(current:submitting)> bar.20130808T1200Z submitting now
-2013/10/06 23:34:24 INFO - [bar.20130808T1200Z] -(current:submitting)> bar.20130808T1200Z submission succeeded
-2013/10/06 23:34:24 INFO - [bar.20130808T1200Z] -(setting:submitted)
-2013/10/06 23:34:24 INFO - [bar.20130808T1200Z] -(current:submitted)> bar.20130808T1200Z submit_method_id=4012
-2013/10/06 23:34:24 INFO - [bar.20130808T1200Z] -(current:submitted)> bar.20130808T1200Z started at 2013-10-06T23:34:24
-2013/10/06 23:34:24 INFO - [bar.20130808T1200Z] -(setting:running)
-2013/10/06 23:34:24 INFO - [bar.20130808T1200Z] -(current:running)> bar.20130808T1200Z succeeded at 2013-10-06T23:34:24
-2013/10/06 23:34:24 INFO - [bar.20130808T1200Z] -(setting:succeeded)
-2013/10/06 23:34:24 INFO - [bar.20130809T0000Z] -HOLDING (beyond suite stop cycle) 20130808T1200Z
-2013/10/06 23:34:24 INFO - [bar.20130809T0000Z] -(setting:held)
-2013/10/06 23:34:25 INFO - All normal cycling tasks have spawned past the final cycle 20130808T1200Z
-2013/10/06 23:34:25 INFO - Suite shutting down at 2013-10-06 23:34:25.841156
diff --git a/tests/tutorial/cycling/reflogs/tut.one b/tests/tutorial/cycling/reflogs/tut.one
index 0ecb9a7..d2cb902 100644
--- a/tests/tutorial/cycling/reflogs/tut.one
+++ b/tests/tutorial/cycling/reflogs/tut.one
@@ -1,58 +1,8 @@
-2013/10/06 23:34:30 INFO - Thread-2 start (Event Handler Submission)
-2013/10/06 23:34:30 INFO - Thread-3 start (Poll and Kill Command Submission)
-2013/10/06 23:34:30 INFO - port:7766
-2013/10/06 23:34:30 INFO - Suite starting at 2013-10-06 23:34:30.105865
-2013/10/06 23:34:30 INFO - Log event clock: real time
 2013/10/06 23:34:30 INFO - Run mode: live
 2013/10/06 23:34:30 INFO - Initial point: 20130808T0000Z
 2013/10/06 23:34:30 INFO - Final point: 20130808T1200Z
-2013/10/06 23:34:30 INFO - Thread-4 start (Job Submission)
-2013/10/06 23:34:30 INFO - Thread-5 start (Request Handling)
 2013/10/06 23:34:30 INFO - Cold Start 20130808T0000Z
-2013/10/06 23:34:30 INFO - [foo.20130808T0000Z] -(setting:submitting)
 2013/10/06 23:34:30 INFO - [foo.20130808T0000Z] -triggered off []
-2013/10/06 23:34:31 INFO - [foo.20130808T0000Z] -(current:submitting)> foo.20130808T0000Z submitting now
-2013/10/06 23:34:32 INFO - [foo.20130808T0000Z] -(current:submitting)> foo.20130808T0000Z submission succeeded
-2013/10/06 23:34:32 INFO - [foo.20130808T0000Z] -(setting:submitted)
-2013/10/06 23:34:32 INFO - [foo.20130808T0000Z] -(current:submitted)> foo.20130808T0000Z submit_method_id=4150
-2013/10/06 23:34:32 INFO - [foo.20130808T0000Z] -(current:submitted)> foo.20130808T0000Z started at 2013-10-06T23:34:31
-2013/10/06 23:34:32 INFO - [foo.20130808T0000Z] -(setting:running)
-2013/10/06 23:34:32 INFO - [foo.20130808T0000Z] -(current:running)> foo.20130808T0000Z succeeded at 2013-10-06T23:34:31
-2013/10/06 23:34:32 INFO - [foo.20130808T0000Z] -(setting:succeeded)
-2013/10/06 23:34:32 INFO - [bar.20130808T0000Z] -(setting:submitting)
 2013/10/06 23:34:32 INFO - [bar.20130808T0000Z] -triggered off ['foo.20130808T0000Z']
-2013/10/06 23:34:33 INFO - [foo.20130808T1200Z] -(setting:submitting)
 2013/10/06 23:34:33 INFO - [foo.20130808T1200Z] -triggered off []
-2013/10/06 23:34:34 INFO - [bar.20130808T0000Z] -(current:submitting)> bar.20130808T0000Z submitting now
-2013/10/06 23:34:34 INFO - [bar.20130808T0000Z] -(current:submitting)> bar.20130808T0000Z submission succeeded
-2013/10/06 23:34:34 INFO - [bar.20130808T0000Z] -(setting:submitted)
-2013/10/06 23:34:34 INFO - [bar.20130808T0000Z] -(current:submitted)> bar.20130808T0000Z submit_method_id=4194
-2013/10/06 23:34:34 INFO - [bar.20130808T0000Z] -(current:submitted)> bar.20130808T0000Z started at 2013-10-06T23:34:33
-2013/10/06 23:34:34 INFO - [bar.20130808T0000Z] -(setting:running)
-2013/10/06 23:34:34 INFO - [bar.20130808T0000Z] -(current:running)> bar.20130808T0000Z succeeded at 2013-10-06T23:34:33
-2013/10/06 23:34:34 INFO - [bar.20130808T0000Z] -(setting:succeeded)
-2013/10/06 23:34:34 INFO - [foo.20130808T1200Z] -(current:submitting)> foo.20130808T1200Z submitting now
-2013/10/06 23:34:34 INFO - [foo.20130808T1200Z] -(current:submitting)> foo.20130808T1200Z started at 2013-10-06T23:34:33
-2013/10/06 23:34:34 INFO - [foo.20130808T1200Z] -(setting:running)
-2013/10/06 23:34:34 INFO - [foo.20130808T1200Z] -(current:running)> foo.20130808T1200Z succeeded at 2013-10-06T23:34:33
-2013/10/06 23:34:34 INFO - [foo.20130808T1200Z] -(setting:succeeded)
-2013/10/06 23:34:34 WARNING - [foo.20130808T1200Z] -Assuming non-reported outputs were completed:
-foo.20130808T1200Z submitted
-2013/10/06 23:34:34 INFO - [foo.20130809T0000Z] -HOLDING (beyond suite stop cycle) 20130808T1200Z
-2013/10/06 23:34:34 INFO - [foo.20130809T0000Z] -(setting:held)
-2013/10/06 23:34:35 INFO - [foo.20130808T1200Z] -(current:succeeded)> foo.20130808T1200Z submission succeeded
-2013/10/06 23:34:35 INFO - [foo.20130808T1200Z] -(current:succeeded)> foo.20130808T1200Z submit_method_id=4219
-2013/10/06 23:34:35 INFO - [bar.20130808T1200Z] -(setting:submitting)
 2013/10/06 23:34:35 INFO - [bar.20130808T1200Z] -triggered off ['foo.20130808T1200Z']
-2013/10/06 23:34:37 INFO - [bar.20130808T1200Z] -(current:submitting)> bar.20130808T1200Z submitting now
-2013/10/06 23:34:37 INFO - [bar.20130808T1200Z] -(current:submitting)> bar.20130808T1200Z submission succeeded
-2013/10/06 23:34:37 INFO - [bar.20130808T1200Z] -(setting:submitted)
-2013/10/06 23:34:37 INFO - [bar.20130808T1200Z] -(current:submitted)> bar.20130808T1200Z submit_method_id=4282
-2013/10/06 23:34:37 INFO - [bar.20130808T1200Z] -(current:submitted)> bar.20130808T1200Z started at 2013-10-06T23:34:36
-2013/10/06 23:34:37 INFO - [bar.20130808T1200Z] -(setting:running)
-2013/10/06 23:34:37 INFO - [bar.20130808T1200Z] -(current:running)> bar.20130808T1200Z succeeded at 2013-10-06T23:34:36
-2013/10/06 23:34:37 INFO - [bar.20130808T1200Z] -(setting:succeeded)
-2013/10/06 23:34:37 INFO - [bar.20130809T0000Z] -HOLDING (beyond suite stop cycle) 20130808T1200Z
-2013/10/06 23:34:37 INFO - [bar.20130809T0000Z] -(setting:held)
-2013/10/06 23:34:38 INFO - All normal cycling tasks have spawned past the final cycle 20130808T1200Z
-2013/10/06 23:34:38 INFO - Suite shutting down at 2013-10-06 23:34:38.152780
diff --git a/tests/tutorial/cycling/reflogs/tut.three b/tests/tutorial/cycling/reflogs/tut.three
index 65ba5b8..b3b52c6 100644
--- a/tests/tutorial/cycling/reflogs/tut.three
+++ b/tests/tutorial/cycling/reflogs/tut.three
@@ -1,69 +1,9 @@
-2013/10/06 23:34:42 INFO - Thread-2 start (Event Handler Submission)
-2013/10/06 23:34:42 INFO - Thread-3 start (Poll and Kill Command Submission)
-2013/10/06 23:34:42 INFO - port:7766
-2013/10/06 23:34:42 INFO - Suite starting at 2013-10-06 23:34:42.315602
-2013/10/06 23:34:42 INFO - Log event clock: real time
 2013/10/06 23:34:42 INFO - Run mode: live
 2013/10/06 23:34:42 INFO - Initial point: 20130808T0000+13
 2013/10/06 23:34:42 INFO - Final point: 20130808T1200+13
-2013/10/06 23:34:42 INFO - Thread-4 start (Job Submission)
-2013/10/06 23:34:42 INFO - Thread-5 start (Request Handling)
 2013/10/06 23:34:42 INFO - Cold Start 20130808T0000+13
-2013/10/06 23:34:42 INFO - [prep.20130808T0000+13] -(setting:submitting)
 2013/10/06 23:34:42 INFO - [prep.20130808T0000+13] -triggered off []
-2013/10/06 23:34:43 INFO - [prep.20130808T0000+13] -(current:submitting)> prep.20130808T0000+13 submitting now
-2013/10/06 23:34:44 INFO - [prep.20130808T0000+13] -(current:submitting)> prep.20130808T0000+13 submission succeeded
-2013/10/06 23:34:44 INFO - [prep.20130808T0000+13] -(setting:submitted)
-2013/10/06 23:34:44 INFO - [prep.20130808T0000+13] -(current:submitted)> prep.20130808T0000+13 submit_method_id=4420
-2013/10/06 23:34:44 INFO - [prep.20130808T0000+13] -(current:submitted)> prep.20130808T0000+13 started at 2013-10-06T23:34:43
-2013/10/06 23:34:44 INFO - [prep.20130808T0000+13] -(setting:running)
-2013/10/06 23:34:44 INFO - [prep.20130808T0000+13] -(current:running)> prep.20130808T0000+13 succeeded at 2013-10-06T23:34:43
-2013/10/06 23:34:44 INFO - [prep.20130808T0000+13] -(setting:succeeded)
-2013/10/06 23:34:44 INFO - [foo.20130808T0000+13] -(setting:submitting)
 2013/10/06 23:34:44 INFO - [foo.20130808T0000+13] -triggered off ['prep.20130808T0000+13']
-2013/10/06 23:34:46 INFO - [foo.20130808T0000+13] -(current:submitting)> foo.20130808T0000+13 submitting now
-2013/10/06 23:34:46 INFO - [foo.20130808T0000+13] -(current:submitting)> foo.20130808T0000+13 submission succeeded
-2013/10/06 23:34:46 INFO - [foo.20130808T0000+13] -(setting:submitted)
-2013/10/06 23:34:46 INFO - [foo.20130808T0000+13] -(current:submitted)> foo.20130808T0000+13 submit_method_id=4464
-2013/10/06 23:34:46 INFO - [foo.20130808T0000+13] -(current:submitted)> foo.20130808T0000+13 started at 2013-10-06T23:34:45
-2013/10/06 23:34:46 INFO - [foo.20130808T0000+13] -(setting:running)
-2013/10/06 23:34:46 INFO - [foo.20130808T0000+13] -(current:running)> foo.20130808T0000+13 succeeded at 2013-10-06T23:34:45
-2013/10/06 23:34:46 INFO - [foo.20130808T0000+13] -(setting:succeeded)
-2013/10/06 23:34:46 INFO - [bar.20130808T0000+13] -(setting:submitting)
 2013/10/06 23:34:46 INFO - [bar.20130808T0000+13] -triggered off ['foo.20130808T0000+13']
-2013/10/06 23:34:47 INFO - [foo.20130808T1200+13] -(setting:submitting)
 2013/10/06 23:34:47 INFO - [foo.20130808T1200+13] -triggered off ['foo.20130808T0000+13']
-2013/10/06 23:34:48 INFO - [bar.20130808T0000+13] -(current:submitting)> bar.20130808T0000+13 submitting now
-2013/10/06 23:34:48 INFO - [bar.20130808T0000+13] -(current:submitting)> bar.20130808T0000+13 submission succeeded
-2013/10/06 23:34:48 INFO - [bar.20130808T0000+13] -(setting:submitted)
-2013/10/06 23:34:48 INFO - [bar.20130808T0000+13] -(current:submitted)> bar.20130808T0000+13 submit_method_id=4508
-2013/10/06 23:34:48 INFO - [bar.20130808T0000+13] -(current:submitted)> bar.20130808T0000+13 started at 2013-10-06T23:34:47
-2013/10/06 23:34:48 INFO - [bar.20130808T0000+13] -(setting:running)
-2013/10/06 23:34:48 INFO - [bar.20130808T0000+13] -(current:running)> bar.20130808T0000+13 succeeded at 2013-10-06T23:34:47
-2013/10/06 23:34:48 INFO - [bar.20130808T0000+13] -(setting:succeeded)
-2013/10/06 23:34:48 INFO - [foo.20130808T1200+13] -(current:submitting)> foo.20130808T1200+13 submitting now
-2013/10/06 23:34:48 INFO - [foo.20130808T1200+13] -(current:submitting)> foo.20130808T1200+13 started at 2013-10-06T23:34:47
-2013/10/06 23:34:48 INFO - [foo.20130808T1200+13] -(setting:running)
-2013/10/06 23:34:48 INFO - [foo.20130808T1200+13] -(current:running)> foo.20130808T1200+13 succeeded at 2013-10-06T23:34:47
-2013/10/06 23:34:48 INFO - [foo.20130808T1200+13] -(setting:succeeded)
-2013/10/06 23:34:48 WARNING - [foo.20130808T1200+13] -Assuming non-reported outputs were completed:
-foo.20130808T1200+13 submitted
-2013/10/06 23:34:48 INFO - [foo.20130809T0000+13] -HOLDING (beyond suite stop cycle) 20130808T1200+13
-2013/10/06 23:34:48 INFO - [foo.20130809T0000+13] -(setting:held)
-2013/10/06 23:34:49 INFO - [foo.20130808T1200+13] -(current:succeeded)> foo.20130808T1200+13 submission succeeded
-2013/10/06 23:34:49 INFO - [foo.20130808T1200+13] -(current:succeeded)> foo.20130808T1200+13 submit_method_id=4533
-2013/10/06 23:34:49 INFO - [bar.20130808T1200+13] -(setting:submitting)
 2013/10/06 23:34:49 INFO - [bar.20130808T1200+13] -triggered off ['foo.20130808T1200+13']
-2013/10/06 23:34:51 INFO - [bar.20130808T1200+13] -(current:submitting)> bar.20130808T1200+13 submitting now
-2013/10/06 23:34:51 INFO - [bar.20130808T1200+13] -(current:submitting)> bar.20130808T1200+13 submission succeeded
-2013/10/06 23:34:51 INFO - [bar.20130808T1200+13] -(setting:submitted)
-2013/10/06 23:34:51 INFO - [bar.20130808T1200+13] -(current:submitted)> bar.20130808T1200+13 submit_method_id=4596
-2013/10/06 23:34:51 INFO - [bar.20130808T1200+13] -(current:submitted)> bar.20130808T1200+13 started at 2013-10-06T23:34:50
-2013/10/06 23:34:51 INFO - [bar.20130808T1200+13] -(setting:running)
-2013/10/06 23:34:51 INFO - [bar.20130808T1200+13] -(current:running)> bar.20130808T1200+13 succeeded at 2013-10-06T23:34:51
-2013/10/06 23:34:51 INFO - [bar.20130808T1200+13] -(setting:succeeded)
-2013/10/06 23:34:51 INFO - [bar.20130809T0000+13] -HOLDING (beyond suite stop cycle) 20130808T1200+13
-2013/10/06 23:34:51 INFO - [bar.20130809T0000+13] -(setting:held)
-2013/10/06 23:34:52 INFO - All normal cycling tasks have spawned past the final cycle 20130808T1200+13
-2013/10/06 23:34:52 INFO - All non-cycling tasks have succeeded
-2013/10/06 23:34:52 INFO - Suite shutting down at 2013-10-06 23:34:52.378623
diff --git a/tests/tutorial/cycling/reflogs/tut.two b/tests/tutorial/cycling/reflogs/tut.two
index ba718e7..0b676b4 100644
--- a/tests/tutorial/cycling/reflogs/tut.two
+++ b/tests/tutorial/cycling/reflogs/tut.two
@@ -1,58 +1,8 @@
-2013/10/06 23:34:55 INFO - Thread-2 start (Event Handler Submission)
-2013/10/06 23:34:55 INFO - Thread-3 start (Poll and Kill Command Submission)
-2013/10/06 23:34:55 INFO - port:7766
-2013/10/06 23:34:55 INFO - Suite starting at 2013-10-06 23:34:55.660582
-2013/10/06 23:34:55 INFO - Log event clock: real time
 2013/10/06 23:34:55 INFO - Run mode: live
 2013/10/06 23:34:55 INFO - Initial point: 20130808T0000Z
 2013/10/06 23:34:55 INFO - Final point: 20130808T1200Z
-2013/10/06 23:34:55 INFO - Thread-4 start (Job Submission)
-2013/10/06 23:34:55 INFO - Thread-5 start (Request Handling)
 2013/10/06 23:34:55 INFO - Cold Start 20130808T0000Z
-2013/10/06 23:34:55 INFO - [foo.20130808T0000Z] -(setting:submitting)
 2013/10/06 23:34:55 INFO - [foo.20130808T0000Z] -triggered off []
-2013/10/06 23:34:56 INFO - [foo.20130808T0000Z] -(current:submitting)> foo.20130808T0000Z submitting now
-2013/10/06 23:34:57 INFO - [foo.20130808T0000Z] -(current:submitting)> foo.20130808T0000Z submission succeeded
-2013/10/06 23:34:57 INFO - [foo.20130808T0000Z] -(setting:submitted)
-2013/10/06 23:34:57 INFO - [foo.20130808T0000Z] -(current:submitted)> foo.20130808T0000Z submit_method_id=4734
-2013/10/06 23:34:57 INFO - [foo.20130808T0000Z] -(current:submitted)> foo.20130808T0000Z started at 2013-10-06T23:34:56
-2013/10/06 23:34:57 INFO - [foo.20130808T0000Z] -(setting:running)
-2013/10/06 23:34:57 INFO - [foo.20130808T0000Z] -(current:running)> foo.20130808T0000Z succeeded at 2013-10-06T23:34:57
-2013/10/06 23:34:57 INFO - [foo.20130808T0000Z] -(setting:succeeded)
-2013/10/06 23:34:57 INFO - [bar.20130808T0000Z] -(setting:submitting)
 2013/10/06 23:34:57 INFO - [bar.20130808T0000Z] -triggered off ['foo.20130808T0000Z']
-2013/10/06 23:34:58 INFO - [foo.20130808T1200Z] -(setting:submitting)
 2013/10/06 23:34:58 INFO - [foo.20130808T1200Z] -triggered off ['foo.20130808T0000Z']
-2013/10/06 23:34:59 INFO - [bar.20130808T0000Z] -(current:submitting)> bar.20130808T0000Z submitting now
-2013/10/06 23:34:59 INFO - [bar.20130808T0000Z] -(current:submitting)> bar.20130808T0000Z submission succeeded
-2013/10/06 23:34:59 INFO - [bar.20130808T0000Z] -(setting:submitted)
-2013/10/06 23:34:59 INFO - [bar.20130808T0000Z] -(current:submitted)> bar.20130808T0000Z submit_method_id=4804
-2013/10/06 23:34:59 INFO - [bar.20130808T0000Z] -(current:submitted)> bar.20130808T0000Z started at 2013-10-06T23:34:58
-2013/10/06 23:34:59 INFO - [bar.20130808T0000Z] -(setting:running)
-2013/10/06 23:34:59 INFO - [bar.20130808T0000Z] -(current:running)> bar.20130808T0000Z succeeded at 2013-10-06T23:34:59
-2013/10/06 23:34:59 INFO - [bar.20130808T0000Z] -(setting:succeeded)
-2013/10/06 23:34:59 INFO - [foo.20130808T1200Z] -(current:submitting)> foo.20130808T1200Z submitting now
-2013/10/06 23:34:59 INFO - [foo.20130808T1200Z] -(current:submitting)> foo.20130808T1200Z started at 2013-10-06T23:34:59
-2013/10/06 23:34:59 INFO - [foo.20130808T1200Z] -(setting:running)
-2013/10/06 23:34:59 INFO - [foo.20130808T1200Z] -(current:running)> foo.20130808T1200Z succeeded at 2013-10-06T23:34:59
-2013/10/06 23:34:59 INFO - [foo.20130808T1200Z] -(setting:succeeded)
-2013/10/06 23:34:59 WARNING - [foo.20130808T1200Z] -Assuming non-reported outputs were completed:
-foo.20130808T1200Z submitted
-2013/10/06 23:34:59 INFO - [foo.20130809T0000Z] -HOLDING (beyond suite stop cycle) 20130808T1200Z
-2013/10/06 23:34:59 INFO - [foo.20130809T0000Z] -(setting:held)
-2013/10/06 23:35:00 INFO - [foo.20130808T1200Z] -(current:succeeded)> foo.20130808T1200Z submission succeeded
-2013/10/06 23:35:00 INFO - [foo.20130808T1200Z] -(current:succeeded)> foo.20130808T1200Z submit_method_id=4827
-2013/10/06 23:35:00 INFO - [bar.20130808T1200Z] -(setting:submitting)
 2013/10/06 23:35:00 INFO - [bar.20130808T1200Z] -triggered off ['foo.20130808T1200Z']
-2013/10/06 23:35:02 INFO - [bar.20130808T1200Z] -(current:submitting)> bar.20130808T1200Z submitting now
-2013/10/06 23:35:02 INFO - [bar.20130808T1200Z] -(current:submitting)> bar.20130808T1200Z submission succeeded
-2013/10/06 23:35:02 INFO - [bar.20130808T1200Z] -(setting:submitted)
-2013/10/06 23:35:02 INFO - [bar.20130808T1200Z] -(current:submitted)> bar.20130808T1200Z submit_method_id=4892
-2013/10/06 23:35:02 INFO - [bar.20130808T1200Z] -(current:submitted)> bar.20130808T1200Z started at 2013-10-06T23:35:02
-2013/10/06 23:35:02 INFO - [bar.20130808T1200Z] -(setting:running)
-2013/10/06 23:35:02 INFO - [bar.20130808T1200Z] -(current:running)> bar.20130808T1200Z succeeded at 2013-10-06T23:35:02
-2013/10/06 23:35:02 INFO - [bar.20130808T1200Z] -(setting:succeeded)
-2013/10/06 23:35:02 INFO - [bar.20130809T0000Z] -HOLDING (beyond suite stop cycle) 20130808T1200Z
-2013/10/06 23:35:02 INFO - [bar.20130809T0000Z] -(setting:held)
-2013/10/06 23:35:03 INFO - All normal cycling tasks have spawned past the final cycle 20130808T1200Z
-2013/10/06 23:35:03 INFO - Suite shutting down at 2013-10-06 23:35:03.713192
diff --git a/tests/tutorial/oneoff/reflogs/tut.basic b/tests/tutorial/oneoff/reflogs/tut.basic
index bf70eb1..1a5c244 100644
--- a/tests/tutorial/oneoff/reflogs/tut.basic
+++ b/tests/tutorial/oneoff/reflogs/tut.basic
@@ -1,22 +1,4 @@
-2013/10/06 23:32:07 INFO - Thread-2 start (Event Handler Submission)
-2013/10/06 23:32:07 INFO - Thread-3 start (Poll and Kill Command Submission)
-2013/10/06 23:32:07 INFO - port:7766
-2013/10/06 23:32:07 INFO - Suite starting at 2013-10-06 23:32:07.186120
-2013/10/06 23:32:07 INFO - Log event clock: real time
 2013/10/06 23:32:07 INFO - Run mode: live
 2013/10/06 23:32:07 INFO - Initial point: 1
 2013/10/06 23:32:07 INFO - Final point: 1
-2013/10/06 23:32:07 INFO - Thread-4 start (Job Submission)
-2013/10/06 23:32:07 INFO - Thread-5 start (Request Handling)
-2013/10/06 23:32:07 INFO - [hello.1] -(setting:submitting)
 2013/10/06 23:32:07 INFO - [hello.1] -triggered off []
-2013/10/06 23:32:08 INFO - [hello.1] -(current:submitting)> hello.1 submitting now
-2013/10/06 23:32:09 INFO - [hello.1] -(current:submitting)> hello.1 submission succeeded
-2013/10/06 23:32:09 INFO - [hello.1] -(setting:submitted)
-2013/10/06 23:32:09 INFO - [hello.1] -(current:submitted)> hello.1 submit_method_id=942
-2013/10/06 23:32:09 INFO - [hello.1] -(current:submitted)> hello.1 started at 2013-10-06T23:32:08
-2013/10/06 23:32:09 INFO - [hello.1] -(setting:running)
-2013/10/06 23:32:09 INFO - [hello.1] -(current:running)> hello.1 succeeded at 2013-10-06T23:32:08
-2013/10/06 23:32:09 INFO - [hello.1] -(setting:succeeded)
-2013/10/06 23:32:09 INFO - All non-cycling tasks have succeeded
-2013/10/06 23:32:09 INFO - Suite shutting down at 2013-10-06 23:32:09.197190
diff --git a/tests/tutorial/oneoff/reflogs/tut.external b/tests/tutorial/oneoff/reflogs/tut.external
index 12d4721..82853c7 100644
--- a/tests/tutorial/oneoff/reflogs/tut.external
+++ b/tests/tutorial/oneoff/reflogs/tut.external
@@ -1,22 +1,4 @@
-2013/10/06 23:32:13 INFO - Thread-2 start (Event Handler Submission)
-2013/10/06 23:32:13 INFO - Thread-3 start (Poll and Kill Command Submission)
-2013/10/06 23:32:13 INFO - port:7766
-2013/10/06 23:32:13 INFO - Suite starting at 2013-10-06 23:32:13.823575
-2013/10/06 23:32:13 INFO - Log event clock: real time
 2013/10/06 23:32:13 INFO - Run mode: live
 2013/10/06 23:32:13 INFO - Initial point: 1
 2013/10/06 23:32:13 INFO - Final point: 1
-2013/10/06 23:32:13 INFO - Thread-4 start (Job Submission)
-2013/10/06 23:32:13 INFO - Thread-5 start (Request Handling)
-2013/10/06 23:32:13 INFO - [hello.1] -(setting:submitting)
 2013/10/06 23:32:13 INFO - [hello.1] -triggered off []
-2013/10/06 23:32:14 INFO - [hello.1] -(current:submitting)> hello.1 submitting now
-2013/10/06 23:32:15 INFO - [hello.1] -(current:submitting)> hello.1 submission succeeded
-2013/10/06 23:32:15 INFO - [hello.1] -(setting:submitted)
-2013/10/06 23:32:15 INFO - [hello.1] -(current:submitted)> hello.1 submit_method_id=1080
-2013/10/06 23:32:15 INFO - [hello.1] -(current:submitted)> hello.1 started at 2013-10-06T23:32:15
-2013/10/06 23:32:15 INFO - [hello.1] -(setting:running)
-2013/10/06 23:32:15 INFO - [hello.1] -(current:running)> hello.1 succeeded at 2013-10-06T23:32:15
-2013/10/06 23:32:15 INFO - [hello.1] -(setting:succeeded)
-2013/10/06 23:32:15 INFO - All non-cycling tasks have succeeded
-2013/10/06 23:32:15 INFO - Suite shutting down at 2013-10-06 23:32:15.834065
diff --git a/tests/tutorial/oneoff/reflogs/tut.ftrigger1 b/tests/tutorial/oneoff/reflogs/tut.ftrigger1
index 2744591..b5d8d24 100644
--- a/tests/tutorial/oneoff/reflogs/tut.ftrigger1
+++ b/tests/tutorial/oneoff/reflogs/tut.ftrigger1
@@ -1,41 +1,6 @@
-2013/10/06 23:32:20 INFO - Thread-2 start (Event Handler Submission)
-2013/10/06 23:32:20 INFO - Thread-3 start (Poll and Kill Command Submission)
-2013/10/06 23:32:20 INFO - port:7766
-2013/10/06 23:32:20 INFO - Suite starting at 2013-10-06 23:32:20.441809
-2013/10/06 23:32:20 INFO - Log event clock: real time
 2013/10/06 23:32:20 INFO - Run mode: live
 2013/10/06 23:32:20 INFO - Initial point: 1
 2013/10/06 23:32:20 INFO - Final point: 1
-2013/10/06 23:32:20 INFO - Thread-4 start (Job Submission)
-2013/10/06 23:32:20 INFO - Thread-5 start (Request Handling)
-2013/10/06 23:32:20 INFO - [foo.1] -(setting:submitting)
 2013/10/06 23:32:20 INFO - [foo.1] -triggered off []
-2013/10/06 23:32:21 INFO - [foo.1] -(current:submitting)> foo.1 submitting now
-2013/10/06 23:32:22 INFO - [foo.1] -(current:submitting)> foo.1 submission succeeded
-2013/10/06 23:32:22 INFO - [foo.1] -(setting:submitted)
-2013/10/06 23:32:22 INFO - [foo.1] -(current:submitted)> foo.1 submit_method_id=1219
-2013/10/06 23:32:22 INFO - [foo.1] -(current:submitted)> foo.1 started at 2013-10-06T23:32:21
-2013/10/06 23:32:22 INFO - [foo.1] -(setting:running)
-2013/10/06 23:32:22 INFO - [foo.1] -(current:running)> foo.1 succeeded at 2013-10-06T23:32:21
-2013/10/06 23:32:22 INFO - [foo.1] -(setting:succeeded)
-2013/10/06 23:32:22 INFO - [greeter_2.1] -(setting:submitting)
-2013/10/06 23:32:22 INFO - [greeter_1.1] -(setting:submitting)
 2013/10/06 23:32:22 INFO - [greeter_2.1] -triggered off ['foo.1']
 2013/10/06 23:32:22 INFO - [greeter_1.1] -triggered off ['foo.1']
-2013/10/06 23:32:24 INFO - [greeter_2.1] -(current:submitting)> greeter_2.1 submitting now
-2013/10/06 23:32:24 INFO - [greeter_2.1] -(current:submitting)> greeter_2.1 submission succeeded
-2013/10/06 23:32:24 INFO - [greeter_2.1] -(setting:submitted)
-2013/10/06 23:32:24 INFO - [greeter_2.1] -(current:submitted)> greeter_2.1 submit_method_id=1267
-2013/10/06 23:32:24 INFO - [greeter_2.1] -(current:submitted)> greeter_2.1 started at 2013-10-06T23:32:23
-2013/10/06 23:32:24 INFO - [greeter_2.1] -(setting:running)
-2013/10/06 23:32:24 INFO - [greeter_2.1] -(current:running)> greeter_2.1 succeeded at 2013-10-06T23:32:23
-2013/10/06 23:32:24 INFO - [greeter_2.1] -(setting:succeeded)
-2013/10/06 23:32:24 INFO - [greeter_1.1] -(current:submitting)> greeter_1.1 submitting now
-2013/10/06 23:32:24 INFO - [greeter_1.1] -(current:submitting)> greeter_1.1 started at 2013-10-06T23:32:23
-2013/10/06 23:32:24 INFO - [greeter_1.1] -(setting:running)
-2013/10/06 23:32:24 INFO - [greeter_1.1] -(current:running)> greeter_1.1 succeeded at 2013-10-06T23:32:23
-2013/10/06 23:32:24 INFO - [greeter_1.1] -(setting:succeeded)
-2013/10/06 23:32:24 WARNING - [greeter_1.1] -Assuming non-reported outputs were completed:
-greeter_1.1 submitted
-2013/10/06 23:32:24 INFO - All non-cycling tasks have succeeded
-2013/10/06 23:32:24 INFO - Suite shutting down at 2013-10-06 23:32:24.463753
diff --git a/tests/tutorial/oneoff/reflogs/tut.ftrigger2 b/tests/tutorial/oneoff/reflogs/tut.ftrigger2
index 0278b11..8f65173 100644
--- a/tests/tutorial/oneoff/reflogs/tut.ftrigger2
+++ b/tests/tutorial/oneoff/reflogs/tut.ftrigger2
@@ -1,53 +1,7 @@
-2013/10/06 23:32:30 INFO - Thread-2 start (Event Handler Submission)
-2013/10/06 23:32:30 INFO - Thread-3 start (Poll and Kill Command Submission)
-2013/10/06 23:32:30 INFO - port:7766
-2013/10/06 23:32:30 INFO - Suite starting at 2013-10-06 23:32:30.301166
-2013/10/06 23:32:30 INFO - Log event clock: real time
 2013/10/06 23:32:30 INFO - Run mode: live
 2013/10/06 23:32:30 INFO - Initial point: 1
 2013/10/06 23:32:30 INFO - Final point: 1
-2013/10/06 23:32:30 INFO - Thread-4 start (Job Submission)
-2013/10/06 23:32:30 INFO - Thread-5 start (Request Handling)
-2013/10/06 23:32:30 INFO - [foo.1] -(setting:submitting)
 2013/10/06 23:32:30 INFO - [foo.1] -triggered off []
-2013/10/06 23:32:31 INFO - [foo.1] -(current:submitting)> foo.1 submitting now
-2013/10/06 23:32:32 INFO - [foo.1] -(current:submitting)> foo.1 submission succeeded
-2013/10/06 23:32:32 INFO - [foo.1] -(setting:submitted)
-2013/10/06 23:32:32 INFO - [foo.1] -(current:submitted)> foo.1 submit_method_id=1466
-2013/10/06 23:32:32 INFO - [foo.1] -(current:submitted)> foo.1 started at 2013-10-06T23:32:31
-2013/10/06 23:32:32 INFO - [foo.1] -(setting:running)
-2013/10/06 23:32:32 INFO - [foo.1] -(current:running)> foo.1 succeeded at 2013-10-06T23:32:31
-2013/10/06 23:32:32 INFO - [foo.1] -(setting:succeeded)
-2013/10/06 23:32:32 INFO - [greeter_2.1] -(setting:submitting)
-2013/10/06 23:32:32 INFO - [greeter_1.1] -(setting:submitting)
 2013/10/06 23:32:32 INFO - [greeter_2.1] -triggered off ['foo.1']
 2013/10/06 23:32:32 INFO - [greeter_1.1] -triggered off ['foo.1']
-2013/10/06 23:32:34 INFO - [greeter_2.1] -(current:submitting)> greeter_2.1 submitting now
-2013/10/06 23:32:34 INFO - [greeter_2.1] -(current:submitting)> greeter_2.1 submission succeeded
-2013/10/06 23:32:34 INFO - [greeter_2.1] -(setting:submitted)
-2013/10/06 23:32:34 INFO - [greeter_2.1] -(current:submitted)> greeter_2.1 submit_method_id=1525
-2013/10/06 23:32:34 INFO - [greeter_2.1] -(current:submitted)> greeter_2.1 started at 2013-10-06T23:32:33
-2013/10/06 23:32:34 INFO - [greeter_2.1] -(setting:running)
-2013/10/06 23:32:34 INFO - [greeter_2.1] -(current:running)> greeter_2.1 succeeded at 2013-10-06T23:32:33
-2013/10/06 23:32:34 INFO - [greeter_2.1] -(setting:succeeded)
-2013/10/06 23:32:34 INFO - [greeter_1.1] -(current:submitting)> greeter_1.1 submitting now
-2013/10/06 23:32:34 INFO - [greeter_1.1] -(current:submitting)> greeter_1.1 started at 2013-10-06T23:32:33
-2013/10/06 23:32:34 INFO - [greeter_1.1] -(setting:running)
-2013/10/06 23:32:34 INFO - [greeter_1.1] -(current:running)> greeter_1.1 succeeded at 2013-10-06T23:32:33
-2013/10/06 23:32:34 INFO - [greeter_1.1] -(setting:succeeded)
-2013/10/06 23:32:34 WARNING - [greeter_1.1] -Assuming non-reported outputs were completed:
-greeter_1.1 submitted
-2013/10/06 23:32:34 INFO - [bar.1] -(setting:submitting)
 2013/10/06 23:32:34 INFO - [bar.1] -triggered off ['greeter_1.1', 'greeter_2.1']
-2013/10/06 23:32:35 INFO - [greeter_1.1] -(current:succeeded)> greeter_1.1 submission succeeded
-2013/10/06 23:32:35 INFO - [greeter_1.1] -(current:succeeded)> greeter_1.1 submit_method_id=1552
-2013/10/06 23:32:37 INFO - [bar.1] -(current:submitting)> bar.1 submitting now
-2013/10/06 23:32:37 INFO - [bar.1] -(current:submitting)> bar.1 submission succeeded
-2013/10/06 23:32:37 INFO - [bar.1] -(setting:submitted)
-2013/10/06 23:32:37 INFO - [bar.1] -(current:submitted)> bar.1 submit_method_id=1615
-2013/10/06 23:32:37 INFO - [bar.1] -(current:submitted)> bar.1 started at 2013-10-06T23:32:36
-2013/10/06 23:32:37 INFO - [bar.1] -(setting:running)
-2013/10/06 23:32:37 INFO - [bar.1] -(current:running)> bar.1 succeeded at 2013-10-06T23:32:36
-2013/10/06 23:32:37 INFO - [bar.1] -(setting:succeeded)
-2013/10/06 23:32:37 INFO - All non-cycling tasks have succeeded
-2013/10/06 23:32:37 INFO - Suite shutting down at 2013-10-06 23:32:37.333500
diff --git a/tests/tutorial/oneoff/reflogs/tut.goodbye b/tests/tutorial/oneoff/reflogs/tut.goodbye
index bd445b8..7de6b25 100644
--- a/tests/tutorial/oneoff/reflogs/tut.goodbye
+++ b/tests/tutorial/oneoff/reflogs/tut.goodbye
@@ -1,32 +1,5 @@
-2013/10/06 23:32:42 INFO - Thread-2 start (Event Handler Submission)
-2013/10/06 23:32:42 INFO - Thread-3 start (Poll and Kill Command Submission)
-2013/10/06 23:32:42 INFO - port:7766
-2013/10/06 23:32:42 INFO - Suite starting at 2013-10-06 23:32:42.379969
-2013/10/06 23:32:42 INFO - Log event clock: real time
 2013/10/06 23:32:42 INFO - Run mode: live
 2013/10/06 23:32:42 INFO - Initial point: 1
 2013/10/06 23:32:42 INFO - Final point: 1
-2013/10/06 23:32:42 INFO - Thread-4 start (Job Submission)
-2013/10/06 23:32:42 INFO - Thread-5 start (Request Handling)
-2013/10/06 23:32:42 INFO - [hello.1] -(setting:submitting)
 2013/10/06 23:32:42 INFO - [hello.1] -triggered off []
-2013/10/06 23:32:43 INFO - [hello.1] -(current:submitting)> hello.1 submitting now
-2013/10/06 23:32:44 INFO - [hello.1] -(current:submitting)> hello.1 submission succeeded
-2013/10/06 23:32:44 INFO - [hello.1] -(setting:submitted)
-2013/10/06 23:32:44 INFO - [hello.1] -(current:submitted)> hello.1 submit_method_id=1780
-2013/10/06 23:32:44 INFO - [hello.1] -(current:submitted)> hello.1 started at 2013-10-06T23:32:43
-2013/10/06 23:32:44 INFO - [hello.1] -(setting:running)
-2013/10/06 23:32:44 INFO - [hello.1] -(current:running)> hello.1 succeeded at 2013-10-06T23:32:43
-2013/10/06 23:32:44 INFO - [hello.1] -(setting:succeeded)
-2013/10/06 23:32:44 INFO - [goodbye.1] -(setting:submitting)
 2013/10/06 23:32:44 INFO - [goodbye.1] -triggered off ['hello.1']
-2013/10/06 23:32:46 INFO - [goodbye.1] -(current:submitting)> goodbye.1 submitting now
-2013/10/06 23:32:46 INFO - [goodbye.1] -(current:submitting)> goodbye.1 submission succeeded
-2013/10/06 23:32:46 INFO - [goodbye.1] -(setting:submitted)
-2013/10/06 23:32:46 INFO - [goodbye.1] -(current:submitted)> goodbye.1 submit_method_id=1825
-2013/10/06 23:32:46 INFO - [goodbye.1] -(current:submitted)> goodbye.1 started at 2013-10-06T23:32:45
-2013/10/06 23:32:46 INFO - [goodbye.1] -(setting:running)
-2013/10/06 23:32:46 INFO - [goodbye.1] -(current:running)> goodbye.1 succeeded at 2013-10-06T23:32:45
-2013/10/06 23:32:46 INFO - [goodbye.1] -(setting:succeeded)
-2013/10/06 23:32:46 INFO - All non-cycling tasks have succeeded
-2013/10/06 23:32:46 INFO - Suite shutting down at 2013-10-06 23:32:46.397353
diff --git a/tests/tutorial/oneoff/reflogs/tut.inherit b/tests/tutorial/oneoff/reflogs/tut.inherit
index 4201e4a..0a70fd7 100644
--- a/tests/tutorial/oneoff/reflogs/tut.inherit
+++ b/tests/tutorial/oneoff/reflogs/tut.inherit
@@ -1,32 +1,5 @@
-2013/10/06 23:32:51 INFO - Thread-2 start (Event Handler Submission)
-2013/10/06 23:32:51 INFO - Thread-3 start (Poll and Kill Command Submission)
-2013/10/06 23:32:51 INFO - port:7766
-2013/10/06 23:32:51 INFO - Suite starting at 2013-10-06 23:32:51.343294
-2013/10/06 23:32:51 INFO - Log event clock: real time
 2013/10/06 23:32:51 INFO - Run mode: live
 2013/10/06 23:32:51 INFO - Initial point: 1
 2013/10/06 23:32:51 INFO - Final point: 1
-2013/10/06 23:32:51 INFO - Thread-4 start (Job Submission)
-2013/10/06 23:32:51 INFO - Thread-5 start (Request Handling)
-2013/10/06 23:32:51 INFO - [hello.1] -(setting:submitting)
 2013/10/06 23:32:51 INFO - [hello.1] -triggered off []
-2013/10/06 23:32:52 INFO - [hello.1] -(current:submitting)> hello.1 submitting now
-2013/10/06 23:32:53 INFO - [hello.1] -(current:submitting)> hello.1 submission succeeded
-2013/10/06 23:32:53 INFO - [hello.1] -(setting:submitted)
-2013/10/06 23:32:53 INFO - [hello.1] -(current:submitted)> hello.1 submit_method_id=1974
-2013/10/06 23:32:53 INFO - [hello.1] -(current:submitted)> hello.1 started at 2013-10-06T23:32:52
-2013/10/06 23:32:53 INFO - [hello.1] -(setting:running)
-2013/10/06 23:32:53 INFO - [hello.1] -(current:running)> hello.1 succeeded at 2013-10-06T23:32:52
-2013/10/06 23:32:53 INFO - [hello.1] -(setting:succeeded)
-2013/10/06 23:32:53 INFO - [goodbye.1] -(setting:submitting)
 2013/10/06 23:32:53 INFO - [goodbye.1] -triggered off ['hello.1']
-2013/10/06 23:32:55 INFO - [goodbye.1] -(current:submitting)> goodbye.1 submitting now
-2013/10/06 23:32:55 INFO - [goodbye.1] -(current:submitting)> goodbye.1 submission succeeded
-2013/10/06 23:32:55 INFO - [goodbye.1] -(setting:submitted)
-2013/10/06 23:32:55 INFO - [goodbye.1] -(current:submitted)> goodbye.1 submit_method_id=2018
-2013/10/06 23:32:55 INFO - [goodbye.1] -(current:submitted)> goodbye.1 started at 2013-10-06T23:32:54
-2013/10/06 23:32:55 INFO - [goodbye.1] -(setting:running)
-2013/10/06 23:32:55 INFO - [goodbye.1] -(current:running)> goodbye.1 succeeded at 2013-10-06T23:32:54
-2013/10/06 23:32:55 INFO - [goodbye.1] -(setting:succeeded)
-2013/10/06 23:32:55 INFO - All non-cycling tasks have succeeded
-2013/10/06 23:32:55 INFO - Suite shutting down at 2013-10-06 23:32:55.364281
diff --git a/tests/tutorial/oneoff/reflogs/tut.jinja2 b/tests/tutorial/oneoff/reflogs/tut.jinja2
index cafe8be..f2a943b 100644
--- a/tests/tutorial/oneoff/reflogs/tut.jinja2
+++ b/tests/tutorial/oneoff/reflogs/tut.jinja2
@@ -1,51 +1,7 @@
-2013/10/06 23:32:59 INFO - Thread-2 start (Event Handler Submission)
-2013/10/06 23:32:59 INFO - Thread-3 start (Poll and Kill Command Submission)
-2013/10/06 23:32:59 INFO - port:7766
-2013/10/06 23:32:59 INFO - Suite starting at 2013-10-06 23:32:59.126325
-2013/10/06 23:32:59 INFO - Log event clock: real time
 2013/10/06 23:32:59 INFO - Run mode: live
 2013/10/06 23:32:59 INFO - Initial point: 1
 2013/10/06 23:32:59 INFO - Final point: 1
-2013/10/06 23:32:59 INFO - Thread-4 start (Job Submission)
-2013/10/06 23:32:59 INFO - Thread-5 start (Request Handling)
-2013/10/06 23:32:59 INFO - [hello.1] -(setting:submitting)
 2013/10/06 23:32:59 INFO - [hello.1] -triggered off []
-2013/10/06 23:33:00 INFO - [hello.1] -(current:submitting)> hello.1 submitting now
-2013/10/06 23:33:01 INFO - [hello.1] -(current:submitting)> hello.1 submission succeeded
-2013/10/06 23:33:01 INFO - [hello.1] -(setting:submitted)
-2013/10/06 23:33:01 INFO - [hello.1] -(current:submitted)> hello.1 submit_method_id=2177
-2013/10/06 23:33:01 INFO - [hello.1] -(current:submitted)> hello.1 started at 2013-10-06T23:33:00
-2013/10/06 23:33:01 INFO - [hello.1] -(setting:running)
-2013/10/06 23:33:01 INFO - [hello.1] -(current:running)> hello.1 succeeded at 2013-10-06T23:33:00
-2013/10/06 23:33:01 INFO - [hello.1] -(setting:succeeded)
-2013/10/06 23:33:01 INFO - [goodbye_2.1] -(setting:submitting)
-2013/10/06 23:33:01 INFO - [goodbye_1.1] -(setting:submitting)
-2013/10/06 23:33:01 INFO - [goodbye_0.1] -(setting:submitting)
 2013/10/06 23:33:01 INFO - [goodbye_2.1] -triggered off ['hello.1']
 2013/10/06 23:33:01 INFO - [goodbye_1.1] -triggered off ['hello.1']
 2013/10/06 23:33:01 INFO - [goodbye_0.1] -triggered off ['hello.1']
-2013/10/06 23:33:03 INFO - [goodbye_2.1] -(current:submitting)> goodbye_2.1 submitting now
-2013/10/06 23:33:03 INFO - [goodbye_2.1] -(current:submitting)> goodbye_2.1 submission succeeded
-2013/10/06 23:33:03 INFO - [goodbye_2.1] -(setting:submitted)
-2013/10/06 23:33:03 INFO - [goodbye_2.1] -(current:submitted)> goodbye_2.1 submit_method_id=2221
-2013/10/06 23:33:03 INFO - [goodbye_2.1] -(current:submitted)> goodbye_2.1 started at 2013-10-06T23:33:02
-2013/10/06 23:33:03 INFO - [goodbye_2.1] -(setting:running)
-2013/10/06 23:33:03 INFO - [goodbye_2.1] -(current:running)> goodbye_2.1 succeeded at 2013-10-06T23:33:02
-2013/10/06 23:33:03 INFO - [goodbye_2.1] -(setting:succeeded)
-2013/10/06 23:33:03 INFO - [goodbye_1.1] -(current:submitting)> goodbye_1.1 submitting now
-2013/10/06 23:33:03 INFO - [goodbye_1.1] -(current:submitting)> goodbye_1.1 started at 2013-10-06T23:33:02
-2013/10/06 23:33:03 INFO - [goodbye_1.1] -(setting:running)
-2013/10/06 23:33:03 INFO - [goodbye_1.1] -(current:running)> goodbye_1.1 succeeded at 2013-10-06T23:33:02
-2013/10/06 23:33:03 INFO - [goodbye_1.1] -(setting:succeeded)
-2013/10/06 23:33:03 WARNING - [goodbye_1.1] -Assuming non-reported outputs were completed:
-goodbye_1.1 submitted
-2013/10/06 23:33:03 INFO - [goodbye_0.1] -(current:submitting)> goodbye_0.1 submitting now
-2013/10/06 23:33:03 INFO - [goodbye_0.1] -(current:submitting)> goodbye_0.1 submission succeeded
-2013/10/06 23:33:03 INFO - [goodbye_0.1] -(setting:submitted)
-2013/10/06 23:33:03 INFO - [goodbye_0.1] -(current:submitted)> goodbye_0.1 submit_method_id=2271
-2013/10/06 23:33:03 INFO - [goodbye_0.1] -(current:submitted)> goodbye_0.1 started at 2013-10-06T23:33:02
-2013/10/06 23:33:03 INFO - [goodbye_0.1] -(setting:running)
-2013/10/06 23:33:03 INFO - [goodbye_0.1] -(current:running)> goodbye_0.1 succeeded at 2013-10-06T23:33:02
-2013/10/06 23:33:03 INFO - [goodbye_0.1] -(setting:succeeded)
-2013/10/06 23:33:03 INFO - All non-cycling tasks have succeeded
-2013/10/06 23:33:03 INFO - Suite shutting down at 2013-10-06 23:33:03.159824
diff --git a/tests/tutorial/oneoff/reflogs/tut.minimal b/tests/tutorial/oneoff/reflogs/tut.minimal
index 2489d32..f11df69 100644
--- a/tests/tutorial/oneoff/reflogs/tut.minimal
+++ b/tests/tutorial/oneoff/reflogs/tut.minimal
@@ -1,22 +1,4 @@
-2013/10/06 23:33:16 INFO - Thread-2 start (Event Handler Submission)
-2013/10/06 23:33:16 INFO - Thread-3 start (Poll and Kill Command Submission)
-2013/10/06 23:33:16 INFO - port:7766
-2013/10/06 23:33:16 INFO - Suite starting at 2013-10-06 23:33:16.205120
-2013/10/06 23:33:16 INFO - Log event clock: real time
 2013/10/06 23:33:16 INFO - Run mode: live
 2013/10/06 23:33:16 INFO - Initial point: 1
 2013/10/06 23:33:16 INFO - Final point: 1
-2013/10/06 23:33:16 INFO - Thread-4 start (Job Submission)
-2013/10/06 23:33:16 INFO - Thread-5 start (Request Handling)
-2013/10/06 23:33:16 INFO - [foo.1] -(setting:submitting)
 2013/10/06 23:33:16 INFO - [foo.1] -triggered off []
-2013/10/06 23:33:17 INFO - [foo.1] -(current:submitting)> foo.1 submitting now
-2013/10/06 23:33:18 INFO - [foo.1] -(current:submitting)> foo.1 submission succeeded
-2013/10/06 23:33:18 INFO - [foo.1] -(setting:submitted)
-2013/10/06 23:33:18 INFO - [foo.1] -(current:submitted)> foo.1 submit_method_id=2663
-2013/10/06 23:33:18 INFO - [foo.1] -(current:submitted)> foo.1 started at 2013-10-06T23:33:17
-2013/10/06 23:33:18 INFO - [foo.1] -(setting:running)
-2013/10/06 23:33:18 INFO - [foo.1] -(current:running)> foo.1 succeeded at 2013-10-06T23:33:17
-2013/10/06 23:33:18 INFO - [foo.1] -(setting:succeeded)
-2013/10/06 23:33:18 INFO - All non-cycling tasks have succeeded
-2013/10/06 23:33:18 INFO - Suite shutting down at 2013-10-06 23:33:18.215216
diff --git a/tests/tutorial/oneoff/reflogs/tut.remote b/tests/tutorial/oneoff/reflogs/tut.remote
index cb7fb35..ccf536a 100644
--- a/tests/tutorial/oneoff/reflogs/tut.remote
+++ b/tests/tutorial/oneoff/reflogs/tut.remote
@@ -1,22 +1,4 @@
-2013/10/06 23:33:23 INFO - Thread-2 start (Event Handler Submission)
-2013/10/06 23:33:23 INFO - Thread-3 start (Poll and Kill Command Submission)
-2013/10/06 23:33:23 INFO - port:7766
-2013/10/06 23:33:23 INFO - Suite starting at 2013-10-06 23:33:23.096626
-2013/10/06 23:33:23 INFO - Log event clock: real time
 2013/10/06 23:33:23 INFO - Run mode: live
 2013/10/06 23:33:23 INFO - Initial point: 1
 2013/10/06 23:33:23 INFO - Final point: 1
-2013/10/06 23:33:23 INFO - Thread-4 start (Job Submission)
-2013/10/06 23:33:23 INFO - Thread-5 start (Request Handling)
-2013/10/06 23:33:23 INFO - [hello.1] -(setting:submitting)
 2013/10/06 23:33:23 INFO - [hello.1] -triggered off []
-2013/10/06 23:33:24 INFO - [hello.1] -(current:submitting)> hello.1 submitting now
-2013/10/06 23:33:25 INFO - [hello.1] -(current:submitting)> hello.1 submission succeeded
-2013/10/06 23:33:25 INFO - [hello.1] -(setting:submitted)
-2013/10/06 23:33:25 INFO - [hello.1] -(current:submitted)> hello.1 submit_method_id=2801
-2013/10/06 23:33:25 INFO - [hello.1] -(current:submitted)> hello.1 started at 2013-10-06T23:33:24
-2013/10/06 23:33:25 INFO - [hello.1] -(setting:running)
-2013/10/06 23:33:25 INFO - [hello.1] -(current:running)> hello.1 succeeded at 2013-10-06T23:33:24
-2013/10/06 23:33:25 INFO - [hello.1] -(setting:succeeded)
-2013/10/06 23:33:25 INFO - All non-cycling tasks have succeeded
-2013/10/06 23:33:25 INFO - Suite shutting down at 2013-10-06 23:33:25.108744
diff --git a/tests/tutorial/oneoff/reflogs/tut.retry b/tests/tutorial/oneoff/reflogs/tut.retry
index b4bf479..663c426 100644
--- a/tests/tutorial/oneoff/reflogs/tut.retry
+++ b/tests/tutorial/oneoff/reflogs/tut.retry
@@ -1,46 +1,6 @@
-2013/10/06 23:33:29 INFO - Thread-2 start (Event Handler Submission)
-2013/10/06 23:33:29 INFO - Thread-3 start (Poll and Kill Command Submission)
-2013/10/06 23:33:29 INFO - port:7766
-2013/10/06 23:33:29 INFO - Suite starting at 2013-10-06 23:33:29.982771
-2013/10/06 23:33:29 INFO - Log event clock: real time
 2013/10/06 23:33:29 INFO - Run mode: live
 2013/10/06 23:33:29 INFO - Initial point: 1
 2013/10/06 23:33:29 INFO - Final point: 1
-2013/10/06 23:33:29 INFO - Thread-4 start (Job Submission)
-2013/10/06 23:33:29 INFO - Thread-5 start (Request Handling)
-2013/10/06 23:33:29 INFO - [hello.1] -(setting:submitting)
 2013/10/06 23:33:29 INFO - [hello.1] -triggered off []
-2013/10/06 23:33:30 INFO - [hello.1] -(current:submitting)> hello.1 submitting now
-2013/10/06 23:33:31 INFO - [hello.1] -(current:submitting)> hello.1 submission succeeded
-2013/10/06 23:33:31 INFO - [hello.1] -(setting:submitted)
-2013/10/06 23:33:31 INFO - [hello.1] -(current:submitted)> hello.1 submit_method_id=2940
-2013/10/06 23:33:31 INFO - [hello.1] -(current:submitted)> hello.1 started at 2013-10-06T23:33:31
-2013/10/06 23:33:31 INFO - [hello.1] -(setting:running)
-2013/10/06 23:33:31 CRITICAL - [hello.1] -(current:running)> Task job script received signal EXIT at 2013-10-06T23:33:31
-2013/10/06 23:33:31 CRITICAL - [hello.1] -(current:running)> hello.1 failed at 2013-10-06T23:33:31
-2013/10/06 23:33:31 INFO - [hello.1] -job failed, retrying in 0.1 minutes
-2013/10/06 23:33:31 INFO - [hello.1] -(setting:retrying)
-2013/10/06 23:33:38 INFO - [hello.1] -(setting:submitting)
 2013/10/06 23:33:38 INFO - [hello.1] -triggered off []
-2013/10/06 23:33:39 INFO - [hello.1] -(current:submitting)> hello.1 submitting now
-2013/10/06 23:33:39 INFO - [hello.1] -(current:submitting)> hello.1 submission succeeded
-2013/10/06 23:33:39 INFO - [hello.1] -(setting:submitted)
-2013/10/06 23:33:39 INFO - [hello.1] -(current:submitted)> hello.1 submit_method_id=2983
-2013/10/06 23:33:39 INFO - [hello.1] -(current:submitted)> hello.1 started at 2013-10-06T23:33:38
-2013/10/06 23:33:39 INFO - [hello.1] -(setting:running)
-2013/10/06 23:33:39 CRITICAL - [hello.1] -(current:running)> Task job script received signal EXIT at 2013-10-06T23:33:38
-2013/10/06 23:33:39 CRITICAL - [hello.1] -(current:running)> hello.1 failed at 2013-10-06T23:33:38
-2013/10/06 23:33:39 INFO - [hello.1] -job failed, retrying in 0.1 minutes
-2013/10/06 23:33:39 INFO - [hello.1] -(setting:retrying)
-2013/10/06 23:33:45 INFO - [hello.1] -(setting:submitting)
 2013/10/06 23:33:45 INFO - [hello.1] -triggered off []
-2013/10/06 23:33:46 INFO - [hello.1] -(current:submitting)> hello.1 submitting now
-2013/10/06 23:33:46 INFO - [hello.1] -(current:submitting)> hello.1 submission succeeded
-2013/10/06 23:33:46 INFO - [hello.1] -(setting:submitted)
-2013/10/06 23:33:46 INFO - [hello.1] -(current:submitted)> hello.1 submit_method_id=3026
-2013/10/06 23:33:46 INFO - [hello.1] -(current:submitted)> hello.1 started at 2013-10-06T23:33:45
-2013/10/06 23:33:46 INFO - [hello.1] -(setting:running)
-2013/10/06 23:33:46 INFO - [hello.1] -(current:running)> hello.1 succeeded at 2013-10-06T23:33:45
-2013/10/06 23:33:46 INFO - [hello.1] -(setting:succeeded)
-2013/10/06 23:33:46 INFO - All non-cycling tasks have succeeded
-2013/10/06 23:33:46 INFO - Suite shutting down at 2013-10-06 23:33:46.026634
diff --git a/tests/tutorial/oneoff/reflogs/tut.suicide b/tests/tutorial/oneoff/reflogs/tut.suicide
index 8a4b08a..264daaa 100644
--- a/tests/tutorial/oneoff/reflogs/tut.suicide
+++ b/tests/tutorial/oneoff/reflogs/tut.suicide
@@ -1,34 +1,6 @@
-2014/01/08 10:04:17 INFO - Thread-2 start (Event Handlers)
-2014/01/08 10:04:17 INFO - Thread-3 start (Poll & Kill Commands)
-2014/01/08 10:04:17 INFO - port:7766
-2014/01/08 10:04:17 INFO - Suite starting at 2014-01-08 10:04:17.657578
-2014/01/08 10:04:17 INFO - Log event clock: real time
 2014/01/08 10:04:17 INFO - Run mode: live
 2014/01/08 10:04:17 INFO - Initial point: 1
 2014/01/08 10:04:17 INFO - Final point: 1
-2014/01/08 10:04:17 INFO - Thread-4 start (Job Submission)
-2014/01/08 10:04:17 INFO - Thread-5 start (Request Handling)
 2014/01/08 10:04:17 INFO - [hello.1] -triggered off []
-2014/01/08 10:04:18 INFO - [hello.1] -(current:ready)> hello.1 submitting now
-2014/01/08 10:04:19 INFO - [hello.1] -(current:ready)> hello.1 submission succeeded
-2014/01/08 10:04:19 INFO - [hello.1] -(current:submitted)> hello.1 submit_method_id=23632
-2014/01/08 10:04:19 INFO - [hello.1] -(current:submitted)> hello.1 started at 2014-01-08T10:04:19
-2014/01/08 10:04:29 INFO - [hello.1] -(current:running)> hello.1 succeeded at 2014-01-08T10:04:29
 2014/01/08 10:04:30 INFO - [goodbye.1] -triggered off ['hello.1']
-2014/01/08 10:04:31 INFO - [goodbye.1] -(current:ready)> goodbye.1 submitting now
-2014/01/08 10:04:31 INFO - [goodbye.1] -(current:ready)> goodbye.1 submission succeeded
-2014/01/08 10:04:31 INFO - [goodbye.1] -(current:submitted)> goodbye.1 submit_method_id=23674
-2014/01/08 10:04:31 INFO - [goodbye.1] -(current:submitted)> goodbye.1 started at 2014-01-08T10:04:31
-2014/01/08 10:04:42 CRITICAL - [goodbye.1] -(current:running)> Task job script received signal ERR at 2014-01-08T10:04:42
-2014/01/08 10:04:42 CRITICAL - [goodbye.1] -(current:running)> goodbye.1 failed at 2014-01-08T10:04:42
 2014/01/08 10:04:44 INFO - [really_goodbye.1] -triggered off ['goodbye.1']
-2014/01/08 10:04:45 INFO - [really_goodbye.1] -(current:ready)> really_goodbye.1 submitting now
-2014/01/08 10:04:45 INFO - [really_goodbye.1] -(current:ready)> really_goodbye.1 submission succeeded
-2014/01/08 10:04:45 INFO - [really_goodbye.1] -(current:submitted)> really_goodbye.1 submit_method_id=23722
-2014/01/08 10:04:45 INFO - [really_goodbye.1] -(current:submitted)> really_goodbye.1 started at 2014-01-08T10:04:44
-2014/01/08 10:04:52 INFO - [really_goodbye.1] -(current:running)> really_goodbye.1 succeeded at 2014-01-08T10:04:51
-2014/01/08 10:04:53 INFO - Stopping: 
-  + all non-cycling tasks have succeeded
-2014/01/08 10:04:53 INFO - Thread-4 exit (Job Submission)
-2014/01/08 10:04:53 INFO - Thread-2 exit (Event Handlers)
-2014/01/08 10:04:53 INFO - Thread-3 exit (Poll & Kill Commands)
diff --git a/tests/jobscript/10-bad-syntax.t b/tests/validate/04-builtin-suites.t
old mode 100755
new mode 100644
similarity index 50%
copy from tests/jobscript/10-bad-syntax.t
copy to tests/validate/04-builtin-suites.t
index b397dfb..95b380e
--- a/tests/jobscript/10-bad-syntax.t
+++ b/tests/validate/04-builtin-suites.t
@@ -15,33 +15,34 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test "cylc jobscript" when we have bad syntax in "script" value.
-. "$(dirname "${0}")/test_header"
+# Ensure that any changes to cylc haven't broken the profile-battery command
+. "$(dirname "$0")/test_header"
 #-------------------------------------------------------------------------------
-set_test_number 5
+# Generate a list of suites.
+SUITES=($(find "${CYLC_DIR}/"{examples,dev/suites} -name 'suite.rc'))
+ABS_PATH_LENGTH=${#CYLC_DIR}
 #-------------------------------------------------------------------------------
-init_suite "${TEST_NAME_BASE}" <<'__SUITE_RC__'
-[scheduling]
-    [[dependencies]]
-        graph = foo
-[runtime]
-    [[foo]]
-        script = fi
-__SUITE_RC__
-
-TEST_NAME="${TEST_NAME_BASE}"-simple
-run_fail "${TEST_NAME}" cylc jobscript "${SUITE_NAME}" 'foo.1'
-cmp_ok "${TEST_NAME}.stdout" <'/dev/null'
-contains_ok "${TEST_NAME}.stderr" <<__ERR__
-ERROR: no jobscript generated
-__ERR__
-purge_suite "${SUITE_NAME}"
+# Filter out certain warnings to prevent tests being failed by them.
+function filter_warnings() {
+    python -c "import re, sys
+msgs=[r'.*naked dummy tasks detected.*\n(\+\t.*\n)+',
+      r'.*clock-(trigger|expire) offsets are normally positive.*\n']
+file_name = sys.argv[1]
+with open(file_name, 'r') as in_file:
+    contents = in_file.read()
+    with open(file_name + '.processed', 'w+') as out_file:
+        for msg in msgs:
+            contents = re.sub(msg, '', contents)
+        out_file.write(contents)" "$1"
+}
 #-------------------------------------------------------------------------------
-install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
-TEST_NAME="${TEST_NAME_BASE}-advanced-validate"
-run_ok "${TEST_NAME}" cylc validate "${SUITE_NAME}"
-TEST_NAME="${TEST_NAME_BASE}-advanced-run"
-run_ok "${TEST_NAME}" cylc run "${SUITE_NAME}" --reference-test --debug
+set_test_number $((( ((${#SUITES[@]})) * 2 )))
 #-------------------------------------------------------------------------------
-purge_suite "${SUITE_NAME}"
-exit
+# Validate suites.
+for suite in ${SUITES[@]}; do
+    suite_name=$(sed 's/\//-/g' <<<"${suite:$ABS_PATH_LENGTH}")
+    TEST_NAME="${TEST_NAME_BASE}${suite_name}"
+    run_ok "${TEST_NAME}" cylc validate "${suite}" -v -v
+    filter_warnings "${TEST_NAME}.stderr"
+    cmp_ok "${TEST_NAME}.stderr.processed" /dev/null
+done
diff --git a/tests/validate/62-null-task-name.t b/tests/validate/62-null-task-name.t
index 7bad8c9..7fe6629 100755
--- a/tests/validate/62-null-task-name.t
+++ b/tests/validate/62-null-task-name.t
@@ -19,9 +19,10 @@
 
 . "$(dirname "$0")/test_header"
 
-set_test_number 8
+set_test_number 10
 
-for GRAPH in 't1 => & t2' 't1 => t2 &' '& t1 => t2' 't1 & => t2'; do
+for GRAPH in 't1 => & t2' 't1 => t2 &' '& t1 => t2' 't1 & => t2' 't1 => => t2'
+do
     cat >'suite.rc' <<__SUITE_RC__
 [scheduling]
     [[dependencies]]
diff --git a/tests/validate/62-null-task-name.t b/tests/validate/63-collapse-secondary-parent.t
similarity index 61%
copy from tests/validate/62-null-task-name.t
copy to tests/validate/63-collapse-secondary-parent.t
index 7bad8c9..af64d75 100755
--- a/tests/validate/62-null-task-name.t
+++ b/tests/validate/63-collapse-secondary-parent.t
@@ -15,20 +15,31 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-# Fail on null task name!
+# Fail attempt to collapse a non first-parent family in the graph.
+# GitHub #2229.
 
 . "$(dirname "$0")/test_header"
 
-set_test_number 8
+set_test_number 2
 
-for GRAPH in 't1 => & t2' 't1 => t2 &' '& t1 => t2' 't1 & => t2'; do
-    cat >'suite.rc' <<__SUITE_RC__
+cat >'suite.rc' <<__SUITE_RC__
 [scheduling]
     [[dependencies]]
-        graph = ${GRAPH}
+        graph = BAR
+[runtime]
+    [[root]]
+        script = sleep 1
+    [[FOO]]
+    [[BAR]]
+    [[ukv_um_recon_ls]]
+        inherit = FOO, BAR
+[visualization]
+    collapsed families = BAR  # Troublesome setting.
 __SUITE_RC__
-    run_fail "${TEST_NAME_BASE}" cylc validate 'suite.rc'
-    grep_ok 'ERROR, null task name in graph: ' "${TEST_NAME_BASE}.stderr"
-done
+
+run_fail "${TEST_NAME_BASE}" cylc validate 'suite.rc'
+
+ERR='ERROR \[visualization\]collapsed families: BAR is not a first parent'
+grep_ok "$ERR" "${TEST_NAME_BASE}.stderr"
 
 exit

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/cylc.git



More information about the debian-science-commits mailing list